diff options
Diffstat (limited to 'deps/v8')
212 files changed, 11235 insertions, 5942 deletions
diff --git a/deps/v8/AUTHORS b/deps/v8/AUTHORS index de8cabb0b1..4fd7aa5b76 100644 --- a/deps/v8/AUTHORS +++ b/deps/v8/AUTHORS @@ -19,3 +19,4 @@ Rafal Krypa <rafal@krypa.net> Rene Rebe <rene@exactcode.de> Ryan Dahl <coldredlemur@gmail.com> Patrick Gansterer <paroga@paroga.com> +John Jozwiak <jjozwiak@codeaurora.org> diff --git a/deps/v8/ChangeLog b/deps/v8/ChangeLog index 8ee83766a4..668cd9a920 100644 --- a/deps/v8/ChangeLog +++ b/deps/v8/ChangeLog @@ -1,3 +1,20 @@ +2009-11-18: Version 2.0.0 + + Added support for VFP on ARM. + + Added TryCatch::ReThrow method to the API. + + Reduced the size of snapshots and improved the snapshot load time. + + Improved heap profiler support. + + 64-bit version now supported on Windows. + + Fixed a number of debugger issues. + + Fixed bugs. + + 2009-10-29: Version 1.3.18 Reverted a change which caused crashes in RegExp replace. diff --git a/deps/v8/SConstruct b/deps/v8/SConstruct index af8119bfdb..c2a7508917 100755 --- a/deps/v8/SConstruct +++ b/deps/v8/SConstruct @@ -272,7 +272,7 @@ V8_EXTRA_FLAGS = { 'WARNINGFLAGS': ['/W3'] }, 'arch:x64': { - 'WARNINGFLAGS': ['/W2'] + 'WARNINGFLAGS': ['/W3'] }, 'arch:arm': { 'CPPDEFINES': ['V8_TARGET_ARCH_ARM'], diff --git a/deps/v8/include/v8-debug.h b/deps/v8/include/v8-debug.h index d93e5543b4..b27bacc108 100644 --- a/deps/v8/include/v8-debug.h +++ b/deps/v8/include/v8-debug.h @@ -218,7 +218,7 @@ class EXPORT Debug { /** * Register a callback function to be called when a debug message has been - * received and is ready to be precessed. For the debug messages to be + * received and is ready to be processed. For the debug messages to be * processed V8 needs to be entered, and in certain embedding scenarios this * callback can be used to make sure V8 is entered for the debug message to * be processed. 
Note that debug messages will only be processed if there is diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h index 5f3b68b227..78b46136fd 100644 --- a/deps/v8/include/v8.h +++ b/deps/v8/include/v8.h @@ -129,8 +129,9 @@ class Data; namespace internal { -class Object; class Arguments; +class Object; +class Top; } @@ -2473,6 +2474,15 @@ class V8EXPORT TryCatch { bool CanContinue() const; /** + * Throws the exception caught by this TryCatch in a way that avoids + * it being caught again by this same TryCatch. As with ThrowException + * it is illegal to execute any JavaScript operations after calling + * ReThrow; the caller must return immediately to where the exception + * is caught. + */ + Handle<Value> ReThrow(); + + /** * Returns the exception caught by this try/catch block. If no exception has * been caught an empty handle is returned. * @@ -2523,14 +2533,16 @@ class V8EXPORT TryCatch { */ void SetCaptureMessage(bool value); - public: - TryCatch* next_; + private: + void* next_; void* exception_; void* message_; - bool is_verbose_; - bool can_continue_; - bool capture_message_; - void* js_handler_; + bool is_verbose_ : 1; + bool can_continue_ : 1; + bool capture_message_ : 1; + bool rethrow_ : 1; + + friend class v8::internal::Top; }; diff --git a/deps/v8/src/SConscript b/deps/v8/src/SConscript index 85fd724964..cfa462f50f 100755 --- a/deps/v8/src/SConscript +++ b/deps/v8/src/SConscript @@ -264,7 +264,6 @@ def ConfigureObjectFiles(): else: snapshot_cc = Command('snapshot.cc', [], []) snapshot_obj = context.ConfigureObject(env, snapshot_cc, CPPPATH=['.']) - libraries_obj = context.ConfigureObject(env, libraries_empty_src, CPPPATH=['.']) else: snapshot_obj = empty_snapshot_obj library_objs = [non_snapshot_files, libraries_obj, snapshot_obj] diff --git a/deps/v8/src/accessors.cc b/deps/v8/src/accessors.cc index 82ae702fd9..734c36445d 100644 --- a/deps/v8/src/accessors.cc +++ b/deps/v8/src/accessors.cc @@ -315,7 +315,14 @@ Object* 
Accessors::ScriptGetLineEnds(Object* object, void*) { HandleScope scope; Handle<Script> script(Script::cast(JSValue::cast(object)->value())); InitScriptLineEnds(script); - return script->line_ends(); + if (script->line_ends_js_array()->IsUndefined()) { + Handle<FixedArray> line_ends_fixed_array( + FixedArray::cast(script->line_ends_fixed_array())); + Handle<FixedArray> copy = Factory::CopyFixedArray(line_ends_fixed_array); + Handle<JSArray> js_array = Factory::NewJSArrayWithElements(copy); + script->set_line_ends_js_array(*js_array); + } + return script->line_ends_js_array(); } diff --git a/deps/v8/src/allocation.cc b/deps/v8/src/allocation.cc index 41724b68cf..678f4fd7d2 100644 --- a/deps/v8/src/allocation.cc +++ b/deps/v8/src/allocation.cc @@ -80,7 +80,7 @@ void AllStatic::operator delete(void* p) { char* StrDup(const char* str) { - int length = strlen(str); + int length = StrLength(str); char* result = NewArray<char>(length + 1); memcpy(result, str, length * kCharSize); result[length] = '\0'; @@ -88,8 +88,8 @@ char* StrDup(const char* str) { } -char* StrNDup(const char* str, size_t n) { - size_t length = strlen(str); +char* StrNDup(const char* str, int n) { + int length = StrLength(str); if (n < length) length = n; char* result = NewArray<char>(length + 1); memcpy(result, str, length * kCharSize); diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc index 2d11c49f16..220788ba52 100644 --- a/deps/v8/src/api.cc +++ b/deps/v8/src/api.cc @@ -37,6 +37,7 @@ #include "platform.h" #include "serialize.h" #include "snapshot.h" +#include "utils.h" #include "v8threads.h" #include "version.h" @@ -1191,19 +1192,26 @@ void Script::SetData(v8::Handle<Value> data) { v8::TryCatch::TryCatch() - : next_(i::Top::try_catch_handler()), + : next_(i::Top::try_catch_handler_address()), exception_(i::Heap::the_hole_value()), message_(i::Smi::FromInt(0)), is_verbose_(false), can_continue_(true), capture_message_(true), - js_handler_(NULL) { + rethrow_(false) { 
i::Top::RegisterTryCatchHandler(this); } v8::TryCatch::~TryCatch() { - i::Top::UnregisterTryCatchHandler(this); + if (rethrow_) { + v8::HandleScope scope; + v8::Local<v8::Value> exc = v8::Local<v8::Value>::New(Exception()); + i::Top::UnregisterTryCatchHandler(this); + v8::ThrowException(exc); + } else { + i::Top::UnregisterTryCatchHandler(this); + } } @@ -1217,6 +1225,13 @@ bool v8::TryCatch::CanContinue() const { } +v8::Handle<v8::Value> v8::TryCatch::ReThrow() { + if (!HasCaught()) return v8::Local<v8::Value>(); + rethrow_ = true; + return v8::Undefined(); +} + + v8::Local<Value> v8::TryCatch::Exception() const { if (HasCaught()) { // Check for out of memory exception. @@ -2032,11 +2047,11 @@ Local<String> v8::Object::ObjectProtoToString() { Local<String> str = Utils::ToLocal(class_name); const char* postfix = "]"; - size_t prefix_len = strlen(prefix); - size_t str_len = str->Length(); - size_t postfix_len = strlen(postfix); + int prefix_len = i::StrLength(prefix); + int str_len = str->Length(); + int postfix_len = i::StrLength(postfix); - size_t buf_len = prefix_len + str_len + postfix_len; + int buf_len = prefix_len + str_len + postfix_len; char* buf = i::NewArray<char>(buf_len); // Write prefix. 
@@ -2621,11 +2636,8 @@ bool v8::V8::Initialize() { if (i::V8::IsRunning()) return true; ENTER_V8; HandleScope scope; - if (i::Snapshot::Initialize()) { - return true; - } else { - return i::V8::Initialize(NULL); - } + if (i::Snapshot::Initialize()) return true; + return i::V8::Initialize(NULL); } @@ -2950,7 +2962,7 @@ Local<String> v8::String::New(const char* data, int length) { LOG_API("String::New(char)"); if (length == 0) return Empty(); ENTER_V8; - if (length == -1) length = strlen(data); + if (length == -1) length = i::StrLength(data); i::Handle<i::String> result = i::Factory::NewStringFromUtf8(i::Vector<const char>(data, length)); return Utils::ToLocal(result); @@ -2973,7 +2985,7 @@ Local<String> v8::String::NewUndetectable(const char* data, int length) { EnsureInitialized("v8::String::NewUndetectable()"); LOG_API("String::NewUndetectable(char)"); ENTER_V8; - if (length == -1) length = strlen(data); + if (length == -1) length = i::StrLength(data); i::Handle<i::String> result = i::Factory::NewStringFromUtf8(i::Vector<const char>(data, length)); result->MarkAsUndetectable(); @@ -3041,7 +3053,8 @@ static void DisposeExternalString(v8::Persistent<v8::Value> obj, v8::String::ExternalStringResource* resource = reinterpret_cast<v8::String::ExternalStringResource*>(parameter); if (resource != NULL) { - const size_t total_size = resource->length() * sizeof(*resource->data()); + const int total_size = + static_cast<int>(resource->length() * sizeof(*resource->data())); i::Counters::total_external_string_memory.Decrement(total_size); // The object will continue to live in the JavaScript heap until the @@ -3071,7 +3084,8 @@ static void DisposeExternalAsciiString(v8::Persistent<v8::Value> obj, v8::String::ExternalAsciiStringResource* resource = reinterpret_cast<v8::String::ExternalAsciiStringResource*>(parameter); if (resource != NULL) { - const size_t total_size = resource->length() * sizeof(*resource->data()); + const int total_size = + 
static_cast<int>(resource->length() * sizeof(*resource->data())); i::Counters::total_external_string_memory.Decrement(total_size); // The object will continue to live in the JavaScript heap until the @@ -3093,7 +3107,8 @@ Local<String> v8::String::NewExternal( EnsureInitialized("v8::String::NewExternal()"); LOG_API("String::NewExternal"); ENTER_V8; - const size_t total_size = resource->length() * sizeof(*resource->data()); + const int total_size = + static_cast<int>(resource->length() * sizeof(*resource->data())); i::Counters::total_external_string_memory.Increment(total_size); i::Handle<i::String> result = NewExternalStringHandle(resource); i::Handle<i::Object> handle = i::GlobalHandles::Create(*result); @@ -3128,7 +3143,8 @@ Local<String> v8::String::NewExternal( EnsureInitialized("v8::String::NewExternal()"); LOG_API("String::NewExternal"); ENTER_V8; - const size_t total_size = resource->length() * sizeof(*resource->data()); + const int total_size = + static_cast<int>(resource->length() * sizeof(*resource->data())); i::Counters::total_external_string_memory.Increment(total_size); i::Handle<i::String> result = NewExternalAsciiStringHandle(resource); i::Handle<i::Object> handle = i::GlobalHandles::Create(*result); @@ -3250,7 +3266,7 @@ Local<String> v8::String::NewSymbol(const char* data, int length) { EnsureInitialized("v8::String::NewSymbol()"); LOG_API("String::NewSymbol(char)"); ENTER_V8; - if (length == -1) length = strlen(data); + if (length == -1) length = i::StrLength(data); i::Handle<i::String> result = i::Factory::LookupSymbol(i::Vector<const char>(data, length)); return Utils::ToLocal(result); diff --git a/deps/v8/src/api.h b/deps/v8/src/api.h index 1221f352cc..a28e1f0774 100644 --- a/deps/v8/src/api.h +++ b/deps/v8/src/api.h @@ -125,6 +125,15 @@ static inline v8::internal::Handle<v8::internal::Object> FromCData(T obj) { } +class ApiFunction { + public: + explicit ApiFunction(v8::internal::Address addr) : addr_(addr) { } + v8::internal::Address 
address() { return addr_; } + private: + v8::internal::Address addr_; +}; + + v8::Arguments::Arguments(v8::Local<v8::Value> data, v8::Local<v8::Object> holder, v8::Local<v8::Function> callee, diff --git a/deps/v8/src/arguments.h b/deps/v8/src/arguments.h index d2f1bfce54..3fed22311b 100644 --- a/deps/v8/src/arguments.h +++ b/deps/v8/src/arguments.h @@ -77,9 +77,9 @@ class Arguments BASE_EMBEDDED { // can. class CustomArguments : public Relocatable { public: - inline CustomArguments(Object *data, - JSObject *self, - JSObject *holder) { + inline CustomArguments(Object* data, + JSObject* self, + JSObject* holder) { values_[3] = self; values_[2] = holder; values_[1] = Smi::FromInt(0); diff --git a/deps/v8/src/arm/assembler-arm-inl.h b/deps/v8/src/arm/assembler-arm-inl.h index d6046ec8db..5f47cb796f 100644 --- a/deps/v8/src/arm/assembler-arm-inl.h +++ b/deps/v8/src/arm/assembler-arm-inl.h @@ -85,7 +85,7 @@ Object* RelocInfo::target_object() { } -Handle<Object> RelocInfo::target_object_handle(Assembler *origin) { +Handle<Object> RelocInfo::target_object_handle(Assembler* origin) { ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT); return Memory::Object_Handle_at(Assembler::target_address_address_at(pc_)); } diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc index bc3b8e6447..d9247288ca 100644 --- a/deps/v8/src/arm/assembler-arm.cc +++ b/deps/v8/src/arm/assembler-arm.cc @@ -42,6 +42,34 @@ namespace v8 { namespace internal { +// Safe default is no features. +unsigned CpuFeatures::supported_ = 0; +unsigned CpuFeatures::enabled_ = 0; +unsigned CpuFeatures::found_by_runtime_probing_ = 0; + +void CpuFeatures::Probe() { + // If the compiler is allowed to use vfp then we can use vfp too in our + // code generation. +#if !defined(__arm__) + // For the simulator=arm build, always use VFP since the arm simulator has + // VFP support. 
+ supported_ |= 1u << VFP3; +#else + if (Serializer::enabled()) { + supported_ |= OS::CpuFeaturesImpliedByPlatform(); + return; // No features if we might serialize. + } + + if (OS::ArmCpuHasFeature(VFP3)) { + // This implementation also sets the VFP flags if + // runtime detection of VFP returns true. + supported_ |= 1u << VFP3; + found_by_runtime_probing_ |= 1u << VFP3; + } +#endif +} + + // ----------------------------------------------------------------------------- // Implementation of Register and CRegister @@ -84,6 +112,57 @@ CRegister cr13 = { 13 }; CRegister cr14 = { 14 }; CRegister cr15 = { 15 }; +// Support for the VFP registers s0 to s31 (d0 to d15). +// Note that "sN:sM" is the same as "dN/2". +Register s0 = { 0 }; +Register s1 = { 1 }; +Register s2 = { 2 }; +Register s3 = { 3 }; +Register s4 = { 4 }; +Register s5 = { 5 }; +Register s6 = { 6 }; +Register s7 = { 7 }; +Register s8 = { 8 }; +Register s9 = { 9 }; +Register s10 = { 10 }; +Register s11 = { 11 }; +Register s12 = { 12 }; +Register s13 = { 13 }; +Register s14 = { 14 }; +Register s15 = { 15 }; +Register s16 = { 16 }; +Register s17 = { 17 }; +Register s18 = { 18 }; +Register s19 = { 19 }; +Register s20 = { 20 }; +Register s21 = { 21 }; +Register s22 = { 22 }; +Register s23 = { 23 }; +Register s24 = { 24 }; +Register s25 = { 25 }; +Register s26 = { 26 }; +Register s27 = { 27 }; +Register s28 = { 28 }; +Register s29 = { 29 }; +Register s30 = { 30 }; +Register s31 = { 31 }; + +Register d0 = { 0 }; +Register d1 = { 1 }; +Register d2 = { 2 }; +Register d3 = { 3 }; +Register d4 = { 4 }; +Register d5 = { 5 }; +Register d6 = { 6 }; +Register d7 = { 7 }; +Register d8 = { 8 }; +Register d9 = { 9 }; +Register d10 = { 10 }; +Register d11 = { 11 }; +Register d12 = { 12 }; +Register d13 = { 13 }; +Register d14 = { 14 }; +Register d15 = { 15 }; // ----------------------------------------------------------------------------- // Implementation of RelocInfo @@ -203,10 +282,14 @@ enum { B4 = 1 << 4, B5 = 1 << 5, + 
B6 = 1 << 6, B7 = 1 << 7, B8 = 1 << 8, + B9 = 1 << 9, B12 = 1 << 12, B16 = 1 << 16, + B18 = 1 << 18, + B19 = 1 << 19, B20 = 1 << 20, B21 = 1 << 21, B22 = 1 << 22, @@ -523,6 +606,11 @@ static bool fits_shifter(uint32_t imm32, // encoded. static bool MustUseIp(RelocInfo::Mode rmode) { if (rmode == RelocInfo::EXTERNAL_REFERENCE) { +#ifdef DEBUG + if (!Serializer::enabled()) { + Serializer::TooLateToEnableNow(); + } +#endif return Serializer::enabled(); } else if (rmode == RelocInfo::NONE) { return false; @@ -1282,6 +1370,187 @@ void Assembler::stc2(Coprocessor coproc, } +// Support for VFP. +void Assembler::fmdrr(const Register dst, + const Register src1, + const Register src2, + const SBit s, + const Condition cond) { + // Dm = <Rt,Rt2>. + // Instruction details available in ARM DDI 0406A, A8-646. + // cond(31-28) | 1100(27-24)| 010(23-21) | op=0(20) | Rt2(19-16) | + // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm + ASSERT(CpuFeatures::IsEnabled(VFP3)); + ASSERT(!src1.is(pc) && !src2.is(pc)); + emit(cond | 0xC*B24 | B22 | src2.code()*B16 | + src1.code()*B12 | 0xB*B8 | B4 | dst.code()); +} + + +void Assembler::fmrrd(const Register dst1, + const Register dst2, + const Register src, + const SBit s, + const Condition cond) { + // <Rt,Rt2> = Dm. + // Instruction details available in ARM DDI 0406A, A8-646. + // cond(31-28) | 1100(27-24)| 010(23-21) | op=1(20) | Rt2(19-16) | + // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm + ASSERT(CpuFeatures::IsEnabled(VFP3)); + ASSERT(!dst1.is(pc) && !dst2.is(pc)); + emit(cond | 0xC*B24 | B22 | B20 | dst2.code()*B16 | + dst1.code()*B12 | 0xB*B8 | B4 | src.code()); +} + + +void Assembler::fmsr(const Register dst, + const Register src, + const SBit s, + const Condition cond) { + // Sn = Rt. + // Instruction details available in ARM DDI 0406A, A8-642. 
+ // cond(31-28) | 1110(27-24)| 000(23-21) | op=0(20) | Vn(19-16) | + // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0) + ASSERT(CpuFeatures::IsEnabled(VFP3)); + ASSERT(!src.is(pc)); + emit(cond | 0xE*B24 | (dst.code() >> 1)*B16 | + src.code()*B12 | 0xA*B8 | (0x1 & dst.code())*B7 | B4); +} + + +void Assembler::fmrs(const Register dst, + const Register src, + const SBit s, + const Condition cond) { + // Rt = Sn. + // Instruction details available in ARM DDI 0406A, A8-642. + // cond(31-28) | 1110(27-24)| 000(23-21) | op=1(20) | Vn(19-16) | + // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0) + ASSERT(CpuFeatures::IsEnabled(VFP3)); + ASSERT(!dst.is(pc)); + emit(cond | 0xE*B24 | B20 | (src.code() >> 1)*B16 | + dst.code()*B12 | 0xA*B8 | (0x1 & src.code())*B7 | B4); +} + + +void Assembler::fsitod(const Register dst, + const Register src, + const SBit s, + const Condition cond) { + // Dd = Sm (integer in Sm converted to IEEE 64-bit doubles in Dd). + // Instruction details available in ARM DDI 0406A, A8-576. + // cond(31-28) | 11101(27-23)| D=?(22) | 11(21-20) | 1(19) |opc2=000(18-16) | + // Vd(15-12) | 101(11-9) | sz(8)=1 | op(7)=1 | 1(6) | M=?(5) | 0(4) | Vm(3-0) + ASSERT(CpuFeatures::IsEnabled(VFP3)); + emit(cond | 0xE*B24 | B23 | 0x3*B20 | B19 | + dst.code()*B12 | 0x5*B9 | B8 | B7 | B6 | + (0x1 & src.code())*B5 | (src.code() >> 1)); +} + + +void Assembler::ftosid(const Register dst, + const Register src, + const SBit s, + const Condition cond) { + // Sd = Dm (IEEE 64-bit doubles in Dm converted to 32 bit integer in Sd). + // Instruction details available in ARM DDI 0406A, A8-576. + // cond(31-28) | 11101(27-23)| D=?(22) | 11(21-20) | 1(19) | opc2=101(18-16)| + // Vd(15-12) | 101(11-9) | sz(8)=1 | op(7)=? 
| 1(6) | M=?(5) | 0(4) | Vm(3-0) + ASSERT(CpuFeatures::IsEnabled(VFP3)); + emit(cond | 0xE*B24 | B23 |(0x1 & dst.code())*B22 | + 0x3*B20 | B19 | 0x5*B16 | (dst.code() >> 1)*B12 | + 0x5*B9 | B8 | B7 | B6 | src.code()); +} + + +void Assembler::faddd(const Register dst, + const Register src1, + const Register src2, + const SBit s, + const Condition cond) { + // Dd = faddd(Dn, Dm) double precision floating point addition. + // Dd = D:Vd; Dm=M:Vm; Dn=N:Vm. + // Instruction details available in ARM DDI 0406A, A8-536. + // cond(31-28) | 11100(27-23)| D=?(22) | 11(21-20) | Vn(19-16) | + // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 0(6) | M=?(5) | 0(4) | Vm(3-0) + ASSERT(CpuFeatures::IsEnabled(VFP3)); + emit(cond | 0xE*B24 | 0x3*B20 | src1.code()*B16 | + dst.code()*B12 | 0x5*B9 | B8 | src2.code()); +} + + +void Assembler::fsubd(const Register dst, + const Register src1, + const Register src2, + const SBit s, + const Condition cond) { + // Dd = fsubd(Dn, Dm) double precision floating point subtraction. + // Dd = D:Vd; Dm=M:Vm; Dn=N:Vm. + // Instruction details available in ARM DDI 0406A, A8-784. + // cond(31-28) | 11100(27-23)| D=?(22) | 11(21-20) | Vn(19-16) | + // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 1(6) | M=?(5) | 0(4) | Vm(3-0) + ASSERT(CpuFeatures::IsEnabled(VFP3)); + emit(cond | 0xE*B24 | 0x3*B20 | src1.code()*B16 | + dst.code()*B12 | 0x5*B9 | B8 | B6 | src2.code()); +} + + +void Assembler::fmuld(const Register dst, + const Register src1, + const Register src2, + const SBit s, + const Condition cond) { + // Dd = fmuld(Dn, Dm) double precision floating point multiplication. + // Dd = D:Vd; Dm=M:Vm; Dn=N:Vm. + // Instruction details available in ARM DDI 0406A, A8-784. 
+ // cond(31-28) | 11100(27-23)| D=?(22) | 10(21-20) | Vn(19-16) | + // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 0(6) | M=?(5) | 0(4) | Vm(3-0) + ASSERT(CpuFeatures::IsEnabled(VFP3)); + emit(cond | 0xE*B24 | 0x2*B20 | src1.code()*B16 | + dst.code()*B12 | 0x5*B9 | B8 | src2.code()); +} + + +void Assembler::fdivd(const Register dst, + const Register src1, + const Register src2, + const SBit s, + const Condition cond) { + // Dd = fdivd(Dn, Dm) double precision floating point division. + // Dd = D:Vd; Dm=M:Vm; Dn=N:Vm. + // Instruction details available in ARM DDI 0406A, A8-584. + // cond(31-28) | 11101(27-23)| D=?(22) | 00(21-20) | Vn(19-16) | + // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=? | 0(6) | M=?(5) | 0(4) | Vm(3-0) + ASSERT(CpuFeatures::IsEnabled(VFP3)); + emit(cond | 0xE*B24 | B23 | src1.code()*B16 | + dst.code()*B12 | 0x5*B9 | B8 | src2.code()); +} + + +void Assembler::fcmp(const Register src1, + const Register src2, + const SBit s, + const Condition cond) { + // vcmp(Dd, Dm) double precision floating point comparison. + // Instruction details available in ARM DDI 0406A, A8-570. + // cond(31-28) | 11101 (27-23)| D=?(22) | 11 (21-20) | 0100 (19-16) | + // Vd(15-12) | 101(11-9) | sz(8)=1 | E(7)=? | 1(6) | M(5)=? | 0(4) | Vm(3-0) + ASSERT(CpuFeatures::IsEnabled(VFP3)); + emit(cond | 0xE*B24 |B23 | 0x3*B20 | B18 | + src1.code()*B12 | 0x5*B9 | B8 | B6 | src2.code()); +} + + +void Assembler::vmrs(Register dst, Condition cond) { + // Instruction details available in ARM DDI 0406A, A8-652. 
+ // cond(31-28) | 1110 (27-24) | 1111(23-20)| 0001 (19-16) | + // Rt(15-12) | 1010 (11-8) | 0(7) | 00 (6-5) | 1(4) | 0000(3-0) + ASSERT(CpuFeatures::IsEnabled(VFP3)); + emit(cond | 0xE*B24 | 0xF*B20 | B16 | + dst.code()*B12 | 0xA*B8 | B4); +} + + // Pseudo instructions void Assembler::lea(Register dst, const MemOperand& x, @@ -1311,6 +1580,18 @@ void Assembler::lea(Register dst, } +bool Assembler::ImmediateFitsAddrMode1Instruction(int32_t imm32) { + uint32_t dummy1; + uint32_t dummy2; + return fits_shifter(imm32, &dummy1, &dummy2, NULL); +} + + +void Assembler::BlockConstPoolFor(int instructions) { + BlockConstPoolBefore(pc_offset() + instructions * kInstrSize); +} + + // Debugging void Assembler::RecordJSReturn() { WriteRecordedPositions(); @@ -1429,10 +1710,15 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) { } if (rinfo.rmode() != RelocInfo::NONE) { // Don't record external references unless the heap will be serialized. - if (rmode == RelocInfo::EXTERNAL_REFERENCE && - !Serializer::enabled() && - !FLAG_debug_code) { - return; + if (rmode == RelocInfo::EXTERNAL_REFERENCE) { +#ifdef DEBUG + if (!Serializer::enabled()) { + Serializer::TooLateToEnableNow(); + } +#endif + if (!Serializer::enabled() && !FLAG_debug_code) { + return; + } } ASSERT(buffer_space() >= kMaxRelocSize); // too late to grow buffer here reloc_info_writer.Write(&rinfo); diff --git a/deps/v8/src/arm/assembler-arm.h b/deps/v8/src/arm/assembler-arm.h index d617c7e18e..ca0184e359 100644 --- a/deps/v8/src/arm/assembler-arm.h +++ b/deps/v8/src/arm/assembler-arm.h @@ -41,6 +41,7 @@ #define V8_ARM_ASSEMBLER_ARM_H_ #include <stdio.h> #include "assembler.h" +#include "serialize.h" namespace v8 { namespace internal { @@ -102,6 +103,57 @@ extern Register sp; extern Register lr; extern Register pc; +// Support for VFP registers s0 to s32 (d0 to d16). +// Note that "sN:sM" is the same as "dN/2". 
+extern Register s0; +extern Register s1; +extern Register s2; +extern Register s3; +extern Register s4; +extern Register s5; +extern Register s6; +extern Register s7; +extern Register s8; +extern Register s9; +extern Register s10; +extern Register s11; +extern Register s12; +extern Register s13; +extern Register s14; +extern Register s15; +extern Register s16; +extern Register s17; +extern Register s18; +extern Register s19; +extern Register s20; +extern Register s21; +extern Register s22; +extern Register s23; +extern Register s24; +extern Register s25; +extern Register s26; +extern Register s27; +extern Register s28; +extern Register s29; +extern Register s30; +extern Register s31; + +extern Register d0; +extern Register d1; +extern Register d2; +extern Register d3; +extern Register d4; +extern Register d5; +extern Register d6; +extern Register d7; +extern Register d8; +extern Register d9; +extern Register d10; +extern Register d11; +extern Register d12; +extern Register d13; +extern Register d14; +extern Register d15; // Coprocessor register struct CRegister { @@ -372,6 +424,51 @@ class MemOperand BASE_EMBEDDED { friend class Assembler; }; +// CpuFeatures keeps track of which features are supported by the target CPU. +// Supported features must be enabled by a Scope before use. +class CpuFeatures : public AllStatic { + public: + // Detect features of the target CPU. Set safe defaults if the serializer + // is enabled (snapshots must be portable). + static void Probe(); + + // Check whether a feature is supported by the target CPU. + static bool IsSupported(CpuFeature f) { + if (f == VFP3 && !FLAG_enable_vfp3) return false; + return (supported_ & (1u << f)) != 0; + } + + // Check whether a feature is currently enabled. + static bool IsEnabled(CpuFeature f) { + return (enabled_ & (1u << f)) != 0; + } + + // Enable a specified feature within a scope. 
+ class Scope BASE_EMBEDDED { +#ifdef DEBUG + public: + explicit Scope(CpuFeature f) { + ASSERT(CpuFeatures::IsSupported(f)); + ASSERT(!Serializer::enabled() || + (found_by_runtime_probing_ & (1u << f)) == 0); + old_enabled_ = CpuFeatures::enabled_; + CpuFeatures::enabled_ |= 1u << f; + } + ~Scope() { CpuFeatures::enabled_ = old_enabled_; } + private: + unsigned old_enabled_; +#else + public: + explicit Scope(CpuFeature f) {} +#endif + }; + + private: + static unsigned supported_; + static unsigned enabled_; + static unsigned found_by_runtime_probing_; +}; + typedef int32_t Instr; @@ -437,13 +534,22 @@ class Assembler : public Malloced { INLINE(static Address target_address_at(Address pc)); INLINE(static void set_target_address_at(Address pc, Address target)); - // Modify the code target address in a constant pool entry. + // This sets the branch destination (which is in the constant pool on ARM). + // This is for calls and branches within generated code. inline static void set_target_at(Address constant_pool_entry, Address target); + // This sets the branch destination (which is in the constant pool on ARM). + // This is for calls and branches to runtime code. + inline static void set_external_target_at(Address constant_pool_entry, + Address target) { + set_target_at(constant_pool_entry, target); + } + // Here we are patching the address in the constant pool, not the actual call // instruction. The address in the constant pool is the same size as a // pointer. static const int kCallTargetSize = kPointerSize; + static const int kExternalTargetSize = kPointerSize; // Size of an instruction. static const int kInstrSize = sizeof(Instr); @@ -646,6 +752,66 @@ class Assembler : public Malloced { void stc2(Coprocessor coproc, CRegister crd, Register base, int option, LFlag l = Short); // v5 and above + // Support for VFP. + // All these APIs support S0 to S31 and D0 to D15. + // Currently these APIs do not support extended D registers, i.e, D16 to D31. 
+ // However, some simple modifications can allow + // these APIs to support D16 to D31. + + void fmdrr(const Register dst, + const Register src1, + const Register src2, + const SBit s = LeaveCC, + const Condition cond = al); + void fmrrd(const Register dst1, + const Register dst2, + const Register src, + const SBit s = LeaveCC, + const Condition cond = al); + void fmsr(const Register dst, + const Register src, + const SBit s = LeaveCC, + const Condition cond = al); + void fmrs(const Register dst, + const Register src, + const SBit s = LeaveCC, + const Condition cond = al); + void fsitod(const Register dst, + const Register src, + const SBit s = LeaveCC, + const Condition cond = al); + void ftosid(const Register dst, + const Register src, + const SBit s = LeaveCC, + const Condition cond = al); + + void faddd(const Register dst, + const Register src1, + const Register src2, + const SBit s = LeaveCC, + const Condition cond = al); + void fsubd(const Register dst, + const Register src1, + const Register src2, + const SBit s = LeaveCC, + const Condition cond = al); + void fmuld(const Register dst, + const Register src1, + const Register src2, + const SBit s = LeaveCC, + const Condition cond = al); + void fdivd(const Register dst, + const Register src1, + const Register src2, + const SBit s = LeaveCC, + const Condition cond = al); + void fcmp(const Register src1, + const Register src2, + const SBit s = LeaveCC, + const Condition cond = al); + void vmrs(const Register dst, + const Condition cond = al); + // Pseudo instructions void nop() { mov(r0, Operand(r0)); } @@ -673,6 +839,13 @@ class Assembler : public Malloced { return (pc_offset() - l->pos()) / kInstrSize; } + // Check whether an immediate fits an addressing mode 1 instruction. + bool ImmediateFitsAddrMode1Instruction(int32_t imm32); + + // Postpone the generation of the constant pool for the specified number of + // instructions. 
+ void BlockConstPoolFor(int instructions); + // Debugging // Mark address of the ExitJSFrame code. diff --git a/deps/v8/src/arm/builtins-arm.cc b/deps/v8/src/arm/builtins-arm.cc index 6db554a77c..5389a3c5f5 100644 --- a/deps/v8/src/arm/builtins-arm.cc +++ b/deps/v8/src/arm/builtins-arm.cc @@ -284,7 +284,7 @@ static void AllocateJSArray(MacroAssembler* masm, // Both registers are preserved by this code so no need to differentiate between // construct call and normal call. static void ArrayNativeCode(MacroAssembler* masm, - Label *call_generic_code) { + Label* call_generic_code) { Label argc_one_or_more, argc_two_or_more; // Check for array construction with zero arguments or one. @@ -1029,44 +1029,24 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) { __ push(r0); __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_JS); - Label no_preemption, retry_preemption; - __ bind(&retry_preemption); - ExternalReference stack_guard_limit_address = - ExternalReference::address_of_stack_guard_limit(); - __ mov(r2, Operand(stack_guard_limit_address)); - __ ldr(r2, MemOperand(r2)); - __ cmp(sp, r2); - __ b(hi, &no_preemption); - - // We have encountered a preemption or stack overflow already before we push - // the array contents. Save r0 which is the Smi-tagged length of the array. - __ push(r0); - - // Runtime routines expect at least one argument, so give it a Smi. - __ mov(r0, Operand(Smi::FromInt(0))); - __ push(r0); - __ CallRuntime(Runtime::kStackGuard, 1); - - // Since we returned, it wasn't a stack overflow. Restore r0 and try again. - __ pop(r0); - __ b(&retry_preemption); - - __ bind(&no_preemption); - - // Eagerly check for stack-overflow before starting to push the arguments. - // r0: number of arguments. - // r2: stack limit. + // Check the stack for overflow. We are not trying need to catch + // interruptions (e.g. debug break and preemption) here, so the "real stack + // limit" is checked. 
Label okay; + __ LoadRoot(r2, Heap::kRealStackLimitRootIndex); + // Make r2 the space we have left. The stack might already be overflowed + // here which will cause r2 to become negative. __ sub(r2, sp, r2); - + // Check if the arguments will overflow the stack. __ cmp(r2, Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize)); - __ b(hi, &okay); + __ b(gt, &okay); // Signed comparison. // Out of stack space. __ ldr(r1, MemOperand(fp, kFunctionOffset)); __ push(r1); __ push(r0); __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_JS); + // End of stack check. // Push current limit and index. __ bind(&okay); diff --git a/deps/v8/src/arm/codegen-arm-inl.h b/deps/v8/src/arm/codegen-arm-inl.h index 9ff02cb4a9..749f32db0f 100644 --- a/deps/v8/src/arm/codegen-arm-inl.h +++ b/deps/v8/src/arm/codegen-arm-inl.h @@ -35,18 +35,15 @@ namespace internal { #define __ ACCESS_MASM(masm_) void CodeGenerator::LoadConditionAndSpill(Expression* expression, - TypeofState typeof_state, JumpTarget* true_target, JumpTarget* false_target, bool force_control) { - LoadCondition(expression, typeof_state, true_target, false_target, - force_control); + LoadCondition(expression, true_target, false_target, force_control); } -void CodeGenerator::LoadAndSpill(Expression* expression, - TypeofState typeof_state) { - Load(expression, typeof_state); +void CodeGenerator::LoadAndSpill(Expression* expression) { + Load(expression); } @@ -60,8 +57,8 @@ void CodeGenerator::VisitStatementsAndSpill(ZoneList<Statement*>* statements) { } -void Reference::GetValueAndSpill(TypeofState typeof_state) { - GetValue(typeof_state); +void Reference::GetValueAndSpill() { + GetValue(); } diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc index dd88515e7a..b08615e613 100644 --- a/deps/v8/src/arm/codegen-arm.cc +++ b/deps/v8/src/arm/codegen-arm.cc @@ -29,6 +29,7 @@ #include "bootstrapper.h" #include "codegen-inl.h" +#include "compiler.h" #include "debug.h" #include "parser.h" #include 
"register-allocator-inl.h" @@ -92,7 +93,6 @@ void DeferredCode::RestoreRegisters() { CodeGenState::CodeGenState(CodeGenerator* owner) : owner_(owner), - typeof_state_(NOT_INSIDE_TYPEOF), true_target_(NULL), false_target_(NULL), previous_(NULL) { @@ -101,11 +101,9 @@ CodeGenState::CodeGenState(CodeGenerator* owner) CodeGenState::CodeGenState(CodeGenerator* owner, - TypeofState typeof_state, JumpTarget* true_target, JumpTarget* false_target) : owner_(owner), - typeof_state_(typeof_state), true_target_(true_target), false_target_(false_target), previous_(owner->state()) { @@ -144,6 +142,9 @@ CodeGenerator::CodeGenerator(int buffer_size, Handle<Script> script, // cp: callee's context void CodeGenerator::GenCode(FunctionLiteral* fun) { + // Record the position for debugging purposes. + CodeForFunctionPosition(fun); + ZoneList<Statement*>* body = fun->body(); // Initialize state. @@ -322,18 +323,32 @@ void CodeGenerator::GenCode(FunctionLiteral* fun) { Label check_exit_codesize; masm_->bind(&check_exit_codesize); + // Calculate the exact length of the return sequence and make sure that + // the constant pool is not emitted inside of the return sequence. + int32_t sp_delta = (scope_->num_parameters() + 1) * kPointerSize; + int return_sequence_length = Debug::kARMJSReturnSequenceLength; + if (!masm_->ImmediateFitsAddrMode1Instruction(sp_delta)) { + // Additional mov instruction generated. + return_sequence_length++; + } + masm_->BlockConstPoolFor(return_sequence_length); + // Tear down the frame which will restore the caller's frame pointer and // the link register. frame_->Exit(); // Here we use masm_-> instead of the __ macro to avoid the code coverage // tool from instrumenting as we rely on the code size here. - masm_->add(sp, sp, Operand((scope_->num_parameters() + 1) * kPointerSize)); + masm_->add(sp, sp, Operand(sp_delta)); masm_->Jump(lr); // Check that the size of the code used for returning matches what is - // expected by the debugger. 
- ASSERT_EQ(kJSReturnSequenceLength, + // expected by the debugger. The add instruction above is an addressing + // mode 1 instruction where there are restrictions on which immediate values + // can be encoded in the instruction and which immediate values requires + // use of an additional instruction for moving the immediate to a temporary + // register. + ASSERT_EQ(return_sequence_length, masm_->InstructionsGeneratedSince(&check_exit_codesize)); } @@ -442,14 +457,13 @@ MemOperand CodeGenerator::ContextSlotOperandCheckExtensions( // register was set, has_cc() is true and cc_reg_ contains the condition to // test for 'true'. void CodeGenerator::LoadCondition(Expression* x, - TypeofState typeof_state, JumpTarget* true_target, JumpTarget* false_target, bool force_cc) { ASSERT(!has_cc()); int original_height = frame_->height(); - { CodeGenState new_state(this, typeof_state, true_target, false_target); + { CodeGenState new_state(this, true_target, false_target); Visit(x); // If we hit a stack overflow, we may not have actually visited @@ -479,13 +493,13 @@ void CodeGenerator::LoadCondition(Expression* x, } -void CodeGenerator::Load(Expression* x, TypeofState typeof_state) { +void CodeGenerator::Load(Expression* expr) { #ifdef DEBUG int original_height = frame_->height(); #endif JumpTarget true_target; JumpTarget false_target; - LoadCondition(x, typeof_state, &true_target, &false_target, false); + LoadCondition(expr, &true_target, &false_target, false); if (has_cc()) { // Convert cc_reg_ into a boolean value. @@ -552,24 +566,27 @@ void CodeGenerator::LoadGlobalReceiver(Register scratch) { } -// TODO(1241834): Get rid of this function in favor of just using Load, now -// that we have the INSIDE_TYPEOF typeof state. => Need to handle global -// variables w/o reference errors elsewhere. -void CodeGenerator::LoadTypeofExpression(Expression* x) { +void CodeGenerator::LoadTypeofExpression(Expression* expr) { + // Special handling of identifiers as subexpressions of typeof. 
VirtualFrame::SpilledScope spilled_scope; - Variable* variable = x->AsVariableProxy()->AsVariable(); + Variable* variable = expr->AsVariableProxy()->AsVariable(); if (variable != NULL && !variable->is_this() && variable->is_global()) { - // NOTE: This is somewhat nasty. We force the compiler to load - // the variable as if through '<global>.<variable>' to make sure we - // do not get reference errors. + // For a global variable we build the property reference + // <global>.<variable> and perform a (regular non-contextual) property + // load to make sure we do not get reference errors. Slot global(variable, Slot::CONTEXT, Context::GLOBAL_INDEX); Literal key(variable->name()); - // TODO(1241834): Fetch the position from the variable instead of using - // no position. Property property(&global, &key, RelocInfo::kNoPosition); - LoadAndSpill(&property); + Reference ref(this, &property); + ref.GetValueAndSpill(); + } else if (variable != NULL && variable->slot() != NULL) { + // For a variable that rewrites to a slot, we signal it is the immediate + // subexpression of a typeof. + LoadFromSlot(variable->slot(), INSIDE_TYPEOF); + frame_->SpillAll(); } else { - LoadAndSpill(x, INSIDE_TYPEOF); + // Anything else can be handled normally. + LoadAndSpill(expr); } } @@ -1066,27 +1083,6 @@ void CodeGenerator::Comparison(Condition cc, } -class CallFunctionStub: public CodeStub { - public: - CallFunctionStub(int argc, InLoopFlag in_loop) - : argc_(argc), in_loop_(in_loop) {} - - void Generate(MacroAssembler* masm); - - private: - int argc_; - InLoopFlag in_loop_; - -#if defined(DEBUG) - void Print() { PrintF("CallFunctionStub (argc %d)\n", argc_); } -#endif // defined(DEBUG) - - Major MajorKey() { return CallFunction; } - int MinorKey() { return argc_; } - InLoopFlag InLoop() { return in_loop_; } -}; - - // Call the function on the stack with the given arguments. 
void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args, int position) { @@ -1297,8 +1293,7 @@ void CodeGenerator::VisitIfStatement(IfStatement* node) { JumpTarget then; JumpTarget else_; // if (cond) - LoadConditionAndSpill(node->condition(), NOT_INSIDE_TYPEOF, - &then, &else_, true); + LoadConditionAndSpill(node->condition(), &then, &else_, true); if (frame_ != NULL) { Branch(false, &else_); } @@ -1321,8 +1316,7 @@ void CodeGenerator::VisitIfStatement(IfStatement* node) { ASSERT(!has_else_stm); JumpTarget then; // if (cond) - LoadConditionAndSpill(node->condition(), NOT_INSIDE_TYPEOF, - &then, &exit, true); + LoadConditionAndSpill(node->condition(), &then, &exit, true); if (frame_ != NULL) { Branch(false, &exit); } @@ -1337,8 +1331,7 @@ void CodeGenerator::VisitIfStatement(IfStatement* node) { ASSERT(!has_then_stm); JumpTarget else_; // if (!cond) - LoadConditionAndSpill(node->condition(), NOT_INSIDE_TYPEOF, - &exit, &else_, true); + LoadConditionAndSpill(node->condition(), &exit, &else_, true); if (frame_ != NULL) { Branch(true, &exit); } @@ -1352,8 +1345,7 @@ void CodeGenerator::VisitIfStatement(IfStatement* node) { Comment cmnt(masm_, "[ If"); ASSERT(!has_then_stm && !has_else_stm); // if (cond) - LoadConditionAndSpill(node->condition(), NOT_INSIDE_TYPEOF, - &exit, &exit, false); + LoadConditionAndSpill(node->condition(), &exit, &exit, false); if (frame_ != NULL) { if (has_cc()) { cc_reg_ = al; @@ -1591,8 +1583,9 @@ void CodeGenerator::VisitDoWhileStatement(DoWhileStatement* node) { node->continue_target()->Bind(); } if (has_valid_frame()) { - LoadConditionAndSpill(node->cond(), NOT_INSIDE_TYPEOF, - &body, node->break_target(), true); + Comment cmnt(masm_, "[ DoWhileCondition"); + CodeForDoWhileConditionPosition(node); + LoadConditionAndSpill(node->cond(), &body, node->break_target(), true); if (has_valid_frame()) { // A invalid frame here indicates that control did not // fall out of the test expression. 
@@ -1631,8 +1624,7 @@ void CodeGenerator::VisitWhileStatement(WhileStatement* node) { if (info == DONT_KNOW) { JumpTarget body; - LoadConditionAndSpill(node->cond(), NOT_INSIDE_TYPEOF, - &body, node->break_target(), true); + LoadConditionAndSpill(node->cond(), &body, node->break_target(), true); if (has_valid_frame()) { // A NULL frame indicates that control did not fall out of the // test expression. @@ -1691,8 +1683,7 @@ void CodeGenerator::VisitForStatement(ForStatement* node) { // If the test is always true, there is no need to compile it. if (info == DONT_KNOW) { JumpTarget body; - LoadConditionAndSpill(node->cond(), NOT_INSIDE_TYPEOF, - &body, node->break_target(), true); + LoadConditionAndSpill(node->cond(), &body, node->break_target(), true); if (has_valid_frame()) { Branch(false, node->break_target()); } @@ -2270,7 +2261,8 @@ void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) { Comment cmnt(masm_, "[ FunctionLiteral"); // Build the function boilerplate and instantiate it. - Handle<JSFunction> boilerplate = BuildBoilerplate(node); + Handle<JSFunction> boilerplate = + Compiler::BuildBoilerplate(node, script_, this); // Check for stack-overflow exception. 
if (HasStackOverflow()) { ASSERT(frame_->height() == original_height); @@ -2301,20 +2293,19 @@ void CodeGenerator::VisitConditional(Conditional* node) { Comment cmnt(masm_, "[ Conditional"); JumpTarget then; JumpTarget else_; - LoadConditionAndSpill(node->condition(), NOT_INSIDE_TYPEOF, - &then, &else_, true); + LoadConditionAndSpill(node->condition(), &then, &else_, true); if (has_valid_frame()) { Branch(false, &else_); } if (has_valid_frame() || then.is_linked()) { then.Bind(); - LoadAndSpill(node->then_expression(), typeof_state()); + LoadAndSpill(node->then_expression()); } if (else_.is_linked()) { JumpTarget exit; if (has_valid_frame()) exit.Jump(); else_.Bind(); - LoadAndSpill(node->else_expression(), typeof_state()); + LoadAndSpill(node->else_expression()); if (exit.is_linked()) exit.Bind(); } ASSERT(frame_->height() == original_height + 1); @@ -2381,10 +2372,6 @@ void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) { frame_->EmitPush(r0); } else { - // Note: We would like to keep the assert below, but it fires because of - // some nasty code in LoadTypeofExpression() which should be removed... - // ASSERT(!slot->var()->is_dynamic()); - // Special handling for locals allocated in registers. __ ldr(r0, SlotOperand(slot, r2)); frame_->EmitPush(r0); @@ -2479,7 +2466,7 @@ void CodeGenerator::VisitSlot(Slot* node) { #endif VirtualFrame::SpilledScope spilled_scope; Comment cmnt(masm_, "[ Slot"); - LoadFromSlot(node, typeof_state()); + LoadFromSlot(node, NOT_INSIDE_TYPEOF); ASSERT(frame_->height() == original_height + 1); } @@ -2498,7 +2485,7 @@ void CodeGenerator::VisitVariableProxy(VariableProxy* node) { } else { ASSERT(var->is_global()); Reference ref(this, node); - ref.GetValueAndSpill(typeof_state()); + ref.GetValueAndSpill(); } ASSERT(frame_->height() == original_height + 1); } @@ -2834,7 +2821,7 @@ void CodeGenerator::VisitAssignment(Assignment* node) { } else { // +=, *= and similar binary assignments. // Get the old value of the lhs. 
- target.GetValueAndSpill(NOT_INSIDE_TYPEOF); + target.GetValueAndSpill(); Literal* literal = node->value()->AsLiteral(); bool overwrite = (node->value()->AsBinaryOperation() != NULL && @@ -2899,7 +2886,7 @@ void CodeGenerator::VisitProperty(Property* node) { Comment cmnt(masm_, "[ Property"); { Reference property(this, node); - property.GetValueAndSpill(typeof_state()); + property.GetValueAndSpill(); } ASSERT(frame_->height() == original_height + 1); } @@ -3069,7 +3056,7 @@ void CodeGenerator::VisitCall(Call* node) { // Load the function to call from the property through a reference. Reference ref(this, property); - ref.GetValueAndSpill(NOT_INSIDE_TYPEOF); // receiver + ref.GetValueAndSpill(); // receiver // Pass receiver to called function. if (property->is_synthetic()) { @@ -3299,7 +3286,82 @@ void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) { void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) { VirtualFrame::SpilledScope spilled_scope; ASSERT(args->length() == 2); + Comment(masm_, "[ GenerateFastCharCodeAt"); + + LoadAndSpill(args->at(0)); + LoadAndSpill(args->at(1)); + frame_->EmitPop(r0); // Index. + frame_->EmitPop(r1); // String. + + Label slow, end, not_a_flat_string, ascii_string, try_again_with_new_string; + + __ tst(r1, Operand(kSmiTagMask)); + __ b(eq, &slow); // The 'string' was a Smi. + + ASSERT(kSmiTag == 0); + __ tst(r0, Operand(kSmiTagMask | 0x80000000u)); + __ b(ne, &slow); // The index was negative or not a Smi. + + __ bind(&try_again_with_new_string); + __ CompareObjectType(r1, r2, r2, FIRST_NONSTRING_TYPE); + __ b(ge, &slow); + + // Now r2 has the string type. + __ ldr(r3, FieldMemOperand(r1, String::kLengthOffset)); + __ and_(r4, r2, Operand(kStringSizeMask)); + __ add(r4, r4, Operand(String::kLongLengthShift)); + __ mov(r3, Operand(r3, LSR, r4)); + // Now r3 has the length of the string. Compare with the index. 
+ __ cmp(r3, Operand(r0, LSR, kSmiTagSize)); + __ b(le, &slow); + + // Here we know the index is in range. Check that string is sequential. + ASSERT_EQ(0, kSeqStringTag); + __ tst(r2, Operand(kStringRepresentationMask)); + __ b(ne, ¬_a_flat_string); + + // Check whether it is an ASCII string. + ASSERT_EQ(0, kTwoByteStringTag); + __ tst(r2, Operand(kStringEncodingMask)); + __ b(ne, &ascii_string); + + // 2-byte string. We can add without shifting since the Smi tag size is the + // log2 of the number of bytes in a two-byte character. + ASSERT_EQ(1, kSmiTagSize); + ASSERT_EQ(0, kSmiShiftSize); + __ add(r1, r1, Operand(r0)); + __ ldrh(r0, FieldMemOperand(r1, SeqTwoByteString::kHeaderSize)); + __ mov(r0, Operand(r0, LSL, kSmiTagSize)); + __ jmp(&end); + + __ bind(&ascii_string); + __ add(r1, r1, Operand(r0, LSR, kSmiTagSize)); + __ ldrb(r0, FieldMemOperand(r1, SeqAsciiString::kHeaderSize)); + __ mov(r0, Operand(r0, LSL, kSmiTagSize)); + __ jmp(&end); + + __ bind(¬_a_flat_string); + __ and_(r2, r2, Operand(kStringRepresentationMask)); + __ cmp(r2, Operand(kConsStringTag)); + __ b(ne, &slow); + + // ConsString. + // Check that the right hand side is the empty string (ie if this is really a + // flat string in a cons string). If that is not the case we would rather go + // to the runtime system now, to flatten the string. + __ ldr(r2, FieldMemOperand(r1, ConsString::kSecondOffset)); + __ LoadRoot(r3, Heap::kEmptyStringRootIndex); + __ cmp(r2, Operand(r3)); + __ b(ne, &slow); + + // Get the first of the two strings. 
+ __ ldr(r1, FieldMemOperand(r1, ConsString::kFirstOffset)); + __ jmp(&try_again_with_new_string); + + __ bind(&slow); __ LoadRoot(r0, Heap::kUndefinedValueRootIndex); + + __ bind(&end); frame_->EmitPush(r0); } @@ -3474,7 +3536,6 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) { if (op == Token::NOT) { LoadConditionAndSpill(node->expression(), - NOT_INSIDE_TYPEOF, false_target(), true_target(), true); @@ -3635,7 +3696,7 @@ void CodeGenerator::VisitCountOperation(CountOperation* node) { ASSERT(frame_->height() == original_height + 1); return; } - target.GetValueAndSpill(NOT_INSIDE_TYPEOF); + target.GetValueAndSpill(); frame_->EmitPop(r0); JumpTarget slow; @@ -3729,7 +3790,6 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) { if (op == Token::AND) { JumpTarget is_true; LoadConditionAndSpill(node->left(), - NOT_INSIDE_TYPEOF, &is_true, false_target(), false); @@ -3765,7 +3825,6 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) { } is_true.Bind(); LoadConditionAndSpill(node->right(), - NOT_INSIDE_TYPEOF, true_target(), false_target(), false); @@ -3777,7 +3836,6 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) { } else if (op == Token::OR) { JumpTarget is_false; LoadConditionAndSpill(node->left(), - NOT_INSIDE_TYPEOF, true_target(), &is_false, false); @@ -3813,7 +3871,6 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) { } is_false.Bind(); LoadConditionAndSpill(node->right(), - NOT_INSIDE_TYPEOF, true_target(), false_target(), false); @@ -3998,28 +4055,35 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) { } else if (check->Equals(Heap::function_symbol())) { __ tst(r1, Operand(kSmiTagMask)); false_target()->Branch(eq); - __ CompareObjectType(r1, r1, r1, JS_FUNCTION_TYPE); + Register map_reg = r2; + __ CompareObjectType(r1, map_reg, r1, JS_FUNCTION_TYPE); + true_target()->Branch(eq); + // Regular expressions are callable so typeof == 'function'. 
+ __ CompareInstanceType(map_reg, r1, JS_REGEXP_TYPE); cc_reg_ = eq; } else if (check->Equals(Heap::object_symbol())) { __ tst(r1, Operand(kSmiTagMask)); false_target()->Branch(eq); - __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset)); __ LoadRoot(ip, Heap::kNullValueRootIndex); __ cmp(r1, ip); true_target()->Branch(eq); + Register map_reg = r2; + __ CompareObjectType(r1, map_reg, r1, JS_REGEXP_TYPE); + false_target()->Branch(eq); + // It can be an undetectable object. - __ ldrb(r1, FieldMemOperand(r2, Map::kBitFieldOffset)); + __ ldrb(r1, FieldMemOperand(map_reg, Map::kBitFieldOffset)); __ and_(r1, r1, Operand(1 << Map::kIsUndetectable)); __ cmp(r1, Operand(1 << Map::kIsUndetectable)); false_target()->Branch(eq); - __ ldrb(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset)); - __ cmp(r2, Operand(FIRST_JS_OBJECT_TYPE)); + __ ldrb(r1, FieldMemOperand(map_reg, Map::kInstanceTypeOffset)); + __ cmp(r1, Operand(FIRST_JS_OBJECT_TYPE)); false_target()->Branch(lt); - __ cmp(r2, Operand(LAST_JS_OBJECT_TYPE)); + __ cmp(r1, Operand(LAST_JS_OBJECT_TYPE)); cc_reg_ = le; } else { @@ -4112,7 +4176,7 @@ Handle<String> Reference::GetName() { } -void Reference::GetValue(TypeofState typeof_state) { +void Reference::GetValue() { ASSERT(cgen_->HasValidEntryRegisters()); ASSERT(!is_illegal()); ASSERT(!cgen_->has_cc()); @@ -4127,16 +4191,11 @@ void Reference::GetValue(TypeofState typeof_state) { Comment cmnt(masm, "[ Load from Slot"); Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot(); ASSERT(slot != NULL); - cgen_->LoadFromSlot(slot, typeof_state); + cgen_->LoadFromSlot(slot, NOT_INSIDE_TYPEOF); break; } case NAMED: { - // TODO(1241834): Make sure that this it is safe to ignore the - // distinction between expressions in a typeof and not in a typeof. If - // there is a chance that reference errors can be thrown below, we - // must distinguish between the two kinds of loads (typeof expression - // loads must not throw a reference error). 
VirtualFrame* frame = cgen_->frame(); Comment cmnt(masm, "[ Load from named Property"); Handle<String> name(GetName()); @@ -4155,9 +4214,6 @@ void Reference::GetValue(TypeofState typeof_state) { } case KEYED: { - // TODO(1241834): Make sure that this it is safe to ignore the - // distinction between expressions in a typeof and not in a typeof. - // TODO(181): Implement inlined version of array indexing once // loop nesting is properly tracked on ARM. VirtualFrame* frame = cgen_->frame(); @@ -4493,7 +4549,7 @@ class WriteInt32ToHeapNumberStub : public CodeStub { // See comment for class. -void WriteInt32ToHeapNumberStub::Generate(MacroAssembler *masm) { +void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) { Label max_negative_int; // the_int_ has the answer which is a signed int32 but not a Smi. // We test for the special value that has a different exponent. This test @@ -4566,6 +4622,22 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm, if (cc != eq) { __ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE)); __ b(ge, slow); + // Normally here we fall through to return_equal, but undefined is + // special: (undefined == undefined) == true, but (undefined <= undefined) + // == false! See ECMAScript 11.8.5. + if (cc == le || cc == ge) { + __ cmp(r4, Operand(ODDBALL_TYPE)); + __ b(ne, &return_equal); + __ LoadRoot(r2, Heap::kUndefinedValueRootIndex); + __ cmp(r0, Operand(r2)); + __ b(ne, &return_equal); + if (cc == le) { + __ mov(r0, Operand(GREATER)); // undefined <= undefined should fail. + } else { + __ mov(r0, Operand(LESS)); // undefined >= undefined should fail. + } + __ mov(pc, Operand(lr)); // Return. + } } } __ bind(&return_equal); @@ -4643,9 +4715,17 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm, // Rhs is a smi, lhs is a number. 
__ push(lr); - __ mov(r7, Operand(r1)); - ConvertToDoubleStub stub1(r3, r2, r7, r6); - __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET); + + if (CpuFeatures::IsSupported(VFP3)) { + CpuFeatures::Scope scope(VFP3); + __ IntegerToDoubleConversionWithVFP3(r1, r3, r2); + } else { + __ mov(r7, Operand(r1)); + ConvertToDoubleStub stub1(r3, r2, r7, r6); + __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET); + } + + // r3 and r2 are rhs as double. __ ldr(r1, FieldMemOperand(r0, HeapNumber::kValueOffset + kPointerSize)); __ ldr(r0, FieldMemOperand(r0, HeapNumber::kValueOffset)); @@ -4673,9 +4753,16 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm, __ push(lr); __ ldr(r2, FieldMemOperand(r1, HeapNumber::kValueOffset)); __ ldr(r3, FieldMemOperand(r1, HeapNumber::kValueOffset + kPointerSize)); - __ mov(r7, Operand(r0)); - ConvertToDoubleStub stub2(r1, r0, r7, r6); - __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET); + + if (CpuFeatures::IsSupported(VFP3)) { + CpuFeatures::Scope scope(VFP3); + __ IntegerToDoubleConversionWithVFP3(r0, r1, r0); + } else { + __ mov(r7, Operand(r0)); + ConvertToDoubleStub stub2(r1, r0, r7, r6); + __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET); + } + __ pop(lr); // Fall through to both_loaded_as_doubles. } @@ -4878,9 +4965,23 @@ void CompareStub::Generate(MacroAssembler* masm) { // fall through if neither is a NaN. Also binds rhs_not_nan. EmitNanCheck(masm, &rhs_not_nan, cc_); - // Compares two doubles in r0, r1, r2, r3 that are not NaNs. Returns the - // answer. Never falls through. - EmitTwoNonNanDoubleComparison(masm, cc_); + if (CpuFeatures::IsSupported(VFP3)) { + CpuFeatures::Scope scope(VFP3); + // ARMv7 VFP3 instructions to implement double precision comparison. 
+ __ fmdrr(d6, r0, r1); + __ fmdrr(d7, r2, r3); + + __ fcmp(d6, d7); + __ vmrs(pc); + __ mov(r0, Operand(0), LeaveCC, eq); + __ mov(r0, Operand(1), LeaveCC, lt); + __ mvn(r0, Operand(0), LeaveCC, gt); + __ mov(pc, Operand(lr)); + } else { + // Compares two doubles in r0, r1, r2, r3 that are not NaNs. Returns the + // answer. Never falls through. + EmitTwoNonNanDoubleComparison(masm, cc_); + } __ bind(¬_smis); // At this point we know we are dealing with two different objects, @@ -4980,16 +5081,24 @@ static void HandleBinaryOpSlowCases(MacroAssembler* masm, // Since both are Smis there is no heap number to overwrite, so allocate. // The new heap number is in r5. r6 and r7 are scratch. AllocateHeapNumber(masm, &slow, r5, r6, r7); - // Write Smi from r0 to r3 and r2 in double format. r6 is scratch. - __ mov(r7, Operand(r0)); - ConvertToDoubleStub stub1(r3, r2, r7, r6); - __ push(lr); - __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET); - // Write Smi from r1 to r1 and r0 in double format. r6 is scratch. - __ mov(r7, Operand(r1)); - ConvertToDoubleStub stub2(r1, r0, r7, r6); - __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET); - __ pop(lr); + + if (CpuFeatures::IsSupported(VFP3)) { + CpuFeatures::Scope scope(VFP3); + __ IntegerToDoubleConversionWithVFP3(r0, r3, r2); + __ IntegerToDoubleConversionWithVFP3(r1, r1, r0); + } else { + // Write Smi from r0 to r3 and r2 in double format. r6 is scratch. + __ mov(r7, Operand(r0)); + ConvertToDoubleStub stub1(r3, r2, r7, r6); + __ push(lr); + __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET); + // Write Smi from r1 to r1 and r0 in double format. r6 is scratch. + __ mov(r7, Operand(r1)); + ConvertToDoubleStub stub2(r1, r0, r7, r6); + __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET); + __ pop(lr); + } + __ jmp(&do_the_call); // Tail call. No return. 
// We jump to here if something goes wrong (one param is not a number of any @@ -5025,12 +5134,20 @@ static void HandleBinaryOpSlowCases(MacroAssembler* masm, // We can't overwrite a Smi so get address of new heap number into r5. AllocateHeapNumber(masm, &slow, r5, r6, r7); } - // Write Smi from r0 to r3 and r2 in double format. - __ mov(r7, Operand(r0)); - ConvertToDoubleStub stub3(r3, r2, r7, r6); - __ push(lr); - __ Call(stub3.GetCode(), RelocInfo::CODE_TARGET); - __ pop(lr); + + + if (CpuFeatures::IsSupported(VFP3)) { + CpuFeatures::Scope scope(VFP3); + __ IntegerToDoubleConversionWithVFP3(r0, r3, r2); + } else { + // Write Smi from r0 to r3 and r2 in double format. + __ mov(r7, Operand(r0)); + ConvertToDoubleStub stub3(r3, r2, r7, r6); + __ push(lr); + __ Call(stub3.GetCode(), RelocInfo::CODE_TARGET); + __ pop(lr); + } + __ bind(&finished_loading_r0); // Move r1 to a double in r0-r1. @@ -5050,12 +5167,19 @@ static void HandleBinaryOpSlowCases(MacroAssembler* masm, // We can't overwrite a Smi so get address of new heap number into r5. AllocateHeapNumber(masm, &slow, r5, r6, r7); } - // Write Smi from r1 to r1 and r0 in double format. - __ mov(r7, Operand(r1)); - ConvertToDoubleStub stub4(r1, r0, r7, r6); - __ push(lr); - __ Call(stub4.GetCode(), RelocInfo::CODE_TARGET); - __ pop(lr); + + if (CpuFeatures::IsSupported(VFP3)) { + CpuFeatures::Scope scope(VFP3); + __ IntegerToDoubleConversionWithVFP3(r1, r1, r0); + } else { + // Write Smi from r1 to r1 and r0 in double format. + __ mov(r7, Operand(r1)); + ConvertToDoubleStub stub4(r1, r0, r7, r6); + __ push(lr); + __ Call(stub4.GetCode(), RelocInfo::CODE_TARGET); + __ pop(lr); + } + __ bind(&finished_loading_r1); __ bind(&do_the_call); @@ -5064,6 +5188,38 @@ static void HandleBinaryOpSlowCases(MacroAssembler* masm, // r2: Right value (least significant part of mantissa). // r3: Right value (sign, exponent, top of mantissa). // r5: Address of heap number for result. 
+ + if (CpuFeatures::IsSupported(VFP3) && + ((Token::MUL == operation) || + (Token::DIV == operation) || + (Token::ADD == operation) || + (Token::SUB == operation))) { + CpuFeatures::Scope scope(VFP3); + // ARMv7 VFP3 instructions to implement + // double precision, add, subtract, multiply, divide. + __ fmdrr(d6, r0, r1); + __ fmdrr(d7, r2, r3); + + if (Token::MUL == operation) { + __ fmuld(d5, d6, d7); + } else if (Token::DIV == operation) { + __ fdivd(d5, d6, d7); + } else if (Token::ADD == operation) { + __ faddd(d5, d6, d7); + } else if (Token::SUB == operation) { + __ fsubd(d5, d6, d7); + } else { + UNREACHABLE(); + } + + __ fmrrd(r0, r1, d5); + + __ str(r0, FieldMemOperand(r5, HeapNumber::kValueOffset)); + __ str(r1, FieldMemOperand(r5, HeapNumber::kValueOffset + 4)); + __ mov(r0, Operand(r5)); + __ mov(pc, lr); + return; + } __ push(lr); // For later. __ push(r5); // Address of heap number that is answer. __ AlignStack(0); @@ -5132,38 +5288,49 @@ static void GetInt32(MacroAssembler* masm, __ sub(scratch2, scratch2, Operand(zero_exponent), SetCC); // Dest already has a Smi zero. __ b(lt, &done); - // We have a shifted exponent between 0 and 30 in scratch2. - __ mov(dest, Operand(scratch2, LSR, HeapNumber::kExponentShift)); - // We now have the exponent in dest. Subtract from 30 to get - // how much to shift down. - __ rsb(dest, dest, Operand(30)); - + if (!CpuFeatures::IsSupported(VFP3)) { + // We have a shifted exponent between 0 and 30 in scratch2. + __ mov(dest, Operand(scratch2, LSR, HeapNumber::kExponentShift)); + // We now have the exponent in dest. Subtract from 30 to get + // how much to shift down. + __ rsb(dest, dest, Operand(30)); + } __ bind(&right_exponent); - // Get the top bits of the mantissa. - __ and_(scratch2, scratch, Operand(HeapNumber::kMantissaMask)); - // Put back the implicit 1. - __ orr(scratch2, scratch2, Operand(1 << HeapNumber::kExponentShift)); - // Shift up the mantissa bits to take up the space the exponent used to take. 
- // We just orred in the implicit bit so that took care of one and we want to - // leave the sign bit 0 so we subtract 2 bits from the shift distance. - const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2; - __ mov(scratch2, Operand(scratch2, LSL, shift_distance)); - // Put sign in zero flag. - __ tst(scratch, Operand(HeapNumber::kSignMask)); - // Get the second half of the double. For some exponents we don't actually - // need this because the bits get shifted out again, but it's probably slower - // to test than just to do it. - __ ldr(scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset)); - // Shift down 22 bits to get the last 10 bits. - __ orr(scratch, scratch2, Operand(scratch, LSR, 32 - shift_distance)); - // Move down according to the exponent. - __ mov(dest, Operand(scratch, LSR, dest)); - // Fix sign if sign bit was set. - __ rsb(dest, dest, Operand(0), LeaveCC, ne); + if (CpuFeatures::IsSupported(VFP3)) { + CpuFeatures::Scope scope(VFP3); + // ARMv7 VFP3 instructions implementing double precision to integer + // conversion using round to zero. + __ ldr(scratch2, FieldMemOperand(source, HeapNumber::kMantissaOffset)); + __ fmdrr(d7, scratch2, scratch); + __ ftosid(s15, d7); + __ fmrs(dest, s15); + } else { + // Get the top bits of the mantissa. + __ and_(scratch2, scratch, Operand(HeapNumber::kMantissaMask)); + // Put back the implicit 1. + __ orr(scratch2, scratch2, Operand(1 << HeapNumber::kExponentShift)); + // Shift up the mantissa bits to take up the space the exponent used to + // take. We just orred in the implicit bit so that took care of one and + // we want to leave the sign bit 0 so we subtract 2 bits from the shift + // distance. + const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2; + __ mov(scratch2, Operand(scratch2, LSL, shift_distance)); + // Put sign in zero flag. + __ tst(scratch, Operand(HeapNumber::kSignMask)); + // Get the second half of the double. 
For some exponents we don't + // actually need this because the bits get shifted out again, but + // it's probably slower to test than just to do it. + __ ldr(scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset)); + // Shift down 22 bits to get the last 10 bits. + __ orr(scratch, scratch2, Operand(scratch, LSR, 32 - shift_distance)); + // Move down according to the exponent. + __ mov(dest, Operand(scratch, LSR, dest)); + // Fix sign if sign bit was set. + __ rsb(dest, dest, Operand(0), LeaveCC, ne); + } __ bind(&done); } - // For bitwise ops where the inputs are not both Smis we here try to determine // whether both inputs are either Smis or at least heap numbers that can be // represented by a 32 bit signed value. We truncate towards zero as required @@ -5180,7 +5347,7 @@ void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm) { __ b(eq, &r1_is_smi); // It's a Smi so don't check it's a heap number. __ CompareObjectType(r1, r4, r4, HEAP_NUMBER_TYPE); __ b(ne, &slow); - GetInt32(masm, r1, r3, r4, r5, &slow); + GetInt32(masm, r1, r3, r5, r4, &slow); __ jmp(&done_checking_r1); __ bind(&r1_is_smi); __ mov(r3, Operand(r1, ASR, 1)); @@ -5190,7 +5357,7 @@ void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm) { __ b(eq, &r0_is_smi); // It's a Smi so don't check it's a heap number. 
__ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE); __ b(ne, &slow); - GetInt32(masm, r0, r2, r4, r5, &slow); + GetInt32(masm, r0, r2, r5, r4, &slow); __ jmp(&done_checking_r0); __ bind(&r0_is_smi); __ mov(r2, Operand(r0, ASR, 1)); @@ -5795,7 +5962,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm, Label* throw_normal_exception, Label* throw_termination_exception, Label* throw_out_of_memory_exception, - StackFrame::Type frame_type, + ExitFrame::Mode mode, bool do_gc, bool always_allocate) { // r0: result parameter for PerformGC, if any @@ -5855,7 +6022,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm, // r0:r1: result // sp: stack pointer // fp: frame pointer - __ LeaveExitFrame(frame_type); + __ LeaveExitFrame(mode); // check if we should retry or throw exception Label retry; @@ -5901,12 +6068,12 @@ void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) { // this by performing a garbage collection and retrying the // builtin once. - StackFrame::Type frame_type = is_debug_break - ? StackFrame::EXIT_DEBUG - : StackFrame::EXIT; + ExitFrame::Mode mode = is_debug_break + ? ExitFrame::MODE_DEBUG + : ExitFrame::MODE_NORMAL; // Enter the exit frame that transitions from JavaScript to C++. 
- __ EnterExitFrame(frame_type); + __ EnterExitFrame(mode); // r4: number of arguments (C callee-saved) // r5: pointer to builtin function (C callee-saved) @@ -5921,7 +6088,7 @@ void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) { &throw_normal_exception, &throw_termination_exception, &throw_out_of_memory_exception, - frame_type, + mode, false, false); @@ -5930,7 +6097,7 @@ void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) { &throw_normal_exception, &throw_termination_exception, &throw_out_of_memory_exception, - frame_type, + mode, true, false); @@ -5941,7 +6108,7 @@ void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) { &throw_normal_exception, &throw_termination_exception, &throw_out_of_memory_exception, - frame_type, + mode, true, true); diff --git a/deps/v8/src/arm/codegen-arm.h b/deps/v8/src/arm/codegen-arm.h index e0799508e0..8cbf450f32 100644 --- a/deps/v8/src/arm/codegen-arm.h +++ b/deps/v8/src/arm/codegen-arm.h @@ -77,12 +77,12 @@ class Reference BASE_EMBEDDED { // Generate code to push the value of the reference on top of the // expression stack. The reference is expected to be already on top of // the expression stack, and it is left in place with its value above it. - void GetValue(TypeofState typeof_state); + void GetValue(); // Generate code to push the value of a reference on top of the expression // stack and then spill the stack frame. This function is used temporarily // while the code generator is being transformed. - inline void GetValueAndSpill(TypeofState typeof_state); + inline void GetValueAndSpill(); // Generate code to store the value on top of the expression stack in the // reference. The reference is expected to be immediately below the value @@ -112,10 +112,8 @@ class CodeGenState BASE_EMBEDDED { explicit CodeGenState(CodeGenerator* owner); // Create a code generator state based on a code generator's current - // state. 
The new state has its own typeof state and pair of branch - // labels. + // state. The new state has its own pair of branch labels. CodeGenState(CodeGenerator* owner, - TypeofState typeof_state, JumpTarget* true_target, JumpTarget* false_target); @@ -123,13 +121,11 @@ class CodeGenState BASE_EMBEDDED { // previous state. ~CodeGenState(); - TypeofState typeof_state() const { return typeof_state_; } JumpTarget* true_target() const { return true_target_; } JumpTarget* false_target() const { return false_target_; } private: CodeGenerator* owner_; - TypeofState typeof_state_; JumpTarget* true_target_; JumpTarget* false_target_; CodeGenState* previous_; @@ -169,8 +165,8 @@ class CodeGenerator: public AstVisitor { // Accessors MacroAssembler* masm() { return masm_; } - VirtualFrame* frame() const { return frame_; } + Handle<Script> script() { return script_; } bool has_valid_frame() const { return frame_ != NULL; } @@ -191,10 +187,6 @@ class CodeGenerator: public AstVisitor { static const int kUnknownIntValue = -1; - // Number of instructions used for the JS return sequence. The constant is - // used by the debugger to patch the JS return sequence. 
- static const int kJSReturnSequenceLength = 4; - private: // Construction/Destruction CodeGenerator(int buffer_size, Handle<Script> script, bool is_eval); @@ -210,7 +202,6 @@ class CodeGenerator: public AstVisitor { // State bool has_cc() const { return cc_reg_ != al; } - TypeofState typeof_state() const { return state_->typeof_state(); } JumpTarget* true_target() const { return state_->true_target(); } JumpTarget* false_target() const { return state_->false_target(); } @@ -259,25 +250,22 @@ class CodeGenerator: public AstVisitor { } void LoadCondition(Expression* x, - TypeofState typeof_state, JumpTarget* true_target, JumpTarget* false_target, bool force_cc); - void Load(Expression* x, TypeofState typeof_state = NOT_INSIDE_TYPEOF); + void Load(Expression* expr); void LoadGlobal(); void LoadGlobalReceiver(Register scratch); // Generate code to push the value of an expression on top of the frame // and then spill the frame fully to memory. This function is used // temporarily while the code generator is being transformed. - inline void LoadAndSpill(Expression* expression, - TypeofState typeof_state = NOT_INSIDE_TYPEOF); + inline void LoadAndSpill(Expression* expression); // Call LoadCondition and then spill the virtual frame unless control flow // cannot reach the end of the expression (ie, by emitting only // unconditional jumps to the control targets). 
inline void LoadConditionAndSpill(Expression* expression, - TypeofState typeof_state, JumpTarget* true_target, JumpTarget* false_target, bool force_control); @@ -331,7 +319,6 @@ class CodeGenerator: public AstVisitor { InlineRuntimeLUT* old_entry); static Handle<Code> ComputeLazyCompile(int argc); - Handle<JSFunction> BuildBoilerplate(FunctionLiteral* node); void ProcessDeclarations(ZoneList<Declaration*>* declarations); static Handle<Code> ComputeCallInitialize(int argc, InLoopFlag in_loop); @@ -391,6 +378,7 @@ class CodeGenerator: public AstVisitor { void CodeForFunctionPosition(FunctionLiteral* fun); void CodeForReturnPosition(FunctionLiteral* fun); void CodeForStatementPosition(Statement* node); + void CodeForDoWhileConditionPosition(DoWhileStatement* stmt); void CodeForSourcePosition(int pos); #ifdef DEBUG @@ -433,6 +421,27 @@ class CodeGenerator: public AstVisitor { }; +class CallFunctionStub: public CodeStub { + public: + CallFunctionStub(int argc, InLoopFlag in_loop) + : argc_(argc), in_loop_(in_loop) {} + + void Generate(MacroAssembler* masm); + + private: + int argc_; + InLoopFlag in_loop_; + +#if defined(DEBUG) + void Print() { PrintF("CallFunctionStub (argc %d)\n", argc_); } +#endif // defined(DEBUG) + + Major MajorKey() { return CallFunction; } + int MinorKey() { return argc_; } + InLoopFlag InLoop() { return in_loop_; } +}; + + class GenericBinaryOpStub : public CodeStub { public: GenericBinaryOpStub(Token::Value op, diff --git a/deps/v8/src/arm/constants-arm.cc b/deps/v8/src/arm/constants-arm.cc index 964bfe14f0..89ff7c0857 100644 --- a/deps/v8/src/arm/constants-arm.cc +++ b/deps/v8/src/arm/constants-arm.cc @@ -67,6 +67,26 @@ const char* Registers::Name(int reg) { } +// Support for VFP registers s0 to s31 (d0 to d15). +// Note that "sN:sM" is the same as "dN/2" +// These register names are defined in a way to match the native disassembler +// formatting. See for example the command "objdump -d <binary file>". 
+const char* VFPRegisters::names_[kNumVFPRegisters] = { + "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7", + "s8", "s9", "s10", "s11", "s12", "s13", "s14", "s15", + "s16", "s17", "s18", "s19", "s20", "s21", "s22", "s23", + "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31", + "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", + "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15" +}; + + +const char* VFPRegisters::Name(int reg) { + ASSERT((0 <= reg) && (reg < kNumVFPRegisters)); + return names_[reg]; +} + + int Registers::Number(const char* name) { // Look through the canonical names. for (int i = 0; i < kNumRegisters; i++) { diff --git a/deps/v8/src/arm/constants-arm.h b/deps/v8/src/arm/constants-arm.h index 6bd0d00804..58396f8777 100644 --- a/deps/v8/src/arm/constants-arm.h +++ b/deps/v8/src/arm/constants-arm.h @@ -75,6 +75,9 @@ namespace arm { // Number of registers in normal ARM mode. static const int kNumRegisters = 16; +// VFP support. +static const int kNumVFPRegisters = 48; + // PC is register 15. static const int kPCRegister = 15; static const int kNoRegister = -1; @@ -231,6 +234,16 @@ class Instr { inline int RnField() const { return Bits(19, 16); } inline int RdField() const { return Bits(15, 12); } + // Support for VFP. 
+ // Vn(19-16) | Vd(15-12) | Vm(3-0) + inline int VnField() const { return Bits(19, 16); } + inline int VmField() const { return Bits(3, 0); } + inline int VdField() const { return Bits(15, 12); } + inline int NField() const { return Bit(7); } + inline int MField() const { return Bit(5); } + inline int DField() const { return Bit(22); } + inline int RtField() const { return Bits(15, 12); } + // Fields used in Data processing instructions inline Opcode OpcodeField() const { return static_cast<Opcode>(Bits(24, 21)); @@ -307,7 +320,7 @@ class Registers { struct RegisterAlias { int reg; - const char *name; + const char* name; }; private: @@ -315,6 +328,15 @@ class Registers { static const RegisterAlias aliases_[]; }; +// Helper functions for converting between VFP register numbers and names. +class VFPRegisters { + public: + // Return the name of the register. + static const char* Name(int reg); + + private: + static const char* names_[kNumVFPRegisters]; +}; } } // namespace assembler::arm diff --git a/deps/v8/src/arm/cpu-arm.cc b/deps/v8/src/arm/cpu-arm.cc index cafefce45d..a5a358b3ac 100644 --- a/deps/v8/src/arm/cpu-arm.cc +++ b/deps/v8/src/arm/cpu-arm.cc @@ -33,12 +33,13 @@ #include "v8.h" #include "cpu.h" +#include "macro-assembler.h" namespace v8 { namespace internal { void CPU::Setup() { - // Nothing to do. + CpuFeatures::Probe(); } diff --git a/deps/v8/src/arm/debug-arm.cc b/deps/v8/src/arm/debug-arm.cc index ef33653950..102952da7b 100644 --- a/deps/v8/src/arm/debug-arm.cc +++ b/deps/v8/src/arm/debug-arm.cc @@ -61,7 +61,7 @@ void BreakLocationIterator::SetDebugBreakAtReturn() { // Restore the JS frame exit code. 
void BreakLocationIterator::ClearDebugBreakAtReturn() { rinfo()->PatchCode(original_rinfo()->pc(), - CodeGenerator::kJSReturnSequenceLength); + Debug::kARMJSReturnSequenceLength); } diff --git a/deps/v8/src/arm/disasm-arm.cc b/deps/v8/src/arm/disasm-arm.cc index 64314837d6..2f9e78f534 100644 --- a/deps/v8/src/arm/disasm-arm.cc +++ b/deps/v8/src/arm/disasm-arm.cc @@ -97,6 +97,10 @@ class Decoder { // Printing of common values. void PrintRegister(int reg); + void PrintSRegister(int reg); + void PrintDRegister(int reg); + int FormatVFPRegister(Instr* instr, const char* format); + int FormatVFPinstruction(Instr* instr, const char* format); void PrintCondition(Instr* instr); void PrintShiftRm(Instr* instr); void PrintShiftImm(Instr* instr); @@ -121,6 +125,10 @@ class Decoder { void DecodeType6(Instr* instr); void DecodeType7(Instr* instr); void DecodeUnconditional(Instr* instr); + // For VFP support. + void DecodeTypeVFP(Instr* instr); + void DecodeType6CoprocessorIns(Instr* instr); + const disasm::NameConverter& converter_; v8::internal::Vector<char> out_buffer_; @@ -171,6 +179,16 @@ void Decoder::PrintRegister(int reg) { Print(converter_.NameOfCPURegister(reg)); } +// Print the VFP S register name according to the active name converter. +void Decoder::PrintSRegister(int reg) { + Print(assembler::arm::VFPRegisters::Name(reg)); +} + +// Print the VFP D register name according to the active name converter. +void Decoder::PrintDRegister(int reg) { + Print(assembler::arm::VFPRegisters::Name(reg + 32)); +} + // These shift names are defined in a way to match the native disassembler // formatting. See for example the command "objdump -d <binary file>". 
@@ -290,6 +308,10 @@ int Decoder::FormatRegister(Instr* instr, const char* format) { int reg = instr->RmField(); PrintRegister(reg); return 2; + } else if (format[1] == 't') { // 'rt: Rt register + int reg = instr->RtField(); + PrintRegister(reg); + return 2; } else if (format[1] == 'l') { // 'rlist: register list for load and store multiple instructions ASSERT(STRING_STARTS_WITH(format, "rlist")); @@ -315,6 +337,39 @@ int Decoder::FormatRegister(Instr* instr, const char* format) { } +// Handle all VFP register based formatting in this function to reduce the +// complexity of FormatOption. +int Decoder::FormatVFPRegister(Instr* instr, const char* format) { + ASSERT((format[0] == 'S') || (format[0] == 'D')); + + if (format[1] == 'n') { + int reg = instr->VnField(); + if (format[0] == 'S') PrintSRegister(((reg << 1) | instr->NField())); + if (format[0] == 'D') PrintDRegister(reg); + return 2; + } else if (format[1] == 'm') { + int reg = instr->VmField(); + if (format[0] == 'S') PrintSRegister(((reg << 1) | instr->MField())); + if (format[0] == 'D') PrintDRegister(reg); + return 2; + } else if (format[1] == 'd') { + int reg = instr->VdField(); + if (format[0] == 'S') PrintSRegister(((reg << 1) | instr->DField())); + if (format[0] == 'D') PrintDRegister(reg); + return 2; + } + + UNREACHABLE(); + return -1; +} + + +int Decoder::FormatVFPinstruction(Instr* instr, const char* format) { + Print(format); + return 0; +} + + // FormatOption takes a formatting string and interprets it based on // the current instructions. 
The format string points to the first // character of the option string (the option escape has already been @@ -459,6 +514,13 @@ int Decoder::FormatOption(Instr* instr, const char* format) { } return 1; } + case 'v': { + return FormatVFPinstruction(instr, format); + } + case 'S': + case 'D': { + return FormatVFPRegister(instr, format); + } case 'w': { // 'w: W field of load and store instructions if (instr->HasW()) { Print("!"); @@ -761,8 +823,7 @@ void Decoder::DecodeType5(Instr* instr) { void Decoder::DecodeType6(Instr* instr) { - // Coprocessor instructions currently not supported. - Unknown(instr); + DecodeType6CoprocessorIns(instr); } @@ -770,12 +831,10 @@ void Decoder::DecodeType7(Instr* instr) { if (instr->Bit(24) == 1) { Format(instr, "swi'cond 'swi"); } else { - // Coprocessor instructions currently not supported. - Unknown(instr); + DecodeTypeVFP(instr); } } - void Decoder::DecodeUnconditional(Instr* instr) { if (instr->Bits(7, 4) == 0xB && instr->Bits(27, 25) == 0 && instr->HasL()) { Format(instr, "'memop'h'pu 'rd, "); @@ -837,6 +896,136 @@ void Decoder::DecodeUnconditional(Instr* instr) { } +// void Decoder::DecodeTypeVFP(Instr* instr) +// Implements the following VFP instructions: +// fmsr: Sn = Rt +// fmrs: Rt = Sn +// fsitod: Dd = Sm +// ftosid: Sd = Dm +// Dd = faddd(Dn, Dm) +// Dd = fsubd(Dn, Dm) +// Dd = fmuld(Dn, Dm) +// Dd = fdivd(Dn, Dm) +// vcmp(Dd, Dm) +// VMRS +void Decoder::DecodeTypeVFP(Instr* instr) { + ASSERT((instr->TypeField() == 7) && (instr->Bit(24) == 0x0) ); + + if (instr->Bit(23) == 1) { + if ((instr->Bits(21, 19) == 0x7) && + (instr->Bits(18, 16) == 0x5) && + (instr->Bits(11, 9) == 0x5) && + (instr->Bit(8) == 1) && + (instr->Bit(6) == 1) && + (instr->Bit(4) == 0)) { + Format(instr, "vcvt.s32.f64'cond 'Sd, 'Dm"); + } else if ((instr->Bits(21, 19) == 0x7) && + (instr->Bits(18, 16) == 0x0) && + (instr->Bits(11, 9) == 0x5) && + (instr->Bit(8) == 1) && + (instr->Bit(7) == 1) && + (instr->Bit(6) == 1) && + (instr->Bit(4) == 0)) { + 
Format(instr, "vcvt.f64.s32'cond 'Dd, 'Sm"); + } else if ((instr->Bit(21) == 0x0) && + (instr->Bit(20) == 0x0) && + (instr->Bits(11, 9) == 0x5) && + (instr->Bit(8) == 1) && + (instr->Bit(6) == 0) && + (instr->Bit(4) == 0)) { + Format(instr, "vdiv.f64'cond 'Dd, 'Dn, 'Dm"); + } else if ((instr->Bits(21, 20) == 0x3) && + (instr->Bits(19, 16) == 0x4) && + (instr->Bits(11, 9) == 0x5) && + (instr->Bit(8) == 0x1) && + (instr->Bit(6) == 0x1) && + (instr->Bit(4) == 0x0)) { + Format(instr, "vcmp.f64'cond 'Dd, 'Dm"); + } else if ((instr->Bits(23, 20) == 0xF) && + (instr->Bits(19, 16) == 0x1) && + (instr->Bits(11, 8) == 0xA) && + (instr->Bits(7, 5) == 0x0) && + (instr->Bit(4) == 0x1) && + (instr->Bits(3, 0) == 0x0)) { + if (instr->Bits(15, 12) == 0xF) + Format(instr, "vmrs'cond APSR, FPSCR"); + else + Unknown(instr); // Not used by V8. + } else { + Unknown(instr); // Not used by V8. + } + } else if (instr->Bit(21) == 1) { + if ((instr->Bit(20) == 0x1) && + (instr->Bits(11, 9) == 0x5) && + (instr->Bit(8) == 0x1) && + (instr->Bit(6) == 0) && + (instr->Bit(4) == 0)) { + Format(instr, "vadd.f64'cond 'Dd, 'Dn, 'Dm"); + } else if ((instr->Bit(20) == 0x1) && + (instr->Bits(11, 9) == 0x5) && + (instr->Bit(8) == 0x1) && + (instr->Bit(6) == 1) && + (instr->Bit(4) == 0)) { + Format(instr, "vsub.f64'cond 'Dd, 'Dn, 'Dm"); + } else if ((instr->Bit(20) == 0x0) && + (instr->Bits(11, 9) == 0x5) && + (instr->Bit(8) == 0x1) && + (instr->Bit(6) == 0) && + (instr->Bit(4) == 0)) { + Format(instr, "vmul.f64'cond 'Dd, 'Dn, 'Dm"); + } else { + Unknown(instr); // Not used by V8. 
+ } + } else { + if ((instr->Bit(20) == 0x0) && + (instr->Bits(11, 8) == 0xA) && + (instr->Bits(6, 5) == 0x0) && + (instr->Bit(4) == 1) && + (instr->Bits(3, 0) == 0x0)) { + Format(instr, "vmov'cond 'Sn, 'rt"); + } else if ((instr->Bit(20) == 0x1) && + (instr->Bits(11, 8) == 0xA) && + (instr->Bits(6, 5) == 0x0) && + (instr->Bit(4) == 1) && + (instr->Bits(3, 0) == 0x0)) { + Format(instr, "vmov'cond 'rt, 'Sn"); + } else { + Unknown(instr); // Not used by V8. + } + } +} + + +// Decode Type 6 coprocessor instructions. +// Dm = fmdrr(Rt, Rt2) +// <Rt, Rt2> = fmrrd(Dm) +void Decoder::DecodeType6CoprocessorIns(Instr* instr) { + ASSERT((instr->TypeField() == 6)); + + if (instr->Bit(23) == 1) { + Unknown(instr); // Not used by V8. + } else if (instr->Bit(22) == 1) { + if ((instr->Bits(27, 24) == 0xC) && + (instr->Bit(22) == 1) && + (instr->Bits(11, 8) == 0xB) && + (instr->Bits(7, 6) == 0x0) && + (instr->Bit(4) == 1)) { + if (instr->Bit(20) == 0) { + Format(instr, "vmov'cond 'Dm, 'rt, 'rn"); + } else if (instr->Bit(20) == 1) { + Format(instr, "vmov'cond 'rt, 'rn, 'Dm"); + } + } else { + Unknown(instr); // Not used by V8. + } + } else if (instr->Bit(21) == 1) { + Unknown(instr); // Not used by V8. + } else { + Unknown(instr); // Not used by V8. + } +} + + // Disassemble the instruction at *instr_ptr into the output buffer. int Decoder::InstructionDecode(byte* instr_ptr) { Instr* instr = Instr::At(instr_ptr); diff --git a/deps/v8/src/arm/fast-codegen-arm.cc b/deps/v8/src/arm/fast-codegen-arm.cc index 6540d40165..6d0510e324 100644 --- a/deps/v8/src/arm/fast-codegen-arm.cc +++ b/deps/v8/src/arm/fast-codegen-arm.cc @@ -28,6 +28,8 @@ #include "v8.h" #include "codegen-inl.h" +#include "compiler.h" +#include "debug.h" #include "fast-codegen.h" #include "parser.h" @@ -52,28 +54,77 @@ namespace internal { // frames-arm.h for its layout. void FastCodeGenerator::Generate(FunctionLiteral* fun) { function_ = fun; - // ARM does NOT call SetFunctionPosition. 
+ SetFunctionPosition(fun); + int locals_count = fun->scope()->num_stack_slots(); __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit()); + if (locals_count > 0) { + // Load undefined value here, so the value is ready for the loop below. + __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); + } // Adjust fp to point to caller's fp. __ add(fp, sp, Operand(2 * kPointerSize)); { Comment cmnt(masm_, "[ Allocate locals"); - int locals_count = fun->scope()->num_stack_slots(); - if (locals_count > 0) { - __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); - } - __ LoadRoot(r2, Heap::kStackLimitRootIndex); for (int i = 0; i < locals_count; i++) { __ push(ip); } } + bool function_in_register = true; + + Variable* arguments = fun->scope()->arguments()->AsVariable(); + if (arguments != NULL) { + // Function uses arguments object. + Comment cmnt(masm_, "[ Allocate arguments object"); + __ mov(r3, r1); + // Receiver is just before the parameters on the caller's stack. + __ add(r2, fp, Operand(StandardFrameConstants::kCallerSPOffset + + fun->num_parameters() * kPointerSize)); + __ mov(r1, Operand(Smi::FromInt(fun->num_parameters()))); + __ stm(db_w, sp, r1.bit() | r2.bit() | r3.bit()); + + // Arguments to ArgumentsAccessStub: + // function, receiver address, parameter count. + // The stub will rewrite receiever and parameter count if the previous + // stack frame was an arguments adapter frame. + ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT); + __ CallStub(&stub); + __ str(r0, MemOperand(fp, SlotOffset(arguments->slot()))); + Slot* dot_arguments_slot = + fun->scope()->arguments_shadow()->AsVariable()->slot(); + __ str(r0, MemOperand(fp, SlotOffset(dot_arguments_slot))); + function_in_register = false; + } + + // Possibly allocate a local context. + if (fun->scope()->num_heap_slots() > 0) { + Comment cmnt(masm_, "[ Allocate local context"); + if (!function_in_register) { + // Load this again, if it's used by the local context below. 
+ __ ldr(r1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); + } + // Argument to NewContext is the function, which is in r1. + __ push(r1); + __ CallRuntime(Runtime::kNewContext, 1); + // Context is returned in both r0 and cp. It replaces the context + // passed to us. It's saved in the stack and kept live in cp. + __ str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); +#ifdef DEBUG + // Assert we do not have to copy any parameters into the context. + for (int i = 0, len = fun->scope()->num_parameters(); i < len; i++) { + Slot* slot = fun->scope()->parameter(i)->slot(); + ASSERT(slot != NULL && slot->type() != Slot::CONTEXT); + } +#endif + } + // Check the stack for overflow or break request. // Put the lr setup instruction in the delay slot. The kInstrSize is // added to the implicit 8 byte offset that always applies to operations // with pc and gives a return address 12 bytes down. - Comment cmnt(masm_, "[ Stack check"); + { Comment cmnt(masm_, "[ Stack check"); + __ LoadRoot(r2, Heap::kStackLimitRootIndex); __ add(lr, pc, Operand(Assembler::kInstrSize)); __ cmp(sp, Operand(r2)); StackCheckStub stub; @@ -82,6 +133,7 @@ void FastCodeGenerator::Generate(FunctionLiteral* fun) { RelocInfo::CODE_TARGET), LeaveCC, lo); + } { Comment cmnt(masm_, "[ Declarations"); VisitDeclarations(fun->scope()->declarations()); @@ -92,14 +144,26 @@ void FastCodeGenerator::Generate(FunctionLiteral* fun) { } { Comment cmnt(masm_, "[ Body"); + ASSERT(loop_depth() == 0); VisitStatements(fun->body()); + ASSERT(loop_depth() == 0); } { Comment cmnt(masm_, "[ return <undefined>;"); // Emit a 'return undefined' in case control fell off the end of the // body. 
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex); - SetReturnPosition(fun); + } + EmitReturnSequence(function_->end_position()); +} + + +void FastCodeGenerator::EmitReturnSequence(int position) { + Comment cmnt(masm_, "[ Return sequence"); + if (return_label_.is_bound()) { + __ b(&return_label_); + } else { + __ bind(&return_label_); if (FLAG_trace) { // Push the return value on the stack as the parameter. // Runtime::TraceExit returns its parameter in r0. @@ -107,67 +171,238 @@ void FastCodeGenerator::Generate(FunctionLiteral* fun) { __ CallRuntime(Runtime::kTraceExit, 1); } + // Add a label for checking the size of the code used for returning. + Label check_exit_codesize; + masm_->bind(&check_exit_codesize); + + // Calculate the exact length of the return sequence and make sure that + // the constant pool is not emitted inside of the return sequence. + int num_parameters = function_->scope()->num_parameters(); + int32_t sp_delta = (num_parameters + 1) * kPointerSize; + int return_sequence_length = Debug::kARMJSReturnSequenceLength; + if (!masm_->ImmediateFitsAddrMode1Instruction(sp_delta)) { + // Additional mov instruction generated. + return_sequence_length++; + } + masm_->BlockConstPoolFor(return_sequence_length); + + CodeGenerator::RecordPositions(masm_, position); __ RecordJSReturn(); __ mov(sp, fp); __ ldm(ia_w, sp, fp.bit() | lr.bit()); - int num_parameters = function_->scope()->num_parameters(); - __ add(sp, sp, Operand((num_parameters + 1) * kPointerSize)); + __ add(sp, sp, Operand(sp_delta)); __ Jump(lr); + + // Check that the size of the code used for returning matches what is + // expected by the debugger. The add instruction above is an addressing + // mode 1 instruction where there are restrictions on which immediate values + // can be encoded in the instruction and which immediate values requires + // use of an additional instruction for moving the immediate to a temporary + // register. 
+ ASSERT_EQ(return_sequence_length, + masm_->InstructionsGeneratedSince(&check_exit_codesize)); } } -void FastCodeGenerator::Move(Location destination, Slot* source) { - switch (destination.type()) { - case Location::kUninitialized: +void FastCodeGenerator::Move(Expression::Context context, Register source) { + switch (context) { + case Expression::kUninitialized: UNREACHABLE(); - case Location::kEffect: + case Expression::kEffect: break; - case Location::kValue: - __ ldr(ip, MemOperand(fp, SlotOffset(source))); - __ push(ip); + case Expression::kValue: + __ push(source); + break; + case Expression::kTest: + TestAndBranch(source, true_label_, false_label_); break; + case Expression::kValueTest: { + Label discard; + __ push(source); + TestAndBranch(source, true_label_, &discard); + __ bind(&discard); + __ pop(); + __ jmp(false_label_); + break; + } + case Expression::kTestValue: { + Label discard; + __ push(source); + TestAndBranch(source, &discard, false_label_); + __ bind(&discard); + __ pop(); + __ jmp(true_label_); + } } } -void FastCodeGenerator::Move(Location destination, Literal* expr) { - switch (destination.type()) { - case Location::kUninitialized: +void FastCodeGenerator::Move(Expression::Context context, Slot* source) { + switch (context) { + case Expression::kUninitialized: UNREACHABLE(); - case Location::kEffect: + case Expression::kEffect: break; - case Location::kValue: - __ mov(ip, Operand(expr->handle())); - __ push(ip); + case Expression::kValue: // Fall through. + case Expression::kTest: // Fall through. + case Expression::kValueTest: // Fall through. + case Expression::kTestValue: + __ ldr(ip, MemOperand(fp, SlotOffset(source))); + Move(context, ip); break; } } -void FastCodeGenerator::Move(Slot* destination, Location source) { - switch (source.type()) { - case Location::kUninitialized: // Fall through. 
- case Location::kEffect: +void FastCodeGenerator::Move(Expression::Context context, Literal* expr) { + switch (context) { + case Expression::kUninitialized: UNREACHABLE(); - case Location::kValue: - __ pop(ip); - __ str(ip, MemOperand(fp, SlotOffset(destination))); + case Expression::kEffect: + break; + case Expression::kValue: // Fall through. + case Expression::kTest: // Fall through. + case Expression::kValueTest: // Fall through. + case Expression::kTestValue: + __ mov(ip, Operand(expr->handle())); + Move(context, ip); break; } } -void FastCodeGenerator::DropAndMove(Location destination, Register source) { - switch (destination.type()) { - case Location::kUninitialized: +void FastCodeGenerator::DropAndMove(Expression::Context context, + Register source) { + switch (context) { + case Expression::kUninitialized: UNREACHABLE(); - case Location::kEffect: + case Expression::kEffect: + __ pop(); + break; + case Expression::kValue: + __ str(source, MemOperand(sp)); + break; + case Expression::kTest: + ASSERT(!source.is(sp)); + __ pop(); + TestAndBranch(source, true_label_, false_label_); + break; + case Expression::kValueTest: { + Label discard; + __ str(source, MemOperand(sp)); + TestAndBranch(source, true_label_, &discard); + __ bind(&discard); __ pop(); + __ jmp(false_label_); break; - case Location::kValue: + } + case Expression::kTestValue: { + Label discard; __ str(source, MemOperand(sp)); + TestAndBranch(source, &discard, false_label_); + __ bind(&discard); + __ pop(); + __ jmp(true_label_); + break; + } + } +} + + +void FastCodeGenerator::TestAndBranch(Register source, + Label* true_label, + Label* false_label) { + ASSERT_NE(NULL, true_label); + ASSERT_NE(NULL, false_label); + // Call the runtime to find the boolean value of the source and then + // translate it into control flow to the pair of labels. 
+ __ push(source); + __ CallRuntime(Runtime::kToBool, 1); + __ LoadRoot(ip, Heap::kTrueValueRootIndex); + __ cmp(r0, ip); + __ b(eq, true_label); + __ jmp(false_label); +} + + +void FastCodeGenerator::VisitDeclaration(Declaration* decl) { + Comment cmnt(masm_, "[ Declaration"); + Variable* var = decl->proxy()->var(); + ASSERT(var != NULL); // Must have been resolved. + Slot* slot = var->slot(); + ASSERT(slot != NULL); // No global declarations here. + + // We have 3 cases for slots: LOOKUP, LOCAL, CONTEXT. + switch (slot->type()) { + case Slot::LOOKUP: { + __ mov(r2, Operand(var->name())); + // Declaration nodes are always introduced in one of two modes. + ASSERT(decl->mode() == Variable::VAR || decl->mode() == Variable::CONST); + PropertyAttributes attr = decl->mode() == Variable::VAR ? + NONE : READ_ONLY; + __ mov(r1, Operand(Smi::FromInt(attr))); + // Push initial value, if any. + // Note: For variables we must not push an initial value (such as + // 'undefined') because we may have a (legal) redeclaration and we + // must not destroy the current value. + if (decl->mode() == Variable::CONST) { + __ mov(r0, Operand(Factory::the_hole_value())); + __ stm(db_w, sp, cp.bit() | r2.bit() | r1.bit() | r0.bit()); + } else if (decl->fun() != NULL) { + __ stm(db_w, sp, cp.bit() | r2.bit() | r1.bit()); + Visit(decl->fun()); // Initial value for function decl. + } else { + __ mov(r0, Operand(Smi::FromInt(0))); // No initial value! + __ stm(db_w, sp, cp.bit() | r2.bit() | r1.bit() | r0.bit()); + } + __ CallRuntime(Runtime::kDeclareContextSlot, 4); break; + } + case Slot::LOCAL: + if (decl->mode() == Variable::CONST) { + __ mov(r0, Operand(Factory::the_hole_value())); + __ str(r0, MemOperand(fp, SlotOffset(var->slot()))); + } else if (decl->fun() != NULL) { + Visit(decl->fun()); + __ pop(r0); + __ str(r0, MemOperand(fp, SlotOffset(var->slot()))); + } + break; + case Slot::CONTEXT: + // The variable in the decl always resides in the current context. 
+ ASSERT(function_->scope()->ContextChainLength(slot->var()->scope()) == 0); + if (decl->mode() == Variable::CONST) { + __ mov(r0, Operand(Factory::the_hole_value())); + if (FLAG_debug_code) { + // Check if we have the correct context pointer. + __ ldr(r1, CodeGenerator::ContextOperand( + cp, Context::FCONTEXT_INDEX)); + __ cmp(r1, cp); + __ Check(eq, "Unexpected declaration in current context."); + } + __ str(r0, CodeGenerator::ContextOperand(cp, slot->index())); + // No write barrier since the_hole_value is in old space. + ASSERT(Heap::InNewSpace(*Factory::the_hole_value())); + } else if (decl->fun() != NULL) { + Visit(decl->fun()); + __ pop(r0); + if (FLAG_debug_code) { + // Check if we have the correct context pointer. + __ ldr(r1, CodeGenerator::ContextOperand( + cp, Context::FCONTEXT_INDEX)); + __ cmp(r1, cp); + __ Check(eq, "Unexpected declaration in current context."); + } + __ str(r0, CodeGenerator::ContextOperand(cp, slot->index())); + int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize; + __ mov(r2, Operand(offset)); + // We know that we have written a function, which is not a smi. + __ RecordWrite(cp, r2, r0); + } + break; + default: + UNREACHABLE(); } } @@ -185,27 +420,16 @@ void FastCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) { void FastCodeGenerator::VisitReturnStatement(ReturnStatement* stmt) { Comment cmnt(masm_, "[ ReturnStatement"); - SetStatementPosition(stmt); Expression* expr = stmt->expression(); // Complete the statement based on the type of the subexpression. 
if (expr->AsLiteral() != NULL) { __ mov(r0, Operand(expr->AsLiteral()->handle())); } else { + ASSERT_EQ(Expression::kValue, expr->context()); Visit(expr); - Move(r0, expr->location()); + __ pop(r0); } - - if (FLAG_trace) { - __ push(r0); - __ CallRuntime(Runtime::kTraceExit, 1); - } - - __ RecordJSReturn(); - __ mov(sp, fp); - __ ldm(ia_w, sp, fp.bit() | lr.bit()); - int num_parameters = function_->scope()->num_parameters(); - __ add(sp, sp, Operand((num_parameters + 1) * kPointerSize)); - __ Jump(lr); + EmitReturnSequence(stmt->statement_pos()); } @@ -213,7 +437,8 @@ void FastCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) { Comment cmnt(masm_, "[ FunctionLiteral"); // Build the function boilerplate and instantiate it. - Handle<JSFunction> boilerplate = BuildBoilerplate(expr); + Handle<JSFunction> boilerplate = + Compiler::BuildBoilerplate(expr, script_, this); if (HasStackOverflow()) return; ASSERT(boilerplate->IsBoilerplate()); @@ -222,7 +447,7 @@ void FastCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) { __ mov(r0, Operand(boilerplate)); __ stm(db_w, sp, cp.bit() | r0.bit()); __ CallRuntime(Runtime::kNewClosure, 2); - Move(expr->location(), r0); + Move(expr->context(), r0); } @@ -230,6 +455,7 @@ void FastCodeGenerator::VisitVariableProxy(VariableProxy* expr) { Comment cmnt(masm_, "[ VariableProxy"); Expression* rewrite = expr->var()->rewrite(); if (rewrite == NULL) { + ASSERT(expr->var()->is_global()); Comment cmnt(masm_, "Global variable"); // Use inline caching. Variable name is passed in r2 and the global // object on the stack. 
@@ -238,16 +464,62 @@ void FastCodeGenerator::VisitVariableProxy(VariableProxy* expr) { __ mov(r2, Operand(expr->name())); Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize)); __ Call(ic, RelocInfo::CODE_TARGET_CONTEXT); - DropAndMove(expr->location(), r0); + DropAndMove(expr->context(), r0); + } else if (rewrite->AsSlot() != NULL) { + Slot* slot = rewrite->AsSlot(); + ASSERT_NE(NULL, slot); + switch (slot->type()) { + case Slot::LOCAL: + case Slot::PARAMETER: { + Comment cmnt(masm_, "Stack slot"); + Move(expr->context(), rewrite->AsSlot()); + break; + } + + case Slot::CONTEXT: { + Comment cmnt(masm_, "Context slot"); + int chain_length = + function_->scope()->ContextChainLength(slot->var()->scope()); + if (chain_length > 0) { + // Move up the chain of contexts to the context containing the slot. + __ ldr(r0, CodeGenerator::ContextOperand(cp, Context::CLOSURE_INDEX)); + // Load the function context (which is the incoming, outer context). + __ ldr(r0, FieldMemOperand(r0, JSFunction::kContextOffset)); + for (int i = 1; i < chain_length; i++) { + __ ldr(r0, + CodeGenerator::ContextOperand(r0, Context::CLOSURE_INDEX)); + // Load the function context (which is the incoming, outer context). + __ ldr(r0, FieldMemOperand(r0, JSFunction::kContextOffset)); + } + // The context may be an intermediate context, not a function context. + __ ldr(r0, + CodeGenerator::ContextOperand(r0, Context::FCONTEXT_INDEX)); + } else { // Slot is in the current context. + __ ldr(r0, + CodeGenerator::ContextOperand(cp, Context::FCONTEXT_INDEX)); + } + __ ldr(r0, CodeGenerator::ContextOperand(r0, slot->index())); + Move(expr->context(), r0); + break; + } + + case Slot::LOOKUP: + UNREACHABLE(); + break; + } } else { - Comment cmnt(masm_, "Stack slot"); - Move(expr->location(), rewrite->AsSlot()); + // The parameter variable has been rewritten into an explict access to + // the arguments object. 
+ Property* property = rewrite->AsProperty(); + ASSERT_NOT_NULL(property); + ASSERT_EQ(expr->context(), property->context()); + Visit(property); } } void FastCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) { - Comment cmnt(masm_, "[ RegExp Literal"); + Comment cmnt(masm_, "[ RegExpLiteral"); Label done; // Registers will be used as follows: // r4 = JS function, literals array @@ -269,7 +541,7 @@ void FastCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) { __ stm(db_w, sp, r4.bit() | r3.bit() | r2.bit() | r1.bit()); __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4); __ bind(&done); - Move(expr->location(), r0); + Move(expr->context(), r0); } @@ -304,7 +576,7 @@ void FastCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) { } // If result_saved == true: the result is saved on top of the stack. - // If result_saved == false: the result is in eax. + // If result_saved == false: the result is in r0. bool result_saved = false; for (int i = 0; i < expr->properties()->length(); i++) { @@ -326,7 +598,8 @@ void FastCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) { case ObjectLiteral::Property::COMPUTED: if (key->handle()->IsSymbol()) { Visit(value); - Move(r0, value->location()); + ASSERT_EQ(Expression::kValue, value->context()); + __ pop(r0); __ mov(r2, Operand(key->handle())); Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize)); __ Call(ic, RelocInfo::CODE_TARGET); @@ -338,9 +611,9 @@ void FastCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) { case ObjectLiteral::Property::PROTOTYPE: __ push(r0); Visit(key); - ASSERT(key->location().is_value()); + ASSERT_EQ(Expression::kValue, key->context()); Visit(value); - ASSERT(value->location().is_value()); + ASSERT_EQ(Expression::kValue, value->context()); __ CallRuntime(Runtime::kSetProperty, 3); __ ldr(r0, MemOperand(sp)); // Restore result into r0 break; @@ -349,27 +622,49 @@ void FastCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) { case ObjectLiteral::Property::SETTER: __ 
push(r0); Visit(key); - ASSERT(key->location().is_value()); + ASSERT_EQ(Expression::kValue, key->context()); __ mov(r1, Operand(property->kind() == ObjectLiteral::Property::SETTER ? Smi::FromInt(1) : Smi::FromInt(0))); __ push(r1); Visit(value); - ASSERT(value->location().is_value()); + ASSERT_EQ(Expression::kValue, value->context()); __ CallRuntime(Runtime::kDefineAccessor, 4); __ ldr(r0, MemOperand(sp)); // Restore result into r0 break; } } - switch (expr->location().type()) { - case Location::kUninitialized: + switch (expr->context()) { + case Expression::kUninitialized: UNREACHABLE(); - case Location::kEffect: + case Expression::kEffect: if (result_saved) __ pop(); break; - case Location::kValue: + case Expression::kValue: + if (!result_saved) __ push(r0); + break; + case Expression::kTest: + if (result_saved) __ pop(r0); + TestAndBranch(r0, true_label_, false_label_); + break; + case Expression::kValueTest: { + Label discard; if (!result_saved) __ push(r0); + TestAndBranch(r0, true_label_, &discard); + __ bind(&discard); + __ pop(); + __ jmp(false_label_); break; + } + case Expression::kTestValue: { + Label discard; + if (!result_saved) __ push(r0); + TestAndBranch(r0, &discard, false_label_); + __ bind(&discard); + __ pop(); + __ jmp(true_label_); + break; + } } } @@ -423,7 +718,7 @@ void FastCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) { result_saved = true; } Visit(subexpr); - ASSERT(subexpr->location().is_value()); + ASSERT_EQ(Expression::kValue, subexpr->context()); // Store the subexpression value in the array's elements. __ pop(r0); // Subexpression value. 
@@ -438,84 +733,227 @@ void FastCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) { __ RecordWrite(r1, r2, r0); } - switch (expr->location().type()) { - case Location::kUninitialized: + switch (expr->context()) { + case Expression::kUninitialized: UNREACHABLE(); - case Location::kEffect: + case Expression::kEffect: if (result_saved) __ pop(); break; - case Location::kValue: + case Expression::kValue: + if (!result_saved) __ push(r0); + break; + case Expression::kTest: + if (result_saved) __ pop(r0); + TestAndBranch(r0, true_label_, false_label_); + break; + case Expression::kValueTest: { + Label discard; + if (!result_saved) __ push(r0); + TestAndBranch(r0, true_label_, &discard); + __ bind(&discard); + __ pop(); + __ jmp(false_label_); + break; + } + case Expression::kTestValue: { + Label discard; if (!result_saved) __ push(r0); + TestAndBranch(r0, &discard, false_label_); + __ bind(&discard); + __ pop(); + __ jmp(true_label_); break; + } } } -void FastCodeGenerator::VisitAssignment(Assignment* expr) { - Comment cmnt(masm_, "[ Assignment"); - ASSERT(expr->op() == Token::ASSIGN || expr->op() == Token::INIT_VAR); - - // Left-hand side can only be a global or a (parameter or local) slot. +void FastCodeGenerator::EmitVariableAssignment(Assignment* expr) { Variable* var = expr->target()->AsVariableProxy()->AsVariable(); ASSERT(var != NULL); - ASSERT(var->is_global() || var->slot() != NULL); - Expression* rhs = expr->value(); - Location destination = expr->location(); if (var->is_global()) { - // Assignment to a global variable, use inline caching. Right-hand-side - // value is passed in r0, variable name in r2, and the global object on - // the stack. - - // Code for the right-hand-side expression depends on its type. - if (rhs->AsLiteral() != NULL) { - __ mov(r0, Operand(rhs->AsLiteral()->handle())); - } else { - ASSERT(rhs->location().is_value()); - Visit(rhs); - __ pop(r0); - } + // Assignment to a global variable. Use inline caching for the + // assignment. 
Right-hand-side value is passed in r0, variable name in + // r2, and the global object on the stack. + __ pop(r0); __ mov(r2, Operand(var->name())); __ ldr(ip, CodeGenerator::GlobalObject()); __ push(ip); Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize)); __ Call(ic, RelocInfo::CODE_TARGET); // Overwrite the global object on the stack with the result if needed. - DropAndMove(expr->location(), r0); + DropAndMove(expr->context(), r0); + } else { - // Local or parameter assignment. - - // Code for the right-hand side expression depends on its type. - if (rhs->AsLiteral() != NULL) { - // Two cases: 'temp <- (var = constant)', or 'var = constant' with a - // discarded result. Always perform the assignment. - __ mov(ip, Operand(rhs->AsLiteral()->handle())); - __ str(ip, MemOperand(fp, SlotOffset(var->slot()))); - Move(expr->location(), ip); - } else { - ASSERT(rhs->location().is_value()); - Visit(rhs); - // Load right-hand side into ip. - switch (expr->location().type()) { - case Location::kUninitialized: - UNREACHABLE(); - case Location::kEffect: - // Case 'var = temp'. Discard right-hand-side temporary. - __ pop(ip); - break; - case Location::kValue: - // Case 'temp1 <- (var = temp0)'. Preserve right-hand-side - // temporary on the stack. - __ ldr(ip, MemOperand(sp)); - break; + Slot* slot = var->slot(); + ASSERT_NOT_NULL(slot); // Variables rewritten as properties not handled. + switch (slot->type()) { + case Slot::LOCAL: + case Slot::PARAMETER: { + switch (expr->context()) { + case Expression::kUninitialized: + UNREACHABLE(); + case Expression::kEffect: + // Perform assignment and discard value. + __ pop(r0); + __ str(r0, MemOperand(fp, SlotOffset(var->slot()))); + break; + case Expression::kValue: + // Perform assignment and preserve value. + __ ldr(r0, MemOperand(sp)); + __ str(r0, MemOperand(fp, SlotOffset(var->slot()))); + break; + case Expression::kTest: + // Perform assignment and test (and discard) value. 
+ __ pop(r0); + __ str(r0, MemOperand(fp, SlotOffset(var->slot()))); + TestAndBranch(r0, true_label_, false_label_); + break; + case Expression::kValueTest: { + Label discard; + __ ldr(r0, MemOperand(sp)); + __ str(r0, MemOperand(fp, SlotOffset(var->slot()))); + TestAndBranch(r0, true_label_, &discard); + __ bind(&discard); + __ pop(); + __ jmp(false_label_); + break; + } + case Expression::kTestValue: { + Label discard; + __ ldr(r0, MemOperand(sp)); + __ str(r0, MemOperand(fp, SlotOffset(var->slot()))); + TestAndBranch(r0, &discard, false_label_); + __ bind(&discard); + __ pop(); + __ jmp(true_label_); + break; + } + } + break; } - // Do the slot assignment. - __ str(ip, MemOperand(fp, SlotOffset(var->slot()))); + + case Slot::CONTEXT: { + int chain_length = + function_->scope()->ContextChainLength(slot->var()->scope()); + if (chain_length > 0) { + // Move up the chain of contexts to the context containing the slot. + __ ldr(r0, CodeGenerator::ContextOperand(cp, Context::CLOSURE_INDEX)); + // Load the function context (which is the incoming, outer context). + __ ldr(r0, FieldMemOperand(r0, JSFunction::kContextOffset)); + for (int i = 1; i < chain_length; i++) { + __ ldr(r0, + CodeGenerator::ContextOperand(r0, Context::CLOSURE_INDEX)); + __ ldr(r0, FieldMemOperand(r0, JSFunction::kContextOffset)); + } + } else { // Slot is in the current context. Generate optimized code. + __ mov(r0, cp); + } + // The context may be an intermediate context, not a function context. + __ ldr(r0, CodeGenerator::ContextOperand(r0, Context::FCONTEXT_INDEX)); + __ pop(r1); + __ str(r1, CodeGenerator::ContextOperand(r0, slot->index())); + + // RecordWrite may destroy all its register arguments. + if (expr->context() == Expression::kValue) { + __ push(r1); + } else if (expr->context() != Expression::kEffect) { + __ mov(r3, r1); + } + int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize; + + // Update the write barrier for the array store with r0 as the scratch + // register. 
Skip the write barrier if the value written (r1) is a smi. + // The smi test is part of RecordWrite on other platforms, not on arm. + Label exit; + __ tst(r1, Operand(kSmiTagMask)); + __ b(eq, &exit); + + __ mov(r2, Operand(offset)); + __ RecordWrite(r0, r2, r1); + __ bind(&exit); + if (expr->context() != Expression::kEffect && + expr->context() != Expression::kValue) { + Move(expr->context(), r3); + } + break; + } + + case Slot::LOOKUP: + UNREACHABLE(); + break; } } } +void FastCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) { + // Assignment to a property, using a named store IC. + Property* prop = expr->target()->AsProperty(); + ASSERT(prop != NULL); + ASSERT(prop->key()->AsLiteral() != NULL); + + // If the assignment starts a block of assignments to the same object, + // change to slow case to avoid the quadratic behavior of repeatedly + // adding fast properties. + if (expr->starts_initialization_block()) { + __ ldr(ip, MemOperand(sp, kPointerSize)); // Receiver is under value. + __ push(ip); + __ CallRuntime(Runtime::kToSlowProperties, 1); + } + + __ pop(r0); + __ mov(r2, Operand(prop->key()->AsLiteral()->handle())); + Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize)); + __ Call(ic, RelocInfo::CODE_TARGET); + + // If the assignment ends an initialization block, revert to fast case. + if (expr->ends_initialization_block()) { + __ push(r0); // Result of assignment, saved even if not needed. + __ ldr(ip, MemOperand(sp, kPointerSize)); // Receiver is under value. + __ push(ip); + __ CallRuntime(Runtime::kToFastProperties, 1); + __ pop(r0); + } + + DropAndMove(expr->context(), r0); +} + + +void FastCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) { + // Assignment to a property, using a keyed store IC. + + // If the assignment starts a block of assignments to the same object, + // change to slow case to avoid the quadratic behavior of repeatedly + // adding fast properties. 
+ if (expr->starts_initialization_block()) { + // Reciever is under the key and value. + __ ldr(ip, MemOperand(sp, 2 * kPointerSize)); + __ push(ip); + __ CallRuntime(Runtime::kToSlowProperties, 1); + } + + __ pop(r0); + Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize)); + __ Call(ic, RelocInfo::CODE_TARGET); + + // If the assignment ends an initialization block, revert to fast case. + if (expr->ends_initialization_block()) { + __ push(r0); // Result of assignment, saved even if not needed. + // Reciever is under the key and value. + __ ldr(ip, MemOperand(sp, 2 * kPointerSize)); + __ push(ip); + __ CallRuntime(Runtime::kToFastProperties, 1); + __ pop(r0); + } + + // Receiver and key are still on stack. + __ add(sp, sp, Operand(2 * kPointerSize)); + Move(expr->context(), r0); +} + + void FastCodeGenerator::VisitProperty(Property* expr) { Comment cmnt(masm_, "[ Property"); Expression* key = expr->key(); @@ -530,90 +968,155 @@ void FastCodeGenerator::VisitProperty(Property* expr) { if (key->AsLiteral() != NULL && key->AsLiteral()->handle()->IsSymbol() && !String::cast(*(key->AsLiteral()->handle()))->AsArrayIndex(&dummy)) { // Do a NAMED property load. - // The IC expects the property name in ecx and the receiver on the stack. + // The IC expects the property name in r2 and the receiver on the stack. __ mov(r2, Operand(key->AsLiteral()->handle())); Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize)); __ Call(ic, RelocInfo::CODE_TARGET); - // By emitting a nop we make sure that we do not have a "test eax,..." - // instruction after the call it is treated specially by the LoadIC code. - __ nop(); } else { // Do a KEYED property load. Visit(expr->key()); Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize)); __ Call(ic, RelocInfo::CODE_TARGET); - // By emitting a nop we make sure that we do not have a "test eax,..." - // instruction after the call it is treated specially by the LoadIC code. 
- __ nop(); // Drop key and receiver left on the stack by IC. __ pop(); } - switch (expr->location().type()) { - case Location::kUninitialized: - UNREACHABLE(); - case Location::kValue: - __ str(r0, MemOperand(sp)); - break; - case Location::kEffect: - __ pop(); - } + DropAndMove(expr->context(), r0); } - -void FastCodeGenerator::VisitCall(Call* expr) { - Comment cmnt(masm_, "[ Call"); - Expression* fun = expr->expression(); +void FastCodeGenerator::EmitCallWithIC(Call* expr, RelocInfo::Mode reloc_info) { + // Code common for calls using the IC. ZoneList<Expression*>* args = expr->arguments(); - Variable* var = fun->AsVariableProxy()->AsVariable(); - ASSERT(var != NULL && !var->is_this() && var->is_global()); - ASSERT(!var->is_possibly_eval()); - - __ mov(r1, Operand(var->name())); - // Push global object as receiver. - __ ldr(r0, CodeGenerator::GlobalObject()); - __ stm(db_w, sp, r1.bit() | r0.bit()); int arg_count = args->length(); for (int i = 0; i < arg_count; i++) { Visit(args->at(i)); - ASSERT(args->at(i)->location().is_value()); + ASSERT_EQ(Expression::kValue, args->at(i)->context()); } - // Record source position for debugger + // Record source position for debugger. SetSourcePosition(expr->position()); // Call the IC initialization code. Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count, NOT_IN_LOOP); - __ Call(ic, RelocInfo::CODE_TARGET_CONTEXT); + __ Call(ic, reloc_info); // Restore context register. __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); - DropAndMove(expr->location(), r0); + // Discard the function left on TOS. + DropAndMove(expr->context(), r0); +} + + +void FastCodeGenerator::EmitCallWithStub(Call* expr) { + // Code common for calls using the call stub. + ZoneList<Expression*>* args = expr->arguments(); + int arg_count = args->length(); + for (int i = 0; i < arg_count; i++) { + Visit(args->at(i)); + } + // Record source position for debugger. 
+ SetSourcePosition(expr->position()); + CallFunctionStub stub(arg_count, NOT_IN_LOOP); + __ CallStub(&stub); + // Restore context register. + __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); + // Discard the function left on TOS. + DropAndMove(expr->context(), r0); +} + + +void FastCodeGenerator::VisitCall(Call* expr) { + Comment cmnt(masm_, "[ Call"); + Expression* fun = expr->expression(); + Variable* var = fun->AsVariableProxy()->AsVariable(); + + if (var != NULL && var->is_possibly_eval()) { + // Call to the identifier 'eval'. + UNREACHABLE(); + } else if (var != NULL && !var->is_this() && var->is_global()) { + // Call to a global variable. + __ mov(r1, Operand(var->name())); + // Push global object as receiver for the call IC lookup. + __ ldr(r0, CodeGenerator::GlobalObject()); + __ stm(db_w, sp, r1.bit() | r0.bit()); + EmitCallWithIC(expr, RelocInfo::CODE_TARGET_CONTEXT); + } else if (var != NULL && var->slot() != NULL && + var->slot()->type() == Slot::LOOKUP) { + // Call to a lookup slot. + UNREACHABLE(); + } else if (fun->AsProperty() != NULL) { + // Call to an object property. + Property* prop = fun->AsProperty(); + Literal* key = prop->key()->AsLiteral(); + if (key != NULL && key->handle()->IsSymbol()) { + // Call to a named property, use call IC. + __ mov(r0, Operand(key->handle())); + __ push(r0); + Visit(prop->obj()); + EmitCallWithIC(expr, RelocInfo::CODE_TARGET); + } else { + // Call to a keyed property, use keyed load IC followed by function + // call. + Visit(prop->obj()); + Visit(prop->key()); + // Record source code position for IC call. + SetSourcePosition(prop->position()); + Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize)); + __ Call(ic, RelocInfo::CODE_TARGET); + // Load receiver object into r1. + if (prop->is_synthetic()) { + __ ldr(r1, CodeGenerator::GlobalObject()); + } else { + __ ldr(r1, MemOperand(sp, kPointerSize)); + } + // Overwrite (object, key) with (function, receiver). 
+ __ str(r0, MemOperand(sp, kPointerSize)); + __ str(r1, MemOperand(sp)); + EmitCallWithStub(expr); + } + } else { + // Call to some other expression. If the expression is an anonymous + // function literal not called in a loop, mark it as one that should + // also use the fast code generator. + FunctionLiteral* lit = fun->AsFunctionLiteral(); + if (lit != NULL && + lit->name()->Equals(Heap::empty_string()) && + loop_depth() == 0) { + lit->set_try_fast_codegen(true); + } + Visit(fun); + // Load global receiver object. + __ ldr(r1, CodeGenerator::GlobalObject()); + __ ldr(r1, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset)); + __ push(r1); + // Emit function call. + EmitCallWithStub(expr); + } } -void FastCodeGenerator::VisitCallNew(CallNew* node) { +void FastCodeGenerator::VisitCallNew(CallNew* expr) { Comment cmnt(masm_, "[ CallNew"); // According to ECMA-262, section 11.2.2, page 44, the function // expression in new calls must be evaluated before the // arguments. // Push function on the stack. - Visit(node->expression()); - ASSERT(node->expression()->location().is_value()); + Visit(expr->expression()); + ASSERT_EQ(Expression::kValue, expr->expression()->context()); // Push global object (receiver). __ ldr(r0, CodeGenerator::GlobalObject()); __ push(r0); // Push the arguments ("left-to-right") on the stack. - ZoneList<Expression*>* args = node->arguments(); + ZoneList<Expression*>* args = expr->arguments(); int arg_count = args->length(); for (int i = 0; i < arg_count; i++) { Visit(args->at(i)); - ASSERT(args->at(i)->location().is_value()); + ASSERT_EQ(Expression::kValue, args->at(i)->context()); // If location is value, it is already on the stack, // so nothing to do here. } // Call the construct call builtin that handles allocation and // constructor invocation. - SetSourcePosition(node->position()); + SetSourcePosition(expr->position()); // Load function, arg_count into r1 and r0. 
__ mov(r0, Operand(arg_count)); @@ -624,7 +1127,7 @@ void FastCodeGenerator::VisitCallNew(CallNew* node) { __ Call(construct_builtin, RelocInfo::CONSTRUCT_CALL); // Replace function on TOS with result in r0, or pop it. - DropAndMove(node->location(), r0); + DropAndMove(expr->context(), r0); } @@ -639,19 +1142,229 @@ void FastCodeGenerator::VisitCallRuntime(CallRuntime* expr) { int arg_count = args->length(); for (int i = 0; i < arg_count; i++) { Visit(args->at(i)); - ASSERT(args->at(i)->location().is_value()); + ASSERT_EQ(Expression::kValue, args->at(i)->context()); } __ CallRuntime(function, arg_count); - Move(expr->location(), r0); + Move(expr->context(), r0); +} + + +void FastCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) { + switch (expr->op()) { + case Token::VOID: { + Comment cmnt(masm_, "[ UnaryOperation (VOID)"); + Visit(expr->expression()); + ASSERT_EQ(Expression::kEffect, expr->expression()->context()); + switch (expr->context()) { + case Expression::kUninitialized: + UNREACHABLE(); + break; + case Expression::kEffect: + break; + case Expression::kValue: + __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); + __ push(ip); + break; + case Expression::kTestValue: + // Value is false so it's needed. + __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); + __ push(ip); + case Expression::kTest: // Fall through. 
+ case Expression::kValueTest: + __ jmp(false_label_); + break; + } + break; + } + + case Token::NOT: { + Comment cmnt(masm_, "[ UnaryOperation (NOT)"); + ASSERT_EQ(Expression::kTest, expr->expression()->context()); + + Label push_true; + Label push_false; + Label done; + Label* saved_true = true_label_; + Label* saved_false = false_label_; + switch (expr->context()) { + case Expression::kUninitialized: + UNREACHABLE(); + break; + + case Expression::kValue: + true_label_ = &push_false; + false_label_ = &push_true; + Visit(expr->expression()); + __ bind(&push_true); + __ LoadRoot(ip, Heap::kTrueValueRootIndex); + __ push(ip); + __ jmp(&done); + __ bind(&push_false); + __ LoadRoot(ip, Heap::kFalseValueRootIndex); + __ push(ip); + __ bind(&done); + break; + + case Expression::kEffect: + true_label_ = &done; + false_label_ = &done; + Visit(expr->expression()); + __ bind(&done); + break; + + case Expression::kTest: + true_label_ = saved_false; + false_label_ = saved_true; + Visit(expr->expression()); + break; + + case Expression::kValueTest: + true_label_ = saved_false; + false_label_ = &push_true; + Visit(expr->expression()); + __ bind(&push_true); + __ LoadRoot(ip, Heap::kTrueValueRootIndex); + __ push(ip); + __ jmp(saved_true); + break; + + case Expression::kTestValue: + true_label_ = &push_false; + false_label_ = saved_true; + Visit(expr->expression()); + __ bind(&push_false); + __ LoadRoot(ip, Heap::kFalseValueRootIndex); + __ push(ip); + __ jmp(saved_false); + break; + } + true_label_ = saved_true; + false_label_ = saved_false; + break; + } + + case Token::TYPEOF: { + Comment cmnt(masm_, "[ UnaryOperation (TYPEOF)"); + ASSERT_EQ(Expression::kValue, expr->expression()->context()); + + VariableProxy* proxy = expr->expression()->AsVariableProxy(); + if (proxy != NULL && + !proxy->var()->is_this() && + proxy->var()->is_global()) { + Comment cmnt(masm_, "Global variable"); + __ ldr(r0, CodeGenerator::GlobalObject()); + __ push(r0); + __ mov(r2, Operand(proxy->name())); 
+ Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize)); + // Use a regular load, not a contextual load, to avoid a reference + // error. + __ Call(ic, RelocInfo::CODE_TARGET); + __ str(r0, MemOperand(sp)); + } else if (proxy != NULL && + proxy->var()->slot() != NULL && + proxy->var()->slot()->type() == Slot::LOOKUP) { + __ mov(r0, Operand(proxy->name())); + __ stm(db_w, sp, cp.bit() | r0.bit()); + __ CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2); + __ push(r0); + } else { + // This expression cannot throw a reference error at the top level. + Visit(expr->expression()); + } + + __ CallRuntime(Runtime::kTypeof, 1); + Move(expr->context(), r0); + break; + } + + default: + UNREACHABLE(); + } +} + + +void FastCodeGenerator::VisitCountOperation(CountOperation* expr) { + Comment cmnt(masm_, "[ CountOperation"); + VariableProxy* proxy = expr->expression()->AsVariableProxy(); + ASSERT(proxy->AsVariable() != NULL); + ASSERT(proxy->AsVariable()->is_global()); + + Visit(proxy); + __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS); + + switch (expr->context()) { + case Expression::kUninitialized: + UNREACHABLE(); + case Expression::kValue: // Fall through + case Expression::kTest: // Fall through + case Expression::kTestValue: // Fall through + case Expression::kValueTest: + // Duplicate the result on the stack. + __ push(r0); + break; + case Expression::kEffect: + // Do not save result. + break; + } + // Call runtime for +1/-1. + __ push(r0); + __ mov(ip, Operand(Smi::FromInt(1))); + __ push(ip); + if (expr->op() == Token::INC) { + __ CallRuntime(Runtime::kNumberAdd, 2); + } else { + __ CallRuntime(Runtime::kNumberSub, 2); + } + // Call Store IC. + __ mov(r2, Operand(proxy->AsVariable()->name())); + __ ldr(ip, CodeGenerator::GlobalObject()); + __ push(ip); + Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize)); + __ Call(ic, RelocInfo::CODE_TARGET); + // Restore up stack after store IC. 
+ __ add(sp, sp, Operand(kPointerSize)); + + switch (expr->context()) { + case Expression::kUninitialized: + UNREACHABLE(); + case Expression::kEffect: // Fall through + case Expression::kValue: + // Do nothing. Result in either on the stack for value context + // or discarded for effect context. + break; + case Expression::kTest: + __ pop(r0); + TestAndBranch(r0, true_label_, false_label_); + break; + case Expression::kValueTest: { + Label discard; + __ ldr(r0, MemOperand(sp)); + TestAndBranch(r0, true_label_, &discard); + __ bind(&discard); + __ add(sp, sp, Operand(kPointerSize)); + __ b(false_label_); + break; + } + case Expression::kTestValue: { + Label discard; + __ ldr(r0, MemOperand(sp)); + TestAndBranch(r0, &discard, false_label_); + __ bind(&discard); + __ add(sp, sp, Operand(kPointerSize)); + __ b(true_label_); + break; + } + } } void FastCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) { + Comment cmnt(masm_, "[ BinaryOperation"); switch (expr->op()) { case Token::COMMA: - ASSERT(expr->left()->location().is_effect()); - ASSERT_EQ(expr->right()->location().type(), expr->location().type()); + ASSERT_EQ(Expression::kEffect, expr->left()->context()); + ASSERT_EQ(expr->context(), expr->right()->context()); Visit(expr->left()); Visit(expr->right()); break; @@ -672,8 +1385,8 @@ void FastCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) { case Token::SHL: case Token::SHR: case Token::SAR: { - ASSERT(expr->left()->location().is_value()); - ASSERT(expr->right()->location().is_value()); + ASSERT_EQ(Expression::kValue, expr->left()->context()); + ASSERT_EQ(Expression::kValue, expr->right()->context()); Visit(expr->left()); Visit(expr->right()); @@ -682,7 +1395,7 @@ void FastCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) { GenericBinaryOpStub stub(expr->op(), NO_OVERWRITE); __ CallStub(&stub); - Move(expr->location(), r0); + Move(expr->context(), r0); break; } @@ -692,52 +1405,170 @@ void 
FastCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) { } -void FastCodeGenerator::EmitLogicalOperation(BinaryOperation* expr) { - // Compile a short-circuited boolean operation in a non-test context. - - // Compile (e0 || e1) as if it were - // (let (temp = e0) temp ? temp : e1). - // Compile (e0 && e1) as if it were - // (let (temp = e0) !temp ? temp : e1). +void FastCodeGenerator::VisitCompareOperation(CompareOperation* expr) { + Comment cmnt(masm_, "[ CompareOperation"); + ASSERT_EQ(Expression::kValue, expr->left()->context()); + ASSERT_EQ(Expression::kValue, expr->right()->context()); + Visit(expr->left()); + Visit(expr->right()); + // Convert current context to test context: Pre-test code. + Label push_true; + Label push_false; Label done; - Location destination = expr->location(); - Expression* left = expr->left(); - Expression* right = expr->right(); - - // Call the runtime to find the boolean value of the left-hand - // subexpression. Duplicate the value if it may be needed as the final - // result. - if (left->AsLiteral() != NULL) { - __ mov(r0, Operand(left->AsLiteral()->handle())); - __ push(r0); - if (destination.is_value()) __ push(r0); - } else { - Visit(left); - ASSERT(left->location().is_value()); - if (destination.is_value()) { - __ ldr(r0, MemOperand(sp)); - __ push(r0); - } + Label* saved_true = true_label_; + Label* saved_false = false_label_; + switch (expr->context()) { + case Expression::kUninitialized: + UNREACHABLE(); + break; + + case Expression::kValue: + true_label_ = &push_true; + false_label_ = &push_false; + break; + + case Expression::kEffect: + true_label_ = &done; + false_label_ = &done; + break; + + case Expression::kTest: + break; + + case Expression::kValueTest: + true_label_ = &push_true; + break; + + case Expression::kTestValue: + false_label_ = &push_false; + break; } - // The left-hand value is in on top of the stack. It is duplicated on the - // stack iff the destination location is value. 
- __ CallRuntime(Runtime::kToBool, 1); - if (expr->op() == Token::OR) { - __ LoadRoot(ip, Heap::kTrueValueRootIndex); - } else { - __ LoadRoot(ip, Heap::kFalseValueRootIndex); + // Convert current context to test context: End pre-test code. + + switch (expr->op()) { + case Token::IN: { + __ InvokeBuiltin(Builtins::IN, CALL_JS); + __ LoadRoot(ip, Heap::kTrueValueRootIndex); + __ cmp(r0, ip); + __ b(eq, true_label_); + __ jmp(false_label_); + break; + } + + case Token::INSTANCEOF: { + InstanceofStub stub; + __ CallStub(&stub); + __ tst(r0, r0); + __ b(eq, true_label_); // The stub returns 0 for true. + __ jmp(false_label_); + break; + } + + default: { + Condition cc = eq; + bool strict = false; + switch (expr->op()) { + case Token::EQ_STRICT: + strict = true; + // Fall through + case Token::EQ: + cc = eq; + __ pop(r0); + __ pop(r1); + break; + case Token::LT: + cc = lt; + __ pop(r0); + __ pop(r1); + break; + case Token::GT: + // Reverse left and right sizes to obtain ECMA-262 conversion order. + cc = lt; + __ pop(r1); + __ pop(r0); + break; + case Token::LTE: + // Reverse left and right sizes to obtain ECMA-262 conversion order. + cc = ge; + __ pop(r1); + __ pop(r0); + break; + case Token::GTE: + cc = ge; + __ pop(r0); + __ pop(r1); + break; + case Token::IN: + case Token::INSTANCEOF: + default: + UNREACHABLE(); + } + + // The comparison stub expects the smi vs. smi case to be handled + // before it is called. + Label slow_case; + __ orr(r2, r0, Operand(r1)); + __ tst(r2, Operand(kSmiTagMask)); + __ b(ne, &slow_case); + __ cmp(r1, r0); + __ b(cc, true_label_); + __ jmp(false_label_); + + __ bind(&slow_case); + CompareStub stub(cc, strict); + __ CallStub(&stub); + __ tst(r0, r0); + __ b(cc, true_label_); + __ jmp(false_label_); + } } - __ cmp(r0, ip); - __ b(eq, &done); - // Discard the left-hand value if present on the stack. - if (destination.is_value()) __ pop(); - // Save or discard the right-hand value as needed. 
- Visit(right); - ASSERT_EQ(destination.type(), right->location().type()); + // Convert current context to test context: Post-test code. + switch (expr->context()) { + case Expression::kUninitialized: + UNREACHABLE(); + break; - __ bind(&done); + case Expression::kValue: + __ bind(&push_true); + __ LoadRoot(ip, Heap::kTrueValueRootIndex); + __ push(ip); + __ jmp(&done); + __ bind(&push_false); + __ LoadRoot(ip, Heap::kFalseValueRootIndex); + __ push(ip); + __ bind(&done); + break; + + case Expression::kEffect: + __ bind(&done); + break; + + case Expression::kTest: + break; + + case Expression::kValueTest: + __ bind(&push_true); + __ LoadRoot(ip, Heap::kTrueValueRootIndex); + __ push(ip); + __ jmp(saved_true); + break; + + case Expression::kTestValue: + __ bind(&push_false); + __ LoadRoot(ip, Heap::kFalseValueRootIndex); + __ push(ip); + __ jmp(saved_false); + break; + } + true_label_ = saved_true; + false_label_ = saved_false; + // Convert current context to test context: End post-test code. } + +#undef __ + + } } // namespace v8::internal diff --git a/deps/v8/src/arm/frames-arm.cc b/deps/v8/src/arm/frames-arm.cc index 6fde4b73c0..b0fa13a5a1 100644 --- a/deps/v8/src/arm/frames-arm.cc +++ b/deps/v8/src/arm/frames-arm.cc @@ -54,23 +54,24 @@ StackFrame::Type ExitFrame::GetStateForFramePointer(Address fp, State* state) { if (fp == 0) return NONE; // Compute frame type and stack pointer. Address sp = fp + ExitFrameConstants::kSPDisplacement; - Type type; - if (Memory::Address_at(fp + ExitFrameConstants::kDebugMarkOffset) != 0) { - type = EXIT_DEBUG; + const int offset = ExitFrameConstants::kCodeOffset; + Object* code = Memory::Object_at(fp + offset); + bool is_debug_exit = code->IsSmi(); + if (is_debug_exit) { sp -= kNumJSCallerSaved * kPointerSize; - } else { - type = EXIT; } // Fill in the state. 
state->sp = sp; state->fp = fp; state->pc_address = reinterpret_cast<Address*>(sp - 1 * kPointerSize); - return type; + return EXIT; } void ExitFrame::Iterate(ObjectVisitor* v) const { - // Do nothing + v->VisitPointer(&code_slot()); + // The arguments are traversed as part of the expression stack of + // the calling frame. } diff --git a/deps/v8/src/arm/frames-arm.h b/deps/v8/src/arm/frames-arm.h index 0874c09274..4924c1aeb9 100644 --- a/deps/v8/src/arm/frames-arm.h +++ b/deps/v8/src/arm/frames-arm.h @@ -100,7 +100,7 @@ class ExitFrameConstants : public AllStatic { static const int kSPDisplacement = -1 * kPointerSize; // The debug marker is just above the frame pointer. - static const int kDebugMarkOffset = -1 * kPointerSize; + static const int kCodeOffset = -1 * kPointerSize; static const int kSavedRegistersOffset = 0 * kPointerSize; diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc index 45c6540eeb..a668cb1f71 100644 --- a/deps/v8/src/arm/macro-assembler-arm.cc +++ b/deps/v8/src/arm/macro-assembler-arm.cc @@ -274,9 +274,7 @@ void MacroAssembler::LeaveFrame(StackFrame::Type type) { } -void MacroAssembler::EnterExitFrame(StackFrame::Type type) { - ASSERT(type == StackFrame::EXIT || type == StackFrame::EXIT_DEBUG); - +void MacroAssembler::EnterExitFrame(ExitFrame::Mode mode) { // Compute the argv pointer and keep it in a callee-saved register. // r0 is argc. add(r6, sp, Operand(r0, LSL, kPointerSizeLog2)); @@ -298,8 +296,11 @@ void MacroAssembler::EnterExitFrame(StackFrame::Type type) { stm(db_w, sp, fp.bit() | ip.bit() | lr.bit()); mov(fp, Operand(sp)); // setup new frame pointer - // Push debug marker. - mov(ip, Operand(type == StackFrame::EXIT_DEBUG ? 1 : 0)); + if (mode == ExitFrame::MODE_DEBUG) { + mov(ip, Operand(Smi::FromInt(0))); + } else { + mov(ip, Operand(CodeObject())); + } push(ip); // Save the frame pointer and the context in top. 
@@ -316,7 +317,7 @@ void MacroAssembler::EnterExitFrame(StackFrame::Type type) { #ifdef ENABLE_DEBUGGER_SUPPORT // Save the state of all registers to the stack from the memory // location. This is needed to allow nested break points. - if (type == StackFrame::EXIT_DEBUG) { + if (mode == ExitFrame::MODE_DEBUG) { // Use sp as base to push. CopyRegistersFromMemoryToStack(sp, kJSCallerSaved); } @@ -348,14 +349,14 @@ void MacroAssembler::AlignStack(int offset) { } -void MacroAssembler::LeaveExitFrame(StackFrame::Type type) { +void MacroAssembler::LeaveExitFrame(ExitFrame::Mode mode) { #ifdef ENABLE_DEBUGGER_SUPPORT // Restore the memory copy of the registers by digging them out from // the stack. This is needed to allow nested break points. - if (type == StackFrame::EXIT_DEBUG) { + if (mode == ExitFrame::MODE_DEBUG) { // This code intentionally clobbers r2 and r3. const int kCallerSavedSize = kNumJSCallerSaved * kPointerSize; - const int kOffset = ExitFrameConstants::kDebugMarkOffset - kCallerSavedSize; + const int kOffset = ExitFrameConstants::kCodeOffset - kCallerSavedSize; add(r3, fp, Operand(kOffset)); CopyRegistersFromStackToMemory(r3, r2, kJSCallerSaved); } @@ -975,6 +976,17 @@ void MacroAssembler::IllegalOperation(int num_arguments) { } +void MacroAssembler::IntegerToDoubleConversionWithVFP3(Register inReg, + Register outHighReg, + Register outLowReg) { + // ARMv7 VFP3 instructions to implement integer to double conversion. + mov(r7, Operand(inReg, ASR, kSmiTagSize)); + fmsr(s15, r7); + fsitod(d7, s15); + fmrrd(outLowReg, outHighReg, d7); +} + + void MacroAssembler::CallRuntime(Runtime::Function* f, int num_arguments) { // All parameters are on the stack. r0 has the return value after call. 
diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h index e37bb5e1c2..8c247bfbcd 100644 --- a/deps/v8/src/arm/macro-assembler-arm.h +++ b/deps/v8/src/arm/macro-assembler-arm.h @@ -87,14 +87,14 @@ class MacroAssembler: public Assembler { void EnterConstructFrame() { EnterFrame(StackFrame::CONSTRUCT); } void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); } - // Enter specific kind of exit frame; either EXIT or - // EXIT_DEBUG. Expects the number of arguments in register r0 and + // Enter specific kind of exit frame; either normal or debug mode. + // Expects the number of arguments in register r0 and // the builtin function to call in register r1. Exits with argc in // r4, argv in r6, and and the builtin function to call in r5. - void EnterExitFrame(StackFrame::Type type); + void EnterExitFrame(ExitFrame::Mode mode); // Leave the current exit frame. Expects the return value in r0. - void LeaveExitFrame(StackFrame::Type type); + void LeaveExitFrame(ExitFrame::Mode mode); // Align the stack by optionally pushing a Smi zero. void AlignStack(int offset); @@ -240,6 +240,11 @@ class MacroAssembler: public Assembler { // occurred. void IllegalOperation(int num_arguments); + // Uses VFP instructions to Convert a Smi to a double. 
+ void IntegerToDoubleConversionWithVFP3(Register inReg, + Register outHighReg, + Register outLowReg); + // --------------------------------------------------------------------------- // Runtime calls diff --git a/deps/v8/src/arm/regexp-macro-assembler-arm.cc b/deps/v8/src/arm/regexp-macro-assembler-arm.cc index bd50428d8b..24b6a9c81a 100644 --- a/deps/v8/src/arm/regexp-macro-assembler-arm.cc +++ b/deps/v8/src/arm/regexp-macro-assembler-arm.cc @@ -588,9 +588,9 @@ Handle<Object> RegExpMacroAssemblerARM::GetCode(Handle<String> source) { Label stack_limit_hit; Label stack_ok; - ExternalReference stack_guard_limit = - ExternalReference::address_of_stack_guard_limit(); - __ mov(r0, Operand(stack_guard_limit)); + ExternalReference stack_limit = + ExternalReference::address_of_stack_limit(); + __ mov(r0, Operand(stack_limit)); __ ldr(r0, MemOperand(r0)); __ sub(r0, sp, r0, SetCC); // Handle it if the stack pointer is already below the stack limit. @@ -1090,9 +1090,9 @@ void RegExpMacroAssemblerARM::Pop(Register target) { void RegExpMacroAssemblerARM::CheckPreemption() { // Check for preemption. 
- ExternalReference stack_guard_limit = - ExternalReference::address_of_stack_guard_limit(); - __ mov(r0, Operand(stack_guard_limit)); + ExternalReference stack_limit = + ExternalReference::address_of_stack_limit(); + __ mov(r0, Operand(stack_limit)); __ ldr(r0, MemOperand(r0)); __ cmp(sp, r0); SafeCall(&check_preempt_label_, ls); diff --git a/deps/v8/src/arm/simulator-arm.cc b/deps/v8/src/arm/simulator-arm.cc index 22bec82201..9dc417bb71 100644 --- a/deps/v8/src/arm/simulator-arm.cc +++ b/deps/v8/src/arm/simulator-arm.cc @@ -342,6 +342,11 @@ void Debugger::Debug() { PrintF("Z flag: %d; ", sim_->z_flag_); PrintF("C flag: %d; ", sim_->c_flag_); PrintF("V flag: %d\n", sim_->v_flag_); + PrintF("INVALID OP flag: %d; ", sim_->inv_op_vfp_flag_); + PrintF("DIV BY ZERO flag: %d; ", sim_->div_zero_vfp_flag_); + PrintF("OVERFLOW flag: %d; ", sim_->overflow_vfp_flag_); + PrintF("UNDERFLOW flag: %d; ", sim_->underflow_vfp_flag_); + PrintF("INEXACT flag: %d; ", sim_->inexact_vfp_flag_); } else if (strcmp(cmd, "unstop") == 0) { intptr_t stop_pc = sim_->get_pc() - Instr::kInstrSize; Instr* stop_instr = reinterpret_cast<Instr*>(stop_pc); @@ -429,6 +434,24 @@ Simulator::Simulator() { c_flag_ = false; v_flag_ = false; + // Initializing VFP registers. + // All registers are initialized to zero to start with + // even though s_registers_ & d_registers_ share the same + // physical registers in the target. + for (int i = 0; i < num_s_registers; i++) { + vfp_register[i] = 0; + } + n_flag_FPSCR_ = false; + z_flag_FPSCR_ = false; + c_flag_FPSCR_ = false; + v_flag_FPSCR_ = false; + + inv_op_vfp_flag_ = false; + div_zero_vfp_flag_ = false; + overflow_vfp_flag_ = false; + underflow_vfp_flag_ = false; + inexact_vfp_flag_ = false; + // The sp is initialized to point to the bottom (high address) of the // allocated stack area. To be safe in potential stack underflows we leave // some buffer below. 
@@ -545,6 +568,99 @@ int32_t Simulator::get_pc() const { } +// Getting from and setting into VFP registers. +void Simulator::set_s_register(int sreg, unsigned int value) { + ASSERT((sreg >= 0) && (sreg < num_s_registers)); + vfp_register[sreg] = value; +} + + +unsigned int Simulator::get_s_register(int sreg) const { + ASSERT((sreg >= 0) && (sreg < num_s_registers)); + return vfp_register[sreg]; +} + + +void Simulator::set_s_register_from_float(int sreg, const float flt) { + ASSERT((sreg >= 0) && (sreg < num_s_registers)); + // Read the bits from the single precision floating point value + // into the unsigned integer element of vfp_register[] given by index=sreg. + char buffer[sizeof(vfp_register[0])]; + memcpy(buffer, &flt, sizeof(vfp_register[0])); + memcpy(&vfp_register[sreg], buffer, sizeof(vfp_register[0])); +} + + +void Simulator::set_s_register_from_sinteger(int sreg, const int sint) { + ASSERT((sreg >= 0) && (sreg < num_s_registers)); + // Read the bits from the integer value into the unsigned integer element of + // vfp_register[] given by index=sreg. + char buffer[sizeof(vfp_register[0])]; + memcpy(buffer, &sint, sizeof(vfp_register[0])); + memcpy(&vfp_register[sreg], buffer, sizeof(vfp_register[0])); +} + + +void Simulator::set_d_register_from_double(int dreg, const double& dbl) { + ASSERT((dreg >= 0) && (dreg < num_d_registers)); + // Read the bits from the double precision floating point value into the two + // consecutive unsigned integer elements of vfp_register[] given by index + // 2*sreg and 2*sreg+1. 
+ char buffer[2 * sizeof(vfp_register[0])]; + memcpy(buffer, &dbl, 2 * sizeof(vfp_register[0])); +#ifndef BIG_ENDIAN_FLOATING_POINT + memcpy(&vfp_register[dreg * 2], buffer, 2 * sizeof(vfp_register[0])); +#else + memcpy(&vfp_register[dreg * 2], &buffer[4], sizeof(vfp_register[0])); + memcpy(&vfp_register[dreg * 2 + 1], &buffer[0], sizeof(vfp_register[0])); +#endif +} + + +float Simulator::get_float_from_s_register(int sreg) { + ASSERT((sreg >= 0) && (sreg < num_s_registers)); + + float sm_val = 0.0; + // Read the bits from the unsigned integer vfp_register[] array + // into the single precision floating point value and return it. + char buffer[sizeof(vfp_register[0])]; + memcpy(buffer, &vfp_register[sreg], sizeof(vfp_register[0])); + memcpy(&sm_val, buffer, sizeof(vfp_register[0])); + return(sm_val); +} + + +int Simulator::get_sinteger_from_s_register(int sreg) { + ASSERT((sreg >= 0) && (sreg < num_s_registers)); + + int sm_val = 0; + // Read the bits from the unsigned integer vfp_register[] array + // into the single precision floating point value and return it. + char buffer[sizeof(vfp_register[0])]; + memcpy(buffer, &vfp_register[sreg], sizeof(vfp_register[0])); + memcpy(&sm_val, buffer, sizeof(vfp_register[0])); + return(sm_val); +} + + +double Simulator::get_double_from_d_register(int dreg) { + ASSERT((dreg >= 0) && (dreg < num_d_registers)); + + double dm_val = 0.0; + // Read the bits from the unsigned integer vfp_register[] array + // into the double precision floating point value and return it. 
+ char buffer[2 * sizeof(vfp_register[0])]; +#ifdef BIG_ENDIAN_FLOATING_POINT + memcpy(&buffer[0], &vfp_register[2 * dreg + 1], sizeof(vfp_register[0])); + memcpy(&buffer[4], &vfp_register[2 * dreg], sizeof(vfp_register[0])); +#else + memcpy(buffer, &vfp_register[2 * dreg], 2 * sizeof(vfp_register[0])); +#endif + memcpy(&dm_val, buffer, 2 * sizeof(vfp_register[0])); + return(dm_val); +} + + // For use in calls that take two double values, constructed from r0, r1, r2 // and r3. void Simulator::GetFpArgs(double* x, double* y) { @@ -772,6 +888,37 @@ bool Simulator::OverflowFrom(int32_t alu_out, } +// Support for VFP comparisons. +void Simulator::Compute_FPSCR_Flags(double val1, double val2) { + // All non-NaN cases. + if (val1 == val2) { + n_flag_FPSCR_ = false; + z_flag_FPSCR_ = true; + c_flag_FPSCR_ = true; + v_flag_FPSCR_ = false; + } else if (val1 < val2) { + n_flag_FPSCR_ = true; + z_flag_FPSCR_ = false; + c_flag_FPSCR_ = false; + v_flag_FPSCR_ = false; + } else { + // Case when (val1 > val2). + n_flag_FPSCR_ = false; + z_flag_FPSCR_ = false; + c_flag_FPSCR_ = true; + v_flag_FPSCR_ = false; + } +} + + +void Simulator::Copy_FPSCR_to_APSR() { + n_flag_ = n_flag_FPSCR_; + z_flag_ = z_flag_FPSCR_; + c_flag_ = c_flag_FPSCR_; + v_flag_ = v_flag_FPSCR_; +} + + // Addressing Mode 1 - Data-processing operands: // Get the value based on the shifter_operand with register. int32_t Simulator::GetShiftRm(Instr* instr, bool* carry_out) { @@ -1154,7 +1301,7 @@ void Simulator::DecodeType01(Instr* instr) { } } } else { - UNIMPLEMENTED(); // not used by V8 + UNIMPLEMENTED(); // Not used by V8. 
} } else { // extra load/store instructions @@ -1664,16 +1811,15 @@ void Simulator::DecodeType5(Instr* instr) { void Simulator::DecodeType6(Instr* instr) { - UNIMPLEMENTED(); + DecodeType6CoprocessorIns(instr); } void Simulator::DecodeType7(Instr* instr) { if (instr->Bit(24) == 1) { - // Format(instr, "swi 'swi"); SoftwareInterrupt(instr); } else { - UNIMPLEMENTED(); + DecodeTypeVFP(instr); } } @@ -1745,6 +1891,177 @@ void Simulator::DecodeUnconditional(Instr* instr) { } +// void Simulator::DecodeTypeVFP(Instr* instr) +// The Following ARMv7 VFPv instructions are currently supported. +// fmsr :Sn = Rt +// fmrs :Rt = Sn +// fsitod: Dd = Sm +// ftosid: Sd = Dm +// Dd = faddd(Dn, Dm) +// Dd = fsubd(Dn, Dm) +// Dd = fmuld(Dn, Dm) +// Dd = fdivd(Dn, Dm) +// vcmp(Dd, Dm) +// VMRS +void Simulator::DecodeTypeVFP(Instr* instr) { + ASSERT((instr->TypeField() == 7) && (instr->Bit(24) == 0x0) ); + + int rt = instr->RtField(); + int vm = instr->VmField(); + int vn = instr->VnField(); + int vd = instr->VdField(); + + if (instr->Bit(23) == 1) { + if ((instr->Bits(21, 19) == 0x7) && + (instr->Bits(18, 16) == 0x5) && + (instr->Bits(11, 9) == 0x5) && + (instr->Bit(8) == 1) && + (instr->Bit(6) == 1) && + (instr->Bit(4) == 0)) { + double dm_val = get_double_from_d_register(vm); + int32_t int_value = static_cast<int32_t>(dm_val); + set_s_register_from_sinteger(((vd<<1) | instr->DField()), int_value); + } else if ((instr->Bits(21, 19) == 0x7) && + (instr->Bits(18, 16) == 0x0) && + (instr->Bits(11, 9) == 0x5) && + (instr->Bit(8) == 1) && + (instr->Bit(7) == 1) && + (instr->Bit(6) == 1) && + (instr->Bit(4) == 0)) { + int32_t int_value = get_sinteger_from_s_register(((vm<<1) | + instr->MField())); + double dbl_value = static_cast<double>(int_value); + set_d_register_from_double(vd, dbl_value); + } else if ((instr->Bit(21) == 0x0) && + (instr->Bit(20) == 0x0) && + (instr->Bits(11, 9) == 0x5) && + (instr->Bit(8) == 1) && + (instr->Bit(6) == 0) && + (instr->Bit(4) == 0)) { + double dn_value = 
get_double_from_d_register(vn); + double dm_value = get_double_from_d_register(vm); + double dd_value = dn_value / dm_value; + set_d_register_from_double(vd, dd_value); + } else if ((instr->Bits(21, 20) == 0x3) && + (instr->Bits(19, 16) == 0x4) && + (instr->Bits(11, 9) == 0x5) && + (instr->Bit(8) == 0x1) && + (instr->Bit(6) == 0x1) && + (instr->Bit(4) == 0x0)) { + double dd_value = get_double_from_d_register(vd); + double dm_value = get_double_from_d_register(vm); + Compute_FPSCR_Flags(dd_value, dm_value); + } else if ((instr->Bits(23, 20) == 0xF) && + (instr->Bits(19, 16) == 0x1) && + (instr->Bits(11, 8) == 0xA) && + (instr->Bits(7, 5) == 0x0) && + (instr->Bit(4) == 0x1) && + (instr->Bits(3, 0) == 0x0)) { + if (instr->Bits(15, 12) == 0xF) + Copy_FPSCR_to_APSR(); + else + UNIMPLEMENTED(); // Not used by V8. + } else { + UNIMPLEMENTED(); // Not used by V8. + } + } else if (instr->Bit(21) == 1) { + if ((instr->Bit(20) == 0x1) && + (instr->Bits(11, 9) == 0x5) && + (instr->Bit(8) == 0x1) && + (instr->Bit(6) == 0) && + (instr->Bit(4) == 0)) { + double dn_value = get_double_from_d_register(vn); + double dm_value = get_double_from_d_register(vm); + double dd_value = dn_value + dm_value; + set_d_register_from_double(vd, dd_value); + } else if ((instr->Bit(20) == 0x1) && + (instr->Bits(11, 9) == 0x5) && + (instr->Bit(8) == 0x1) && + (instr->Bit(6) == 1) && + (instr->Bit(4) == 0)) { + double dn_value = get_double_from_d_register(vn); + double dm_value = get_double_from_d_register(vm); + double dd_value = dn_value - dm_value; + set_d_register_from_double(vd, dd_value); + } else if ((instr->Bit(20) == 0x0) && + (instr->Bits(11, 9) == 0x5) && + (instr->Bit(8) == 0x1) && + (instr->Bit(6) == 0) && + (instr->Bit(4) == 0)) { + double dn_value = get_double_from_d_register(vn); + double dm_value = get_double_from_d_register(vm); + double dd_value = dn_value * dm_value; + set_d_register_from_double(vd, dd_value); + } else { + UNIMPLEMENTED(); // Not used by V8. 
+ } + } else { + if ((instr->Bit(20) == 0x0) && + (instr->Bits(11, 8) == 0xA) && + (instr->Bits(6, 5) == 0x0) && + (instr->Bit(4) == 1) && + (instr->Bits(3, 0) == 0x0)) { + int32_t rs_val = get_register(rt); + set_s_register_from_sinteger(((vn<<1) | instr->NField()), rs_val); + } else if ((instr->Bit(20) == 0x1) && + (instr->Bits(11, 8) == 0xA) && + (instr->Bits(6, 5) == 0x0) && + (instr->Bit(4) == 1) && + (instr->Bits(3, 0) == 0x0)) { + int32_t int_value = get_sinteger_from_s_register(((vn<<1) | + instr->NField())); + set_register(rt, int_value); + } else { + UNIMPLEMENTED(); // Not used by V8. + } + } +} + + +// void Simulator::DecodeType6CoprocessorIns(Instr* instr) +// Decode Type 6 coprocessor instructions. +// Dm = fmdrr(Rt, Rt2) +// <Rt, Rt2> = fmrrd(Dm) +void Simulator::DecodeType6CoprocessorIns(Instr* instr) { + ASSERT((instr->TypeField() == 6)); + + int rt = instr->RtField(); + int rn = instr->RnField(); + int vm = instr->VmField(); + + if (instr->Bit(23) == 1) { + UNIMPLEMENTED(); + } else if (instr->Bit(22) == 1) { + if ((instr->Bits(27, 24) == 0xC) && + (instr->Bit(22) == 1) && + (instr->Bits(11, 8) == 0xB) && + (instr->Bits(7, 6) == 0x0) && + (instr->Bit(4) == 1)) { + if (instr->Bit(20) == 0) { + int32_t rs_val = get_register(rt); + int32_t rn_val = get_register(rn); + + set_s_register_from_sinteger(2*vm, rs_val); + set_s_register_from_sinteger((2*vm+1), rn_val); + + } else if (instr->Bit(20) == 1) { + int32_t rt_int_value = get_sinteger_from_s_register(2*vm); + int32_t rn_int_value = get_sinteger_from_s_register(2*vm+1); + + set_register(rt, rt_int_value); + set_register(rn, rn_int_value); + } + } else { + UNIMPLEMENTED(); + } + } else if (instr->Bit(21) == 1) { + UNIMPLEMENTED(); + } else { + UNIMPLEMENTED(); + } +} + + // Executes the current instruction. 
void Simulator::InstructionDecode(Instr* instr) { pc_modified_ = false; @@ -1802,7 +2119,6 @@ void Simulator::InstructionDecode(Instr* instr) { } -// void Simulator::Execute() { // Get the PC to simulate. Cannot use the accessor here as we need the // raw PC value and not the one used as input to arithmetic instructions. @@ -1924,6 +2240,25 @@ int32_t Simulator::Call(byte* entry, int argument_count, ...) { return result; } + +uintptr_t Simulator::PushAddress(uintptr_t address) { + int new_sp = get_register(sp) - sizeof(uintptr_t); + uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(new_sp); + *stack_slot = address; + set_register(sp, new_sp); + return new_sp; +} + + +uintptr_t Simulator::PopAddress() { + int current_sp = get_register(sp); + uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(current_sp); + uintptr_t address = *stack_slot; + set_register(sp, current_sp + sizeof(uintptr_t)); + return address; +} + + } } // namespace assembler::arm #endif // !defined(__arm__) diff --git a/deps/v8/src/arm/simulator-arm.h b/deps/v8/src/arm/simulator-arm.h index ff6bbf4302..3a4bb311be 100644 --- a/deps/v8/src/arm/simulator-arm.h +++ b/deps/v8/src/arm/simulator-arm.h @@ -52,6 +52,12 @@ class SimulatorStack : public v8::internal::AllStatic { static inline uintptr_t JsLimitFromCLimit(uintptr_t c_limit) { return c_limit; } + + static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) { + return try_catch_address; + } + + static inline void UnregisterCTryCatch() { } }; @@ -60,6 +66,10 @@ class SimulatorStack : public v8::internal::AllStatic { #define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \ entry(p0, p1, p2, p3, p4, p5, p6) +#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \ + reinterpret_cast<TryCatch*>(try_catch_address) + + #else // defined(__arm__) // When running with the simulator transition into simulated execution at this @@ -73,6 +83,11 @@ class SimulatorStack : public v8::internal::AllStatic { 
assembler::arm::Simulator::current()->Call( \ FUNCTION_ADDR(entry), 7, p0, p1, p2, p3, p4, p5, p6) +#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \ + try_catch_address == NULL ? \ + NULL : *(reinterpret_cast<TryCatch**>(try_catch_address)) + + #include "constants-arm.h" @@ -82,7 +97,6 @@ namespace arm { class Simulator { public: friend class Debugger; - enum Register { no_reg = -1, r0 = 0, r1, r2, r3, r4, r5, r6, r7, @@ -90,7 +104,15 @@ class Simulator { num_registers, sp = 13, lr = 14, - pc = 15 + pc = 15, + s0 = 0, s1, s2, s3, s4, s5, s6, s7, + s8, s9, s10, s11, s12, s13, s14, s15, + s16, s17, s18, s19, s20, s21, s22, s23, + s24, s25, s26, s27, s28, s29, s30, s31, + num_s_registers = 32, + d0 = 0, d1, d2, d3, d4, d5, d6, d7, + d8, d9, d10, d11, d12, d13, d14, d15, + num_d_registers = 16 }; Simulator(); @@ -106,6 +128,16 @@ class Simulator { void set_register(int reg, int32_t value); int32_t get_register(int reg) const; + // Support for VFP. + void set_s_register(int reg, unsigned int value); + unsigned int get_s_register(int reg) const; + void set_d_register_from_double(int dreg, const double& dbl); + double get_double_from_d_register(int dreg); + void set_s_register_from_float(int sreg, const float dbl); + float get_float_from_s_register(int sreg); + void set_s_register_from_sinteger(int reg, const int value); + int get_sinteger_from_s_register(int reg); + // Special case of set_register and get_register to access the raw PC value. void set_pc(int32_t value); int32_t get_pc() const; @@ -124,6 +156,12 @@ class Simulator { // which sets up the simulator state and grabs the result on return. int32_t Call(byte* entry, int argument_count, ...); + // Push an address onto the JS stack. + uintptr_t PushAddress(uintptr_t address); + + // Pop an address from the JS stack. 
+ uintptr_t PopAddress(); + private: enum special_values { // Known bad pc value to ensure that the simulator does not execute @@ -154,6 +192,10 @@ class Simulator { int32_t right, bool addition); + // Support for VFP. + void Compute_FPSCR_Flags(double val1, double val2); + void Copy_FPSCR_to_APSR(); + // Helper functions to decode common "addressing" modes int32_t GetShiftRm(Instr* instr, bool* carry_out); int32_t GetImm(Instr* instr, bool* carry_out); @@ -185,6 +227,10 @@ class Simulator { void DecodeType7(Instr* instr); void DecodeUnconditional(Instr* instr); + // Support for VFP. + void DecodeTypeVFP(Instr* instr); + void DecodeType6CoprocessorIns(Instr* instr); + // Executes one instruction. void InstructionDecode(Instr* instr); @@ -198,20 +244,34 @@ class Simulator { void SetFpResult(const double& result); void TrashCallerSaveRegisters(); - // architecture state + // Architecture state. int32_t registers_[16]; bool n_flag_; bool z_flag_; bool c_flag_; bool v_flag_; - // simulator support + // VFP architecture state. + unsigned int vfp_register[num_s_registers]; + bool n_flag_FPSCR_; + bool z_flag_FPSCR_; + bool c_flag_FPSCR_; + bool v_flag_FPSCR_; + + // VFP FP exception flags architecture state. + bool inv_op_vfp_flag_; + bool div_zero_vfp_flag_; + bool overflow_vfp_flag_; + bool underflow_vfp_flag_; + bool inexact_vfp_flag_; + + // Simulator support. char* stack_; bool pc_modified_; int icount_; static bool initialized_; - // registered breakpoints + // Registered breakpoints. 
Instr* break_pc_; instr_t break_instr_; }; @@ -229,6 +289,15 @@ class SimulatorStack : public v8::internal::AllStatic { static inline uintptr_t JsLimitFromCLimit(uintptr_t c_limit) { return assembler::arm::Simulator::current()->StackLimit(); } + + static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) { + assembler::arm::Simulator* sim = assembler::arm::Simulator::current(); + return sim->PushAddress(try_catch_address); + } + + static inline void UnregisterCTryCatch() { + assembler::arm::Simulator::current()->PopAddress(); + } }; diff --git a/deps/v8/src/assembler.cc b/deps/v8/src/assembler.cc index 34595f83ff..9c9ddcdda6 100644 --- a/deps/v8/src/assembler.cc +++ b/deps/v8/src/assembler.cc @@ -174,14 +174,14 @@ void RelocInfoWriter::WriteTaggedPC(uint32_t pc_delta, int tag) { void RelocInfoWriter::WriteTaggedData(intptr_t data_delta, int tag) { - *--pos_ = data_delta << kPositionTypeTagBits | tag; + *--pos_ = static_cast<byte>(data_delta << kPositionTypeTagBits | tag); } void RelocInfoWriter::WriteExtraTag(int extra_tag, int top_tag) { - *--pos_ = top_tag << (kTagBits + kExtraTagBits) | - extra_tag << kTagBits | - kDefaultTag; + *--pos_ = static_cast<int>(top_tag << (kTagBits + kExtraTagBits) | + extra_tag << kTagBits | + kDefaultTag); } @@ -196,7 +196,7 @@ void RelocInfoWriter::WriteExtraTaggedPC(uint32_t pc_delta, int extra_tag) { void RelocInfoWriter::WriteExtraTaggedData(intptr_t data_delta, int top_tag) { WriteExtraTag(kDataJumpTag, top_tag); for (int i = 0; i < kIntptrSize; i++) { - *--pos_ = data_delta; + *--pos_ = static_cast<byte>(data_delta); // Signed right shift is arithmetic shift. Tested in test-utils.cc. data_delta = data_delta >> kBitsPerByte; } @@ -211,7 +211,7 @@ void RelocInfoWriter::Write(const RelocInfo* rinfo) { ASSERT(rinfo->pc() - last_pc_ >= 0); ASSERT(RelocInfo::NUMBER_OF_MODES < kMaxRelocModes); // Use unsigned delta-encoding for pc. 
- uint32_t pc_delta = rinfo->pc() - last_pc_; + uint32_t pc_delta = static_cast<uint32_t>(rinfo->pc() - last_pc_); RelocInfo::Mode rmode = rinfo->rmode(); // The two most common modes are given small tags, and usually fit in a byte. @@ -522,6 +522,10 @@ ExternalReference::ExternalReference(Builtins::CFunctionId id) : address_(Redirect(Builtins::c_function_address(id))) {} +ExternalReference::ExternalReference(ApiFunction* fun) + : address_(Redirect(fun->address())) {} + + ExternalReference::ExternalReference(Builtins::Name name) : address_(Builtins::builtin_address(name)) {} @@ -579,11 +583,16 @@ ExternalReference ExternalReference::roots_address() { } -ExternalReference ExternalReference::address_of_stack_guard_limit() { +ExternalReference ExternalReference::address_of_stack_limit() { return ExternalReference(StackGuard::address_of_jslimit()); } +ExternalReference ExternalReference::address_of_real_stack_limit() { + return ExternalReference(StackGuard::address_of_real_jslimit()); +} + + ExternalReference ExternalReference::address_of_regexp_stack_limit() { return ExternalReference(RegExpStack::limit_address()); } @@ -608,6 +617,27 @@ ExternalReference ExternalReference::new_space_allocation_limit_address() { return ExternalReference(Heap::NewSpaceAllocationLimitAddress()); } + +ExternalReference ExternalReference::handle_scope_extensions_address() { + return ExternalReference(HandleScope::current_extensions_address()); +} + + +ExternalReference ExternalReference::handle_scope_next_address() { + return ExternalReference(HandleScope::current_next_address()); +} + + +ExternalReference ExternalReference::handle_scope_limit_address() { + return ExternalReference(HandleScope::current_limit_address()); +} + + +ExternalReference ExternalReference::scheduled_exception_address() { + return ExternalReference(Top::scheduled_exception_address()); +} + + #ifdef V8_NATIVE_REGEXP ExternalReference ExternalReference::re_check_stack_guard_state() { diff --git 
a/deps/v8/src/assembler.h b/deps/v8/src/assembler.h index 21a66dd501..aecd4cd63a 100644 --- a/deps/v8/src/assembler.h +++ b/deps/v8/src/assembler.h @@ -373,6 +373,8 @@ class ExternalReference BASE_EMBEDDED { public: explicit ExternalReference(Builtins::CFunctionId id); + explicit ExternalReference(ApiFunction* ptr); + explicit ExternalReference(Builtins::Name name); explicit ExternalReference(Runtime::FunctionId id); @@ -406,7 +408,10 @@ class ExternalReference BASE_EMBEDDED { static ExternalReference roots_address(); // Static variable StackGuard::address_of_jslimit() - static ExternalReference address_of_stack_guard_limit(); + static ExternalReference address_of_stack_limit(); + + // Static variable StackGuard::address_of_real_jslimit() + static ExternalReference address_of_real_stack_limit(); // Static variable RegExpStack::limit_address() static ExternalReference address_of_regexp_stack_limit(); @@ -422,6 +427,12 @@ class ExternalReference BASE_EMBEDDED { static ExternalReference double_fp_operation(Token::Value operation); static ExternalReference compare_doubles(); + static ExternalReference handle_scope_extensions_address(); + static ExternalReference handle_scope_next_address(); + static ExternalReference handle_scope_limit_address(); + + static ExternalReference scheduled_exception_address(); + Address address() const {return reinterpret_cast<Address>(address_);} #ifdef ENABLE_DEBUGGER_SUPPORT @@ -460,12 +471,16 @@ class ExternalReference BASE_EMBEDDED { static void* Redirect(void* address, bool fp_return = false) { if (redirector_ == NULL) return address; - return (*redirector_)(address, fp_return); + void* answer = (*redirector_)(address, fp_return); + return answer; } static void* Redirect(Address address_arg, bool fp_return = false) { void* address = reinterpret_cast<void*>(address_arg); - return redirector_ == NULL ? address : (*redirector_)(address, fp_return); + void* answer = (redirector_ == NULL) ? 
+ address : + (*redirector_)(address, fp_return); + return answer; } void* address_; diff --git a/deps/v8/src/ast.h b/deps/v8/src/ast.h index be64dcb3c6..560470f7e7 100644 --- a/deps/v8/src/ast.h +++ b/deps/v8/src/ast.h @@ -28,7 +28,6 @@ #ifndef V8_AST_H_ #define V8_AST_H_ -#include "location.h" #include "execution.h" #include "factory.h" #include "jsregexp.h" @@ -162,7 +161,25 @@ class Statement: public AstNode { class Expression: public AstNode { public: - Expression() : location_(Location::Uninitialized()) {} + enum Context { + // Not assigned a context yet, or else will not be visited during + // code generation. + kUninitialized, + // Evaluated for its side effects. + kEffect, + // Evaluated for its value (and side effects). + kValue, + // Evaluated for control flow (and side effects). + kTest, + // Evaluated for control flow and side effects. Value is also + // needed if true. + kValueTest, + // Evaluated for control flow and side effects. Value is also + // needed if false. + kTestValue + }; + + Expression() : context_(kUninitialized) {} virtual Expression* AsExpression() { return this; } @@ -177,12 +194,12 @@ class Expression: public AstNode { // Static type information for this expression. 
SmiAnalysis* type() { return &type_; } - Location location() { return location_; } - void set_location(Location loc) { location_ = loc; } + Context context() { return context_; } + void set_context(Context context) { context_ = context; } private: SmiAnalysis type_; - Location location_; + Context context_; }; @@ -305,7 +322,7 @@ class IterationStatement: public BreakableStatement { class DoWhileStatement: public IterationStatement { public: explicit DoWhileStatement(ZoneStringList* labels) - : IterationStatement(labels), cond_(NULL) { + : IterationStatement(labels), cond_(NULL), condition_position_(-1) { } void Initialize(Expression* cond, Statement* body) { @@ -317,8 +334,14 @@ class DoWhileStatement: public IterationStatement { Expression* cond() const { return cond_; } + // Position where condition expression starts. We need it to make + // the loop's condition a breakable location. + int condition_position() { return condition_position_; } + void set_condition_position(int pos) { condition_position_ = pos; } + private: Expression* cond_; + int condition_position_; }; @@ -935,11 +958,7 @@ class Slot: public Expression { // variable name in the context object on the heap, // with lookup starting at the current context. index() // is invalid. - LOOKUP, - - // A property in the global object. var()->name() is - // the property name. 
- GLOBAL + LOOKUP }; Slot(Variable* var, Type type, int index) @@ -1263,7 +1282,6 @@ class FunctionLiteral: public Expression { ZoneList<Statement*>* body, int materialized_literal_count, int expected_property_count, - bool has_only_this_property_assignments, bool has_only_simple_this_property_assignments, Handle<FixedArray> this_property_assignments, int num_parameters, @@ -1275,7 +1293,6 @@ class FunctionLiteral: public Expression { body_(body), materialized_literal_count_(materialized_literal_count), expected_property_count_(expected_property_count), - has_only_this_property_assignments_(has_only_this_property_assignments), has_only_simple_this_property_assignments_( has_only_simple_this_property_assignments), this_property_assignments_(this_property_assignments), @@ -1285,7 +1302,8 @@ class FunctionLiteral: public Expression { is_expression_(is_expression), loop_nesting_(0), function_token_position_(RelocInfo::kNoPosition), - inferred_name_(Heap::empty_string()) { + inferred_name_(Heap::empty_string()), + try_fast_codegen_(false) { #ifdef DEBUG already_compiled_ = false; #endif @@ -1307,9 +1325,6 @@ class FunctionLiteral: public Expression { int materialized_literal_count() { return materialized_literal_count_; } int expected_property_count() { return expected_property_count_; } - bool has_only_this_property_assignments() { - return has_only_this_property_assignments_; - } bool has_only_simple_this_property_assignments() { return has_only_simple_this_property_assignments_; } @@ -1328,6 +1343,9 @@ class FunctionLiteral: public Expression { inferred_name_ = inferred_name; } + bool try_fast_codegen() { return try_fast_codegen_; } + void set_try_fast_codegen(bool flag) { try_fast_codegen_ = flag; } + #ifdef DEBUG void mark_as_compiled() { ASSERT(!already_compiled_); @@ -1341,7 +1359,6 @@ class FunctionLiteral: public Expression { ZoneList<Statement*>* body_; int materialized_literal_count_; int expected_property_count_; - bool has_only_this_property_assignments_; 
bool has_only_simple_this_property_assignments_; Handle<FixedArray> this_property_assignments_; int num_parameters_; @@ -1351,6 +1368,7 @@ class FunctionLiteral: public Expression { int loop_nesting_; int function_token_position_; Handle<String> inferred_name_; + bool try_fast_codegen_; #ifdef DEBUG bool already_compiled_; #endif diff --git a/deps/v8/src/bootstrapper.cc b/deps/v8/src/bootstrapper.cc index 3436b505dc..4491962163 100644 --- a/deps/v8/src/bootstrapper.cc +++ b/deps/v8/src/bootstrapper.cc @@ -36,6 +36,7 @@ #include "global-handles.h" #include "macro-assembler.h" #include "natives.h" +#include "snapshot.h" namespace v8 { namespace internal { @@ -92,14 +93,39 @@ class SourceCodeCache BASE_EMBEDDED { static SourceCodeCache natives_cache(Script::TYPE_NATIVE); static SourceCodeCache extensions_cache(Script::TYPE_EXTENSION); +// This is for delete, not delete[]. +static List<char*>* delete_these_non_arrays_on_tear_down = NULL; + + +NativesExternalStringResource::NativesExternalStringResource(const char* source) + : data_(source), length_(StrLength(source)) { + if (delete_these_non_arrays_on_tear_down == NULL) { + delete_these_non_arrays_on_tear_down = new List<char*>(2); + } + // The resources are small objects and we only make a fixed number of + // them, but let's clean them up on exit for neatness. + delete_these_non_arrays_on_tear_down-> + Add(reinterpret_cast<char*>(this)); +} Handle<String> Bootstrapper::NativesSourceLookup(int index) { ASSERT(0 <= index && index < Natives::GetBuiltinsCount()); if (Heap::natives_source_cache()->get(index)->IsUndefined()) { - Handle<String> source_code = - Factory::NewStringFromAscii(Natives::GetScriptSource(index)); - Heap::natives_source_cache()->set(index, *source_code); + if (!Snapshot::IsEnabled() || FLAG_new_snapshot) { + // We can use external strings for the natives. 
+ NativesExternalStringResource* resource = + new NativesExternalStringResource( + Natives::GetScriptSource(index).start()); + Handle<String> source_code = + Factory::NewExternalStringFromAscii(resource); + Heap::natives_source_cache()->set(index, *source_code); + } else { + // Old snapshot code can't cope with external strings at all. + Handle<String> source_code = + Factory::NewStringFromAscii(Natives::GetScriptSource(index)); + Heap::natives_source_cache()->set(index, *source_code); + } } Handle<Object> cached_source(Heap::natives_source_cache()->get(index)); return Handle<String>::cast(cached_source); @@ -125,6 +151,16 @@ void Bootstrapper::Initialize(bool create_heap_objects) { void Bootstrapper::TearDown() { + if (delete_these_non_arrays_on_tear_down != NULL) { + int len = delete_these_non_arrays_on_tear_down->length(); + ASSERT(len < 20); // Don't use this mechanism for unbounded allocations. + for (int i = 0; i < len; i++) { + delete delete_these_non_arrays_on_tear_down->at(i); + } + delete delete_these_non_arrays_on_tear_down; + delete_these_non_arrays_on_tear_down = NULL; + } + natives_cache.Initialize(false); // Yes, symmetrical extensions_cache.Initialize(false); } diff --git a/deps/v8/src/bootstrapper.h b/deps/v8/src/bootstrapper.h index 15fc88dc06..07d2747b44 100644 --- a/deps/v8/src/bootstrapper.h +++ b/deps/v8/src/bootstrapper.h @@ -76,6 +76,24 @@ class Bootstrapper : public AllStatic { static void FreeThreadResources(); }; + +class NativesExternalStringResource + : public v8::String::ExternalAsciiStringResource { + public: + explicit NativesExternalStringResource(const char* source); + + const char* data() const { + return data_; + } + + size_t length() const { + return length_; + } + private: + const char* data_; + size_t length_; +}; + }} // namespace v8::internal #endif // V8_BOOTSTRAPPER_H_ diff --git a/deps/v8/src/checks.cc b/deps/v8/src/checks.cc index f8a2f24f64..b5df316d0f 100644 --- a/deps/v8/src/checks.cc +++ b/deps/v8/src/checks.cc @@ 
-36,6 +36,8 @@ static int fatal_error_handler_nesting_depth = 0; // Contains protection against recursive calls (faults while handling faults). extern "C" void V8_Fatal(const char* file, int line, const char* format, ...) { + fflush(stdout); + fflush(stderr); fatal_error_handler_nesting_depth++; // First time we try to print an error message if (fatal_error_handler_nesting_depth < 2) { diff --git a/deps/v8/src/code-stubs.cc b/deps/v8/src/code-stubs.cc index 73ff0115f2..dbc39ff3bf 100644 --- a/deps/v8/src/code-stubs.cc +++ b/deps/v8/src/code-stubs.cc @@ -36,10 +36,27 @@ namespace v8 { namespace internal { Handle<Code> CodeStub::GetCode() { - uint32_t key = GetKey(); - int index = Heap::code_stubs()->FindEntry(key); - if (index == NumberDictionary::kNotFound) { - HandleScope scope; + bool custom_cache = has_custom_cache(); + + int index = 0; + uint32_t key = 0; + if (custom_cache) { + Code* cached; + if (GetCustomCache(&cached)) { + return Handle<Code>(cached); + } else { + index = NumberDictionary::kNotFound; + } + } else { + key = GetKey(); + index = Heap::code_stubs()->FindEntry(key); + if (index != NumberDictionary::kNotFound) + return Handle<Code>(Code::cast(Heap::code_stubs()->ValueAt(index))); + } + + Code* result; + { + v8::HandleScope scope; // Update the static counter each time a new code stub is generated. Counters::code_stubs.Increment(); @@ -79,25 +96,28 @@ Handle<Code> CodeStub::GetCode() { } #endif - // Update the dictionary and the root in Heap. - Handle<NumberDictionary> dict = - Factory::DictionaryAtNumberPut( - Handle<NumberDictionary>(Heap::code_stubs()), - key, - code); - Heap::public_set_code_stubs(*dict); - index = Heap::code_stubs()->FindEntry(key); + if (custom_cache) { + SetCustomCache(*code); + } else { + // Update the dictionary and the root in Heap. 
+ Handle<NumberDictionary> dict = + Factory::DictionaryAtNumberPut( + Handle<NumberDictionary>(Heap::code_stubs()), + key, + code); + Heap::public_set_code_stubs(*dict); + } + result = *code; } - ASSERT(index != NumberDictionary::kNotFound); - return Handle<Code>(Code::cast(Heap::code_stubs()->ValueAt(index))); + return Handle<Code>(result); } const char* CodeStub::MajorName(CodeStub::Major major_key) { switch (major_key) { #define DEF_CASE(name) case name: return #name; - CODE_STUB_LIST_ALL(DEF_CASE) + CODE_STUB_LIST(DEF_CASE) #undef DEF_CASE default: UNREACHABLE(); diff --git a/deps/v8/src/code-stubs.h b/deps/v8/src/code-stubs.h index 121140d1ba..6c50c6db0e 100644 --- a/deps/v8/src/code-stubs.h +++ b/deps/v8/src/code-stubs.h @@ -33,23 +33,23 @@ namespace internal { // List of code stubs used on all platforms. The order in this list is important // as only the stubs up to and including RecordWrite allows nested stub calls. -#define CODE_STUB_LIST_ALL(V) \ - V(CallFunction) \ - V(GenericBinaryOp) \ - V(SmiOp) \ - V(Compare) \ - V(RecordWrite) \ - V(ConvertToDouble) \ - V(WriteInt32ToHeapNumber) \ - V(StackCheck) \ - V(UnarySub) \ - V(RevertToNumber) \ - V(ToBoolean) \ - V(Instanceof) \ - V(CounterOp) \ - V(ArgumentsAccess) \ - V(Runtime) \ - V(CEntry) \ +#define CODE_STUB_LIST_ALL_PLATFORMS(V) \ + V(CallFunction) \ + V(GenericBinaryOp) \ + V(SmiOp) \ + V(Compare) \ + V(RecordWrite) \ + V(ConvertToDouble) \ + V(WriteInt32ToHeapNumber) \ + V(StackCheck) \ + V(UnarySub) \ + V(RevertToNumber) \ + V(ToBoolean) \ + V(Instanceof) \ + V(CounterOp) \ + V(ArgumentsAccess) \ + V(Runtime) \ + V(CEntry) \ V(JSEntry) // List of code stubs only used on ARM platforms. @@ -64,8 +64,8 @@ namespace internal { #endif // Combined list of code stubs. -#define CODE_STUB_LIST(V) \ - CODE_STUB_LIST_ALL(V) \ +#define CODE_STUB_LIST(V) \ + CODE_STUB_LIST_ALL_PLATFORMS(V) \ CODE_STUB_LIST_ARM(V) // Stub is base classes of all stubs. 
@@ -75,6 +75,7 @@ class CodeStub BASE_EMBEDDED { #define DEF_ENUM(name) name, CODE_STUB_LIST(DEF_ENUM) #undef DEF_ENUM + NoCache, // marker for stubs that do custom caching NUMBER_OF_IDS }; @@ -91,6 +92,12 @@ class CodeStub BASE_EMBEDDED { virtual ~CodeStub() {} + // Override these methods to provide a custom caching mechanism for + // an individual type of code stub. + virtual bool GetCustomCache(Code** code_out) { return false; } + virtual void SetCustomCache(Code* value) { } + virtual bool has_custom_cache() { return false; } + protected: static const int kMajorBits = 5; static const int kMinorBits = kBitsPerInt - kSmiTagSize - kMajorBits; diff --git a/deps/v8/src/codegen.cc b/deps/v8/src/codegen.cc index 28c0ba5f9e..6917d459c7 100644 --- a/deps/v8/src/codegen.cc +++ b/deps/v8/src/codegen.cc @@ -29,6 +29,7 @@ #include "bootstrapper.h" #include "codegen-inl.h" +#include "compiler.h" #include "debug.h" #include "oprofile-agent.h" #include "prettyprinter.h" @@ -250,98 +251,6 @@ bool CodeGenerator::ShouldGenerateLog(Expression* type) { #endif -// Sets the function info on a function. -// The start_position points to the first '(' character after the function name -// in the full script source. When counting characters in the script source the -// the first character is number 0 (not 1). 
-void CodeGenerator::SetFunctionInfo(Handle<JSFunction> fun, - FunctionLiteral* lit, - bool is_toplevel, - Handle<Script> script) { - fun->shared()->set_length(lit->num_parameters()); - fun->shared()->set_formal_parameter_count(lit->num_parameters()); - fun->shared()->set_script(*script); - fun->shared()->set_function_token_position(lit->function_token_position()); - fun->shared()->set_start_position(lit->start_position()); - fun->shared()->set_end_position(lit->end_position()); - fun->shared()->set_is_expression(lit->is_expression()); - fun->shared()->set_is_toplevel(is_toplevel); - fun->shared()->set_inferred_name(*lit->inferred_name()); - fun->shared()->SetThisPropertyAssignmentsInfo( - lit->has_only_this_property_assignments(), - lit->has_only_simple_this_property_assignments(), - *lit->this_property_assignments()); -} - - -Handle<Code> CodeGenerator::ComputeLazyCompile(int argc) { - CALL_HEAP_FUNCTION(StubCache::ComputeLazyCompile(argc), Code); -} - - -Handle<JSFunction> CodeGenerator::BuildBoilerplate(FunctionLiteral* node) { -#ifdef DEBUG - // We should not try to compile the same function literal more than - // once. - node->mark_as_compiled(); -#endif - - // Determine if the function can be lazily compiled. This is - // necessary to allow some of our builtin JS files to be lazily - // compiled. These builtins cannot be handled lazily by the parser, - // since we have to know if a function uses the special natives - // syntax, which is something the parser records. - bool allow_lazy = node->AllowsLazyCompilation(); - - // Generate code - Handle<Code> code; - if (FLAG_lazy && allow_lazy) { - code = ComputeLazyCompile(node->num_parameters()); - } else { - // The bodies of function literals have not yet been visited by - // the AST optimizer/analyzer. - if (!Rewriter::Optimize(node)) { - return Handle<JSFunction>::null(); - } - - code = MakeCode(node, script_, false); - - // Check for stack-overflow exception. 
- if (code.is_null()) { - SetStackOverflow(); - return Handle<JSFunction>::null(); - } - - // Function compilation complete. - LOG(CodeCreateEvent(Logger::FUNCTION_TAG, *code, *node->name())); - -#ifdef ENABLE_OPROFILE_AGENT - OProfileAgent::CreateNativeCodeRegion(*node->name(), - code->instruction_start(), - code->instruction_size()); -#endif - } - - // Create a boilerplate function. - Handle<JSFunction> function = - Factory::NewFunctionBoilerplate(node->name(), - node->materialized_literal_count(), - code); - CodeGenerator::SetFunctionInfo(function, node, false, script_); - -#ifdef ENABLE_DEBUGGER_SUPPORT - // Notify debugger that a new function has been added. - Debugger::OnNewFunction(function); -#endif - - // Set the expected number of properties for instances and return - // the resulting function. - SetExpectedNofPropertiesFromEstimate(function, - node->expected_property_count()); - return function; -} - - Handle<Code> CodeGenerator::ComputeCallInitialize( int argc, InLoopFlag in_loop) { @@ -398,7 +307,8 @@ void CodeGenerator::ProcessDeclarations(ZoneList<Declaration*>* declarations) { array->set_undefined(j++); } } else { - Handle<JSFunction> function = BuildBoilerplate(node->fun()); + Handle<JSFunction> function = + Compiler::BuildBoilerplate(node->fun(), script(), this); // Check for stack-overflow exception. 
if (HasStackOverflow()) return; array->set(j++, *function); @@ -521,6 +431,9 @@ void CodeGenerator::CodeForStatementPosition(Statement* stmt) { if (FLAG_debug_info) RecordPositions(masm(), stmt->statement_pos()); } +void CodeGenerator::CodeForDoWhileConditionPosition(DoWhileStatement* stmt) { + if (FLAG_debug_info) RecordPositions(masm(), stmt->condition_position()); +} void CodeGenerator::CodeForSourcePosition(int pos) { if (FLAG_debug_info && pos != RelocInfo::kNoPosition) { @@ -551,4 +464,20 @@ void ArgumentsAccessStub::Generate(MacroAssembler* masm) { } +bool ApiGetterEntryStub::GetCustomCache(Code** code_out) { + Object* cache = info()->load_stub_cache(); + if (cache->IsUndefined()) { + return false; + } else { + *code_out = Code::cast(cache); + return true; + } +} + + +void ApiGetterEntryStub::SetCustomCache(Code* value) { + info()->set_load_stub_cache(value); +} + + } } // namespace v8::internal diff --git a/deps/v8/src/codegen.h b/deps/v8/src/codegen.h index 8c1b733675..85a08d59e5 100644 --- a/deps/v8/src/codegen.h +++ b/deps/v8/src/codegen.h @@ -38,9 +38,9 @@ // MakeCode // MakeCodePrologue // MakeCodeEpilogue -// SetFunctionInfo // masm // frame +// script // has_valid_frame // SetFrame // DeleteFrame @@ -69,6 +69,7 @@ // CodeForFunctionPosition // CodeForReturnPosition // CodeForStatementPosition +// CodeForDoWhileConditionPosition // CodeForSourcePosition @@ -301,7 +302,7 @@ class CEntryStub : public CodeStub { Label* throw_normal_exception, Label* throw_termination_exception, Label* throw_out_of_memory_exception, - StackFrame::Type frame_type, + ExitFrame::Mode mode, bool do_gc, bool always_allocate_scope); void GenerateThrowTOS(MacroAssembler* masm); @@ -320,6 +321,32 @@ class CEntryStub : public CodeStub { }; +class ApiGetterEntryStub : public CodeStub { + public: + ApiGetterEntryStub(Handle<AccessorInfo> info, + ApiFunction* fun) + : info_(info), + fun_(fun) { } + void Generate(MacroAssembler* masm); + virtual bool has_custom_cache() { return true; 
} + virtual bool GetCustomCache(Code** code_out); + virtual void SetCustomCache(Code* value); + + static const int kStackSpace = 6; + static const int kArgc = 4; + private: + Handle<AccessorInfo> info() { return info_; } + ApiFunction* fun() { return fun_; } + Major MajorKey() { return NoCache; } + int MinorKey() { return 0; } + const char* GetName() { return "ApiEntryStub"; } + // The accessor info associated with the function. + Handle<AccessorInfo> info_; + // The function to be called. + ApiFunction* fun_; +}; + + class CEntryDebugBreakStub : public CEntryStub { public: CEntryDebugBreakStub() : CEntryStub(1) { } diff --git a/deps/v8/src/compiler.cc b/deps/v8/src/compiler.cc index 129f1aac15..4e80a24502 100644 --- a/deps/v8/src/compiler.cc +++ b/deps/v8/src/compiler.cc @@ -48,23 +48,24 @@ class CodeGenSelector: public AstVisitor { CodeGenSelector() : has_supported_syntax_(true), - location_(Location::Uninitialized()) { + context_(Expression::kUninitialized) { } CodeGenTag Select(FunctionLiteral* fun); private: + // Visit an expression in a given expression context. + void ProcessExpression(Expression* expr, Expression::Context context) { + Expression::Context saved = context_; + context_ = context; + Visit(expr); + expr->set_context(context); + context_ = saved; + } + void VisitDeclarations(ZoneList<Declaration*>* decls); void VisitStatements(ZoneList<Statement*>* stmts); - // Visit an expression in effect context with a desired location of - // nowhere. - void VisitAsEffect(Expression* expr); - - // Visit an expression in value context with a desired location of - // temporary. - void VisitAsValue(Expression* expr); - // AST node visit functions. #define DECLARE_VISIT(type) virtual void Visit##type(type* node); AST_NODE_LIST(DECLARE_VISIT) @@ -72,8 +73,8 @@ class CodeGenSelector: public AstVisitor { bool has_supported_syntax_; - // The desired location of the currently visited expression. 
- Location location_; + // The desired expression context of the currently visited expression. + Expression::Context context_; DISALLOW_COPY_AND_ASSIGN(CodeGenSelector); }; @@ -82,7 +83,8 @@ class CodeGenSelector: public AstVisitor { static Handle<Code> MakeCode(FunctionLiteral* literal, Handle<Script> script, Handle<Context> context, - bool is_eval) { + bool is_eval, + Handle<SharedFunctionInfo> shared) { ASSERT(literal != NULL); // Rewrite the AST by introducing .result assignments where needed. @@ -119,12 +121,21 @@ static Handle<Code> MakeCode(FunctionLiteral* literal, // Generate code and return it. if (FLAG_fast_compiler) { - CodeGenSelector selector; - CodeGenSelector::CodeGenTag code_gen = selector.Select(literal); - if (code_gen == CodeGenSelector::FAST) { - return FastCodeGenerator::MakeCode(literal, script, is_eval); + // If there is no shared function info, try the fast code + // generator for code in the global scope. Otherwise obey the + // explicit hint in the shared function info. 
+ if (shared.is_null() && !literal->scope()->is_global_scope()) { + if (FLAG_trace_bailout) PrintF("Non-global scope\n"); + } else if (!shared.is_null() && !shared->try_fast_codegen()) { + if (FLAG_trace_bailout) PrintF("No hint to try fast\n"); + } else { + CodeGenSelector selector; + CodeGenSelector::CodeGenTag code_gen = selector.Select(literal); + if (code_gen == CodeGenSelector::FAST) { + return FastCodeGenerator::MakeCode(literal, script, is_eval); + } + ASSERT(code_gen == CodeGenSelector::NORMAL); } - ASSERT(code_gen == CodeGenSelector::NORMAL); } return CodeGenerator::MakeCode(literal, script, is_eval); } @@ -166,7 +177,8 @@ static Handle<JSFunction> MakeFunction(bool is_global, if (is_eval) { JavaScriptFrameIterator it; script->set_eval_from_function(it.frame()->function()); - int offset = it.frame()->pc() - it.frame()->code()->instruction_start(); + int offset = static_cast<int>( + it.frame()->pc() - it.frame()->code()->instruction_start()); script->set_eval_from_instructions_offset(Smi::FromInt(offset)); } } @@ -209,7 +221,8 @@ static Handle<JSFunction> MakeFunction(bool is_global, HistogramTimerScope timer(rate); // Compile the code. - Handle<Code> code = MakeCode(lit, script, context, is_eval); + Handle<Code> code = MakeCode(lit, script, context, is_eval, + Handle<SharedFunctionInfo>::null()); // Check for stack-overflow exceptions. if (code.is_null()) { @@ -246,7 +259,7 @@ static Handle<JSFunction> MakeFunction(bool is_global, code); ASSERT_EQ(RelocInfo::kNoPosition, lit->function_token_position()); - CodeGenerator::SetFunctionInfo(fun, lit, true, script); + Compiler::SetFunctionInfo(fun, lit, true, script); // Hint to the runtime system used when allocating space for initial // property space by setting the expected number of properties for @@ -410,7 +423,8 @@ bool Compiler::CompileLazy(Handle<SharedFunctionInfo> shared, HistogramTimerScope timer(&Counters::compile_lazy); // Compile the code. 
- Handle<Code> code = MakeCode(lit, script, Handle<Context>::null(), false); + Handle<Code> code = MakeCode(lit, script, Handle<Context>::null(), false, + shared); // Check for stack-overflow exception. if (code.is_null()) { @@ -452,7 +466,6 @@ bool Compiler::CompileLazy(Handle<SharedFunctionInfo> shared, // Set the optimication hints after performing lazy compilation, as these are // not set when the function is set up as a lazily compiled function. shared->SetThisPropertyAssignmentsInfo( - lit->has_only_this_property_assignments(), lit->has_only_simple_this_property_assignments(), *lit->this_property_assignments()); @@ -462,18 +475,137 @@ bool Compiler::CompileLazy(Handle<SharedFunctionInfo> shared, } +Handle<JSFunction> Compiler::BuildBoilerplate(FunctionLiteral* literal, + Handle<Script> script, + AstVisitor* caller) { +#ifdef DEBUG + // We should not try to compile the same function literal more than + // once. + literal->mark_as_compiled(); +#endif + + // Determine if the function can be lazily compiled. This is + // necessary to allow some of our builtin JS files to be lazily + // compiled. These builtins cannot be handled lazily by the parser, + // since we have to know if a function uses the special natives + // syntax, which is something the parser records. + bool allow_lazy = literal->AllowsLazyCompilation(); + + // Generate code + Handle<Code> code; + if (FLAG_lazy && allow_lazy) { + code = ComputeLazyCompile(literal->num_parameters()); + } else { + // The bodies of function literals have not yet been visited by + // the AST optimizer/analyzer. + if (!Rewriter::Optimize(literal)) { + return Handle<JSFunction>::null(); + } + + // Generate code and return it. 
+ bool is_compiled = false; + if (FLAG_fast_compiler && literal->try_fast_codegen()) { + CodeGenSelector selector; + CodeGenSelector::CodeGenTag code_gen = selector.Select(literal); + if (code_gen == CodeGenSelector::FAST) { + code = FastCodeGenerator::MakeCode(literal, + script, + false); // Not eval. + is_compiled = true; + } + } + + if (!is_compiled) { + // We didn't try the fast compiler, or we failed to select it. + code = CodeGenerator::MakeCode(literal, + script, + false); // Not eval. + } + + // Check for stack-overflow exception. + if (code.is_null()) { + caller->SetStackOverflow(); + return Handle<JSFunction>::null(); + } + + // Function compilation complete. + LOG(CodeCreateEvent(Logger::FUNCTION_TAG, *code, *literal->name())); + +#ifdef ENABLE_OPROFILE_AGENT + OProfileAgent::CreateNativeCodeRegion(*node->name(), + code->instruction_start(), + code->instruction_size()); +#endif + } + + // Create a boilerplate function. + Handle<JSFunction> function = + Factory::NewFunctionBoilerplate(literal->name(), + literal->materialized_literal_count(), + code); + SetFunctionInfo(function, literal, false, script); + +#ifdef ENABLE_DEBUGGER_SUPPORT + // Notify debugger that a new function has been added. + Debugger::OnNewFunction(function); +#endif + + // Set the expected number of properties for instances and return + // the resulting function. + SetExpectedNofPropertiesFromEstimate(function, + literal->expected_property_count()); + return function; +} + + +// Sets the function info on a function. +// The start_position points to the first '(' character after the function name +// in the full script source. When counting characters in the script source the +// the first character is number 0 (not 1). 
+void Compiler::SetFunctionInfo(Handle<JSFunction> fun, + FunctionLiteral* lit, + bool is_toplevel, + Handle<Script> script) { + fun->shared()->set_length(lit->num_parameters()); + fun->shared()->set_formal_parameter_count(lit->num_parameters()); + fun->shared()->set_script(*script); + fun->shared()->set_function_token_position(lit->function_token_position()); + fun->shared()->set_start_position(lit->start_position()); + fun->shared()->set_end_position(lit->end_position()); + fun->shared()->set_is_expression(lit->is_expression()); + fun->shared()->set_is_toplevel(is_toplevel); + fun->shared()->set_inferred_name(*lit->inferred_name()); + fun->shared()->SetThisPropertyAssignmentsInfo( + lit->has_only_simple_this_property_assignments(), + *lit->this_property_assignments()); + fun->shared()->set_try_fast_codegen(lit->try_fast_codegen()); +} + + CodeGenSelector::CodeGenTag CodeGenSelector::Select(FunctionLiteral* fun) { Scope* scope = fun->scope(); - if (!scope->is_global_scope()) { - if (FLAG_trace_bailout) PrintF("Non-global scope\n"); + if (scope->num_heap_slots() > 0) { + // We support functions with a local context if they do not have + // parameters that need to be copied into the context. 
+ for (int i = 0, len = scope->num_parameters(); i < len; i++) { + Slot* slot = scope->parameter(i)->slot(); + if (slot != NULL && slot->type() == Slot::CONTEXT) { + if (FLAG_trace_bailout) { + PrintF("function has context-allocated parameters"); + } + return NORMAL; + } + } + } + + if (scope->arguments() != NULL) { + if (FLAG_trace_bailout) PrintF("function uses 'arguments'\n"); return NORMAL; } - ASSERT(scope->num_heap_slots() == 0); - ASSERT(scope->arguments() == NULL); has_supported_syntax_ = true; - VisitDeclarations(fun->scope()->declarations()); + VisitDeclarations(scope->declarations()); if (!has_supported_syntax_) return NORMAL; VisitStatements(fun->body()); @@ -513,34 +645,9 @@ void CodeGenSelector::VisitStatements(ZoneList<Statement*>* stmts) { } -void CodeGenSelector::VisitAsEffect(Expression* expr) { - if (location_.is_effect()) { - Visit(expr); - } else { - Location saved = location_; - location_ = Location::Effect(); - Visit(expr); - location_ = saved; - } -} - - -void CodeGenSelector::VisitAsValue(Expression* expr) { - if (location_.is_value()) { - Visit(expr); - } else { - Location saved = location_; - location_ = Location::Value(); - Visit(expr); - location_ = saved; - } -} - - void CodeGenSelector::VisitDeclaration(Declaration* decl) { - Variable* var = decl->proxy()->var(); - if (!var->is_global() || var->mode() == Variable::CONST) { - BAILOUT("Non-global declaration"); + if (decl->fun() != NULL) { + ProcessExpression(decl->fun(), Expression::kValue); } } @@ -551,7 +658,7 @@ void CodeGenSelector::VisitBlock(Block* stmt) { void CodeGenSelector::VisitExpressionStatement(ExpressionStatement* stmt) { - VisitAsEffect(stmt->expression()); + ProcessExpression(stmt->expression(), Expression::kEffect); } @@ -561,7 +668,11 @@ void CodeGenSelector::VisitEmptyStatement(EmptyStatement* stmt) { void CodeGenSelector::VisitIfStatement(IfStatement* stmt) { - BAILOUT("IfStatement"); + ProcessExpression(stmt->condition(), Expression::kTest); + CHECK_BAILOUT; + 
Visit(stmt->then_statement()); + CHECK_BAILOUT; + Visit(stmt->else_statement()); } @@ -576,7 +687,7 @@ void CodeGenSelector::VisitBreakStatement(BreakStatement* stmt) { void CodeGenSelector::VisitReturnStatement(ReturnStatement* stmt) { - VisitAsValue(stmt->expression()); + ProcessExpression(stmt->expression(), Expression::kValue); } @@ -596,17 +707,39 @@ void CodeGenSelector::VisitSwitchStatement(SwitchStatement* stmt) { void CodeGenSelector::VisitDoWhileStatement(DoWhileStatement* stmt) { - BAILOUT("DoWhileStatement"); + // We do not handle loops with breaks or continue statements in their + // body. We will bailout when we hit those statements in the body. + ProcessExpression(stmt->cond(), Expression::kTest); + CHECK_BAILOUT; + Visit(stmt->body()); } void CodeGenSelector::VisitWhileStatement(WhileStatement* stmt) { - BAILOUT("WhileStatement"); + // We do not handle loops with breaks or continue statements in their + // body. We will bailout when we hit those statements in the body. + ProcessExpression(stmt->cond(), Expression::kTest); + CHECK_BAILOUT; + Visit(stmt->body()); } void CodeGenSelector::VisitForStatement(ForStatement* stmt) { - BAILOUT("ForStatement"); + // We do not handle loops with breaks or continue statements in their + // body. We will bailout when we hit those statements in the body. + if (stmt->init() != NULL) { + Visit(stmt->init()); + CHECK_BAILOUT; + } + if (stmt->cond() != NULL) { + ProcessExpression(stmt->cond(), Expression::kTest); + CHECK_BAILOUT; + } + Visit(stmt->body()); + if (stmt->next() != NULL) { + CHECK_BAILOUT; + Visit(stmt->next()); + } } @@ -626,15 +759,12 @@ void CodeGenSelector::VisitTryFinallyStatement(TryFinallyStatement* stmt) { void CodeGenSelector::VisitDebuggerStatement(DebuggerStatement* stmt) { - BAILOUT("DebuggerStatement"); + // Debugger statement is supported. 
} void CodeGenSelector::VisitFunctionLiteral(FunctionLiteral* expr) { - if (!expr->AllowsLazyCompilation()) { - BAILOUT("FunctionLiteral does not allow lazy compilation"); - } - expr->set_location(location_); + // Function literal is supported. } @@ -645,7 +775,11 @@ void CodeGenSelector::VisitFunctionBoilerplateLiteral( void CodeGenSelector::VisitConditional(Conditional* expr) { - BAILOUT("Conditional"); + ProcessExpression(expr->condition(), Expression::kTest); + CHECK_BAILOUT; + ProcessExpression(expr->then_expression(), context_); + CHECK_BAILOUT; + ProcessExpression(expr->else_expression(), context_); } @@ -660,28 +794,27 @@ void CodeGenSelector::VisitVariableProxy(VariableProxy* expr) { if (rewrite != NULL) { // Non-global. Slot* slot = rewrite->AsSlot(); - if (slot == NULL) { - // This is a variable rewritten to an explicit property access - // on the arguments object. - BAILOUT("non-global/non-slot variable reference"); - } - - Slot::Type type = slot->type(); - if (type != Slot::PARAMETER && type != Slot::LOCAL) { - BAILOUT("non-parameter/non-local slot reference"); + if (slot != NULL) { + Slot::Type type = slot->type(); + // When LOOKUP slots are enabled, some currently dead code + // implementing unary typeof will become live. + if (type == Slot::LOOKUP) { + BAILOUT("Lookup slot"); + } + } else { + BAILOUT("access to arguments object"); } } - expr->set_location(location_); } void CodeGenSelector::VisitLiteral(Literal* expr) { - expr->set_location(location_); + /* Nothing to do. */ } void CodeGenSelector::VisitRegExpLiteral(RegExpLiteral* expr) { - expr->set_location(location_); + /* Nothing to do. */ } @@ -711,14 +844,13 @@ void CodeGenSelector::VisitObjectLiteral(ObjectLiteral* expr) { case ObjectLiteral::Property::GETTER: // Fall through. case ObjectLiteral::Property::SETTER: // Fall through. 
case ObjectLiteral::Property::PROTOTYPE: - VisitAsValue(property->key()); + ProcessExpression(property->key(), Expression::kValue); CHECK_BAILOUT; break; } - VisitAsValue(property->value()); + ProcessExpression(property->value(), Expression::kValue); CHECK_BAILOUT; } - expr->set_location(location_); } @@ -728,10 +860,9 @@ void CodeGenSelector::VisitArrayLiteral(ArrayLiteral* expr) { Expression* subexpr = subexprs->at(i); if (subexpr->AsLiteral() != NULL) continue; if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue; - VisitAsValue(subexpr); + ProcessExpression(subexpr, Expression::kValue); CHECK_BAILOUT; } - expr->set_location(location_); } @@ -741,13 +872,8 @@ void CodeGenSelector::VisitCatchExtensionObject(CatchExtensionObject* expr) { void CodeGenSelector::VisitAssignment(Assignment* expr) { - // We support plain non-compound assignments to parameters and - // non-context (stack-allocated) locals. - if (expr->starts_initialization_block() || - expr->ends_initialization_block()) { - BAILOUT("initialization block start"); - } - + // We support plain non-compound assignments to properties, parameters and + // non-context (stack-allocated) locals, and global variables. Token::Value op = expr->op(); if (op == Token::INIT_CONST) BAILOUT("initialize constant"); if (op != Token::ASSIGN && op != Token::INIT_VAR) { @@ -755,18 +881,39 @@ void CodeGenSelector::VisitAssignment(Assignment* expr) { } Variable* var = expr->target()->AsVariableProxy()->AsVariable(); - if (var == NULL) BAILOUT("non-variable assignment"); - - if (!var->is_global()) { - ASSERT(var->slot() != NULL); - Slot::Type type = var->slot()->type(); - if (type != Slot::PARAMETER && type != Slot::LOCAL) { - BAILOUT("non-parameter/non-local slot assignment"); + Property* prop = expr->target()->AsProperty(); + if (var != NULL) { + // All global variables are supported. + if (!var->is_global()) { + if (var->slot() == NULL) { + // This is a parameter that has rewritten to an arguments access. 
+ BAILOUT("non-global/non-slot assignment"); + } + Slot::Type type = var->slot()->type(); + if (type == Slot::LOOKUP) { + BAILOUT("Lookup slot"); + } } + } else if (prop != NULL) { + ProcessExpression(prop->obj(), Expression::kValue); + CHECK_BAILOUT; + // We will only visit the key during code generation for keyed property + // stores. Leave its expression context uninitialized for named + // property stores. + Literal* lit = prop->key()->AsLiteral(); + uint32_t ignored; + if (lit == NULL || + !lit->handle()->IsSymbol() || + String::cast(*(lit->handle()))->AsArrayIndex(&ignored)) { + ProcessExpression(prop->key(), Expression::kValue); + CHECK_BAILOUT; + } + } else { + // This is a throw reference error. + BAILOUT("non-variable/non-property assignment"); } - VisitAsValue(expr->value()); - expr->set_location(location_); + ProcessExpression(expr->value(), Expression::kValue); } @@ -776,10 +923,9 @@ void CodeGenSelector::VisitThrow(Throw* expr) { void CodeGenSelector::VisitProperty(Property* expr) { - VisitAsValue(expr->obj()); + ProcessExpression(expr->obj(), Expression::kValue); CHECK_BAILOUT; - VisitAsValue(expr->key()); - expr->set_location(location_); + ProcessExpression(expr->key(), Expression::kValue); } @@ -790,33 +936,45 @@ void CodeGenSelector::VisitCall(Call* expr) { // Check for supported calls if (var != NULL && var->is_possibly_eval()) { - BAILOUT("Call to a function named 'eval'"); + BAILOUT("call to the identifier 'eval'"); } else if (var != NULL && !var->is_this() && var->is_global()) { - // ---------------------------------- - // JavaScript example: 'foo(1, 2, 3)' // foo is global - // ---------------------------------- + // Calls to global variables are supported. 
+ } else if (var != NULL && var->slot() != NULL && + var->slot()->type() == Slot::LOOKUP) { + BAILOUT("call to a lookup slot"); + } else if (fun->AsProperty() != NULL) { + Property* prop = fun->AsProperty(); + Literal* literal_key = prop->key()->AsLiteral(); + if (literal_key != NULL && literal_key->handle()->IsSymbol()) { + ProcessExpression(prop->obj(), Expression::kValue); + CHECK_BAILOUT; + } else { + ProcessExpression(prop->obj(), Expression::kValue); + CHECK_BAILOUT; + ProcessExpression(prop->key(), Expression::kValue); + CHECK_BAILOUT; + } } else { - BAILOUT("Call to a non-global function"); + // Otherwise the call is supported if the function expression is. + ProcessExpression(fun, Expression::kValue); } - // Check all arguments to the call. (Relies on TEMP meaning STACK.) + // Check all arguments to the call. for (int i = 0; i < args->length(); i++) { - VisitAsValue(args->at(i)); + ProcessExpression(args->at(i), Expression::kValue); CHECK_BAILOUT; } - expr->set_location(location_); } void CodeGenSelector::VisitCallNew(CallNew* expr) { - VisitAsValue(expr->expression()); + ProcessExpression(expr->expression(), Expression::kValue); CHECK_BAILOUT; ZoneList<Expression*>* args = expr->arguments(); // Check all arguments to the call for (int i = 0; i < args->length(); i++) { - VisitAsValue(args->at(i)); + ProcessExpression(args->at(i), Expression::kValue); CHECK_BAILOUT; } - expr->set_location(location_); } @@ -830,37 +988,88 @@ void CodeGenSelector::VisitCallRuntime(CallRuntime* expr) { } // Check all arguments to the call. (Relies on TEMP meaning STACK.) 
for (int i = 0; i < expr->arguments()->length(); i++) { - VisitAsValue(expr->arguments()->at(i)); + ProcessExpression(expr->arguments()->at(i), Expression::kValue); CHECK_BAILOUT; } - expr->set_location(location_); } void CodeGenSelector::VisitUnaryOperation(UnaryOperation* expr) { - BAILOUT("UnaryOperation"); + switch (expr->op()) { + case Token::VOID: + ProcessExpression(expr->expression(), Expression::kEffect); + break; + case Token::NOT: + ProcessExpression(expr->expression(), Expression::kTest); + break; + case Token::TYPEOF: + ProcessExpression(expr->expression(), Expression::kValue); + break; + default: + BAILOUT("UnaryOperation"); + } } void CodeGenSelector::VisitCountOperation(CountOperation* expr) { - BAILOUT("CountOperation"); + // We support postfix count operations on global variables. + if (expr->is_prefix()) BAILOUT("Prefix CountOperation"); + Variable* var = expr->expression()->AsVariableProxy()->AsVariable(); + if (var == NULL || !var->is_global()) BAILOUT("non-global postincrement"); + ProcessExpression(expr->expression(), Expression::kValue); } void CodeGenSelector::VisitBinaryOperation(BinaryOperation* expr) { switch (expr->op()) { case Token::COMMA: - VisitAsEffect(expr->left()); + ProcessExpression(expr->left(), Expression::kEffect); CHECK_BAILOUT; - Visit(expr->right()); // Location is the same as the parent location. + ProcessExpression(expr->right(), context_); break; case Token::OR: - VisitAsValue(expr->left()); + switch (context_) { + case Expression::kUninitialized: + UNREACHABLE(); + case Expression::kEffect: // Fall through. + case Expression::kTest: // Fall through. + case Expression::kTestValue: + // The left subexpression's value is not needed, it is in a pure + // test context. + ProcessExpression(expr->left(), Expression::kTest); + break; + case Expression::kValue: // Fall through. + case Expression::kValueTest: + // The left subexpression's value is needed, it is in a hybrid + // value/test context. 
+ ProcessExpression(expr->left(), Expression::kValueTest); + break; + } CHECK_BAILOUT; - // The location for the right subexpression is the same as for the - // whole expression so we call Visit directly. - Visit(expr->right()); + ProcessExpression(expr->right(), context_); + break; + + case Token::AND: + switch (context_) { + case Expression::kUninitialized: + UNREACHABLE(); + case Expression::kEffect: // Fall through. + case Expression::kTest: // Fall through. + case Expression::kValueTest: + // The left subexpression's value is not needed, it is in a pure + // test context. + ProcessExpression(expr->left(), Expression::kTest); + break; + case Expression::kValue: // Fall through. + case Expression::kTestValue: + // The left subexpression's value is needed, it is in a hybrid + // test/value context. + ProcessExpression(expr->left(), Expression::kTestValue); + break; + } + CHECK_BAILOUT; + ProcessExpression(expr->right(), context_); break; case Token::ADD: @@ -874,20 +1083,21 @@ void CodeGenSelector::VisitBinaryOperation(BinaryOperation* expr) { case Token::SHL: case Token::SHR: case Token::SAR: - VisitAsValue(expr->left()); + ProcessExpression(expr->left(), Expression::kValue); CHECK_BAILOUT; - VisitAsValue(expr->right()); + ProcessExpression(expr->right(), Expression::kValue); break; default: BAILOUT("Unsupported binary operation"); } - expr->set_location(location_); } void CodeGenSelector::VisitCompareOperation(CompareOperation* expr) { - BAILOUT("CompareOperation"); + ProcessExpression(expr->left(), Expression::kValue); + CHECK_BAILOUT; + ProcessExpression(expr->right(), Expression::kValue); } diff --git a/deps/v8/src/compiler.h b/deps/v8/src/compiler.h index 579970b3c6..546e446b98 100644 --- a/deps/v8/src/compiler.h +++ b/deps/v8/src/compiler.h @@ -71,6 +71,19 @@ class Compiler : public AllStatic { // true on success and false if the compilation resulted in a stack // overflow. 
static bool CompileLazy(Handle<SharedFunctionInfo> shared, int loop_nesting); + + // Compile a function boilerplate object (the function is possibly + // lazily compiled). Called recursively from a backend code + // generator 'caller' to build the boilerplate. + static Handle<JSFunction> BuildBoilerplate(FunctionLiteral* node, + Handle<Script> script, + AstVisitor* caller); + + // Set the function info for a newly compiled function. + static void SetFunctionInfo(Handle<JSFunction> fun, + FunctionLiteral* lit, + bool is_toplevel, + Handle<Script> script); }; diff --git a/deps/v8/src/conversions.cc b/deps/v8/src/conversions.cc index 3e66d286c2..fd6d38d84d 100644 --- a/deps/v8/src/conversions.cc +++ b/deps/v8/src/conversions.cc @@ -50,7 +50,7 @@ int HexValue(uc32 c) { // Provide a common interface to getting a character at a certain // index from a char* or a String object. static inline int GetChar(const char* str, int index) { - ASSERT(index >= 0 && index < static_cast<int>(strlen(str))); + ASSERT(index >= 0 && index < StrLength(str)); return str[index]; } @@ -61,7 +61,7 @@ static inline int GetChar(String* str, int index) { static inline int GetLength(const char* str) { - return strlen(str); + return StrLength(str); } @@ -101,7 +101,7 @@ static inline void ReleaseCString(String* original, const char* str) { static inline bool IsSpace(const char* str, int index) { - ASSERT(index >= 0 && index < static_cast<int>(strlen(str))); + ASSERT(index >= 0 && index < StrLength(str)); return Scanner::kIsWhiteSpace.get(str[index]); } @@ -121,13 +121,13 @@ static inline bool SubStringEquals(const char* str, static inline bool SubStringEquals(String* str, int index, const char* other) { HandleScope scope; int str_length = str->length(); - int other_length = strlen(other); + int other_length = StrLength(other); int end = index + other_length < str_length ? 
index + other_length : str_length; - Handle<String> slice = - Factory::NewStringSlice(Handle<String>(str), index, end); - return slice->IsEqualTo(Vector<const char>(other, other_length)); + Handle<String> substring = + Factory::NewSubString(Handle<String>(str), index, end); + return substring->IsEqualTo(Vector<const char>(other, other_length)); } @@ -319,7 +319,7 @@ static double InternalStringToDouble(S* str, ReleaseCString(str, cstr); if (result != 0.0 || end != cstr) { // It appears that strtod worked - index += end - cstr; + index += static_cast<int>(end - cstr); } else { // Check for {+,-,}Infinity bool is_negative = (GetChar(str, index) == '-'); @@ -383,7 +383,7 @@ const char* DoubleToCString(double v, Vector<char> buffer) { int sign; char* decimal_rep = dtoa(v, 0, 0, &decimal_point, &sign, NULL); - int length = strlen(decimal_rep); + int length = StrLength(decimal_rep); if (sign) builder.AddCharacter('-'); @@ -465,7 +465,7 @@ char* DoubleToFixedCString(double value, int f) { int decimal_point; int sign; char* decimal_rep = dtoa(abs_value, 3, f, &decimal_point, &sign, NULL); - int decimal_rep_length = strlen(decimal_rep); + int decimal_rep_length = StrLength(decimal_rep); // Create a representation that is padded with zeros if needed. 
int zero_prefix_length = 0; @@ -526,7 +526,8 @@ static char* CreateExponentialRepresentation(char* decimal_rep, if (significant_digits != 1) { builder.AddCharacter('.'); builder.AddString(decimal_rep + 1); - builder.AddPadding('0', significant_digits - strlen(decimal_rep)); + int rep_length = StrLength(decimal_rep); + builder.AddPadding('0', significant_digits - rep_length); } builder.AddCharacter('e'); @@ -553,11 +554,11 @@ char* DoubleToExponentialCString(double value, int f) { char* decimal_rep = NULL; if (f == -1) { decimal_rep = dtoa(value, 0, 0, &decimal_point, &sign, NULL); - f = strlen(decimal_rep) - 1; + f = StrLength(decimal_rep) - 1; } else { decimal_rep = dtoa(value, 2, f + 1, &decimal_point, &sign, NULL); } - int decimal_rep_length = strlen(decimal_rep); + int decimal_rep_length = StrLength(decimal_rep); ASSERT(decimal_rep_length > 0); ASSERT(decimal_rep_length <= f + 1); USE(decimal_rep_length); @@ -585,7 +586,7 @@ char* DoubleToPrecisionCString(double value, int p) { int decimal_point; int sign; char* decimal_rep = dtoa(value, 2, p, &decimal_point, &sign, NULL); - int decimal_rep_length = strlen(decimal_rep); + int decimal_rep_length = StrLength(decimal_rep); ASSERT(decimal_rep_length <= p); int exponent = decimal_point - 1; @@ -619,7 +620,7 @@ char* DoubleToPrecisionCString(double value, int p) { builder.AddCharacter('.'); const int extra = negative ? 
2 : 1; if (decimal_rep_length > decimal_point) { - const int len = strlen(decimal_rep + decimal_point); + const int len = StrLength(decimal_rep + decimal_point); const int n = Min(len, p - (builder.position() - extra)); builder.AddSubstring(decimal_rep + decimal_point, n); } diff --git a/deps/v8/src/debug-agent.cc b/deps/v8/src/debug-agent.cc index 9d5cace03b..070138254c 100644 --- a/deps/v8/src/debug-agent.cc +++ b/deps/v8/src/debug-agent.cc @@ -105,7 +105,7 @@ void DebuggerAgent::CreateSession(Socket* client) { if (session_ != NULL) { static const char* message = "Remote debugging session already active\r\n"; - client->Send(message, strlen(message)); + client->Send(message, StrLength(message)); delete client; return; } @@ -172,14 +172,15 @@ void DebuggerAgentSession::Run() { } // Convert UTF-8 to UTF-16. - unibrow::Utf8InputBuffer<> buf(*message, strlen(*message)); + unibrow::Utf8InputBuffer<> buf(*message, + StrLength(*message)); int len = 0; while (buf.has_more()) { buf.GetNext(); len++; } int16_t* temp = NewArray<int16_t>(len + 1); - buf.Reset(*message, strlen(*message)); + buf.Reset(*message, StrLength(*message)); for (int i = 0; i < len; i++) { temp[i] = buf.GetNext(); } @@ -203,7 +204,8 @@ void DebuggerAgentSession::Shutdown() { const char* DebuggerAgentUtil::kContentLength = "Content-Length"; -int DebuggerAgentUtil::kContentLengthSize = strlen(kContentLength); +int DebuggerAgentUtil::kContentLengthSize = + StrLength(kContentLength); SmartPointer<char> DebuggerAgentUtil::ReceiveMessage(const Socket* conn) { diff --git a/deps/v8/src/debug.cc b/deps/v8/src/debug.cc index 775b09aefd..2c4552effe 100644 --- a/deps/v8/src/debug.cc +++ b/deps/v8/src/debug.cc @@ -108,12 +108,13 @@ void BreakLocationIterator::Next() { // current value of these. 
if (RelocInfo::IsPosition(rmode())) { if (RelocInfo::IsStatementPosition(rmode())) { - statement_position_ = - rinfo()->data() - debug_info_->shared()->start_position(); + statement_position_ = static_cast<int>( + rinfo()->data() - debug_info_->shared()->start_position()); } // Always update the position as we don't want that to be before the // statement position. - position_ = rinfo()->data() - debug_info_->shared()->start_position(); + position_ = static_cast<int>( + rinfo()->data() - debug_info_->shared()->start_position()); ASSERT(position_ >= 0); ASSERT(statement_position_ >= 0); } @@ -182,7 +183,7 @@ void BreakLocationIterator::FindBreakLocationFromAddress(Address pc) { // Check if this break point is closer that what was previously found. if (this->pc() < pc && pc - this->pc() < distance) { closest_break_point = break_point(); - distance = pc - this->pc(); + distance = static_cast<int>(pc - this->pc()); // Check whether we can't get any closer. if (distance == 0) break; } diff --git a/deps/v8/src/debug.h b/deps/v8/src/debug.h index 19da2519a1..c5c6b5ee56 100644 --- a/deps/v8/src/debug.h +++ b/deps/v8/src/debug.h @@ -102,7 +102,9 @@ class BreakLocationIterator { void ClearAllDebugBreak(); - inline int code_position() { return pc() - debug_info_->code()->entry(); } + inline int code_position() { + return static_cast<int>(pc() - debug_info_->code()->entry()); + } inline int break_point() { return break_point_; } inline int position() { return position_; } inline int statement_position() { return statement_position_; } @@ -377,6 +379,8 @@ class Debug { static const int kX64CallInstructionLength = 13; static const int kX64JSReturnSequenceLength = 13; + static const int kARMJSReturnSequenceLength = 4; + // Code generator routines. 
static void GenerateLoadICDebugBreak(MacroAssembler* masm); static void GenerateStoreICDebugBreak(MacroAssembler* masm); diff --git a/deps/v8/src/disassembler.cc b/deps/v8/src/disassembler.cc index e2f908d7b2..524dbe6719 100644 --- a/deps/v8/src/disassembler.cc +++ b/deps/v8/src/disassembler.cc @@ -74,7 +74,7 @@ const char* V8NameConverter::NameOfAddress(byte* pc) const { } if (code_ != NULL) { - int offs = pc - code_->instruction_start(); + int offs = static_cast<int>(pc - code_->instruction_start()); // print as code offset, if it seems reasonable if (0 <= offs && offs < code_->instruction_size()) { OS::SNPrintF(buffer, "%d (%p)", offs, pc); @@ -289,7 +289,7 @@ static int DecodeIt(FILE* f, } delete it; - return pc - begin; + return static_cast<int>(pc - begin); } diff --git a/deps/v8/src/execution.cc b/deps/v8/src/execution.cc index 229b8df970..2f646a5638 100644 --- a/deps/v8/src/execution.cc +++ b/deps/v8/src/execution.cc @@ -31,18 +31,8 @@ #include "api.h" #include "codegen-inl.h" - -#if V8_TARGET_ARCH_IA32 -#include "ia32/simulator-ia32.h" -#elif V8_TARGET_ARCH_X64 -#include "x64/simulator-x64.h" -#elif V8_TARGET_ARCH_ARM -#include "arm/simulator-arm.h" -#else -#error Unsupported target architecture. -#endif - #include "debug.h" +#include "simulator.h" #include "v8threads.h" namespace v8 { @@ -237,15 +227,14 @@ void StackGuard::SetStackLimit(uintptr_t limit) { // If the current limits are special (eg due to a pending interrupt) then // leave them alone. 
uintptr_t jslimit = SimulatorStack::JsLimitFromCLimit(limit); - if (thread_local_.jslimit_ == thread_local_.initial_jslimit_) { + if (thread_local_.jslimit_ == thread_local_.real_jslimit_) { thread_local_.jslimit_ = jslimit; - Heap::SetStackLimit(jslimit); } - if (thread_local_.climit_ == thread_local_.initial_climit_) { + if (thread_local_.climit_ == thread_local_.real_climit_) { thread_local_.climit_ = limit; } - thread_local_.initial_climit_ = limit; - thread_local_.initial_jslimit_ = jslimit; + thread_local_.real_climit_ = limit; + thread_local_.real_jslimit_ = jslimit; } @@ -354,7 +343,7 @@ char* StackGuard::ArchiveStackGuard(char* to) { char* StackGuard::RestoreStackGuard(char* from) { ExecutionAccess access; memcpy(reinterpret_cast<char*>(&thread_local_), from, sizeof(ThreadLocal)); - Heap::SetStackLimit(thread_local_.jslimit_); + Heap::SetStackLimits(); return from + sizeof(ThreadLocal); } @@ -366,33 +355,33 @@ static internal::Thread::LocalStorageKey stack_limit_key = void StackGuard::FreeThreadResources() { Thread::SetThreadLocal( stack_limit_key, - reinterpret_cast<void*>(thread_local_.initial_climit_)); + reinterpret_cast<void*>(thread_local_.real_climit_)); } void StackGuard::ThreadLocal::Clear() { - initial_jslimit_ = kIllegalLimit; + real_jslimit_ = kIllegalLimit; jslimit_ = kIllegalLimit; - initial_climit_ = kIllegalLimit; + real_climit_ = kIllegalLimit; climit_ = kIllegalLimit; nesting_ = 0; postpone_interrupts_nesting_ = 0; interrupt_flags_ = 0; - Heap::SetStackLimit(kIllegalLimit); + Heap::SetStackLimits(); } void StackGuard::ThreadLocal::Initialize() { - if (initial_climit_ == kIllegalLimit) { + if (real_climit_ == kIllegalLimit) { // Takes the address of the limit variable in order to find out where // the top of stack is right now. 
uintptr_t limit = reinterpret_cast<uintptr_t>(&limit) - kLimitSize; ASSERT(reinterpret_cast<uintptr_t>(&limit) > kLimitSize); - initial_jslimit_ = SimulatorStack::JsLimitFromCLimit(limit); + real_jslimit_ = SimulatorStack::JsLimitFromCLimit(limit); jslimit_ = SimulatorStack::JsLimitFromCLimit(limit); - initial_climit_ = limit; + real_climit_ = limit; climit_ = limit; - Heap::SetStackLimit(SimulatorStack::JsLimitFromCLimit(limit)); + Heap::SetStackLimits(); } nesting_ = 0; postpone_interrupts_nesting_ = 0; diff --git a/deps/v8/src/execution.h b/deps/v8/src/execution.h index ac00aa46fd..52198c420d 100644 --- a/deps/v8/src/execution.h +++ b/deps/v8/src/execution.h @@ -150,10 +150,6 @@ class StackGuard : public AllStatic { // is assumed to grow downwards. static void SetStackLimit(uintptr_t limit); - static Address address_of_jslimit() { - return reinterpret_cast<Address>(&thread_local_.jslimit_); - } - // Threading support. static char* ArchiveStackGuard(char* to); static char* RestoreStackGuard(char* from); @@ -181,16 +177,24 @@ class StackGuard : public AllStatic { #endif static void Continue(InterruptFlag after_what); - // This provides an asynchronous read of the stack limit for the current + // This provides an asynchronous read of the stack limits for the current // thread. There are no locks protecting this, but it is assumed that you // have the global V8 lock if you are using multiple V8 threads. static uintptr_t climit() { return thread_local_.climit_; } - static uintptr_t jslimit() { return thread_local_.jslimit_; } + static uintptr_t real_jslimit() { + return thread_local_.real_jslimit_; + } + static Address address_of_jslimit() { + return reinterpret_cast<Address>(&thread_local_.jslimit_); + } + static Address address_of_real_jslimit() { + return reinterpret_cast<Address>(&thread_local_.real_jslimit_); + } private: // You should hold the ExecutionAccess lock when calling this method. 
@@ -198,17 +202,17 @@ class StackGuard : public AllStatic { // You should hold the ExecutionAccess lock when calling this method. static void set_limits(uintptr_t value, const ExecutionAccess& lock) { - Heap::SetStackLimit(value); thread_local_.jslimit_ = value; thread_local_.climit_ = value; + Heap::SetStackLimits(); } - // Reset limits to initial values. For example after handling interrupt. + // Reset limits to actual values. For example after handling interrupt. // You should hold the ExecutionAccess lock when calling this method. static void reset_limits(const ExecutionAccess& lock) { - thread_local_.jslimit_ = thread_local_.initial_jslimit_; - Heap::SetStackLimit(thread_local_.jslimit_); - thread_local_.climit_ = thread_local_.initial_climit_; + thread_local_.jslimit_ = thread_local_.real_jslimit_; + thread_local_.climit_ = thread_local_.real_climit_; + Heap::SetStackLimits(); } // Enable or disable interrupts. @@ -232,10 +236,21 @@ class StackGuard : public AllStatic { // Clear. void Initialize(); void Clear(); - uintptr_t initial_jslimit_; + + // The stack limit is split into a JavaScript and a C++ stack limit. These + // two are the same except when running on a simulator where the C++ and + // JavaScript stacks are separate. Each of the two stack limits have two + // values. The one eith the real_ prefix is the actual stack limit + // set for the VM. The one without the real_ prefix has the same value as + // the actual stack limit except when there is an interruption (e.g. debug + // break or preemption) in which case it is lowered to make stack checks + // fail. Both the generated code and the runtime system check against the + // one without the real_ prefix. + uintptr_t real_jslimit_; // Actual JavaScript stack limit set for the VM. uintptr_t jslimit_; - uintptr_t initial_climit_; + uintptr_t real_climit_; // Actual C++ stack limit set for the VM. 
uintptr_t climit_; + int nesting_; int postpone_interrupts_nesting_; int interrupt_flags_; diff --git a/deps/v8/src/factory.cc b/deps/v8/src/factory.cc index 32b69db394..3d9cd7a110 100644 --- a/deps/v8/src/factory.cc +++ b/deps/v8/src/factory.cc @@ -106,10 +106,10 @@ Handle<String> Factory::NewConsString(Handle<String> first, } -Handle<String> Factory::NewStringSlice(Handle<String> str, - int begin, - int end) { - CALL_HEAP_FUNCTION(str->Slice(begin, end), String); +Handle<String> Factory::NewSubString(Handle<String> str, + int begin, + int end) { + CALL_HEAP_FUNCTION(str->SubString(begin, end), String); } @@ -188,7 +188,8 @@ Handle<Script> Factory::NewScript(Handle<String> source) { script->set_type(Smi::FromInt(Script::TYPE_NORMAL)); script->set_compilation_type(Smi::FromInt(Script::COMPILATION_TYPE_HOST)); script->set_wrapper(*wrapper); - script->set_line_ends(Heap::undefined_value()); + script->set_line_ends_fixed_array(Heap::undefined_value()); + script->set_line_ends_js_array(Heap::undefined_value()); script->set_eval_from_function(Heap::undefined_value()); script->set_eval_from_instructions_offset(Smi::FromInt(0)); diff --git a/deps/v8/src/factory.h b/deps/v8/src/factory.h index cb438e95e7..951c0439b0 100644 --- a/deps/v8/src/factory.h +++ b/deps/v8/src/factory.h @@ -106,11 +106,10 @@ class Factory : public AllStatic { static Handle<String> NewConsString(Handle<String> first, Handle<String> second); - // Create a new sliced string object which represents a substring of a - // backing string. - static Handle<String> NewStringSlice(Handle<String> str, - int begin, - int end); + // Create a new string object which holds a substring of a string. + static Handle<String> NewSubString(Handle<String> str, + int begin, + int end); // Creates a new external String object. There are two String encodings // in the system: ASCII and two byte. 
Unlike other String types, it does diff --git a/deps/v8/src/fast-codegen.cc b/deps/v8/src/fast-codegen.cc index 2f6a27a5e8..53fcf3112c 100644 --- a/deps/v8/src/fast-codegen.cc +++ b/deps/v8/src/fast-codegen.cc @@ -28,6 +28,7 @@ #include "v8.h" #include "codegen-inl.h" +#include "compiler.h" #include "fast-codegen.h" #include "stub-cache.h" #include "debug.h" @@ -35,6 +36,8 @@ namespace v8 { namespace internal { +#define __ ACCESS_MASM(masm_) + Handle<Code> FastCodeGenerator::MakeCode(FunctionLiteral* fun, Handle<Script> script, bool is_eval) { @@ -71,119 +74,57 @@ int FastCodeGenerator::SlotOffset(Slot* slot) { } -// All platform macro assemblers in {ia32,x64,arm} have a push(Register) -// function. -void FastCodeGenerator::Move(Location destination, Register source) { - switch (destination.type()) { - case Location::kUninitialized: - UNREACHABLE(); - case Location::kEffect: - break; - case Location::kValue: - masm_->push(source); - break; - } -} - - -// All platform macro assemblers in {ia32,x64,arm} have a pop(Register) -// function. -void FastCodeGenerator::Move(Register destination, Location source) { - switch (source.type()) { - case Location::kUninitialized: // Fall through. - case Location::kEffect: - UNREACHABLE(); - case Location::kValue: - masm_->pop(destination); - } -} - - void FastCodeGenerator::VisitDeclarations( ZoneList<Declaration*>* declarations) { int length = declarations->length(); int globals = 0; for (int i = 0; i < length; i++) { - Declaration* node = declarations->at(i); - Variable* var = node->proxy()->var(); + Declaration* decl = declarations->at(i); + Variable* var = decl->proxy()->var(); Slot* slot = var->slot(); // If it was not possible to allocate the variable at compile // time, we need to "declare" it at runtime to make sure it // actually exists in the local context. 
if ((slot != NULL && slot->type() == Slot::LOOKUP) || !var->is_global()) { - UNREACHABLE(); + VisitDeclaration(decl); } else { // Count global variables and functions for later processing globals++; } } - // Return in case of no declared global functions or variables. - if (globals == 0) return; - // Compute array of global variable and function declarations. - Handle<FixedArray> array = Factory::NewFixedArray(2 * globals, TENURED); - for (int j = 0, i = 0; i < length; i++) { - Declaration* node = declarations->at(i); - Variable* var = node->proxy()->var(); - Slot* slot = var->slot(); - - if ((slot == NULL || slot->type() != Slot::LOOKUP) && var->is_global()) { - array->set(j++, *(var->name())); - if (node->fun() == NULL) { - if (var->mode() == Variable::CONST) { - // In case this is const property use the hole. - array->set_the_hole(j++); + // Do nothing in case of no declared global functions or variables. + if (globals > 0) { + Handle<FixedArray> array = Factory::NewFixedArray(2 * globals, TENURED); + for (int j = 0, i = 0; i < length; i++) { + Declaration* decl = declarations->at(i); + Variable* var = decl->proxy()->var(); + Slot* slot = var->slot(); + + if ((slot == NULL || slot->type() != Slot::LOOKUP) && var->is_global()) { + array->set(j++, *(var->name())); + if (decl->fun() == NULL) { + if (var->mode() == Variable::CONST) { + // In case this is const property use the hole. + array->set_the_hole(j++); + } else { + array->set_undefined(j++); + } } else { - array->set_undefined(j++); + Handle<JSFunction> function = + Compiler::BuildBoilerplate(decl->fun(), script_, this); + // Check for stack-overflow exception. + if (HasStackOverflow()) return; + array->set(j++, *function); } - } else { - Handle<JSFunction> function = BuildBoilerplate(node->fun()); - // Check for stack-overflow exception. 
- if (HasStackOverflow()) return; - array->set(j++, *function); } } + // Invoke the platform-dependent code generator to do the actual + // declaration the global variables and functions. + DeclareGlobals(array); } - - // Invoke the platform-dependent code generator to do the actual - // declaration the global variables and functions. - DeclareGlobals(array); -} - -Handle<JSFunction> FastCodeGenerator::BuildBoilerplate(FunctionLiteral* fun) { -#ifdef DEBUG - // We should not try to compile the same function literal more than - // once. - fun->mark_as_compiled(); -#endif - - // Generate code - Handle<Code> code = CodeGenerator::ComputeLazyCompile(fun->num_parameters()); - // Check for stack-overflow exception. - if (code.is_null()) { - SetStackOverflow(); - return Handle<JSFunction>::null(); - } - - // Create a boilerplate function. - Handle<JSFunction> function = - Factory::NewFunctionBoilerplate(fun->name(), - fun->materialized_literal_count(), - code); - CodeGenerator::SetFunctionInfo(function, fun, false, script_); - -#ifdef ENABLE_DEBUGGER_SUPPORT - // Notify debugger that a new function has been added. - Debugger::OnNewFunction(function); -#endif - - // Set the expected number of properties for instances and return - // the resulting function. - SetExpectedNofPropertiesFromEstimate(function, - fun->expected_property_count()); - return function; } @@ -215,8 +156,77 @@ void FastCodeGenerator::SetSourcePosition(int pos) { } -void FastCodeGenerator::VisitDeclaration(Declaration* decl) { - UNREACHABLE(); +void FastCodeGenerator::EmitLogicalOperation(BinaryOperation* expr) { +#ifdef DEBUG + Expression::Context expected = Expression::kUninitialized; + switch (expr->context()) { + case Expression::kUninitialized: + UNREACHABLE(); + case Expression::kEffect: // Fall through. + case Expression::kTest: + // The value of the left subexpression is not needed. 
+ expected = Expression::kTest; + break; + case Expression::kValue: + // The value of the left subexpression is needed and its specific + // context depends on the operator. + expected = (expr->op() == Token::OR) + ? Expression::kValueTest + : Expression::kTestValue; + break; + case Expression::kValueTest: + // The value of the left subexpression is needed for OR. + expected = (expr->op() == Token::OR) + ? Expression::kValueTest + : Expression::kTest; + break; + case Expression::kTestValue: + // The value of the left subexpression is needed for AND. + expected = (expr->op() == Token::OR) + ? Expression::kTest + : Expression::kTestValue; + break; + } + ASSERT_EQ(expected, expr->left()->context()); + ASSERT_EQ(expr->context(), expr->right()->context()); +#endif + + Label eval_right, done; + Label* saved_true = true_label_; + Label* saved_false = false_label_; + + // Set up the appropriate context for the left subexpression based on the + // operation and our own context. + if (expr->op() == Token::OR) { + // If there is no usable true label in the OR expression's context, use + // the end of this expression, otherwise inherit the same true label. + if (expr->context() == Expression::kEffect || + expr->context() == Expression::kValue) { + true_label_ = &done; + } + // The false label is the label of the second subexpression. + false_label_ = &eval_right; + } else { + ASSERT_EQ(Token::AND, expr->op()); + // The true label is the label of the second subexpression. + true_label_ = &eval_right; + // If there is no usable false label in the AND expression's context, + // use the end of the expression, otherwise inherit the same false + // label. 
+ if (expr->context() == Expression::kEffect || + expr->context() == Expression::kValue) { + false_label_ = &done; + } + } + + Visit(expr->left()); + true_label_ = saved_true; + false_label_ = saved_false; + + __ bind(&eval_right); + Visit(expr->right()); + + __ bind(&done); } @@ -241,7 +251,29 @@ void FastCodeGenerator::VisitEmptyStatement(EmptyStatement* stmt) { void FastCodeGenerator::VisitIfStatement(IfStatement* stmt) { - UNREACHABLE(); + Comment cmnt(masm_, "[ IfStatement"); + // Expressions cannot recursively enter statements, there are no labels in + // the state. + ASSERT_EQ(NULL, true_label_); + ASSERT_EQ(NULL, false_label_); + Label then_part, else_part, done; + + // Do not worry about optimizing for empty then or else bodies. + true_label_ = &then_part; + false_label_ = &else_part; + ASSERT(stmt->condition()->context() == Expression::kTest); + Visit(stmt->condition()); + true_label_ = NULL; + false_label_ = NULL; + + __ bind(&then_part); + Visit(stmt->then_statement()); + __ jmp(&done); + + __ bind(&else_part); + Visit(stmt->else_statement()); + + __ bind(&done); } @@ -271,17 +303,91 @@ void FastCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) { void FastCodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) { - UNREACHABLE(); + Comment cmnt(masm_, "[ DoWhileStatement"); + increment_loop_depth(); + Label body, exit; + + // Emit the test at the bottom of the loop. + __ bind(&body); + Visit(stmt->body()); + + // We are not in an expression context because we have been compiling + // statements. Set up a test expression context for the condition. 
+ ASSERT_EQ(NULL, true_label_); + ASSERT_EQ(NULL, false_label_); + true_label_ = &body; + false_label_ = &exit; + ASSERT(stmt->cond()->context() == Expression::kTest); + Visit(stmt->cond()); + true_label_ = NULL; + false_label_ = NULL; + + __ bind(&exit); + + decrement_loop_depth(); } void FastCodeGenerator::VisitWhileStatement(WhileStatement* stmt) { - UNREACHABLE(); + Comment cmnt(masm_, "[ WhileStatement"); + increment_loop_depth(); + Label test, body, exit; + + // Emit the test at the bottom of the loop. + __ jmp(&test); + + __ bind(&body); + Visit(stmt->body()); + + __ bind(&test); + // We are not in an expression context because we have been compiling + // statements. Set up a test expression context for the condition. + ASSERT_EQ(NULL, true_label_); + ASSERT_EQ(NULL, false_label_); + true_label_ = &body; + false_label_ = &exit; + ASSERT(stmt->cond()->context() == Expression::kTest); + Visit(stmt->cond()); + true_label_ = NULL; + false_label_ = NULL; + + __ bind(&exit); + + decrement_loop_depth(); } void FastCodeGenerator::VisitForStatement(ForStatement* stmt) { - UNREACHABLE(); + Comment cmnt(masm_, "[ ForStatement"); + Label test, body, exit; + if (stmt->init() != NULL) Visit(stmt->init()); + + increment_loop_depth(); + // Emit the test at the bottom of the loop (even if empty). + __ jmp(&test); + __ bind(&body); + Visit(stmt->body()); + if (stmt->next() != NULL) Visit(stmt->next()); + + __ bind(&test); + if (stmt->cond() == NULL) { + // For an empty test jump to the top of the loop. + __ jmp(&body); + } else { + // We are not in an expression context because we have been compiling + // statements. Set up a test expression context for the condition. 
+ ASSERT_EQ(NULL, true_label_); + ASSERT_EQ(NULL, false_label_); + true_label_ = &body; + false_label_ = &exit; + ASSERT(stmt->cond()->context() == Expression::kTest); + Visit(stmt->cond()); + true_label_ = NULL; + false_label_ = NULL; + } + + __ bind(&exit); + decrement_loop_depth(); } @@ -301,7 +407,12 @@ void FastCodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* stmt) { void FastCodeGenerator::VisitDebuggerStatement(DebuggerStatement* stmt) { - UNREACHABLE(); +#ifdef ENABLE_DEBUGGER_SUPPORT + Comment cmnt(masm_, "[ DebuggerStatement"); + SetStatementPosition(stmt); + __ CallRuntime(Runtime::kDebugBreak, 0); + // Ignore the return value. +#endif } @@ -312,7 +423,37 @@ void FastCodeGenerator::VisitFunctionBoilerplateLiteral( void FastCodeGenerator::VisitConditional(Conditional* expr) { - UNREACHABLE(); + Comment cmnt(masm_, "[ Conditional"); + ASSERT_EQ(Expression::kTest, expr->condition()->context()); + ASSERT_EQ(expr->context(), expr->then_expression()->context()); + ASSERT_EQ(expr->context(), expr->else_expression()->context()); + + + Label true_case, false_case, done; + Label* saved_true = true_label_; + Label* saved_false = false_label_; + + true_label_ = &true_case; + false_label_ = &false_case; + Visit(expr->condition()); + true_label_ = saved_true; + false_label_ = saved_false; + + __ bind(&true_case); + Visit(expr->then_expression()); + // If control flow falls through Visit, jump to done. + if (expr->context() == Expression::kEffect || + expr->context() == Expression::kValue) { + __ jmp(&done); + } + + __ bind(&false_case); + Visit(expr->else_expression()); + // If control flow falls through Visit, merge it with true case here. 
+ if (expr->context() == Expression::kEffect || + expr->context() == Expression::kValue) { + __ bind(&done); + } } @@ -323,7 +464,48 @@ void FastCodeGenerator::VisitSlot(Slot* expr) { void FastCodeGenerator::VisitLiteral(Literal* expr) { - Move(expr->location(), expr); + Comment cmnt(masm_, "[ Literal"); + Move(expr->context(), expr); +} + + +void FastCodeGenerator::VisitAssignment(Assignment* expr) { + Comment cmnt(masm_, "[ Assignment"); + ASSERT(expr->op() == Token::ASSIGN || expr->op() == Token::INIT_VAR); + + // Record source code position of the (possible) IC call. + SetSourcePosition(expr->position()); + + Expression* rhs = expr->value(); + // Left-hand side can only be a property, a global or a (parameter or + // local) slot. + Variable* var = expr->target()->AsVariableProxy()->AsVariable(); + Property* prop = expr->target()->AsProperty(); + if (var != NULL) { + Visit(rhs); + ASSERT_EQ(Expression::kValue, rhs->context()); + EmitVariableAssignment(expr); + } else if (prop != NULL) { + // Assignment to a property. + Visit(prop->obj()); + ASSERT_EQ(Expression::kValue, prop->obj()->context()); + // Use the expression context of the key subexpression to detect whether + // we have decided to us a named or keyed IC. 
+ if (prop->key()->context() == Expression::kUninitialized) { + ASSERT(prop->key()->AsLiteral() != NULL); + Visit(rhs); + ASSERT_EQ(Expression::kValue, rhs->context()); + EmitNamedPropertyAssignment(expr); + } else { + Visit(prop->key()); + ASSERT_EQ(Expression::kValue, prop->key()->context()); + Visit(rhs); + ASSERT_EQ(Expression::kValue, rhs->context()); + EmitKeyedPropertyAssignment(expr); + } + } else { + UNREACHABLE(); + } } @@ -337,24 +519,12 @@ void FastCodeGenerator::VisitThrow(Throw* expr) { } -void FastCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) { - UNREACHABLE(); -} - - -void FastCodeGenerator::VisitCountOperation(CountOperation* expr) { - UNREACHABLE(); -} - - -void FastCodeGenerator::VisitCompareOperation(CompareOperation* expr) { +void FastCodeGenerator::VisitThisFunction(ThisFunction* expr) { UNREACHABLE(); } -void FastCodeGenerator::VisitThisFunction(ThisFunction* expr) { - UNREACHABLE(); -} +#undef __ } } // namespace v8::internal diff --git a/deps/v8/src/fast-codegen.h b/deps/v8/src/fast-codegen.h index 31bb41c4da..ee2e1d345e 100644 --- a/deps/v8/src/fast-codegen.h +++ b/deps/v8/src/fast-codegen.h @@ -39,7 +39,13 @@ namespace internal { class FastCodeGenerator: public AstVisitor { public: FastCodeGenerator(MacroAssembler* masm, Handle<Script> script, bool is_eval) - : masm_(masm), function_(NULL), script_(script), is_eval_(is_eval) { + : masm_(masm), + function_(NULL), + script_(script), + is_eval_(is_eval), + loop_depth_(0), + true_label_(NULL), + false_label_(NULL) { } static Handle<Code> MakeCode(FunctionLiteral* fun, @@ -51,26 +57,54 @@ class FastCodeGenerator: public AstVisitor { private: int SlotOffset(Slot* slot); - void Move(Location destination, Register source); - void Move(Location destination, Slot* source); - void Move(Location destination, Literal* source); - - void Move(Register destination, Location source); - void Move(Slot* destination, Location source); + void Move(Expression::Context destination, Register source); 
+ void Move(Expression::Context destination, Slot* source); + void Move(Expression::Context destination, Literal* source); // Drop the TOS, and store source to destination. // If destination is TOS, just overwrite TOS with source. - void DropAndMove(Location destination, Register source); + void DropAndMove(Expression::Context destination, Register source); + + // Test the JavaScript value in source as if in a test context, compile + // control flow to a pair of labels. + void TestAndBranch(Register source, Label* true_label, Label* false_label); void VisitDeclarations(ZoneList<Declaration*>* declarations); - Handle<JSFunction> BuildBoilerplate(FunctionLiteral* fun); void DeclareGlobals(Handle<FixedArray> pairs); + // Platform-specific return sequence + void EmitReturnSequence(int position); + + // Platform-specific code sequences for calls + void EmitCallWithStub(Call* expr); + void EmitCallWithIC(Call* expr, RelocInfo::Mode reloc_info); + + // Platform-specific support for compiling assignments. + + // Complete a variable assignment. The right-hand-side value is expected + // on top of the stack. + void EmitVariableAssignment(Assignment* expr); + + // Complete a named property assignment. The receiver and right-hand-side + // value are expected on top of the stack. + void EmitNamedPropertyAssignment(Assignment* expr); + + // Complete a keyed property assignment. The reciever, key, and + // right-hand-side value are expected on top of the stack. + void EmitKeyedPropertyAssignment(Assignment* expr); + void SetFunctionPosition(FunctionLiteral* fun); void SetReturnPosition(FunctionLiteral* fun); void SetStatementPosition(Statement* stmt); void SetSourcePosition(int pos); + int loop_depth() { return loop_depth_; } + void increment_loop_depth() { loop_depth_++; } + void decrement_loop_depth() { + ASSERT(loop_depth_ > 0); + loop_depth_--; + } + // AST node visit functions. 
#define DECLARE_VISIT(type) virtual void Visit##type(type* node); AST_NODE_LIST(DECLARE_VISIT) @@ -83,6 +117,11 @@ class FastCodeGenerator: public AstVisitor { FunctionLiteral* function_; Handle<Script> script_; bool is_eval_; + Label return_label_; + int loop_depth_; + + Label* true_label_; + Label* false_label_; DISALLOW_COPY_AND_ASSIGN(FastCodeGenerator); }; diff --git a/deps/v8/src/flag-definitions.h b/deps/v8/src/flag-definitions.h index 1ceb6722e1..975350353e 100644 --- a/deps/v8/src/flag-definitions.h +++ b/deps/v8/src/flag-definitions.h @@ -114,6 +114,8 @@ DEFINE_bool(enable_rdtsc, true, "enable use of RDTSC instruction if available") DEFINE_bool(enable_sahf, true, "enable use of SAHF instruction if available (X64 only)") +DEFINE_bool(enable_vfp3, true, + "enable use of VFP3 instructions if available (ARM only)") // bootstrapper.cc DEFINE_string(expose_natives_as, NULL, "expose natives in global object") @@ -196,6 +198,7 @@ DEFINE_bool(canonicalize_object_literal_maps, true, // mksnapshot.cc DEFINE_bool(h, false, "print this message") +DEFINE_bool(new_snapshot, true, "use new snapshot implementation") // parser.cc DEFINE_bool(allow_natives_syntax, false, "allow natives syntax") diff --git a/deps/v8/src/flags.cc b/deps/v8/src/flags.cc index 5df3afd5f9..d444c976fb 100644 --- a/deps/v8/src/flags.cc +++ b/deps/v8/src/flags.cc @@ -303,8 +303,8 @@ static void SplitArgument(const char* arg, // get the value if any if (*arg == '=') { // make a copy so we can NUL-terminate flag name - int n = arg - *name; - CHECK(n < buffer_size); // buffer is too small + size_t n = arg - *name; + CHECK(n < static_cast<size_t>(buffer_size)); // buffer is too small memcpy(buffer, *name, n); buffer[n] = '\0'; *name = buffer; diff --git a/deps/v8/src/frames.cc b/deps/v8/src/frames.cc index 5cd83324c6..7c327dd321 100644 --- a/deps/v8/src/frames.cc +++ b/deps/v8/src/frames.cc @@ -393,8 +393,19 @@ Code* EntryConstructFrame::code() const { } +Object*& ExitFrame::code_slot() const { + const 
int offset = ExitFrameConstants::kCodeOffset; + return Memory::Object_at(fp() + offset); +} + + Code* ExitFrame::code() const { - return Heap::c_entry_code(); + Object* code = code_slot(); + if (code->IsSmi()) { + return Heap::c_entry_debug_break_code(); + } else { + return Code::cast(code); + } } @@ -412,11 +423,6 @@ Address ExitFrame::GetCallerStackPointer() const { } -Code* ExitDebugFrame::code() const { - return Heap::c_entry_debug_break_code(); -} - - Address StandardFrame::GetExpressionAddress(int n) const { const int offset = StandardFrameConstants::kExpressionsOffset; return fp() + offset - n * kPointerSize; @@ -430,7 +436,7 @@ int StandardFrame::ComputeExpressionsCount() const { Address limit = sp(); ASSERT(base >= limit); // stack grows downwards // Include register-allocated locals in number of expressions. - return (base - limit) / kPointerSize; + return static_cast<int>((base - limit) / kPointerSize); } @@ -460,7 +466,7 @@ Object* JavaScriptFrame::GetParameter(int index) const { int JavaScriptFrame::ComputeParametersCount() const { Address base = caller_sp() + JavaScriptFrameConstants::kReceiverOffset; Address limit = fp() + JavaScriptFrameConstants::kSavedRegistersOffset; - return (base - limit) / kPointerSize; + return static_cast<int>((base - limit) / kPointerSize); } diff --git a/deps/v8/src/frames.h b/deps/v8/src/frames.h index 768196d3c6..024065abf7 100644 --- a/deps/v8/src/frames.h +++ b/deps/v8/src/frames.h @@ -93,7 +93,6 @@ class StackHandler BASE_EMBEDDED { V(ENTRY, EntryFrame) \ V(ENTRY_CONSTRUCT, EntryConstructFrame) \ V(EXIT, ExitFrame) \ - V(EXIT_DEBUG, ExitDebugFrame) \ V(JAVA_SCRIPT, JavaScriptFrame) \ V(INTERNAL, InternalFrame) \ V(CONSTRUCT, ConstructFrame) \ @@ -119,7 +118,6 @@ class StackFrame BASE_EMBEDDED { bool is_entry() const { return type() == ENTRY; } bool is_entry_construct() const { return type() == ENTRY_CONSTRUCT; } bool is_exit() const { return type() == EXIT; } - bool is_exit_debug() const { return type() == EXIT_DEBUG; 
} bool is_java_script() const { return type() == JAVA_SCRIPT; } bool is_arguments_adaptor() const { return type() == ARGUMENTS_ADAPTOR; } bool is_internal() const { return type() == INTERNAL; } @@ -260,10 +258,13 @@ class EntryConstructFrame: public EntryFrame { // Exit frames are used to exit JavaScript execution and go to C. class ExitFrame: public StackFrame { public: + enum Mode { MODE_NORMAL, MODE_DEBUG }; virtual Type type() const { return EXIT; } virtual Code* code() const; + Object*& code_slot() const; + // Garbage collection support. virtual void Iterate(ObjectVisitor* v) const; @@ -289,26 +290,6 @@ class ExitFrame: public StackFrame { }; -class ExitDebugFrame: public ExitFrame { - public: - virtual Type type() const { return EXIT_DEBUG; } - - virtual Code* code() const; - - static ExitDebugFrame* cast(StackFrame* frame) { - ASSERT(frame->is_exit_debug()); - return static_cast<ExitDebugFrame*>(frame); - } - - protected: - explicit ExitDebugFrame(StackFrameIterator* iterator) - : ExitFrame(iterator) { } - - private: - friend class StackFrameIterator; -}; - - class StandardFrame: public StackFrame { public: // Testers. diff --git a/deps/v8/src/global-handles.cc b/deps/v8/src/global-handles.cc index c6cc288e6a..e519d7077f 100644 --- a/deps/v8/src/global-handles.cc +++ b/deps/v8/src/global-handles.cc @@ -165,6 +165,9 @@ class GlobalHandles::Node : public Malloced { // It's fine though to reuse nodes that were destroyed in weak callback // as those cannot be deallocated until we are back from the callback. set_first_free(NULL); + if (first_deallocated()) { + first_deallocated()->set_next(head()); + } // Leaving V8. VMState state(EXTERNAL); func(object, par); @@ -270,6 +273,7 @@ Handle<Object> GlobalHandles::Create(Object* value) { // Next try deallocated list result = first_deallocated(); set_first_deallocated(result->next_free()); + ASSERT(result->next() == head()); set_head(result); } else { // Allocate a new node. 
@@ -390,8 +394,8 @@ void GlobalHandles::PostGarbageCollectionProcessing() { } -void GlobalHandles::IterateRoots(ObjectVisitor* v) { - // Traversal of global handles marked as NORMAL or NEAR_DEATH. +void GlobalHandles::IterateStrongRoots(ObjectVisitor* v) { + // Traversal of global handles marked as NORMAL. for (Node* current = head_; current != NULL; current = current->next()) { if (current->state_ == Node::NORMAL) { v->VisitPointer(¤t->object_); @@ -399,6 +403,16 @@ void GlobalHandles::IterateRoots(ObjectVisitor* v) { } } + +void GlobalHandles::IterateAllRoots(ObjectVisitor* v) { + for (Node* current = head_; current != NULL; current = current->next()) { + if (current->state_ != Node::DESTROYED) { + v->VisitPointer(¤t->object_); + } + } +} + + void GlobalHandles::TearDown() { // Reset all the lists. set_head(NULL); diff --git a/deps/v8/src/global-handles.h b/deps/v8/src/global-handles.h index 87eb9b8301..b9cac5c2a1 100644 --- a/deps/v8/src/global-handles.h +++ b/deps/v8/src/global-handles.h @@ -48,7 +48,8 @@ namespace internal { class ObjectGroup : public Malloced { public: ObjectGroup() : objects_(4) {} - explicit ObjectGroup(size_t capacity) : objects_(capacity) {} + explicit ObjectGroup(size_t capacity) + : objects_(static_cast<int>(capacity)) { } List<Object**> objects_; }; @@ -95,8 +96,11 @@ class GlobalHandles : public AllStatic { // Process pending weak handles. static void PostGarbageCollectionProcessing(); + // Iterates over all strong handles. + static void IterateStrongRoots(ObjectVisitor* v); + // Iterates over all handles. - static void IterateRoots(ObjectVisitor* v); + static void IterateAllRoots(ObjectVisitor* v); // Iterates over all weak roots in heap. 
static void IterateWeakRoots(ObjectVisitor* v); diff --git a/deps/v8/src/globals.h b/deps/v8/src/globals.h index fbb648f5b1..ad0539f460 100644 --- a/deps/v8/src/globals.h +++ b/deps/v8/src/globals.h @@ -103,6 +103,10 @@ typedef byte* Address; #define V8PRIxPTR "lx" #endif +#if defined(__APPLE__) && defined(__MACH__) +#define USING_MAC_ABI +#endif + // Code-point values in Unicode 4.0 are 21 bits wide. typedef uint16_t uc16; typedef int32_t uc32; @@ -248,7 +252,6 @@ class Variable; class VariableProxy; class RelocInfo; class Deserializer; -class GenericDeserializer; // TODO(erikcorry): Get rid of this. class MessageLocation; class ObjectGroup; class TickSample; @@ -291,6 +294,8 @@ enum GarbageCollector { SCAVENGER, MARK_COMPACTOR }; enum Executability { NOT_EXECUTABLE, EXECUTABLE }; +enum VisitMode { VISIT_ALL, VISIT_ONLY_STRONG }; + // A CodeDesc describes a buffer holding instructions and relocation // information. The instructions start at the beginning of the buffer @@ -570,6 +575,17 @@ inline Dest bit_cast(const Source& source) { } +// Feature flags bit positions. They are mostly based on the CPUID spec. +// (We assign CPUID itself to one of the currently reserved bits -- +// feel free to change this if needed.) 
+enum CpuFeature { SSE3 = 32, // x86 + SSE2 = 26, // x86 + CMOV = 15, // x86 + RDTSC = 4, // x86 + CPUID = 10, // x86 + VFP3 = 1, // ARM + SAHF = 0}; // x86 + } } // namespace v8::internal #endif // V8_GLOBALS_H_ diff --git a/deps/v8/src/handles.cc b/deps/v8/src/handles.cc index b764334e83..b42ad241a6 100644 --- a/deps/v8/src/handles.cc +++ b/deps/v8/src/handles.cc @@ -37,6 +37,7 @@ #include "global-handles.h" #include "natives.h" #include "runtime.h" +#include "stub-cache.h" namespace v8 { namespace internal { @@ -49,8 +50,8 @@ v8::ImplementationUtilities::HandleScopeData HandleScope::current_ = int HandleScope::NumberOfHandles() { int n = HandleScopeImplementer::instance()->blocks()->length(); if (n == 0) return 0; - return ((n - 1) * kHandleBlockSize) + - (current_.next - HandleScopeImplementer::instance()->blocks()->last()); + return ((n - 1) * kHandleBlockSize) + static_cast<int>( + (current_.next - HandleScopeImplementer::instance()->blocks()->last())); } @@ -105,6 +106,21 @@ void HandleScope::ZapRange(Object** start, Object** end) { } +Address HandleScope::current_extensions_address() { + return reinterpret_cast<Address>(¤t_.extensions); +} + + +Address HandleScope::current_next_address() { + return reinterpret_cast<Address>(¤t_.next); +} + + +Address HandleScope::current_limit_address() { + return reinterpret_cast<Address>(¤t_.limit); +} + + Handle<FixedArray> AddKeysFromJSArray(Handle<FixedArray> content, Handle<JSArray> array) { CALL_HEAP_FUNCTION(content->AddKeysFromJSArray(*array), FixedArray); @@ -285,7 +301,9 @@ Handle<Object> GetPrototype(Handle<Object> obj) { Handle<Object> GetHiddenProperties(Handle<JSObject> obj, bool create_if_needed) { - Handle<String> key = Factory::hidden_symbol(); + Object* holder = obj->BypassGlobalProxy(); + if (holder->IsUndefined()) return Factory::undefined_value(); + obj = Handle<JSObject>(JSObject::cast(holder)); if (obj->HasFastProperties()) { // If the object has fast properties, check whether the first slot @@ 
-294,7 +312,7 @@ Handle<Object> GetHiddenProperties(Handle<JSObject> obj, // code zero) it will always occupy the first entry if present. DescriptorArray* descriptors = obj->map()->instance_descriptors(); if ((descriptors->number_of_descriptors() > 0) && - (descriptors->GetKey(0) == *key) && + (descriptors->GetKey(0) == Heap::hidden_symbol()) && descriptors->IsProperty(0)) { ASSERT(descriptors->GetType(0) == FIELD); return Handle<Object>(obj->FastPropertyAt(descriptors->GetFieldIndex(0))); @@ -304,17 +322,17 @@ Handle<Object> GetHiddenProperties(Handle<JSObject> obj, // Only attempt to find the hidden properties in the local object and not // in the prototype chain. Note that HasLocalProperty() can cause a GC in // the general case in the presence of interceptors. - if (!obj->HasLocalProperty(*key)) { + if (!obj->HasHiddenPropertiesObject()) { // Hidden properties object not found. Allocate a new hidden properties // object if requested. Otherwise return the undefined value. if (create_if_needed) { Handle<Object> hidden_obj = Factory::NewJSObject(Top::object_function()); - return SetProperty(obj, key, hidden_obj, DONT_ENUM); + CALL_HEAP_FUNCTION(obj->SetHiddenPropertiesObject(*hidden_obj), Object); } else { return Factory::undefined_value(); } } - return GetProperty(obj, key); + return Handle<Object>(obj->GetHiddenPropertiesObject()); } @@ -338,7 +356,7 @@ Handle<Object> LookupSingleCharacterStringFromCode(uint32_t index) { Handle<String> SubString(Handle<String> str, int start, int end) { - CALL_HEAP_FUNCTION(str->Slice(start, end), String); + CALL_HEAP_FUNCTION(str->SubString(start, end), String); } @@ -411,12 +429,12 @@ Handle<JSValue> GetScriptWrapper(Handle<Script> script) { // Init line_ends array with code positions of line ends inside script // source. 
void InitScriptLineEnds(Handle<Script> script) { - if (!script->line_ends()->IsUndefined()) return; + if (!script->line_ends_fixed_array()->IsUndefined()) return; if (!script->source()->IsString()) { ASSERT(script->source()->IsUndefined()); - script->set_line_ends(*(Factory::NewJSArray(0))); - ASSERT(script->line_ends()->IsJSArray()); + script->set_line_ends_fixed_array(*(Factory::NewFixedArray(0))); + ASSERT(script->line_ends_fixed_array()->IsFixedArray()); return; } @@ -449,9 +467,8 @@ void InitScriptLineEnds(Handle<Script> script) { } ASSERT(array_index == line_count); - Handle<JSArray> object = Factory::NewJSArrayWithElements(array); - script->set_line_ends(*object); - ASSERT(script->line_ends()->IsJSArray()); + script->set_line_ends_fixed_array(*array); + ASSERT(script->line_ends_fixed_array()->IsFixedArray()); } @@ -459,17 +476,18 @@ void InitScriptLineEnds(Handle<Script> script) { int GetScriptLineNumber(Handle<Script> script, int code_pos) { InitScriptLineEnds(script); AssertNoAllocation no_allocation; - JSArray* line_ends_array = JSArray::cast(script->line_ends()); - const int line_ends_len = (Smi::cast(line_ends_array->length()))->value(); + FixedArray* line_ends_array = + FixedArray::cast(script->line_ends_fixed_array()); + const int line_ends_len = line_ends_array->length(); int line = -1; if (line_ends_len > 0 && - code_pos <= (Smi::cast(line_ends_array->GetElement(0)))->value()) { + code_pos <= (Smi::cast(line_ends_array->get(0)))->value()) { line = 0; } else { for (int i = 1; i < line_ends_len; ++i) { - if ((Smi::cast(line_ends_array->GetElement(i - 1)))->value() < code_pos && - code_pos <= (Smi::cast(line_ends_array->GetElement(i)))->value()) { + if ((Smi::cast(line_ends_array->get(i - 1)))->value() < code_pos && + code_pos <= (Smi::cast(line_ends_array->get(i)))->value()) { line = i; break; } @@ -672,6 +690,11 @@ OptimizedObjectForAddingMultipleProperties(Handle<JSObject> object, } +Handle<Code> ComputeLazyCompile(int argc) { + 
CALL_HEAP_FUNCTION(StubCache::ComputeLazyCompile(argc), Code); +} + + OptimizedObjectForAddingMultipleProperties:: ~OptimizedObjectForAddingMultipleProperties() { // Reoptimize the object to allow fast property access. diff --git a/deps/v8/src/handles.h b/deps/v8/src/handles.h index 5d574657c5..f610a34d0d 100644 --- a/deps/v8/src/handles.h +++ b/deps/v8/src/handles.h @@ -133,6 +133,13 @@ class HandleScope { return result; } + // Deallocates any extensions used by the current scope. + static void DeleteExtensions(); + + static Address current_extensions_address(); + static Address current_next_address(); + static Address current_limit_address(); + private: // Prevent heap allocation or illegal handle scopes. HandleScope(const HandleScope&); @@ -166,9 +173,6 @@ class HandleScope { // Extend the handle scope making room for more handles. static internal::Object** Extend(); - // Deallocates any extensions used by the current scope. - static void DeleteExtensions(); - // Zaps the handles in the half-open interval [start, end). static void ZapRange(internal::Object** start, internal::Object** end); @@ -304,8 +308,8 @@ Handle<Object> SetPrototype(Handle<JSFunction> function, Handle<Object> prototype); -// Do lazy compilation of the given function. Returns true on success -// and false if the compilation resulted in a stack overflow. +// Does lazy compilation of the given function. Returns true on success and +// false if the compilation resulted in a stack overflow. enum ClearExceptionFlag { KEEP_EXCEPTION, CLEAR_EXCEPTION }; bool CompileLazyShared(Handle<SharedFunctionInfo> shared, @@ -315,6 +319,9 @@ bool CompileLazyShared(Handle<SharedFunctionInfo> shared, bool CompileLazy(Handle<JSFunction> function, ClearExceptionFlag flag); bool CompileLazyInLoop(Handle<JSFunction> function, ClearExceptionFlag flag); +// Returns the lazy compilation stub for argc arguments. +Handle<Code> ComputeLazyCompile(int argc); + // These deal with lazily loaded properties. 
void SetupLazy(Handle<JSObject> obj, int index, diff --git a/deps/v8/src/heap-profiler.cc b/deps/v8/src/heap-profiler.cc index 7f7cd7f169..bd1cd2d9cf 100644 --- a/deps/v8/src/heap-profiler.cc +++ b/deps/v8/src/heap-profiler.cc @@ -536,7 +536,7 @@ RetainerHeapProfile::RetainerHeapProfile() : zscope_(DELETE_ON_EXIT) { JSObjectsCluster roots(JSObjectsCluster::ROOTS); ReferencesExtractor extractor(roots, this); - Heap::IterateRoots(&extractor); + Heap::IterateRoots(&extractor, VISIT_ONLY_STRONG); } diff --git a/deps/v8/src/heap.cc b/deps/v8/src/heap.cc index ae18fbe6b7..43886c144c 100644 --- a/deps/v8/src/heap.cc +++ b/deps/v8/src/heap.cc @@ -733,10 +733,7 @@ void Heap::Scavenge() { ScavengeVisitor scavenge_visitor; // Copy roots. - IterateRoots(&scavenge_visitor); - - // Copy objects reachable from weak pointers. - GlobalHandles::IterateWeakRoots(&scavenge_visitor); + IterateRoots(&scavenge_visitor, VISIT_ALL); // Copy objects reachable from the old generation. By definition, // there are no intergenerational pointers in code or data spaces. @@ -1730,6 +1727,7 @@ Object* Heap::AllocateProxy(Address proxy, PretenureFlag pretenure) { // Statically ensure that it is safe to allocate proxies in paged spaces. STATIC_ASSERT(Proxy::kSize <= Page::kMaxHeapObjectSize); AllocationSpace space = (pretenure == TENURED) ? 
OLD_DATA_SPACE : NEW_SPACE; + if (always_allocate()) space = OLD_DATA_SPACE; Object* result = Allocate(proxy_map(), space); if (result->IsFailure()) return result; @@ -1766,10 +1764,14 @@ Object* Heap::AllocateSharedFunctionInfo(Object* name) { Object* Heap::AllocateConsString(String* first, String* second) { int first_length = first->length(); - if (first_length == 0) return second; + if (first_length == 0) { + return second; + } int second_length = second->length(); - if (second_length == 0) return first; + if (second_length == 0) { + return first; + } int length = first_length + second_length; bool is_ascii = first->IsAsciiRepresentation() @@ -1821,54 +1823,18 @@ Object* Heap::AllocateConsString(String* first, String* second) { : long_cons_string_map(); } - Object* result = Allocate(map, NEW_SPACE); + Object* result = Allocate(map, + always_allocate() ? OLD_POINTER_SPACE : NEW_SPACE); if (result->IsFailure()) return result; - ASSERT(InNewSpace(result)); ConsString* cons_string = ConsString::cast(result); - cons_string->set_first(first, SKIP_WRITE_BARRIER); - cons_string->set_second(second, SKIP_WRITE_BARRIER); + WriteBarrierMode mode = cons_string->GetWriteBarrierMode(); + cons_string->set_first(first, mode); + cons_string->set_second(second, mode); cons_string->set_length(length); return result; } -Object* Heap::AllocateSlicedString(String* buffer, - int start, - int end) { - int length = end - start; - - // If the resulting string is small make a sub string. - if (length <= String::kMinNonFlatLength) { - return Heap::AllocateSubString(buffer, start, end); - } - - Map* map; - if (length <= String::kMaxShortSize) { - map = buffer->IsAsciiRepresentation() ? - short_sliced_ascii_string_map() : - short_sliced_string_map(); - } else if (length <= String::kMaxMediumSize) { - map = buffer->IsAsciiRepresentation() ? - medium_sliced_ascii_string_map() : - medium_sliced_string_map(); - } else { - map = buffer->IsAsciiRepresentation() ? 
- long_sliced_ascii_string_map() : - long_sliced_string_map(); - } - - Object* result = Allocate(map, NEW_SPACE); - if (result->IsFailure()) return result; - - SlicedString* sliced_string = SlicedString::cast(result); - sliced_string->set_buffer(buffer); - sliced_string->set_start(start); - sliced_string->set_length(length); - - return result; -} - - Object* Heap::AllocateSubString(String* buffer, int start, int end) { @@ -1888,22 +1854,19 @@ Object* Heap::AllocateSubString(String* buffer, ? AllocateRawAsciiString(length) : AllocateRawTwoByteString(length); if (result->IsFailure()) return result; + String* string_result = String::cast(result); // Copy the characters into the new object. - String* string_result = String::cast(result); - StringHasher hasher(length); - int i = 0; - for (; i < length && hasher.is_array_index(); i++) { - uc32 c = buffer->Get(start + i); - hasher.AddCharacter(c); - string_result->Set(i, c); - } - for (; i < length; i++) { - uc32 c = buffer->Get(start + i); - hasher.AddCharacterNoIndex(c); - string_result->Set(i, c); + if (buffer->IsAsciiRepresentation()) { + ASSERT(string_result->IsAsciiRepresentation()); + char* dest = SeqAsciiString::cast(string_result)->GetChars(); + String::WriteToFlat(buffer, dest, start, end); + } else { + ASSERT(string_result->IsTwoByteRepresentation()); + uc16* dest = SeqTwoByteString::cast(string_result)->GetChars(); + String::WriteToFlat(buffer, dest, start, end); } - string_result->set_length_field(hasher.GetHashField()); + return result; } @@ -1911,20 +1874,24 @@ Object* Heap::AllocateSubString(String* buffer, Object* Heap::AllocateExternalStringFromAscii( ExternalAsciiString::Resource* resource) { Map* map; - int length = resource->length(); - if (length <= String::kMaxShortSize) { + size_t length = resource->length(); + if (length <= static_cast<size_t>(String::kMaxShortSize)) { map = short_external_ascii_string_map(); - } else if (length <= String::kMaxMediumSize) { + } else if (length <= 
static_cast<size_t>(String::kMaxMediumSize)) { map = medium_external_ascii_string_map(); - } else { + } else if (length <= static_cast<size_t>(String::kMaxLength)) { map = long_external_ascii_string_map(); + } else { + Top::context()->mark_out_of_memory(); + return Failure::OutOfMemoryException(); } - Object* result = Allocate(map, NEW_SPACE); + Object* result = Allocate(map, + always_allocate() ? OLD_DATA_SPACE : NEW_SPACE); if (result->IsFailure()) return result; ExternalAsciiString* external_string = ExternalAsciiString::cast(result); - external_string->set_length(length); + external_string->set_length(static_cast<int>(length)); external_string->set_resource(resource); return result; @@ -1933,14 +1900,18 @@ Object* Heap::AllocateExternalStringFromAscii( Object* Heap::AllocateExternalStringFromTwoByte( ExternalTwoByteString::Resource* resource) { - int length = resource->length(); - - Map* map = ExternalTwoByteString::StringMap(length); - Object* result = Allocate(map, NEW_SPACE); + size_t length = resource->length(); + if (length > static_cast<size_t>(String::kMaxLength)) { + Top::context()->mark_out_of_memory(); + return Failure::OutOfMemoryException(); + } + Map* map = ExternalTwoByteString::StringMap(static_cast<int>(length)); + Object* result = Allocate(map, + always_allocate() ? OLD_DATA_SPACE : NEW_SPACE); if (result->IsFailure()) return result; ExternalTwoByteString* external_string = ExternalTwoByteString::cast(result); - external_string->set_length(length); + external_string->set_length(static_cast<int>(length)); external_string->set_resource(resource); return result; @@ -2321,6 +2292,7 @@ Object* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) { AllocationSpace space = (pretenure == TENURED) ? 
OLD_POINTER_SPACE : NEW_SPACE; if (map->instance_size() > MaxObjectSizeInPagedSpace()) space = LO_SPACE; + if (always_allocate()) space = OLD_POINTER_SPACE; Object* obj = Allocate(map, space); if (obj->IsFailure()) return obj; @@ -2603,20 +2575,6 @@ Map* Heap::SymbolMapForString(String* string) { return long_cons_ascii_symbol_map(); } - if (map == short_sliced_string_map()) return short_sliced_symbol_map(); - if (map == medium_sliced_string_map()) return medium_sliced_symbol_map(); - if (map == long_sliced_string_map()) return long_sliced_symbol_map(); - - if (map == short_sliced_ascii_string_map()) { - return short_sliced_ascii_symbol_map(); - } - if (map == medium_sliced_ascii_string_map()) { - return medium_sliced_ascii_symbol_map(); - } - if (map == long_sliced_ascii_string_map()) { - return long_sliced_ascii_symbol_map(); - } - if (map == short_external_string_map()) { return short_external_symbol_map(); } @@ -3117,7 +3075,7 @@ void Heap::Verify() { ASSERT(HasBeenSetup()); VerifyPointersVisitor visitor; - IterateRoots(&visitor); + IterateRoots(&visitor, VISIT_ONLY_STRONG); new_space_.Verify(); @@ -3244,14 +3202,14 @@ void Heap::IterateRSet(PagedSpace* space, ObjectSlotCallback copy_object_func) { } -void Heap::IterateRoots(ObjectVisitor* v) { - IterateStrongRoots(v); +void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) { + IterateStrongRoots(v, mode); v->VisitPointer(reinterpret_cast<Object**>(&roots_[kSymbolTableRootIndex])); v->Synchronize("symbol_table"); } -void Heap::IterateStrongRoots(ObjectVisitor* v) { +void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) { v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]); v->Synchronize("strong_root_list"); @@ -3284,7 +3242,11 @@ void Heap::IterateStrongRoots(ObjectVisitor* v) { v->Synchronize("builtins"); // Iterate over global handles. 
- GlobalHandles::IterateRoots(v); + if (mode == VISIT_ONLY_STRONG) { + GlobalHandles::IterateStrongRoots(v); + } else { + GlobalHandles::IterateAllRoots(v); + } v->Synchronize("globalhandles"); // Iterate over pointers being held by inactive threads. @@ -3455,14 +3417,18 @@ bool Heap::Setup(bool create_heap_objects) { } -void Heap::SetStackLimit(intptr_t limit) { +void Heap::SetStackLimits() { // On 64 bit machines, pointers are generally out of range of Smis. We write // something that looks like an out of range Smi to the GC. - // Set up the special root array entry containing the stack guard. - // This is actually an address, but the tag makes the GC ignore it. + // Set up the special root array entries containing the stack limits. + // These are actually addresses, but the tag makes the GC ignore it. roots_[kStackLimitRootIndex] = - reinterpret_cast<Object*>((limit & ~kSmiTagMask) | kSmiTag); + reinterpret_cast<Object*>( + (StackGuard::jslimit() & ~kSmiTagMask) | kSmiTag); + roots_[kRealStackLimitRootIndex] = + reinterpret_cast<Object*>( + (StackGuard::real_jslimit() & ~kSmiTagMask) | kSmiTag); } @@ -3889,7 +3855,7 @@ void Heap::TracePathToObject() { search_for_any_global = false; MarkRootVisitor root_visitor; - IterateRoots(&root_visitor); + IterateRoots(&root_visitor, VISIT_ONLY_STRONG); } @@ -3901,7 +3867,7 @@ void Heap::TracePathToGlobal() { search_for_any_global = true; MarkRootVisitor root_visitor; - IterateRoots(&root_visitor); + IterateRoots(&root_visitor, VISIT_ONLY_STRONG); } #endif diff --git a/deps/v8/src/heap.h b/deps/v8/src/heap.h index 285260594f..8c1bb1887b 100644 --- a/deps/v8/src/heap.h +++ b/deps/v8/src/heap.h @@ -77,12 +77,6 @@ namespace internal { V(Map, short_cons_ascii_symbol_map, ShortConsAsciiSymbolMap) \ V(Map, medium_cons_ascii_symbol_map, MediumConsAsciiSymbolMap) \ V(Map, long_cons_ascii_symbol_map, LongConsAsciiSymbolMap) \ - V(Map, short_sliced_symbol_map, ShortSlicedSymbolMap) \ - V(Map, medium_sliced_symbol_map, 
MediumSlicedSymbolMap) \ - V(Map, long_sliced_symbol_map, LongSlicedSymbolMap) \ - V(Map, short_sliced_ascii_symbol_map, ShortSlicedAsciiSymbolMap) \ - V(Map, medium_sliced_ascii_symbol_map, MediumSlicedAsciiSymbolMap) \ - V(Map, long_sliced_ascii_symbol_map, LongSlicedAsciiSymbolMap) \ V(Map, short_external_symbol_map, ShortExternalSymbolMap) \ V(Map, medium_external_symbol_map, MediumExternalSymbolMap) \ V(Map, long_external_symbol_map, LongExternalSymbolMap) \ @@ -95,12 +89,6 @@ namespace internal { V(Map, short_cons_ascii_string_map, ShortConsAsciiStringMap) \ V(Map, medium_cons_ascii_string_map, MediumConsAsciiStringMap) \ V(Map, long_cons_ascii_string_map, LongConsAsciiStringMap) \ - V(Map, short_sliced_string_map, ShortSlicedStringMap) \ - V(Map, medium_sliced_string_map, MediumSlicedStringMap) \ - V(Map, long_sliced_string_map, LongSlicedStringMap) \ - V(Map, short_sliced_ascii_string_map, ShortSlicedAsciiStringMap) \ - V(Map, medium_sliced_ascii_string_map, MediumSlicedAsciiStringMap) \ - V(Map, long_sliced_ascii_string_map, LongSlicedAsciiStringMap) \ V(Map, short_external_string_map, ShortExternalStringMap) \ V(Map, medium_external_string_map, MediumExternalStringMap) \ V(Map, long_external_string_map, LongExternalStringMap) \ @@ -148,6 +136,7 @@ namespace internal { V(FixedArray, single_character_string_cache, SingleCharacterStringCache) \ V(FixedArray, natives_source_cache, NativesSourceCache) \ V(Object, last_script_id, LastScriptId) \ + V(Smi, real_stack_limit, RealStackLimit) \ #if V8_TARGET_ARCH_ARM && V8_NATIVE_REGEXP #define STRONG_ROOT_LIST(V) \ @@ -250,10 +239,10 @@ class Heap : public AllStatic { // Destroys all memory allocated by the heap. static void TearDown(); - // Sets the stack limit in the roots_ array. Some architectures generate code - // that looks here, because it is faster than loading from the static jslimit_ - // variable. - static void SetStackLimit(intptr_t limit); + // Set the stack limit in the roots_ array. 
Some architectures generate + // code that looks here, because it is faster than loading from the static + // jslimit_/real_jslimit_ variable in the StackGuard. + static void SetStackLimits(); // Returns whether Setup has been called. static bool HasBeenSetup(); @@ -586,16 +575,6 @@ class Heap : public AllStatic { // Please note this does not perform a garbage collection. static Object* AllocateConsString(String* first, String* second); - // Allocates a new sliced string object which is a slice of an underlying - // string buffer stretching from the index start (inclusive) to the index - // end (exclusive). - // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation - // failed. - // Please note this does not perform a garbage collection. - static Object* AllocateSlicedString(String* buffer, - int start, - int end); - // Allocates a new sub string object which is a substring of an underlying // string buffer stretching from the index start (inclusive) to the index // end (exclusive). @@ -729,9 +708,9 @@ class Heap : public AllStatic { static String* hidden_symbol() { return hidden_symbol_; } // Iterates over all roots in the heap. - static void IterateRoots(ObjectVisitor* v); + static void IterateRoots(ObjectVisitor* v, VisitMode mode); // Iterates over all strong roots in the heap. - static void IterateStrongRoots(ObjectVisitor* v); + static void IterateStrongRoots(ObjectVisitor* v, VisitMode mode); // Iterates remembered set of an old space. 
static void IterateRSet(PagedSpace* space, ObjectSlotCallback callback); diff --git a/deps/v8/src/ia32/assembler-ia32-inl.h b/deps/v8/src/ia32/assembler-ia32-inl.h index 5fa75ec8d7..69f2a8da3b 100644 --- a/deps/v8/src/ia32/assembler-ia32-inl.h +++ b/deps/v8/src/ia32/assembler-ia32-inl.h @@ -89,7 +89,7 @@ Object* RelocInfo::target_object() { } -Handle<Object> RelocInfo::target_object_handle(Assembler *origin) { +Handle<Object> RelocInfo::target_object_handle(Assembler* origin) { ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT); return Memory::Object_Handle_at(pc_); } diff --git a/deps/v8/src/ia32/assembler-ia32.cc b/deps/v8/src/ia32/assembler-ia32.cc index 698377a0c8..d6f555082a 100644 --- a/deps/v8/src/ia32/assembler-ia32.cc +++ b/deps/v8/src/ia32/assembler-ia32.cc @@ -49,6 +49,7 @@ namespace internal { // Safe default is no features. uint64_t CpuFeatures::supported_ = 0; uint64_t CpuFeatures::enabled_ = 0; +uint64_t CpuFeatures::found_by_runtime_probing_ = 0; // The Probe method needs executable memory, so it uses Heap::CreateCode. @@ -56,7 +57,10 @@ uint64_t CpuFeatures::enabled_ = 0; void CpuFeatures::Probe() { ASSERT(Heap::HasBeenSetup()); ASSERT(supported_ == 0); - if (Serializer::enabled()) return; // No features if we might serialize. + if (Serializer::enabled()) { + supported_ |= OS::CpuFeaturesImpliedByPlatform(); + return; // No features if we might serialize. 
+ } Assembler assm(NULL, 0); Label cpuid, done; @@ -124,6 +128,10 @@ void CpuFeatures::Probe() { typedef uint64_t (*F0)(); F0 probe = FUNCTION_CAST<F0>(Code::cast(code)->entry()); supported_ = probe(); + found_by_runtime_probing_ = supported_; + uint64_t os_guarantees = OS::CpuFeaturesImpliedByPlatform(); + supported_ |= os_guarantees; + found_by_runtime_probing_ &= ~os_guarantees; } @@ -360,7 +368,7 @@ void Assembler::Align(int m) { void Assembler::cpuid() { - ASSERT(CpuFeatures::IsEnabled(CpuFeatures::CPUID)); + ASSERT(CpuFeatures::IsEnabled(CPUID)); EnsureSpace ensure_space(this); last_pc_ = pc_; EMIT(0x0F); @@ -712,7 +720,7 @@ void Assembler::movzx_w(Register dst, const Operand& src) { void Assembler::cmov(Condition cc, Register dst, int32_t imm32) { - ASSERT(CpuFeatures::IsEnabled(CpuFeatures::CMOV)); + ASSERT(CpuFeatures::IsEnabled(CMOV)); EnsureSpace ensure_space(this); last_pc_ = pc_; UNIMPLEMENTED(); @@ -723,7 +731,7 @@ void Assembler::cmov(Condition cc, Register dst, int32_t imm32) { void Assembler::cmov(Condition cc, Register dst, Handle<Object> handle) { - ASSERT(CpuFeatures::IsEnabled(CpuFeatures::CMOV)); + ASSERT(CpuFeatures::IsEnabled(CMOV)); EnsureSpace ensure_space(this); last_pc_ = pc_; UNIMPLEMENTED(); @@ -734,7 +742,7 @@ void Assembler::cmov(Condition cc, Register dst, Handle<Object> handle) { void Assembler::cmov(Condition cc, Register dst, const Operand& src) { - ASSERT(CpuFeatures::IsEnabled(CpuFeatures::CMOV)); + ASSERT(CpuFeatures::IsEnabled(CMOV)); EnsureSpace ensure_space(this); last_pc_ = pc_; // Opcode: 0f 40 + cc /r @@ -1083,7 +1091,7 @@ void Assembler::sar(Register dst, uint8_t imm8) { } -void Assembler::sar(Register dst) { +void Assembler::sar_cl(Register dst) { EnsureSpace ensure_space(this); last_pc_ = pc_; EMIT(0xD3); @@ -1123,7 +1131,7 @@ void Assembler::shl(Register dst, uint8_t imm8) { } -void Assembler::shl(Register dst) { +void Assembler::shl_cl(Register dst) { EnsureSpace ensure_space(this); last_pc_ = pc_; EMIT(0xD3); @@ 
-1144,24 +1152,21 @@ void Assembler::shr(Register dst, uint8_t imm8) { EnsureSpace ensure_space(this); last_pc_ = pc_; ASSERT(is_uint5(imm8)); // illegal shift count - EMIT(0xC1); - EMIT(0xE8 | dst.code()); - EMIT(imm8); -} - - -void Assembler::shr(Register dst) { - EnsureSpace ensure_space(this); - last_pc_ = pc_; - EMIT(0xD3); - EMIT(0xE8 | dst.code()); + if (imm8 == 1) { + EMIT(0xD1); + EMIT(0xE8 | dst.code()); + } else { + EMIT(0xC1); + EMIT(0xE8 | dst.code()); + EMIT(imm8); + } } void Assembler::shr_cl(Register dst) { EnsureSpace ensure_space(this); last_pc_ = pc_; - EMIT(0xD1); + EMIT(0xD3); EMIT(0xE8 | dst.code()); } @@ -1316,7 +1321,7 @@ void Assembler::nop() { void Assembler::rdtsc() { - ASSERT(CpuFeatures::IsEnabled(CpuFeatures::RDTSC)); + ASSERT(CpuFeatures::IsEnabled(RDTSC)); EnsureSpace ensure_space(this); last_pc_ = pc_; EMIT(0x0F); @@ -1662,7 +1667,7 @@ void Assembler::fistp_s(const Operand& adr) { void Assembler::fisttp_s(const Operand& adr) { - ASSERT(CpuFeatures::IsEnabled(CpuFeatures::SSE3)); + ASSERT(CpuFeatures::IsEnabled(SSE3)); EnsureSpace ensure_space(this); last_pc_ = pc_; EMIT(0xDB); @@ -1923,7 +1928,7 @@ void Assembler::setcc(Condition cc, Register reg) { void Assembler::cvttss2si(Register dst, const Operand& src) { - ASSERT(CpuFeatures::IsEnabled(CpuFeatures::SSE2)); + ASSERT(CpuFeatures::IsEnabled(SSE2)); EnsureSpace ensure_space(this); last_pc_ = pc_; EMIT(0xF3); @@ -1934,7 +1939,7 @@ void Assembler::cvttss2si(Register dst, const Operand& src) { void Assembler::cvttsd2si(Register dst, const Operand& src) { - ASSERT(CpuFeatures::IsEnabled(CpuFeatures::SSE2)); + ASSERT(CpuFeatures::IsEnabled(SSE2)); EnsureSpace ensure_space(this); last_pc_ = pc_; EMIT(0xF2); @@ -1945,7 +1950,7 @@ void Assembler::cvttsd2si(Register dst, const Operand& src) { void Assembler::cvtsi2sd(XMMRegister dst, const Operand& src) { - ASSERT(CpuFeatures::IsEnabled(CpuFeatures::SSE2)); + ASSERT(CpuFeatures::IsEnabled(SSE2)); EnsureSpace ensure_space(this); last_pc_ = 
pc_; EMIT(0xF2); @@ -1956,7 +1961,7 @@ void Assembler::cvtsi2sd(XMMRegister dst, const Operand& src) { void Assembler::addsd(XMMRegister dst, XMMRegister src) { - ASSERT(CpuFeatures::IsEnabled(CpuFeatures::SSE2)); + ASSERT(CpuFeatures::IsEnabled(SSE2)); EnsureSpace ensure_space(this); last_pc_ = pc_; EMIT(0xF2); @@ -1967,7 +1972,7 @@ void Assembler::addsd(XMMRegister dst, XMMRegister src) { void Assembler::mulsd(XMMRegister dst, XMMRegister src) { - ASSERT(CpuFeatures::IsEnabled(CpuFeatures::SSE2)); + ASSERT(CpuFeatures::IsEnabled(SSE2)); EnsureSpace ensure_space(this); last_pc_ = pc_; EMIT(0xF2); @@ -1978,7 +1983,7 @@ void Assembler::mulsd(XMMRegister dst, XMMRegister src) { void Assembler::subsd(XMMRegister dst, XMMRegister src) { - ASSERT(CpuFeatures::IsEnabled(CpuFeatures::SSE2)); + ASSERT(CpuFeatures::IsEnabled(SSE2)); EnsureSpace ensure_space(this); last_pc_ = pc_; EMIT(0xF2); @@ -1989,7 +1994,7 @@ void Assembler::subsd(XMMRegister dst, XMMRegister src) { void Assembler::divsd(XMMRegister dst, XMMRegister src) { - ASSERT(CpuFeatures::IsEnabled(CpuFeatures::SSE2)); + ASSERT(CpuFeatures::IsEnabled(SSE2)); EnsureSpace ensure_space(this); last_pc_ = pc_; EMIT(0xF2); @@ -2000,7 +2005,7 @@ void Assembler::divsd(XMMRegister dst, XMMRegister src) { void Assembler::comisd(XMMRegister dst, XMMRegister src) { - ASSERT(CpuFeatures::IsEnabled(CpuFeatures::SSE2)); + ASSERT(CpuFeatures::IsEnabled(SSE2)); EnsureSpace ensure_space(this); last_pc_ = pc_; EMIT(0x66); @@ -2025,7 +2030,7 @@ void Assembler::movdbl(const Operand& dst, XMMRegister src) { void Assembler::movsd(const Operand& dst, XMMRegister src ) { - ASSERT(CpuFeatures::IsEnabled(CpuFeatures::SSE2)); + ASSERT(CpuFeatures::IsEnabled(SSE2)); EnsureSpace ensure_space(this); last_pc_ = pc_; EMIT(0xF2); // double @@ -2036,7 +2041,7 @@ void Assembler::movsd(const Operand& dst, XMMRegister src ) { void Assembler::movsd(XMMRegister dst, const Operand& src) { - ASSERT(CpuFeatures::IsEnabled(CpuFeatures::SSE2)); + 
ASSERT(CpuFeatures::IsEnabled(SSE2)); EnsureSpace ensure_space(this); last_pc_ = pc_; EMIT(0xF2); // double @@ -2245,10 +2250,15 @@ void Assembler::dd(uint32_t data, RelocInfo::Mode reloc_info) { void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) { ASSERT(rmode != RelocInfo::NONE); // Don't record external references unless the heap will be serialized. - if (rmode == RelocInfo::EXTERNAL_REFERENCE && - !Serializer::enabled() && - !FLAG_debug_code) { - return; + if (rmode == RelocInfo::EXTERNAL_REFERENCE) { +#ifdef DEBUG + if (!Serializer::enabled()) { + Serializer::TooLateToEnableNow(); + } +#endif + if (!Serializer::enabled() && !FLAG_debug_code) { + return; + } } RelocInfo rinfo(pc_, rmode, data); reloc_info_writer.Write(&rinfo); diff --git a/deps/v8/src/ia32/assembler-ia32.h b/deps/v8/src/ia32/assembler-ia32.h index a431d04c66..962206fb74 100644 --- a/deps/v8/src/ia32/assembler-ia32.h +++ b/deps/v8/src/ia32/assembler-ia32.h @@ -37,6 +37,8 @@ #ifndef V8_IA32_ASSEMBLER_IA32_H_ #define V8_IA32_ASSEMBLER_IA32_H_ +#include "serialize.h" + namespace v8 { namespace internal { @@ -358,15 +360,11 @@ class Displacement BASE_EMBEDDED { // } class CpuFeatures : public AllStatic { public: - // Feature flags bit positions. They are mostly based on the CPUID spec. - // (We assign CPUID itself to one of the currently reserved bits -- - // feel free to change this if needed.) - enum Feature { SSE3 = 32, SSE2 = 26, CMOV = 15, RDTSC = 4, CPUID = 10 }; // Detect features of the target CPU. Set safe defaults if the serializer // is enabled (snapshots must be portable). static void Probe(); // Check whether a feature is supported by the target CPU. 
- static bool IsSupported(Feature f) { + static bool IsSupported(CpuFeature f) { if (f == SSE2 && !FLAG_enable_sse2) return false; if (f == SSE3 && !FLAG_enable_sse3) return false; if (f == CMOV && !FLAG_enable_cmov) return false; @@ -374,29 +372,32 @@ class CpuFeatures : public AllStatic { return (supported_ & (static_cast<uint64_t>(1) << f)) != 0; } // Check whether a feature is currently enabled. - static bool IsEnabled(Feature f) { + static bool IsEnabled(CpuFeature f) { return (enabled_ & (static_cast<uint64_t>(1) << f)) != 0; } // Enable a specified feature within a scope. class Scope BASE_EMBEDDED { #ifdef DEBUG public: - explicit Scope(Feature f) { + explicit Scope(CpuFeature f) { + uint64_t mask = static_cast<uint64_t>(1) << f; ASSERT(CpuFeatures::IsSupported(f)); + ASSERT(!Serializer::enabled() || (found_by_runtime_probing_ & mask) == 0); old_enabled_ = CpuFeatures::enabled_; - CpuFeatures::enabled_ |= (static_cast<uint64_t>(1) << f); + CpuFeatures::enabled_ |= mask; } ~Scope() { CpuFeatures::enabled_ = old_enabled_; } private: uint64_t old_enabled_; #else public: - explicit Scope(Feature f) {} + explicit Scope(CpuFeature f) {} #endif }; private: static uint64_t supported_; static uint64_t enabled_; + static uint64_t found_by_runtime_probing_; }; @@ -440,12 +441,21 @@ class Assembler : public Malloced { inline static void set_target_address_at(Address pc, Address target); // This sets the branch destination (which is in the instruction on x86). + // This is for calls and branches within generated code. inline static void set_target_at(Address instruction_payload, Address target) { set_target_address_at(instruction_payload, target); } + // This sets the branch destination (which is in the instruction on x86). + // This is for calls and branches to runtime code. 
+ inline static void set_external_target_at(Address instruction_payload, + Address target) { + set_target_address_at(instruction_payload, target); + } + static const int kCallTargetSize = kPointerSize; + static const int kExternalTargetSize = kPointerSize; // Distance between the address of the code target in the call instruction // and the return address @@ -587,19 +597,18 @@ class Assembler : public Malloced { void rcl(Register dst, uint8_t imm8); void sar(Register dst, uint8_t imm8); - void sar(Register dst); + void sar_cl(Register dst); void sbb(Register dst, const Operand& src); void shld(Register dst, const Operand& src); void shl(Register dst, uint8_t imm8); - void shl(Register dst); + void shl_cl(Register dst); void shrd(Register dst, const Operand& src); void shr(Register dst, uint8_t imm8); - void shr(Register dst); void shr_cl(Register dst); void subb(const Operand& dst, int8_t imm8); diff --git a/deps/v8/src/ia32/builtins-ia32.cc b/deps/v8/src/ia32/builtins-ia32.cc index 963b0e3ac8..a164cfa85c 100644 --- a/deps/v8/src/ia32/builtins-ia32.cc +++ b/deps/v8/src/ia32/builtins-ia32.cc @@ -522,43 +522,26 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) { __ push(Operand(ebp, 2 * kPointerSize)); // push arguments __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION); - // Check the stack for overflow or a break request. - // We need to catch preemptions right here, otherwise an unlucky preemption - // could show up as a failed apply. - ExternalReference stack_guard_limit = - ExternalReference::address_of_stack_guard_limit(); - Label retry_preemption; - Label no_preemption; - __ bind(&retry_preemption); - __ mov(edi, Operand::StaticVariable(stack_guard_limit)); - __ cmp(esp, Operand(edi)); - __ j(above, &no_preemption, taken); - - // Preemption! - // Because builtins always remove the receiver from the stack, we - // have to fake one to avoid underflowing the stack. 
- __ push(eax); - __ push(Immediate(Smi::FromInt(0))); - - // Do call to runtime routine. - __ CallRuntime(Runtime::kStackGuard, 1); - __ pop(eax); - __ jmp(&retry_preemption); - - __ bind(&no_preemption); - + // Check the stack for overflow. We are not trying to catch + // interruptions (e.g. debug break and preemption) here, so the "real stack + // limit" is checked. Label okay; - // Make ecx the space we have left. + ExternalReference real_stack_limit = + ExternalReference::address_of_real_stack_limit(); + __ mov(edi, Operand::StaticVariable(real_stack_limit)); + // Make ecx the space we have left. The stack might already be overflowed + // here which will cause ecx to become negative. __ mov(ecx, Operand(esp)); __ sub(ecx, Operand(edi)); // Make edx the space we need for the array when it is unrolled onto the // stack. __ mov(edx, Operand(eax)); __ shl(edx, kPointerSizeLog2 - kSmiTagSize); + // Check if the arguments will overflow the stack. __ cmp(ecx, Operand(edx)); - __ j(greater, &okay, taken); + __ j(greater, &okay, taken); // Signed comparison. - // Too bad: Out of stack space. + // Out of stack space. __ push(Operand(ebp, 4 * kPointerSize)); // push this __ push(eax); __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION); @@ -898,7 +881,7 @@ static void AllocateJSArray(MacroAssembler* masm, // be preserved. static void ArrayNativeCode(MacroAssembler* masm, bool construct_call, - Label *call_generic_code) { + Label* call_generic_code) { Label argc_one_or_more, argc_two_or_more, prepare_generic_code_call; // Push the constructor and argc.
No need to tag argc as a smi, as there will diff --git a/deps/v8/src/ia32/codegen-ia32.cc b/deps/v8/src/ia32/codegen-ia32.cc index 4ac5527699..69a17cd9b5 100644 --- a/deps/v8/src/ia32/codegen-ia32.cc +++ b/deps/v8/src/ia32/codegen-ia32.cc @@ -29,6 +29,7 @@ #include "bootstrapper.h" #include "codegen-inl.h" +#include "compiler.h" #include "debug.h" #include "ic-inl.h" #include "parser.h" @@ -75,7 +76,6 @@ void DeferredCode::RestoreRegisters() { CodeGenState::CodeGenState(CodeGenerator* owner) : owner_(owner), - typeof_state_(NOT_INSIDE_TYPEOF), destination_(NULL), previous_(NULL) { owner_->set_state(this); @@ -83,10 +83,8 @@ CodeGenState::CodeGenState(CodeGenerator* owner) CodeGenState::CodeGenState(CodeGenerator* owner, - TypeofState typeof_state, ControlDestination* destination) : owner_(owner), - typeof_state_(typeof_state), destination_(destination), previous_(owner->state()) { owner_->set_state(this); @@ -415,13 +413,12 @@ Operand CodeGenerator::ContextSlotOperandCheckExtensions(Slot* slot, // partially compiled) into control flow to the control destination. // If force_control is true, control flow is forced. 
void CodeGenerator::LoadCondition(Expression* x, - TypeofState typeof_state, ControlDestination* dest, bool force_control) { ASSERT(!in_spilled_code()); int original_height = frame_->height(); - { CodeGenState new_state(this, typeof_state, dest); + { CodeGenState new_state(this, dest); Visit(x); // If we hit a stack overflow, we may not have actually visited @@ -450,17 +447,16 @@ void CodeGenerator::LoadCondition(Expression* x, } -void CodeGenerator::LoadAndSpill(Expression* expression, - TypeofState typeof_state) { +void CodeGenerator::LoadAndSpill(Expression* expression) { ASSERT(in_spilled_code()); set_in_spilled_code(false); - Load(expression, typeof_state); + Load(expression); frame_->SpillAll(); set_in_spilled_code(true); } -void CodeGenerator::Load(Expression* x, TypeofState typeof_state) { +void CodeGenerator::Load(Expression* expr) { #ifdef DEBUG int original_height = frame_->height(); #endif @@ -468,7 +464,7 @@ void CodeGenerator::Load(Expression* x, TypeofState typeof_state) { JumpTarget true_target; JumpTarget false_target; ControlDestination dest(&true_target, &false_target, true); - LoadCondition(x, typeof_state, &dest, false); + LoadCondition(expr, &dest, false); if (dest.false_was_fall_through()) { // The false target was just bound. @@ -543,23 +539,25 @@ void CodeGenerator::LoadGlobalReceiver() { } -// TODO(1241834): Get rid of this function in favor of just using Load, now -// that we have the INSIDE_TYPEOF typeof state. => Need to handle global -// variables w/o reference errors elsewhere. -void CodeGenerator::LoadTypeofExpression(Expression* x) { - Variable* variable = x->AsVariableProxy()->AsVariable(); +void CodeGenerator::LoadTypeofExpression(Expression* expr) { + // Special handling of identifiers as subexpressions of typeof. + Variable* variable = expr->AsVariableProxy()->AsVariable(); if (variable != NULL && !variable->is_this() && variable->is_global()) { - // NOTE: This is somewhat nasty. 
We force the compiler to load - // the variable as if through '<global>.<variable>' to make sure we - // do not get reference errors. + // For a global variable we build the property reference + // <global>.<variable> and perform a (regular non-contextual) property + // load to make sure we do not get reference errors. Slot global(variable, Slot::CONTEXT, Context::GLOBAL_INDEX); Literal key(variable->name()); - // TODO(1241834): Fetch the position from the variable instead of using - // no position. Property property(&global, &key, RelocInfo::kNoPosition); - Load(&property); + Reference ref(this, &property); + ref.GetValue(); + } else if (variable != NULL && variable->slot() != NULL) { + // For a variable that rewrites to a slot, we signal it is the immediate + // subexpression of a typeof. + LoadFromSlotCheckForArguments(variable->slot(), INSIDE_TYPEOF); } else { - Load(x, INSIDE_TYPEOF); + // Anything else can be handled normally. + Load(expr); } } @@ -1190,12 +1188,12 @@ void CodeGenerator::LikelySmiBinaryOperation(Token::Value op, // Perform the operation. switch (op) { case Token::SAR: - __ sar(answer.reg()); + __ sar_cl(answer.reg()); // No checks of result necessary break; case Token::SHR: { Label result_ok; - __ shr(answer.reg()); + __ shr_cl(answer.reg()); // Check that the *unsigned* result fits in a smi. Neither of // the two high-order bits can be set: // * 0x80000000: high bit would be lost when smi tagging. @@ -1216,7 +1214,7 @@ void CodeGenerator::LikelySmiBinaryOperation(Token::Value op, } case Token::SHL: { Label result_ok; - __ shl(answer.reg()); + __ shl_cl(answer.reg()); // Check that the *signed* result fits in a smi. 
__ cmp(answer.reg(), 0xc0000000); __ j(positive, &result_ok); @@ -1970,27 +1968,6 @@ void CodeGenerator::Comparison(Condition cc, } -class CallFunctionStub: public CodeStub { - public: - CallFunctionStub(int argc, InLoopFlag in_loop) - : argc_(argc), in_loop_(in_loop) { } - - void Generate(MacroAssembler* masm); - - private: - int argc_; - InLoopFlag in_loop_; - -#ifdef DEBUG - void Print() { PrintF("CallFunctionStub (args %d)\n", argc_); } -#endif - - Major MajorKey() { return CallFunction; } - int MinorKey() { return argc_; } - InLoopFlag InLoop() { return in_loop_; } -}; - - // Call the function just below TOS on the stack with the given // arguments. The receiver is the TOS. void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args, @@ -2027,7 +2004,7 @@ void CodeGenerator::CallApplyLazy(Property* apply, // Load the apply function onto the stack. This will usually // give us a megamorphic load site. Not super, but it works. Reference ref(this, apply); - ref.GetValue(NOT_INSIDE_TYPEOF); + ref.GetValue(); ASSERT(ref.type() == Reference::NAMED); // Load the receiver and the existing arguments object onto the @@ -2204,9 +2181,9 @@ void DeferredStackCheck::Generate() { void CodeGenerator::CheckStack() { DeferredStackCheck* deferred = new DeferredStackCheck; - ExternalReference stack_guard_limit = - ExternalReference::address_of_stack_guard_limit(); - __ cmp(esp, Operand::StaticVariable(stack_guard_limit)); + ExternalReference stack_limit = + ExternalReference::address_of_stack_limit(); + __ cmp(esp, Operand::StaticVariable(stack_limit)); deferred->Branch(below); deferred->BindExit(); } @@ -2366,7 +2343,7 @@ void CodeGenerator::VisitIfStatement(IfStatement* node) { JumpTarget then; JumpTarget else_; ControlDestination dest(&then, &else_, true); - LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &dest, true); + LoadCondition(node->condition(), &dest, true); if (dest.false_was_fall_through()) { // The else target was bound, so we compile the else part first. 
@@ -2393,7 +2370,7 @@ void CodeGenerator::VisitIfStatement(IfStatement* node) { ASSERT(!has_else_stm); JumpTarget then; ControlDestination dest(&then, &exit, true); - LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &dest, true); + LoadCondition(node->condition(), &dest, true); if (dest.false_was_fall_through()) { // The exit label was bound. We may have dangling jumps to the @@ -2413,7 +2390,7 @@ void CodeGenerator::VisitIfStatement(IfStatement* node) { ASSERT(!has_then_stm); JumpTarget else_; ControlDestination dest(&exit, &else_, false); - LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &dest, true); + LoadCondition(node->condition(), &dest, true); if (dest.true_was_fall_through()) { // The exit label was bound. We may have dangling jumps to the @@ -2435,7 +2412,7 @@ void CodeGenerator::VisitIfStatement(IfStatement* node) { // or control flow effect). LoadCondition is called without // forcing control flow. ControlDestination dest(&exit, &exit, true); - LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &dest, false); + LoadCondition(node->condition(), &dest, false); if (!dest.is_used()) { // We got a value on the frame rather than (or in addition to) // control flow. 
@@ -2472,6 +2449,7 @@ void CodeGenerator::VisitReturnStatement(ReturnStatement* node) { CodeForStatementPosition(node); Load(node->expression()); Result return_value = frame_->Pop(); + masm()->WriteRecordedPositions(); if (function_return_is_shadowed_) { function_return_.Jump(&return_value); } else { @@ -2735,8 +2713,10 @@ void CodeGenerator::VisitDoWhileStatement(DoWhileStatement* node) { node->continue_target()->Bind(); } if (has_valid_frame()) { + Comment cmnt(masm_, "[ DoWhileCondition"); + CodeForDoWhileConditionPosition(node); ControlDestination dest(&body, node->break_target(), false); - LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true); + LoadCondition(node->cond(), &dest, true); } if (node->break_target()->is_linked()) { node->break_target()->Bind(); @@ -2791,7 +2771,7 @@ void CodeGenerator::VisitWhileStatement(WhileStatement* node) { // Compile the test with the body as the true target and preferred // fall-through and with the break target as the false target. ControlDestination dest(&body, node->break_target(), true); - LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true); + LoadCondition(node->cond(), &dest, true); if (dest.false_was_fall_through()) { // If we got the break target as fall-through, the test may have @@ -2838,7 +2818,7 @@ void CodeGenerator::VisitWhileStatement(WhileStatement* node) { // The break target is the fall-through (body is a backward // jump from here and thus an invalid fall-through). ControlDestination dest(&body, node->break_target(), false); - LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true); + LoadCondition(node->cond(), &dest, true); } } else { // If we have chosen not to recompile the test at the bottom, @@ -2929,7 +2909,7 @@ void CodeGenerator::VisitForStatement(ForStatement* node) { // Compile the test with the body as the true target and preferred // fall-through and with the break target as the false target. 
ControlDestination dest(&body, node->break_target(), true); - LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true); + LoadCondition(node->cond(), &dest, true); if (dest.false_was_fall_through()) { // If we got the break target as fall-through, the test may have @@ -2999,7 +2979,7 @@ void CodeGenerator::VisitForStatement(ForStatement* node) { // The break target is the fall-through (body is a backward // jump from here). ControlDestination dest(&body, node->break_target(), false); - LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true); + LoadCondition(node->cond(), &dest, true); } } else { // Otherwise, jump back to the test at the top. @@ -3574,7 +3554,8 @@ void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) { Comment cmnt(masm_, "[ FunctionLiteral"); // Build the function boilerplate and instantiate it. - Handle<JSFunction> boilerplate = BuildBoilerplate(node); + Handle<JSFunction> boilerplate = + Compiler::BuildBoilerplate(node, script_, this); // Check for stack-overflow exception. if (HasStackOverflow()) return; InstantiateBoilerplate(boilerplate); @@ -3594,25 +3575,25 @@ void CodeGenerator::VisitConditional(Conditional* node) { JumpTarget else_; JumpTarget exit; ControlDestination dest(&then, &else_, true); - LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &dest, true); + LoadCondition(node->condition(), &dest, true); if (dest.false_was_fall_through()) { // The else target was bound, so we compile the else part first. - Load(node->else_expression(), typeof_state()); + Load(node->else_expression()); if (then.is_linked()) { exit.Jump(); then.Bind(); - Load(node->then_expression(), typeof_state()); + Load(node->then_expression()); } } else { // The then target was bound, so we compile the then part first. 
- Load(node->then_expression(), typeof_state()); + Load(node->then_expression()); if (else_.is_linked()) { exit.Jump(); else_.Bind(); - Load(node->else_expression(), typeof_state()); + Load(node->else_expression()); } } @@ -3934,7 +3915,7 @@ void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) { void CodeGenerator::VisitSlot(Slot* node) { Comment cmnt(masm_, "[ Slot"); - LoadFromSlotCheckForArguments(node, typeof_state()); + LoadFromSlotCheckForArguments(node, NOT_INSIDE_TYPEOF); } @@ -3947,7 +3928,7 @@ void CodeGenerator::VisitVariableProxy(VariableProxy* node) { } else { ASSERT(var->is_global()); Reference ref(this, node); - ref.GetValue(typeof_state()); + ref.GetValue(); } } @@ -3958,12 +3939,28 @@ void CodeGenerator::VisitLiteral(Literal* node) { } -void CodeGenerator::LoadUnsafeSmi(Register target, Handle<Object> value) { +void CodeGenerator::PushUnsafeSmi(Handle<Object> value) { + ASSERT(value->IsSmi()); + int bits = reinterpret_cast<int>(*value); + __ push(Immediate(bits & 0x0000FFFF)); + __ or_(Operand(esp, 0), Immediate(bits & 0xFFFF0000)); +} + + +void CodeGenerator::StoreUnsafeSmiToLocal(int offset, Handle<Object> value) { + ASSERT(value->IsSmi()); + int bits = reinterpret_cast<int>(*value); + __ mov(Operand(ebp, offset), Immediate(bits & 0x0000FFFF)); + __ or_(Operand(ebp, offset), Immediate(bits & 0xFFFF0000)); +} + + +void CodeGenerator::MoveUnsafeSmi(Register target, Handle<Object> value) { ASSERT(target.is_valid()); ASSERT(value->IsSmi()); int bits = reinterpret_cast<int>(*value); __ Set(target, Immediate(bits & 0x0000FFFF)); - __ xor_(target, bits & 0xFFFF0000); + __ or_(target, bits & 0xFFFF0000); } @@ -4354,9 +4351,9 @@ void CodeGenerator::VisitAssignment(Assignment* node) { // the target, with an implicit promise that it will be written to again // before it is read. 
if (literal != NULL || (right_var != NULL && right_var != var)) { - target.TakeValue(NOT_INSIDE_TYPEOF); + target.TakeValue(); } else { - target.GetValue(NOT_INSIDE_TYPEOF); + target.GetValue(); } Load(node->value()); GenericBinaryOperation(node->binary_op(), @@ -4404,7 +4401,7 @@ void CodeGenerator::VisitThrow(Throw* node) { void CodeGenerator::VisitProperty(Property* node) { Comment cmnt(masm_, "[ Property"); Reference property(this, node); - property.GetValue(typeof_state()); + property.GetValue(); } @@ -4589,7 +4586,7 @@ void CodeGenerator::VisitCall(Call* node) { // Load the function to call from the property through a reference. Reference ref(this, property); - ref.GetValue(NOT_INSIDE_TYPEOF); + ref.GetValue(); // Pass receiver to called function. if (property->is_synthetic()) { @@ -4699,10 +4696,10 @@ void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) { // This generates code that performs a charCodeAt() call or returns // undefined in order to trigger the slow case, Runtime_StringCharCodeAt. -// It can handle flat and sliced strings, 8 and 16 bit characters and -// cons strings where the answer is found in the left hand branch of the -// cons. The slow case will flatten the string, which will ensure that -// the answer is in the left hand side the next time around. +// It can handle flat, 8 and 16 bit characters and cons strings where the +// answer is found in the left hand branch of the cons. The slow case will +// flatten the string, which will ensure that the answer is in the left hand +// side the next time around. 
void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) { Comment(masm_, "[ GenerateFastCharCodeAt"); ASSERT(args->length() == 2); @@ -4710,7 +4707,6 @@ void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) { Label slow_case; Label end; Label not_a_flat_string; - Label a_cons_string; Label try_again_with_new_string; Label ascii_string; Label got_char_code; @@ -4792,7 +4788,7 @@ void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) { __ add(Operand(ecx), Immediate(String::kLongLengthShift)); // Fetch the length field into the temporary register. __ mov(temp.reg(), FieldOperand(object.reg(), String::kLengthOffset)); - __ shr(temp.reg()); // The shift amount in ecx is implicit operand. + __ shr_cl(temp.reg()); // Check for index out of range. __ cmp(index.reg(), Operand(temp.reg())); __ j(greater_equal, &slow_case); @@ -4832,21 +4828,16 @@ void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) { __ bind(¬_a_flat_string); __ and_(temp.reg(), kStringRepresentationMask); __ cmp(temp.reg(), kConsStringTag); - __ j(equal, &a_cons_string); - __ cmp(temp.reg(), kSlicedStringTag); __ j(not_equal, &slow_case); - // SlicedString. - // Add the offset to the index and trigger the slow case on overflow. - __ add(index.reg(), FieldOperand(object.reg(), SlicedString::kStartOffset)); - __ j(overflow, &slow_case); - // Getting the underlying string is done by running the cons string code. - // ConsString. - __ bind(&a_cons_string); - // Get the first of the two strings. Both sliced and cons strings - // store their source string at the same offset. - ASSERT(SlicedString::kBufferOffset == ConsString::kFirstOffset); + // Check that the right hand side is the empty string (ie if this is really a + // flat string in a cons string). If that is not the case we would rather go + // to the runtime system now, to flatten the string. 
+ __ mov(temp.reg(), FieldOperand(object.reg(), ConsString::kSecondOffset)); + __ cmp(Operand(temp.reg()), Immediate(Handle<String>(Heap::empty_string()))); + __ j(not_equal, &slow_case); + // Get the first of the two strings. __ mov(object.reg(), FieldOperand(object.reg(), ConsString::kFirstOffset)); __ jmp(&try_again_with_new_string); @@ -5224,9 +5215,6 @@ void CodeGenerator::VisitCallRuntime(CallRuntime* node) { void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) { - // Note that because of NOT and an optimization in comparison of a typeof - // expression to a literal string, this function can fail to leave a value - // on top of the frame or in the cc register. Comment cmnt(masm_, "[ UnaryOperation"); Token::Value op = node->op(); @@ -5235,7 +5223,7 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) { // Swap the true and false targets but keep the same actual label // as the fall through. destination()->Invert(); - LoadCondition(node->expression(), NOT_INSIDE_TYPEOF, destination(), true); + LoadCondition(node->expression(), destination(), true); // Swap the labels back. destination()->Invert(); @@ -5485,7 +5473,7 @@ void CodeGenerator::VisitCountOperation(CountOperation* node) { if (!is_postfix) frame_->Push(Smi::FromInt(0)); return; } - target.TakeValue(NOT_INSIDE_TYPEOF); + target.TakeValue(); Result new_value = frame_->Pop(); new_value.ToRegister(); @@ -5563,9 +5551,6 @@ void CodeGenerator::VisitCountOperation(CountOperation* node) { void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) { - // Note that due to an optimization in comparison operations (typeof - // compared to a string literal), we can evaluate a binary expression such - // as AND or OR and not leave a value on the frame or in the cc register. 
Comment cmnt(masm_, "[ BinaryOperation"); Token::Value op = node->op(); @@ -5581,7 +5566,7 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) { if (op == Token::AND) { JumpTarget is_true; ControlDestination dest(&is_true, destination()->false_target(), true); - LoadCondition(node->left(), NOT_INSIDE_TYPEOF, &dest, false); + LoadCondition(node->left(), &dest, false); if (dest.false_was_fall_through()) { // The current false target was used as the fall-through. If @@ -5600,7 +5585,7 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) { is_true.Bind(); // The left subexpression compiled to control flow, so the // right one is free to do so as well. - LoadCondition(node->right(), NOT_INSIDE_TYPEOF, destination(), false); + LoadCondition(node->right(), destination(), false); } else { // We have actually just jumped to or bound the current false // target but the current control destination is not marked as @@ -5611,7 +5596,7 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) { } else if (dest.is_used()) { // The left subexpression compiled to control flow (and is_true // was just bound), so the right is free to do so as well. - LoadCondition(node->right(), NOT_INSIDE_TYPEOF, destination(), false); + LoadCondition(node->right(), destination(), false); } else { // We have a materialized value on the frame, so we exit with @@ -5644,7 +5629,7 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) { } else if (op == Token::OR) { JumpTarget is_false; ControlDestination dest(destination()->true_target(), &is_false, false); - LoadCondition(node->left(), NOT_INSIDE_TYPEOF, &dest, false); + LoadCondition(node->left(), &dest, false); if (dest.true_was_fall_through()) { // The current true target was used as the fall-through. 
If @@ -5663,7 +5648,7 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) { is_false.Bind(); // The left subexpression compiled to control flow, so the // right one is free to do so as well. - LoadCondition(node->right(), NOT_INSIDE_TYPEOF, destination(), false); + LoadCondition(node->right(), destination(), false); } else { // We have just jumped to or bound the current true target but // the current control destination is not marked as used. @@ -5673,7 +5658,7 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) { } else if (dest.is_used()) { // The left subexpression compiled to control flow (and is_false // was just bound), so the right is free to do so as well. - LoadCondition(node->right(), NOT_INSIDE_TYPEOF, destination(), false); + LoadCondition(node->right(), destination(), false); } else { // We have a materialized value on the frame, so we exit with @@ -5805,6 +5790,9 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) { destination()->false_target()->Branch(zero); frame_->Spill(answer.reg()); __ CmpObjectType(answer.reg(), JS_FUNCTION_TYPE, answer.reg()); + destination()->true_target()->Branch(equal); + // Regular expressions are callable so typeof == 'function'. + __ CmpInstanceType(answer.reg(), JS_REGEXP_TYPE); answer.Unuse(); destination()->Split(equal); @@ -5814,10 +5802,13 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) { __ cmp(answer.reg(), Factory::null_value()); destination()->true_target()->Branch(equal); - // It can be an undetectable object. Result map = allocator()->Allocate(); ASSERT(map.is_valid()); - __ mov(map.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset)); + // Regular expressions are typeof == 'function', not 'object'. + __ CmpObjectType(answer.reg(), JS_REGEXP_TYPE, map.reg()); + destination()->false_target()->Branch(equal); + + // It can be an undetectable object. 
__ movzx_b(map.reg(), FieldOperand(map.reg(), Map::kBitFieldOffset)); __ test(map.reg(), Immediate(1 << Map::kIsUndetectable)); destination()->false_target()->Branch(not_zero); @@ -6066,7 +6057,7 @@ Handle<String> Reference::GetName() { } -void Reference::GetValue(TypeofState typeof_state) { +void Reference::GetValue() { ASSERT(!cgen_->in_spilled_code()); ASSERT(cgen_->HasValidEntryRegisters()); ASSERT(!is_illegal()); @@ -6083,17 +6074,11 @@ void Reference::GetValue(TypeofState typeof_state) { Comment cmnt(masm, "[ Load from Slot"); Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot(); ASSERT(slot != NULL); - cgen_->LoadFromSlotCheckForArguments(slot, typeof_state); + cgen_->LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF); break; } case NAMED: { - // TODO(1241834): Make sure that it is safe to ignore the - // distinction between expressions in a typeof and not in a - // typeof. If there is a chance that reference errors can be - // thrown below, we must distinguish between the two kinds of - // loads (typeof expression loads must not throw a reference - // error). Variable* var = expression_->AsVariableProxy()->AsVariable(); bool is_global = var != NULL; ASSERT(!is_global || var->is_global()); @@ -6163,8 +6148,6 @@ void Reference::GetValue(TypeofState typeof_state) { } case KEYED: { - // TODO(1241834): Make sure that this it is safe to ignore the - // distinction between expressions in a typeof and not in a typeof. Comment cmnt(masm, "[ Load from keyed Property"); Variable* var = expression_->AsVariableProxy()->AsVariable(); bool is_global = var != NULL; @@ -6283,13 +6266,13 @@ void Reference::GetValue(TypeofState typeof_state) { } -void Reference::TakeValue(TypeofState typeof_state) { +void Reference::TakeValue() { // For non-constant frame-allocated slots, we invalidate the value in the // slot. For all others, we fall back on GetValue. 
ASSERT(!cgen_->in_spilled_code()); ASSERT(!is_illegal()); if (type_ != SLOT) { - GetValue(typeof_state); + GetValue(); return; } @@ -6299,7 +6282,7 @@ void Reference::TakeValue(TypeofState typeof_state) { slot->type() == Slot::CONTEXT || slot->var()->mode() == Variable::CONST || slot->is_arguments()) { - GetValue(typeof_state); + GetValue(); return; } @@ -6728,11 +6711,11 @@ void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) { // Perform the operation. switch (op_) { case Token::SAR: - __ sar(eax); + __ sar_cl(eax); // No checks of result necessary break; case Token::SHR: - __ shr(eax); + __ shr_cl(eax); // Check that the *unsigned* result fits in a smi. // Neither of the two high-order bits can be set: // - 0x80000000: high bit would be lost when smi tagging. @@ -6743,7 +6726,7 @@ void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) { __ j(not_zero, slow, not_taken); break; case Token::SHL: - __ shl(eax); + __ shl_cl(eax); // Check that the *signed* result fits in a smi. __ cmp(eax, 0xc0000000); __ j(sign, slow, not_taken); @@ -6793,8 +6776,8 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) { // eax: y // edx: x - if (CpuFeatures::IsSupported(CpuFeatures::SSE2)) { - CpuFeatures::Scope use_sse2(CpuFeatures::SSE2); + if (CpuFeatures::IsSupported(SSE2)) { + CpuFeatures::Scope use_sse2(SSE2); FloatingPointHelper::LoadSse2Operands(masm, &call_runtime); switch (op_) { @@ -6889,7 +6872,7 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) { if (use_sse3_) { // Truncate the operands to 32-bit integers and check for // exceptions in doing so. 
- CpuFeatures::Scope scope(CpuFeatures::SSE3); + CpuFeatures::Scope scope(SSE3); __ fisttp_s(Operand(esp, 0 * kPointerSize)); __ fisttp_s(Operand(esp, 1 * kPointerSize)); __ fnstsw_ax(); @@ -6918,9 +6901,9 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) { case Token::BIT_OR: __ or_(eax, Operand(ecx)); break; case Token::BIT_AND: __ and_(eax, Operand(ecx)); break; case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break; - case Token::SAR: __ sar(eax); break; - case Token::SHL: __ shl(eax); break; - case Token::SHR: __ shr(eax); break; + case Token::SAR: __ sar_cl(eax); break; + case Token::SHL: __ shl_cl(eax); break; + case Token::SHR: __ shr_cl(eax); break; default: UNREACHABLE(); } if (op_ == Token::SHR) { @@ -7516,9 +7499,9 @@ void CompareStub::Generate(MacroAssembler* masm) { // Call builtin if operands are not floating point or smi. Label check_for_symbols; Label unordered; - if (CpuFeatures::IsSupported(CpuFeatures::SSE2)) { - CpuFeatures::Scope use_sse2(CpuFeatures::SSE2); - CpuFeatures::Scope use_cmov(CpuFeatures::CMOV); + if (CpuFeatures::IsSupported(SSE2)) { + CpuFeatures::Scope use_sse2(SSE2); + CpuFeatures::Scope use_cmov(CMOV); FloatingPointHelper::LoadSse2Operands(masm, &check_for_symbols); __ comisd(xmm0, xmm1); @@ -7707,11 +7690,84 @@ void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) { } +// If true, a Handle<T> passed by value is passed and returned by +// using the location_ field directly. If false, it is passed and +// returned as a pointer to a handle. 
+#ifdef USING_MAC_ABI +static const bool kPassHandlesDirectly = true; +#else +static const bool kPassHandlesDirectly = false; +#endif + + +void ApiGetterEntryStub::Generate(MacroAssembler* masm) { + Label get_result; + Label prologue; + Label promote_scheduled_exception; + __ EnterApiExitFrame(ExitFrame::MODE_NORMAL, kStackSpace, kArgc); + ASSERT_EQ(kArgc, 4); + if (kPassHandlesDirectly) { + // When handles as passed directly we don't have to allocate extra + // space for and pass an out parameter. + __ mov(Operand(esp, 0 * kPointerSize), ebx); // name. + __ mov(Operand(esp, 1 * kPointerSize), eax); // arguments pointer. + } else { + // The function expects three arguments to be passed but we allocate + // four to get space for the output cell. The argument slots are filled + // as follows: + // + // 3: output cell + // 2: arguments pointer + // 1: name + // 0: pointer to the output cell + // + // Note that this is one more "argument" than the function expects + // so the out cell will have to be popped explicitly after returning + // from the function. + __ mov(Operand(esp, 1 * kPointerSize), ebx); // name. + __ mov(Operand(esp, 2 * kPointerSize), eax); // arguments pointer. + __ mov(ebx, esp); + __ add(Operand(ebx), Immediate(3 * kPointerSize)); + __ mov(Operand(esp, 0 * kPointerSize), ebx); // output + __ mov(Operand(esp, 3 * kPointerSize), Immediate(0)); // out cell. + } + // Call the api function! + __ call(fun()->address(), RelocInfo::RUNTIME_ENTRY); + // Check if the function scheduled an exception. + ExternalReference scheduled_exception_address = + ExternalReference::scheduled_exception_address(); + __ cmp(Operand::StaticVariable(scheduled_exception_address), + Immediate(Factory::the_hole_value())); + __ j(not_equal, &promote_scheduled_exception, not_taken); + if (!kPassHandlesDirectly) { + // The returned value is a pointer to the handle holding the result. + // Dereference this to get to the location. 
+ __ mov(eax, Operand(eax, 0)); + } + // Check if the result handle holds 0 + __ test(eax, Operand(eax)); + __ j(not_zero, &get_result, taken); + // It was zero; the result is undefined. + __ mov(eax, Factory::undefined_value()); + __ jmp(&prologue); + // It was non-zero. Dereference to get the result value. + __ bind(&get_result); + __ mov(eax, Operand(eax, 0)); + __ bind(&prologue); + __ LeaveExitFrame(ExitFrame::MODE_NORMAL); + __ ret(0); + __ bind(&promote_scheduled_exception); + __ TailCallRuntime(ExternalReference(Runtime::kPromoteScheduledException), + 0, + 1); +} + + void CEntryStub::GenerateCore(MacroAssembler* masm, Label* throw_normal_exception, Label* throw_termination_exception, Label* throw_out_of_memory_exception, - StackFrame::Type frame_type, + ExitFrame::Mode mode, bool do_gc, bool always_allocate_scope) { // eax: result parameter for PerformGC, if any @@ -7761,7 +7817,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm, __ j(zero, &failure_returned, not_taken); // Exit the JavaScript to C++ exit frame. - __ LeaveExitFrame(frame_type); + __ LeaveExitFrame(mode); __ ret(0); // Handling of failure. @@ -7860,12 +7916,12 @@ void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) { // of a proper result. The builtin entry handles this by performing // a garbage collection and retrying the builtin (twice). - StackFrame::Type frame_type = is_debug_break ? - StackFrame::EXIT_DEBUG : - StackFrame::EXIT; + ExitFrame::Mode mode = is_debug_break + ? ExitFrame::MODE_DEBUG + : ExitFrame::MODE_NORMAL; // Enter the exit frame that transitions from JavaScript to C++. 
- __ EnterExitFrame(frame_type); + __ EnterExitFrame(mode); // eax: result parameter for PerformGC, if any (setup below) // ebx: pointer to builtin function (C callee-saved) @@ -7883,7 +7939,7 @@ void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) { &throw_normal_exception, &throw_termination_exception, &throw_out_of_memory_exception, - frame_type, + mode, false, false); @@ -7892,7 +7948,7 @@ void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) { &throw_normal_exception, &throw_termination_exception, &throw_out_of_memory_exception, - frame_type, + mode, true, false); @@ -7903,7 +7959,7 @@ void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) { &throw_normal_exception, &throw_termination_exception, &throw_out_of_memory_exception, - frame_type, + mode, true, true); diff --git a/deps/v8/src/ia32/codegen-ia32.h b/deps/v8/src/ia32/codegen-ia32.h index 3669e9d10d..0e69a63d89 100644 --- a/deps/v8/src/ia32/codegen-ia32.h +++ b/deps/v8/src/ia32/codegen-ia32.h @@ -77,12 +77,12 @@ class Reference BASE_EMBEDDED { // Generate code to push the value of the reference on top of the // expression stack. The reference is expected to be already on top of // the expression stack, and it is left in place with its value above it. - void GetValue(TypeofState typeof_state); + void GetValue(); // Like GetValue except that the slot is expected to be written to before // being read from again. Thae value of the reference may be invalidated, // causing subsequent attempts to read it to fail. - void TakeValue(TypeofState typeof_state); + void TakeValue(); // Generate code to store the value on top of the expression stack in the // reference. The reference is expected to be immediately below the value @@ -241,28 +241,20 @@ class CodeGenState BASE_EMBEDDED { explicit CodeGenState(CodeGenerator* owner); // Create a code generator state based on a code generator's current - // state. 
The new state may or may not be inside a typeof, and has its - // own control destination. - CodeGenState(CodeGenerator* owner, - TypeofState typeof_state, - ControlDestination* destination); + // state. The new state has its own control destination. + CodeGenState(CodeGenerator* owner, ControlDestination* destination); // Destroy a code generator state and restore the owning code generator's // previous state. ~CodeGenState(); // Accessors for the state. - TypeofState typeof_state() const { return typeof_state_; } ControlDestination* destination() const { return destination_; } private: // The owning code generator. CodeGenerator* owner_; - // A flag indicating whether we are compiling the immediate subexpression - // of a typeof expression. - TypeofState typeof_state_; - // A control destination in case the expression has a control-flow // effect. ControlDestination* destination_; @@ -307,17 +299,12 @@ class CodeGenerator: public AstVisitor { static bool ShouldGenerateLog(Expression* type); #endif - static void SetFunctionInfo(Handle<JSFunction> fun, - FunctionLiteral* lit, - bool is_toplevel, - Handle<Script> script); - static void RecordPositions(MacroAssembler* masm, int pos); // Accessors MacroAssembler* masm() { return masm_; } - VirtualFrame* frame() const { return frame_; } + Handle<Script> script() { return script_; } bool has_valid_frame() const { return frame_ != NULL; } @@ -352,7 +339,6 @@ class CodeGenerator: public AstVisitor { void ProcessDeferred(); // State - TypeofState typeof_state() const { return state_->typeof_state(); } ControlDestination* destination() const { return state_->destination(); } // Track loop nesting level. 
@@ -412,18 +398,16 @@ class CodeGenerator: public AstVisitor { } void LoadCondition(Expression* x, - TypeofState typeof_state, ControlDestination* destination, bool force_control); - void Load(Expression* x, TypeofState typeof_state = NOT_INSIDE_TYPEOF); + void Load(Expression* expr); void LoadGlobal(); void LoadGlobalReceiver(); // Generate code to push the value of an expression on top of the frame // and then spill the frame fully to memory. This function is used // temporarily while the code generator is being transformed. - void LoadAndSpill(Expression* expression, - TypeofState typeof_state = NOT_INSIDE_TYPEOF); + void LoadAndSpill(Expression* expression); // Read a value from a slot and leave it on top of the expression stack. void LoadFromSlot(Slot* slot, TypeofState typeof_state); @@ -484,9 +468,11 @@ class CodeGenerator: public AstVisitor { // than 16 bits. static const int kMaxSmiInlinedBits = 16; bool IsUnsafeSmi(Handle<Object> value); - // Load an integer constant x into a register target using + // Load an integer constant x into a register target or into the stack using // at most 16 bits of user-controlled data per assembly operation. 
- void LoadUnsafeSmi(Register target, Handle<Object> value); + void MoveUnsafeSmi(Register target, Handle<Object> value); + void StoreUnsafeSmiToLocal(int offset, Handle<Object> value); + void PushUnsafeSmi(Handle<Object> value); void CallWithArguments(ZoneList<Expression*>* arguments, int position); @@ -511,8 +497,6 @@ class CodeGenerator: public AstVisitor { const InlineRuntimeLUT& new_entry, InlineRuntimeLUT* old_entry); - static Handle<Code> ComputeLazyCompile(int argc); - Handle<JSFunction> BuildBoilerplate(FunctionLiteral* node); void ProcessDeclarations(ZoneList<Declaration*>* declarations); static Handle<Code> ComputeCallInitialize(int argc, InLoopFlag in_loop); @@ -574,6 +558,7 @@ class CodeGenerator: public AstVisitor { void CodeForFunctionPosition(FunctionLiteral* fun); void CodeForReturnPosition(FunctionLiteral* fun); void CodeForStatementPosition(Statement* stmt); + void CodeForDoWhileConditionPosition(DoWhileStatement* stmt); void CodeForSourcePosition(int pos); #ifdef DEBUG @@ -626,6 +611,27 @@ class CodeGenerator: public AstVisitor { }; +class CallFunctionStub: public CodeStub { + public: + CallFunctionStub(int argc, InLoopFlag in_loop) + : argc_(argc), in_loop_(in_loop) { } + + void Generate(MacroAssembler* masm); + + private: + int argc_; + InLoopFlag in_loop_; + +#ifdef DEBUG + void Print() { PrintF("CallFunctionStub (args %d)\n", argc_); } +#endif + + Major MajorKey() { return CallFunction; } + int MinorKey() { return argc_; } + InLoopFlag InLoop() { return in_loop_; } +}; + + class ToBooleanStub: public CodeStub { public: ToBooleanStub() { } @@ -655,7 +661,7 @@ class GenericBinaryOpStub: public CodeStub { flags_(flags), args_in_registers_(false), args_reversed_(false) { - use_sse3_ = CpuFeatures::IsSupported(CpuFeatures::SSE3); + use_sse3_ = CpuFeatures::IsSupported(SSE3); ASSERT(OpBits::is_valid(Token::NUM_TOKENS)); } diff --git a/deps/v8/src/ia32/disasm-ia32.cc b/deps/v8/src/ia32/disasm-ia32.cc index 3e3ca73e6b..df5a28a54b 100644 --- 
a/deps/v8/src/ia32/disasm-ia32.cc +++ b/deps/v8/src/ia32/disasm-ia32.cc @@ -272,6 +272,17 @@ class DisassemblerIA32 { }; + enum ShiftOpcodeExtension { + kROL = 0, + kROR = 1, + kRCL = 2, + kRCR = 3, + kSHL = 4, + KSHR = 5, + kSAR = 7 + }; + + const char* NameOfCPURegister(int reg) const { return converter_.NameOfCPURegister(reg); } @@ -536,31 +547,22 @@ int DisassemblerIA32::D1D3C1Instruction(byte* data) { int num_bytes = 2; if (mod == 3) { const char* mnem = NULL; + switch (regop) { + case kROL: mnem = "rol"; break; + case kROR: mnem = "ror"; break; + case kRCL: mnem = "rcl"; break; + case kSHL: mnem = "shl"; break; + case KSHR: mnem = "shr"; break; + case kSAR: mnem = "sar"; break; + default: UnimplementedInstruction(); + } if (op == 0xD1) { imm8 = 1; - switch (regop) { - case edx: mnem = "rcl"; break; - case edi: mnem = "sar"; break; - case esp: mnem = "shl"; break; - default: UnimplementedInstruction(); - } } else if (op == 0xC1) { imm8 = *(data+2); num_bytes = 3; - switch (regop) { - case edx: mnem = "rcl"; break; - case esp: mnem = "shl"; break; - case ebp: mnem = "shr"; break; - case edi: mnem = "sar"; break; - default: UnimplementedInstruction(); - } } else if (op == 0xD3) { - switch (regop) { - case esp: mnem = "shl"; break; - case ebp: mnem = "shr"; break; - case edi: mnem = "sar"; break; - default: UnimplementedInstruction(); - } + // Shift/rotate by cl. 
} ASSERT_NE(NULL, mnem); AppendToBuffer("%s %s,", mnem, NameOfCPURegister(rm)); diff --git a/deps/v8/src/ia32/fast-codegen-ia32.cc b/deps/v8/src/ia32/fast-codegen-ia32.cc index 247f124962..a01d754e47 100644 --- a/deps/v8/src/ia32/fast-codegen-ia32.cc +++ b/deps/v8/src/ia32/fast-codegen-ia32.cc @@ -28,8 +28,10 @@ #include "v8.h" #include "codegen-inl.h" +#include "compiler.h" #include "fast-codegen.h" #include "parser.h" +#include "debug.h" namespace v8 { namespace internal { @@ -60,102 +62,341 @@ void FastCodeGenerator::Generate(FunctionLiteral* fun) { { Comment cmnt(masm_, "[ Allocate locals"); int locals_count = fun->scope()->num_stack_slots(); - for (int i = 0; i < locals_count; i++) { + if (locals_count == 1) { __ push(Immediate(Factory::undefined_value())); + } else if (locals_count > 1) { + __ mov(eax, Immediate(Factory::undefined_value())); + for (int i = 0; i < locals_count; i++) { + __ push(eax); + } + } + } + + bool function_in_register = true; + + Variable* arguments = fun->scope()->arguments()->AsVariable(); + if (arguments != NULL) { + // Function uses arguments object. + Comment cmnt(masm_, "[ Allocate arguments object"); + __ push(edi); + // Receiver is just before the parameters on the caller's stack. + __ lea(edx, Operand(ebp, StandardFrameConstants::kCallerSPOffset + + fun->num_parameters() * kPointerSize)); + __ push(edx); + __ push(Immediate(Smi::FromInt(fun->num_parameters()))); + // Arguments to ArgumentsAccessStub: + // function, receiver address, parameter count. + // The stub will rewrite receiever and parameter count if the previous + // stack frame was an arguments adapter frame. 
+ ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT); + __ CallStub(&stub); + __ mov(Operand(ebp, SlotOffset(arguments->slot())), eax); + Slot* dot_arguments_slot = + fun->scope()->arguments_shadow()->AsVariable()->slot(); + __ mov(Operand(ebp, SlotOffset(dot_arguments_slot)), eax); + + function_in_register = false; + } + + // Possibly allocate a local context. + if (fun->scope()->num_heap_slots() > 0) { + Comment cmnt(masm_, "[ Allocate local context"); + if (function_in_register) { + // Argument to NewContext is the function, still in edi. + __ push(edi); + } else { + // Argument to NewContext is the function, no longer in edi. + __ push(Operand(ebp, JavaScriptFrameConstants::kFunctionOffset)); + } + __ CallRuntime(Runtime::kNewContext, 1); + // Context is returned in both eax and esi. It replaces the context + // passed to us. It's saved in the stack and kept live in esi. + __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi); +#ifdef DEBUG + // Assert we do not have to copy any parameters into the context. 
+ for (int i = 0, len = fun->scope()->num_parameters(); i < len; i++) { + Slot* slot = fun->scope()->parameter(i)->slot(); + ASSERT(slot != NULL && slot->type() != Slot::CONTEXT); } +#endif + } + + { Comment cmnt(masm_, "[ Declarations"); + VisitDeclarations(fun->scope()->declarations()); } { Comment cmnt(masm_, "[ Stack check"); Label ok; - ExternalReference stack_guard_limit = - ExternalReference::address_of_stack_guard_limit(); - __ cmp(esp, Operand::StaticVariable(stack_guard_limit)); + ExternalReference stack_limit = + ExternalReference::address_of_stack_limit(); + __ cmp(esp, Operand::StaticVariable(stack_limit)); __ j(above_equal, &ok, taken); StackCheckStub stub; __ CallStub(&stub); __ bind(&ok); } - { Comment cmnt(masm_, "[ Declarations"); - VisitDeclarations(fun->scope()->declarations()); - } - if (FLAG_trace) { __ CallRuntime(Runtime::kTraceEnter, 0); } { Comment cmnt(masm_, "[ Body"); + ASSERT(loop_depth() == 0); VisitStatements(fun->body()); + ASSERT(loop_depth() == 0); } { Comment cmnt(masm_, "[ return <undefined>;"); - // Emit a 'return undefined' in case control fell off the end of the - // body. + // Emit a 'return undefined' in case control fell off the end of the body. __ mov(eax, Factory::undefined_value()); - SetReturnPosition(fun); + EmitReturnSequence(function_->end_position()); + } +} + +void FastCodeGenerator::EmitReturnSequence(int position) { + Comment cmnt(masm_, "[ Return sequence"); + if (return_label_.is_bound()) { + __ jmp(&return_label_); + } else { + // Common return label + __ bind(&return_label_); if (FLAG_trace) { __ push(eax); __ CallRuntime(Runtime::kTraceExit, 1); } +#ifdef DEBUG + // Add a label for checking the size of the code used for returning. + Label check_exit_codesize; + masm_->bind(&check_exit_codesize); +#endif + CodeGenerator::RecordPositions(masm_, position); __ RecordJSReturn(); // Do not use the leave instruction here because it is too short to // patch with the code required by the debugger. 
__ mov(esp, ebp); __ pop(ebp); - __ ret((fun->scope()->num_parameters() + 1) * kPointerSize); + __ ret((function_->scope()->num_parameters() + 1) * kPointerSize); +#ifdef ENABLE_DEBUGGER_SUPPORT + // Check that the size of the code used for returning matches what is + // expected by the debugger. + ASSERT_EQ(Debug::kIa32JSReturnSequenceLength, + masm_->SizeOfCodeGeneratedSince(&check_exit_codesize)); +#endif } } -void FastCodeGenerator::Move(Location destination, Slot* source) { - switch (destination.type()) { - case Location::kUninitialized: +void FastCodeGenerator::Move(Expression::Context context, Register source) { + switch (context) { + case Expression::kUninitialized: UNREACHABLE(); - case Location::kEffect: + case Expression::kEffect: break; - case Location::kValue: - __ push(Operand(ebp, SlotOffset(source))); + case Expression::kValue: + __ push(source); + break; + case Expression::kTest: + TestAndBranch(source, true_label_, false_label_); break; + case Expression::kValueTest: { + Label discard; + __ push(source); + TestAndBranch(source, true_label_, &discard); + __ bind(&discard); + __ add(Operand(esp), Immediate(kPointerSize)); + __ jmp(false_label_); + break; + } + case Expression::kTestValue: { + Label discard; + __ push(source); + TestAndBranch(source, &discard, false_label_); + __ bind(&discard); + __ add(Operand(esp), Immediate(kPointerSize)); + __ jmp(true_label_); + } } } -void FastCodeGenerator::Move(Location destination, Literal* expr) { - switch (destination.type()) { - case Location::kUninitialized: +void FastCodeGenerator::Move(Expression::Context context, Slot* source) { + switch (context) { + case Expression::kUninitialized: UNREACHABLE(); - case Location::kEffect: + case Expression::kEffect: break; - case Location::kValue: - __ push(Immediate(expr->handle())); + case Expression::kValue: + __ push(Operand(ebp, SlotOffset(source))); + break; + case Expression::kTest: // Fall through. + case Expression::kValueTest: // Fall through. 
+ case Expression::kTestValue: + __ mov(eax, Operand(ebp, SlotOffset(source))); + Move(context, eax); break; } } -void FastCodeGenerator::Move(Slot* destination, Location source) { - switch (source.type()) { - case Location::kUninitialized: // Fall through. - case Location::kEffect: +void FastCodeGenerator::Move(Expression::Context context, Literal* expr) { + switch (context) { + case Expression::kUninitialized: UNREACHABLE(); - case Location::kValue: - __ pop(Operand(ebp, SlotOffset(destination))); + case Expression::kEffect: + break; + case Expression::kValue: + __ push(Immediate(expr->handle())); + break; + case Expression::kTest: // Fall through. + case Expression::kValueTest: // Fall through. + case Expression::kTestValue: + __ mov(eax, expr->handle()); + Move(context, eax); break; } } -void FastCodeGenerator::DropAndMove(Location destination, Register source) { - switch (destination.type()) { - case Location::kUninitialized: +void FastCodeGenerator::DropAndMove(Expression::Context context, + Register source) { + switch (context) { + case Expression::kUninitialized: UNREACHABLE(); - case Location::kEffect: + case Expression::kEffect: + __ add(Operand(esp), Immediate(kPointerSize)); + break; + case Expression::kValue: + __ mov(Operand(esp, 0), source); + break; + case Expression::kTest: + ASSERT(!source.is(esp)); __ add(Operand(esp), Immediate(kPointerSize)); + TestAndBranch(source, true_label_, false_label_); break; - case Location::kValue: + case Expression::kValueTest: { + Label discard; + __ mov(Operand(esp, 0), source); + TestAndBranch(source, true_label_, &discard); + __ bind(&discard); + __ add(Operand(esp), Immediate(kPointerSize)); + __ jmp(false_label_); + break; + } + case Expression::kTestValue: { + Label discard; __ mov(Operand(esp, 0), source); + TestAndBranch(source, &discard, false_label_); + __ bind(&discard); + __ add(Operand(esp), Immediate(kPointerSize)); + __ jmp(true_label_); + break; + } + } +} + + +void 
FastCodeGenerator::TestAndBranch(Register source, + Label* true_label, + Label* false_label) { + ASSERT_NE(NULL, true_label); + ASSERT_NE(NULL, false_label); + // Use the shared ToBoolean stub to compile the value in the register into + // control flow to the code generator's true and false labels. Perform + // the fast checks assumed by the stub. + __ cmp(source, Factory::undefined_value()); // The undefined value is false. + __ j(equal, false_label); + __ cmp(source, Factory::true_value()); // True is true. + __ j(equal, true_label); + __ cmp(source, Factory::false_value()); // False is false. + __ j(equal, false_label); + ASSERT_EQ(0, kSmiTag); + __ test(source, Operand(source)); // The smi zero is false. + __ j(zero, false_label); + __ test(source, Immediate(kSmiTagMask)); // All other smis are true. + __ j(zero, true_label); + + // Call the stub for all other cases. + __ push(source); + ToBooleanStub stub; + __ CallStub(&stub); + __ test(eax, Operand(eax)); // The stub returns nonzero for true. + __ j(not_zero, true_label); + __ jmp(false_label); +} + + +void FastCodeGenerator::VisitDeclaration(Declaration* decl) { + Comment cmnt(masm_, "[ Declaration"); + Variable* var = decl->proxy()->var(); + ASSERT(var != NULL); // Must have been resolved. + Slot* slot = var->slot(); + ASSERT(slot != NULL); // No global declarations here. + + // We have 3 cases for slots: LOOKUP, LOCAL, CONTEXT. + switch (slot->type()) { + case Slot::LOOKUP: { + __ push(esi); + __ push(Immediate(var->name())); + // Declaration nodes are always introduced in one of two modes. + ASSERT(decl->mode() == Variable::VAR || decl->mode() == Variable::CONST); + PropertyAttributes attr = + (decl->mode() == Variable::VAR) ? NONE : READ_ONLY; + __ push(Immediate(Smi::FromInt(attr))); + // Push initial value, if any. + // Note: For variables we must not push an initial value (such as + // 'undefined') because we may have a (legal) redeclaration and we + // must not destroy the current value. 
+ if (decl->mode() == Variable::CONST) { + __ push(Immediate(Factory::the_hole_value())); + } else if (decl->fun() != NULL) { + Visit(decl->fun()); + } else { + __ push(Immediate(Smi::FromInt(0))); // No initial value! + } + __ CallRuntime(Runtime::kDeclareContextSlot, 4); + break; + } + case Slot::LOCAL: + if (decl->mode() == Variable::CONST) { + __ mov(Operand(ebp, SlotOffset(var->slot())), + Immediate(Factory::the_hole_value())); + } else if (decl->fun() != NULL) { + Visit(decl->fun()); + __ pop(Operand(ebp, SlotOffset(var->slot()))); + } + break; + case Slot::CONTEXT: + // The variable in the decl always resides in the current context. + ASSERT(function_->scope()->ContextChainLength(slot->var()->scope()) == 0); + if (decl->mode() == Variable::CONST) { + __ mov(eax, Immediate(Factory::the_hole_value())); + if (FLAG_debug_code) { + // Check if we have the correct context pointer. + __ mov(ebx, + CodeGenerator::ContextOperand(esi, Context::FCONTEXT_INDEX)); + __ cmp(ebx, Operand(esi)); + __ Check(equal, "Unexpected declaration in current context."); + } + __ mov(CodeGenerator::ContextOperand(esi, slot->index()), eax); + // No write barrier since the_hole_value is in old space. + } else if (decl->fun() != NULL) { + Visit(decl->fun()); + __ pop(eax); + if (FLAG_debug_code) { + // Check if we have the correct context pointer. 
+ __ mov(ebx, + CodeGenerator::ContextOperand(esi, Context::FCONTEXT_INDEX)); + __ cmp(ebx, Operand(esi)); + __ Check(equal, "Unexpected declaration in current context."); + } + __ mov(CodeGenerator::ContextOperand(esi, slot->index()), eax); + int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize; + __ RecordWrite(esi, offset, eax, ecx); + } break; + default: + UNREACHABLE(); } } @@ -172,27 +413,15 @@ void FastCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) { void FastCodeGenerator::VisitReturnStatement(ReturnStatement* stmt) { Comment cmnt(masm_, "[ ReturnStatement"); - SetStatementPosition(stmt); Expression* expr = stmt->expression(); - // Complete the statement based on the type of the subexpression. if (expr->AsLiteral() != NULL) { __ mov(eax, expr->AsLiteral()->handle()); } else { + ASSERT_EQ(Expression::kValue, expr->context()); Visit(expr); - Move(eax, expr->location()); - } - - if (FLAG_trace) { - __ push(eax); - __ CallRuntime(Runtime::kTraceExit, 1); + __ pop(eax); } - __ RecordJSReturn(); - - // Do not use the leave instruction here because it is too short to - // patch with the code required by the debugger. - __ mov(esp, ebp); - __ pop(ebp); - __ ret((function_->scope()->num_parameters() + 1) * kPointerSize); + EmitReturnSequence(stmt->statement_pos()); } @@ -200,7 +429,8 @@ void FastCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) { Comment cmnt(masm_, "[ FunctionLiteral"); // Build the function boilerplate and instantiate it. 
- Handle<JSFunction> boilerplate = BuildBoilerplate(expr); + Handle<JSFunction> boilerplate = + Compiler::BuildBoilerplate(expr, script_, this); if (HasStackOverflow()) return; ASSERT(boilerplate->IsBoilerplate()); @@ -209,7 +439,7 @@ void FastCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) { __ push(esi); __ push(Immediate(boilerplate)); __ CallRuntime(Runtime::kNewClosure, 2); - Move(expr->location(), eax); + Move(expr->context(), eax); } @@ -217,6 +447,7 @@ void FastCodeGenerator::VisitVariableProxy(VariableProxy* expr) { Comment cmnt(masm_, "[ VariableProxy"); Expression* rewrite = expr->var()->rewrite(); if (rewrite == NULL) { + ASSERT(expr->var()->is_global()); Comment cmnt(masm_, "Global variable"); // Use inline caching. Variable name is passed in ecx and the global // object on the stack. @@ -230,16 +461,62 @@ void FastCodeGenerator::VisitVariableProxy(VariableProxy* expr) { // (eg, push/pop elimination). __ nop(); - DropAndMove(expr->location(), eax); + DropAndMove(expr->context(), eax); + } else if (rewrite->AsSlot() != NULL) { + Slot* slot = rewrite->AsSlot(); + switch (slot->type()) { + case Slot::LOCAL: + case Slot::PARAMETER: { + Comment cmnt(masm_, "Stack slot"); + Move(expr->context(), slot); + break; + } + + case Slot::CONTEXT: { + Comment cmnt(masm_, "Context slot"); + int chain_length = + function_->scope()->ContextChainLength(slot->var()->scope()); + if (chain_length > 0) { + // Move up the chain of contexts to the context containing the slot. + __ mov(eax, + Operand(esi, Context::SlotOffset(Context::CLOSURE_INDEX))); + // Load the function context (which is the incoming, outer context). + __ mov(eax, FieldOperand(eax, JSFunction::kContextOffset)); + for (int i = 1; i < chain_length; i++) { + __ mov(eax, + Operand(eax, Context::SlotOffset(Context::CLOSURE_INDEX))); + __ mov(eax, FieldOperand(eax, JSFunction::kContextOffset)); + } + // The context may be an intermediate context, not a function context. 
+ __ mov(eax, + Operand(eax, Context::SlotOffset(Context::FCONTEXT_INDEX))); + } else { // Slot is in the current function context. + // The context may be an intermediate context, not a function context. + __ mov(eax, + Operand(esi, Context::SlotOffset(Context::FCONTEXT_INDEX))); + } + __ mov(eax, Operand(eax, Context::SlotOffset(slot->index()))); + Move(expr->context(), eax); + break; + } + + case Slot::LOOKUP: + UNREACHABLE(); + break; + } } else { - Comment cmnt(masm_, "Stack slot"); - Move(expr->location(), rewrite->AsSlot()); + // The parameter variable has been rewritten into an explict access to + // the arguments object. + Property* property = rewrite->AsProperty(); + ASSERT_NOT_NULL(property); + ASSERT_EQ(expr->context(), property->context()); + Visit(property); } } void FastCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) { - Comment cmnt(masm_, "[ RegExp Literal"); + Comment cmnt(masm_, "[ RegExpLiteral"); Label done; // Registers will be used as follows: // edi = JS function. 
@@ -261,7 +538,7 @@ void FastCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) { __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4); // Label done: __ bind(&done); - Move(expr->location(), eax); + Move(expr->context(), eax); } @@ -318,7 +595,8 @@ void FastCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) { case ObjectLiteral::Property::COMPUTED: if (key->handle()->IsSymbol()) { Visit(value); - Move(eax, value->location()); + ASSERT_EQ(Expression::kValue, value->context()); + __ pop(eax); __ mov(ecx, Immediate(key->handle())); Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize)); __ call(ic, RelocInfo::CODE_TARGET); @@ -329,9 +607,9 @@ void FastCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) { case ObjectLiteral::Property::PROTOTYPE: __ push(eax); Visit(key); - ASSERT(key->location().is_value()); + ASSERT_EQ(Expression::kValue, key->context()); Visit(value); - ASSERT(value->location().is_value()); + ASSERT_EQ(Expression::kValue, value->context()); __ CallRuntime(Runtime::kSetProperty, 3); __ mov(eax, Operand(esp, 0)); // Restore result into eax. break; @@ -339,27 +617,49 @@ void FastCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) { case ObjectLiteral::Property::GETTER: __ push(eax); Visit(key); - ASSERT(key->location().is_value()); + ASSERT_EQ(Expression::kValue, key->context()); __ push(Immediate(property->kind() == ObjectLiteral::Property::SETTER ? Smi::FromInt(1) : Smi::FromInt(0))); Visit(value); - ASSERT(value->location().is_value()); + ASSERT_EQ(Expression::kValue, value->context()); __ CallRuntime(Runtime::kDefineAccessor, 4); __ mov(eax, Operand(esp, 0)); // Restore result into eax. 
break; default: UNREACHABLE(); } } - switch (expr->location().type()) { - case Location::kUninitialized: + switch (expr->context()) { + case Expression::kUninitialized: UNREACHABLE(); - case Location::kEffect: + case Expression::kEffect: if (result_saved) __ add(Operand(esp), Immediate(kPointerSize)); break; - case Location::kValue: + case Expression::kValue: if (!result_saved) __ push(eax); break; + case Expression::kTest: + if (result_saved) __ pop(eax); + TestAndBranch(eax, true_label_, false_label_); + break; + case Expression::kValueTest: { + Label discard; + if (!result_saved) __ push(eax); + TestAndBranch(eax, true_label_, &discard); + __ bind(&discard); + __ add(Operand(esp), Immediate(kPointerSize)); + __ jmp(false_label_); + break; + } + case Expression::kTestValue: { + Label discard; + if (!result_saved) __ push(eax); + TestAndBranch(eax, &discard, false_label_); + __ bind(&discard); + __ add(Operand(esp), Immediate(kPointerSize)); + __ jmp(true_label_); + break; + } } } @@ -412,7 +712,7 @@ void FastCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) { result_saved = true; } Visit(subexpr); - ASSERT(subexpr->location().is_value()); + ASSERT_EQ(Expression::kValue, subexpr->context()); // Store the subexpression value in the array's elements. __ pop(eax); // Subexpression value. 
@@ -425,80 +725,218 @@ void FastCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) { __ RecordWrite(ebx, offset, eax, ecx); } - switch (expr->location().type()) { - case Location::kUninitialized: + switch (expr->context()) { + case Expression::kUninitialized: UNREACHABLE(); - case Location::kEffect: + case Expression::kEffect: if (result_saved) __ add(Operand(esp), Immediate(kPointerSize)); break; - case Location::kValue: + case Expression::kValue: + if (!result_saved) __ push(eax); + break; + case Expression::kTest: + if (result_saved) __ pop(eax); + TestAndBranch(eax, true_label_, false_label_); + break; + case Expression::kValueTest: { + Label discard; if (!result_saved) __ push(eax); + TestAndBranch(eax, true_label_, &discard); + __ bind(&discard); + __ add(Operand(esp), Immediate(kPointerSize)); + __ jmp(false_label_); + break; + } + case Expression::kTestValue: { + Label discard; + if (!result_saved) __ push(eax); + TestAndBranch(eax, &discard, false_label_); + __ bind(&discard); + __ add(Operand(esp), Immediate(kPointerSize)); + __ jmp(true_label_); break; + } } } -void FastCodeGenerator::VisitAssignment(Assignment* expr) { - Comment cmnt(masm_, "[ Assignment"); - ASSERT(expr->op() == Token::ASSIGN || expr->op() == Token::INIT_VAR); - - // Left-hand side can only be a global or a (parameter or local) slot. +void FastCodeGenerator::EmitVariableAssignment(Assignment* expr) { Variable* var = expr->target()->AsVariableProxy()->AsVariable(); ASSERT(var != NULL); - ASSERT(var->is_global() || var->slot() != NULL); - Expression* rhs = expr->value(); if (var->is_global()) { - // Assignment to a global variable, use inline caching. Right-hand-side - // value is passed in eax, variable name in ecx, and the global object - // on the stack. - - // Code for the right-hand-side expression depends on its type. 
- if (rhs->AsLiteral() != NULL) { - __ mov(eax, rhs->AsLiteral()->handle()); - } else { - ASSERT(rhs->location().is_value()); - Visit(rhs); - __ pop(eax); - } + // Assignment to a global variable. Use inline caching for the + // assignment. Right-hand-side value is passed in eax, variable name in + // ecx, and the global object on the stack. + __ pop(eax); __ mov(ecx, var->name()); __ push(CodeGenerator::GlobalObject()); Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize)); __ call(ic, RelocInfo::CODE_TARGET); - // Overwrite the global object on the stack with the result if needed. - DropAndMove(expr->location(), eax); + // Overwrite the receiver on the stack with the result if needed. + DropAndMove(expr->context(), eax); + } else { - // Local or parameter assignment. - - // Code for the right-hand side expression depends on its type. - if (rhs->AsLiteral() != NULL) { - // Two cases: 'temp <- (var = constant)', or 'var = constant' with a - // discarded result. Always perform the assignment. - __ mov(eax, rhs->AsLiteral()->handle()); - __ mov(Operand(ebp, SlotOffset(var->slot())), eax); - Move(expr->location(), eax); - } else { - ASSERT(rhs->location().is_value()); - Visit(rhs); - switch (expr->location().type()) { - case Location::kUninitialized: - UNREACHABLE(); - case Location::kEffect: - // Case 'var = temp'. Discard right-hand-side temporary. - Move(var->slot(), rhs->location()); - break; - case Location::kValue: - // Case 'temp1 <- (var = temp0)'. Preserve right-hand-side - // temporary on the stack. - __ mov(eax, Operand(esp, 0)); - __ mov(Operand(ebp, SlotOffset(var->slot())), eax); - break; + Slot* slot = var->slot(); + ASSERT_NOT_NULL(slot); // Variables rewritten as properties not handled. + switch (slot->type()) { + case Slot::LOCAL: + case Slot::PARAMETER: { + switch (expr->context()) { + case Expression::kUninitialized: + UNREACHABLE(); + case Expression::kEffect: + // Perform assignment and discard value. 
+ __ pop(Operand(ebp, SlotOffset(var->slot()))); + break; + case Expression::kValue: + // Perform assignment and preserve value. + __ mov(eax, Operand(esp, 0)); + __ mov(Operand(ebp, SlotOffset(var->slot())), eax); + break; + case Expression::kTest: + // Perform assignment and test (and discard) value. + __ pop(eax); + __ mov(Operand(ebp, SlotOffset(var->slot())), eax); + TestAndBranch(eax, true_label_, false_label_); + break; + case Expression::kValueTest: { + Label discard; + __ mov(eax, Operand(esp, 0)); + __ mov(Operand(ebp, SlotOffset(var->slot())), eax); + TestAndBranch(eax, true_label_, &discard); + __ bind(&discard); + __ add(Operand(esp), Immediate(kPointerSize)); + __ jmp(false_label_); + break; + } + case Expression::kTestValue: { + Label discard; + __ mov(eax, Operand(esp, 0)); + __ mov(Operand(ebp, SlotOffset(var->slot())), eax); + TestAndBranch(eax, &discard, false_label_); + __ bind(&discard); + __ add(Operand(esp), Immediate(kPointerSize)); + __ jmp(true_label_); + break; + } + } + break; + } + + case Slot::CONTEXT: { + int chain_length = + function_->scope()->ContextChainLength(slot->var()->scope()); + if (chain_length > 0) { + // Move up the context chain to the context containing the slot. + __ mov(eax, + Operand(esi, Context::SlotOffset(Context::CLOSURE_INDEX))); + // Load the function context (which is the incoming, outer context). + __ mov(eax, FieldOperand(eax, JSFunction::kContextOffset)); + for (int i = 1; i < chain_length; i++) { + __ mov(eax, + Operand(eax, Context::SlotOffset(Context::CLOSURE_INDEX))); + __ mov(eax, FieldOperand(eax, JSFunction::kContextOffset)); + } + } else { // Slot is in the current context. Generate optimized code. + __ mov(eax, esi); // RecordWrite destroys the object register. 
+ } + if (FLAG_debug_code) { + __ cmp(eax, + Operand(eax, Context::SlotOffset(Context::FCONTEXT_INDEX))); + __ Check(equal, "Context Slot chain length wrong."); + } + __ pop(ecx); + __ mov(Operand(eax, Context::SlotOffset(slot->index())), ecx); + + // RecordWrite may destroy all its register arguments. + if (expr->context() == Expression::kValue) { + __ push(ecx); + } else if (expr->context() != Expression::kEffect) { + __ mov(edx, ecx); + } + int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize; + __ RecordWrite(eax, offset, ecx, ebx); + if (expr->context() != Expression::kEffect && + expr->context() != Expression::kValue) { + Move(expr->context(), edx); + } + break; } + + case Slot::LOOKUP: + UNREACHABLE(); + break; } } } +void FastCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) { + // Assignment to a property, using a named store IC. + Property* prop = expr->target()->AsProperty(); + ASSERT(prop != NULL); + ASSERT(prop->key()->AsLiteral() != NULL); + + // If the assignment starts a block of assignments to the same object, + // change to slow case to avoid the quadratic behavior of repeatedly + // adding fast properties. + if (expr->starts_initialization_block()) { + __ push(Operand(esp, kPointerSize)); // Receiver is under value. + __ CallRuntime(Runtime::kToSlowProperties, 1); + } + + __ pop(eax); + __ mov(ecx, prop->key()->AsLiteral()->handle()); + Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize)); + __ call(ic, RelocInfo::CODE_TARGET); + + // If the assignment ends an initialization block, revert to fast case. + if (expr->ends_initialization_block()) { + __ push(eax); // Result of assignment, saved even if not needed. + __ push(Operand(esp, kPointerSize)); // Receiver is under value. 
+ __ CallRuntime(Runtime::kToFastProperties, 1); + __ pop(eax); + } + + DropAndMove(expr->context(), eax); +} + + +void FastCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) { + // Assignment to a property, using a keyed store IC. + + // If the assignment starts a block of assignments to the same object, + // change to slow case to avoid the quadratic behavior of repeatedly + // adding fast properties. + if (expr->starts_initialization_block()) { + // Reciever is under the key and value. + __ push(Operand(esp, 2 * kPointerSize)); + __ CallRuntime(Runtime::kToSlowProperties, 1); + } + + __ pop(eax); + Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize)); + __ call(ic, RelocInfo::CODE_TARGET); + // This nop signals to the IC that there is no inlined code at the call + // site for it to patch. + __ nop(); + + // If the assignment ends an initialization block, revert to fast case. + if (expr->ends_initialization_block()) { + __ push(eax); // Result of assignment, saved even if not needed. + // Reciever is under the key and value. + __ push(Operand(esp, 2 * kPointerSize)); + __ CallRuntime(Runtime::kToFastProperties, 1); + __ pop(eax); + } + + // Receiver and key are still on stack. + __ add(Operand(esp), Immediate(2 * kPointerSize)); + Move(expr->context(), eax); +} + + void FastCodeGenerator::VisitProperty(Property* expr) { Comment cmnt(masm_, "[ Property"); Expression* key = expr->key(); @@ -531,72 +969,147 @@ void FastCodeGenerator::VisitProperty(Property* expr) { // Drop key left on the stack by IC. 
__ add(Operand(esp), Immediate(kPointerSize)); } - switch (expr->location().type()) { - case Location::kUninitialized: - UNREACHABLE(); - case Location::kValue: - __ mov(Operand(esp, 0), eax); - break; - case Location::kEffect: - __ add(Operand(esp), Immediate(kPointerSize)); - break; - } + DropAndMove(expr->context(), eax); } -void FastCodeGenerator::VisitCall(Call* expr) { - Expression* fun = expr->expression(); +void FastCodeGenerator::EmitCallWithIC(Call* expr, RelocInfo::Mode reloc_info) { + // Code common for calls using the IC. ZoneList<Expression*>* args = expr->arguments(); - Variable* var = fun->AsVariableProxy()->AsVariable(); - ASSERT(var != NULL && !var->is_this() && var->is_global()); - ASSERT(!var->is_possibly_eval()); - - __ push(Immediate(var->name())); - // Push global object (receiver). - __ push(CodeGenerator::GlobalObject()); int arg_count = args->length(); for (int i = 0; i < arg_count; i++) { Visit(args->at(i)); - ASSERT(args->at(i)->location().is_value()); + ASSERT_EQ(Expression::kValue, args->at(i)->context()); } - // Record source position for debugger + // Record source position for debugger. SetSourcePosition(expr->position()); // Call the IC initialization code. Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count, NOT_IN_LOOP); - __ call(ic, RelocInfo::CODE_TARGET_CONTEXT); + __ call(ic, reloc_info); + // Restore context register. + __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); + // Discard the function left on TOS. + DropAndMove(expr->context(), eax); +} + + +void FastCodeGenerator::EmitCallWithStub(Call* expr) { + // Code common for calls using the call stub. + ZoneList<Expression*>* args = expr->arguments(); + int arg_count = args->length(); + for (int i = 0; i < arg_count; i++) { + Visit(args->at(i)); + } + // Record source position for debugger. + SetSourcePosition(expr->position()); + CallFunctionStub stub(arg_count, NOT_IN_LOOP); + __ CallStub(&stub); // Restore context register. 
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); // Discard the function left on TOS. - DropAndMove(expr->location(), eax); + DropAndMove(expr->context(), eax); +} + + +void FastCodeGenerator::VisitCall(Call* expr) { + Comment cmnt(masm_, "[ Call"); + Expression* fun = expr->expression(); + Variable* var = fun->AsVariableProxy()->AsVariable(); + + if (var != NULL && var->is_possibly_eval()) { + // Call to the identifier 'eval'. + UNREACHABLE(); + } else if (var != NULL && !var->is_this() && var->is_global()) { + // Call to a global variable. + __ push(Immediate(var->name())); + // Push global object as receiver for the call IC lookup. + __ push(CodeGenerator::GlobalObject()); + EmitCallWithIC(expr, RelocInfo::CODE_TARGET_CONTEXT); + } else if (var != NULL && var->slot() != NULL && + var->slot()->type() == Slot::LOOKUP) { + // Call to a lookup slot. + UNREACHABLE(); + } else if (fun->AsProperty() != NULL) { + // Call to an object property. + Property* prop = fun->AsProperty(); + Literal* key = prop->key()->AsLiteral(); + if (key != NULL && key->handle()->IsSymbol()) { + // Call to a named property, use call IC. + __ push(Immediate(key->handle())); + Visit(prop->obj()); + EmitCallWithIC(expr, RelocInfo::CODE_TARGET); + } else { + // Call to a keyed property, use keyed load IC followed by function + // call. + Visit(prop->obj()); + Visit(prop->key()); + // Record source code position for IC call. + SetSourcePosition(prop->position()); + Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize)); + __ call(ic, RelocInfo::CODE_TARGET); + // By emitting a nop we make sure that we do not have a "test eax,..." + // instruction after the call it is treated specially by the LoadIC code. + __ nop(); + // Drop key left on the stack by IC. + __ add(Operand(esp), Immediate(kPointerSize)); + // Pop receiver. + __ pop(ebx); + // Push result (function). + __ push(eax); + // Push receiver object on stack. 
+ if (prop->is_synthetic()) { + __ push(CodeGenerator::GlobalObject()); + } else { + __ push(ebx); + } + EmitCallWithStub(expr); + } + } else { + // Call to some other expression. If the expression is an anonymous + // function literal not called in a loop, mark it as one that should + // also use the fast code generator. + FunctionLiteral* lit = fun->AsFunctionLiteral(); + if (lit != NULL && + lit->name()->Equals(Heap::empty_string()) && + loop_depth() == 0) { + lit->set_try_fast_codegen(true); + } + Visit(fun); + // Load global receiver object. + __ mov(ebx, CodeGenerator::GlobalObject()); + __ push(FieldOperand(ebx, GlobalObject::kGlobalReceiverOffset)); + // Emit function call. + EmitCallWithStub(expr); + } } -void FastCodeGenerator::VisitCallNew(CallNew* node) { +void FastCodeGenerator::VisitCallNew(CallNew* expr) { Comment cmnt(masm_, "[ CallNew"); // According to ECMA-262, section 11.2.2, page 44, the function // expression in new calls must be evaluated before the // arguments. // Push function on the stack. - Visit(node->expression()); - ASSERT(node->expression()->location().is_value()); + Visit(expr->expression()); + ASSERT_EQ(Expression::kValue, expr->expression()->context()); // Push global object (receiver). __ push(CodeGenerator::GlobalObject()); // Push the arguments ("left-to-right") on the stack. - ZoneList<Expression*>* args = node->arguments(); + ZoneList<Expression*>* args = expr->arguments(); int arg_count = args->length(); for (int i = 0; i < arg_count; i++) { Visit(args->at(i)); - ASSERT(args->at(i)->location().is_value()); + ASSERT_EQ(Expression::kValue, args->at(i)->context()); // If location is value, it is already on the stack, // so nothing to do here. } // Call the construct call builtin that handles allocation and // constructor invocation. - SetSourcePosition(node->position()); + SetSourcePosition(expr->position()); // Load function, arg_count into edi and eax. 
__ Set(eax, Immediate(arg_count)); @@ -607,7 +1120,7 @@ void FastCodeGenerator::VisitCallNew(CallNew* node) { __ call(construct_builtin, RelocInfo::CONSTRUCT_CALL); // Replace function on TOS with result in eax, or pop it. - DropAndMove(node->location(), eax); + DropAndMove(expr->context(), eax); } @@ -622,19 +1135,221 @@ void FastCodeGenerator::VisitCallRuntime(CallRuntime* expr) { int arg_count = args->length(); for (int i = 0; i < arg_count; i++) { Visit(args->at(i)); - ASSERT(args->at(i)->location().is_value()); + ASSERT_EQ(Expression::kValue, args->at(i)->context()); } __ CallRuntime(function, arg_count); - Move(expr->location(), eax); + Move(expr->context(), eax); +} + + +void FastCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) { + switch (expr->op()) { + case Token::VOID: { + Comment cmnt(masm_, "[ UnaryOperation (VOID)"); + Visit(expr->expression()); + ASSERT_EQ(Expression::kEffect, expr->expression()->context()); + switch (expr->context()) { + case Expression::kUninitialized: + UNREACHABLE(); + break; + case Expression::kEffect: + break; + case Expression::kValue: + __ push(Immediate(Factory::undefined_value())); + break; + case Expression::kTestValue: + // Value is false so it's needed. + __ push(Immediate(Factory::undefined_value())); + // Fall through. + case Expression::kTest: // Fall through. 
+ case Expression::kValueTest: + __ jmp(false_label_); + break; + } + break; + } + + case Token::NOT: { + Comment cmnt(masm_, "[ UnaryOperation (NOT)"); + ASSERT_EQ(Expression::kTest, expr->expression()->context()); + + Label push_true; + Label push_false; + Label done; + Label* saved_true = true_label_; + Label* saved_false = false_label_; + switch (expr->context()) { + case Expression::kUninitialized: + UNREACHABLE(); + break; + + case Expression::kValue: + true_label_ = &push_false; + false_label_ = &push_true; + Visit(expr->expression()); + __ bind(&push_true); + __ push(Immediate(Factory::true_value())); + __ jmp(&done); + __ bind(&push_false); + __ push(Immediate(Factory::false_value())); + __ bind(&done); + break; + + case Expression::kEffect: + true_label_ = &done; + false_label_ = &done; + Visit(expr->expression()); + __ bind(&done); + break; + + case Expression::kTest: + true_label_ = saved_false; + false_label_ = saved_true; + Visit(expr->expression()); + break; + + case Expression::kValueTest: + true_label_ = saved_false; + false_label_ = &push_true; + Visit(expr->expression()); + __ bind(&push_true); + __ push(Immediate(Factory::true_value())); + __ jmp(saved_true); + break; + + case Expression::kTestValue: + true_label_ = &push_false; + false_label_ = saved_true; + Visit(expr->expression()); + __ bind(&push_false); + __ push(Immediate(Factory::false_value())); + __ jmp(saved_false); + break; + } + true_label_ = saved_true; + false_label_ = saved_false; + break; + } + + case Token::TYPEOF: { + Comment cmnt(masm_, "[ UnaryOperation (TYPEOF)"); + ASSERT_EQ(Expression::kValue, expr->expression()->context()); + + VariableProxy* proxy = expr->expression()->AsVariableProxy(); + if (proxy != NULL && + !proxy->var()->is_this() && + proxy->var()->is_global()) { + Comment cmnt(masm_, "Global variable"); + __ push(CodeGenerator::GlobalObject()); + __ mov(ecx, Immediate(proxy->name())); + Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize)); + // Use a 
regular load, not a contextual load, to avoid a reference + // error. + __ call(ic, RelocInfo::CODE_TARGET); + __ mov(Operand(esp, 0), eax); + } else if (proxy != NULL && + proxy->var()->slot() != NULL && + proxy->var()->slot()->type() == Slot::LOOKUP) { + __ push(esi); + __ push(Immediate(proxy->name())); + __ CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2); + __ push(eax); + } else { + // This expression cannot throw a reference error at the top level. + Visit(expr->expression()); + } + + __ CallRuntime(Runtime::kTypeof, 1); + Move(expr->context(), eax); + break; + } + + default: + UNREACHABLE(); + } +} + + +void FastCodeGenerator::VisitCountOperation(CountOperation* expr) { + Comment cmnt(masm_, "[ CountOperation"); + VariableProxy* proxy = expr->expression()->AsVariableProxy(); + ASSERT(proxy->AsVariable() != NULL); + ASSERT(proxy->AsVariable()->is_global()); + + Visit(proxy); + __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION); + + switch (expr->context()) { + case Expression::kUninitialized: + UNREACHABLE(); + case Expression::kValue: // Fall through + case Expression::kTest: // Fall through + case Expression::kTestValue: // Fall through + case Expression::kValueTest: + // Duplicate the result on the stack. + __ push(eax); + break; + case Expression::kEffect: + // Do not save result. + break; + } + // Call runtime for +1/-1. + __ push(eax); + __ push(Immediate(Smi::FromInt(1))); + if (expr->op() == Token::INC) { + __ CallRuntime(Runtime::kNumberAdd, 2); + } else { + __ CallRuntime(Runtime::kNumberSub, 2); + } + // Call Store IC. + __ mov(ecx, proxy->AsVariable()->name()); + __ push(CodeGenerator::GlobalObject()); + Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize)); + __ call(ic, RelocInfo::CODE_TARGET); + // Restore up stack after store IC. 
+ __ add(Operand(esp), Immediate(kPointerSize)); + + switch (expr->context()) { + case Expression::kUninitialized: + UNREACHABLE(); + case Expression::kEffect: // Fall through + case Expression::kValue: + // Do nothing. Result in either on the stack for value context + // or discarded for effect context. + break; + case Expression::kTest: + __ pop(eax); + TestAndBranch(eax, true_label_, false_label_); + break; + case Expression::kValueTest: { + Label discard; + __ mov(eax, Operand(esp, 0)); + TestAndBranch(eax, true_label_, &discard); + __ bind(&discard); + __ add(Operand(esp), Immediate(kPointerSize)); + __ jmp(false_label_); + break; + } + case Expression::kTestValue: { + Label discard; + __ mov(eax, Operand(esp, 0)); + TestAndBranch(eax, &discard, false_label_); + __ bind(&discard); + __ add(Operand(esp), Immediate(kPointerSize)); + __ jmp(true_label_); + break; + } + } } void FastCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) { + Comment cmnt(masm_, "[ BinaryOperation"); switch (expr->op()) { case Token::COMMA: - ASSERT(expr->left()->location().is_effect()); - ASSERT_EQ(expr->right()->location().type(), expr->location().type()); + ASSERT_EQ(Expression::kEffect, expr->left()->context()); + ASSERT_EQ(expr->context(), expr->right()->context()); Visit(expr->left()); Visit(expr->right()); break; @@ -655,8 +1370,8 @@ void FastCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) { case Token::SHL: case Token::SHR: case Token::SAR: { - ASSERT(expr->left()->location().is_value()); - ASSERT(expr->right()->location().is_value()); + ASSERT_EQ(Expression::kValue, expr->left()->context()); + ASSERT_EQ(Expression::kValue, expr->right()->context()); Visit(expr->left()); Visit(expr->right()); @@ -664,7 +1379,7 @@ void FastCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) { NO_OVERWRITE, NO_GENERIC_BINARY_FLAGS); __ CallStub(&stub); - Move(expr->location(), eax); + Move(expr->context(), eax); break; } @@ -674,90 +1389,166 @@ void 
FastCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) { } -void FastCodeGenerator::EmitLogicalOperation(BinaryOperation* expr) { - // Compile a short-circuited boolean operation in a non-test context. +void FastCodeGenerator::VisitCompareOperation(CompareOperation* expr) { + Comment cmnt(masm_, "[ CompareOperation"); + ASSERT_EQ(Expression::kValue, expr->left()->context()); + ASSERT_EQ(Expression::kValue, expr->right()->context()); + Visit(expr->left()); + Visit(expr->right()); + + // Convert current context to test context: Pre-test code. + Label push_true; + Label push_false; + Label done; + Label* saved_true = true_label_; + Label* saved_false = false_label_; + switch (expr->context()) { + case Expression::kUninitialized: + UNREACHABLE(); + break; - // Compile (e0 || e1) or (e0 && e1) as if it were - // (let (temp = e0) temp [or !temp, for &&] ? temp : e1). + case Expression::kValue: + true_label_ = &push_true; + false_label_ = &push_false; + break; - Label eval_right, done; - Label *left_true, *left_false; // Where to branch to if lhs has that value. - if (expr->op() == Token::OR) { - left_true = &done; - left_false = &eval_right; - } else { - left_true = &eval_right; - left_false = &done; - } - Location destination = expr->location(); - Expression* left = expr->left(); - Expression* right = expr->right(); - - // Use the shared ToBoolean stub to find the boolean value of the - // left-hand subexpression. Load the value into eax to perform some - // inlined checks assumed by the stub. - - // Compile the left-hand value into eax. Put it on the stack if we may - // need it as the value of the whole expression. 
- if (left->AsLiteral() != NULL) { - __ mov(eax, left->AsLiteral()->handle()); - if (destination.is_value()) __ push(eax); - } else { - Visit(left); - ASSERT(left->location().is_value()); - switch (destination.type()) { - case Location::kUninitialized: - UNREACHABLE(); - case Location::kEffect: - // Pop the left-hand value into eax because we will not need it as the - // final result. - __ pop(eax); - break; - case Location::kValue: - // Copy the left-hand value into eax because we may need it as the - // final result. - __ mov(eax, Operand(esp, 0)); - break; - } - } - // The left-hand value is in eax. It is also on the stack iff the - // destination location is value. - - // Perform fast checks assumed by the stub. - __ cmp(eax, Factory::undefined_value()); // The undefined value is false. - __ j(equal, left_false); - __ cmp(eax, Factory::true_value()); // True is true. - __ j(equal, left_true); - __ cmp(eax, Factory::false_value()); // False is false. - __ j(equal, left_false); - ASSERT(kSmiTag == 0); - __ test(eax, Operand(eax)); // The smi zero is false. - __ j(zero, left_false); - __ test(eax, Immediate(kSmiTagMask)); // All other smis are true. - __ j(zero, left_true); + case Expression::kEffect: + true_label_ = &done; + false_label_ = &done; + break; - // Call the stub for all other cases. - __ push(eax); - ToBooleanStub stub; - __ CallStub(&stub); - __ test(eax, Operand(eax)); // The stub returns nonzero for true. - if (expr->op() == Token::OR) { - __ j(not_zero, &done); - } else { - __ j(zero, &done); + case Expression::kTest: + break; + + case Expression::kValueTest: + true_label_ = &push_true; + break; + + case Expression::kTestValue: + false_label_ = &push_false; + break; } + // Convert current context to test context: End pre-test code. - __ bind(&eval_right); - // Discard the left-hand value if present on the stack. 
- if (destination.is_value()) { - __ add(Operand(esp), Immediate(kPointerSize)); + switch (expr->op()) { + case Token::IN: { + __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION); + __ cmp(eax, Factory::true_value()); + __ j(equal, true_label_); + __ jmp(false_label_); + break; + } + + case Token::INSTANCEOF: { + InstanceofStub stub; + __ CallStub(&stub); + __ test(eax, Operand(eax)); + __ j(zero, true_label_); // The stub returns 0 for true. + __ jmp(false_label_); + break; + } + + default: { + Condition cc = no_condition; + bool strict = false; + switch (expr->op()) { + case Token::EQ_STRICT: + strict = true; + // Fall through + case Token::EQ: + cc = equal; + __ pop(eax); + __ pop(edx); + break; + case Token::LT: + cc = less; + __ pop(eax); + __ pop(edx); + break; + case Token::GT: + // Reverse left and right sizes to obtain ECMA-262 conversion order. + cc = less; + __ pop(edx); + __ pop(eax); + break; + case Token::LTE: + // Reverse left and right sizes to obtain ECMA-262 conversion order. + cc = greater_equal; + __ pop(edx); + __ pop(eax); + break; + case Token::GTE: + cc = greater_equal; + __ pop(eax); + __ pop(edx); + break; + case Token::IN: + case Token::INSTANCEOF: + default: + UNREACHABLE(); + } + + // The comparison stub expects the smi vs. smi case to be handled + // before it is called. + Label slow_case; + __ mov(ecx, Operand(edx)); + __ or_(ecx, Operand(eax)); + __ test(ecx, Immediate(kSmiTagMask)); + __ j(not_zero, &slow_case, not_taken); + __ cmp(edx, Operand(eax)); + __ j(cc, true_label_); + __ jmp(false_label_); + + __ bind(&slow_case); + CompareStub stub(cc, strict); + __ CallStub(&stub); + __ test(eax, Operand(eax)); + __ j(cc, true_label_); + __ jmp(false_label_); + } } - // Save or discard the right-hand value as needed. - Visit(right); - ASSERT_EQ(destination.type(), right->location().type()); - __ bind(&done); + // Convert current context to test context: Post-test code. 
+ switch (expr->context()) { + case Expression::kUninitialized: + UNREACHABLE(); + break; + + case Expression::kValue: + __ bind(&push_true); + __ push(Immediate(Factory::true_value())); + __ jmp(&done); + __ bind(&push_false); + __ push(Immediate(Factory::false_value())); + __ bind(&done); + break; + + case Expression::kEffect: + __ bind(&done); + break; + + case Expression::kTest: + break; + + case Expression::kValueTest: + __ bind(&push_true); + __ push(Immediate(Factory::true_value())); + __ jmp(saved_true); + break; + + case Expression::kTestValue: + __ bind(&push_false); + __ push(Immediate(Factory::false_value())); + __ jmp(saved_false); + break; + } + true_label_ = saved_true; + false_label_ = saved_false; + // Convert current context to test context: End post-test code. } +#undef __ + + } } // namespace v8::internal diff --git a/deps/v8/src/ia32/frames-ia32.cc b/deps/v8/src/ia32/frames-ia32.cc index dea439f24b..5c900bedd7 100644 --- a/deps/v8/src/ia32/frames-ia32.cc +++ b/deps/v8/src/ia32/frames-ia32.cc @@ -56,19 +56,14 @@ StackFrame::Type ExitFrame::GetStateForFramePointer(Address fp, State* state) { state->fp = fp; state->sp = sp; state->pc_address = reinterpret_cast<Address*>(sp - 1 * kPointerSize); - // Determine frame type. - if (Memory::Address_at(fp + ExitFrameConstants::kDebugMarkOffset) != 0) { - return EXIT_DEBUG; - } else { - return EXIT; - } + return EXIT; } void ExitFrame::Iterate(ObjectVisitor* v) const { - // Exit frames on IA-32 do not contain any pointers. The arguments - // are traversed as part of the expression stack of the calling - // frame. + v->VisitPointer(&code_slot()); + // The arguments are traversed as part of the expression stack of + // the calling frame. 
} diff --git a/deps/v8/src/ia32/frames-ia32.h b/deps/v8/src/ia32/frames-ia32.h index 3a7c86bf73..c3fe6c748d 100644 --- a/deps/v8/src/ia32/frames-ia32.h +++ b/deps/v8/src/ia32/frames-ia32.h @@ -76,7 +76,7 @@ class EntryFrameConstants : public AllStatic { class ExitFrameConstants : public AllStatic { public: - static const int kDebugMarkOffset = -2 * kPointerSize; + static const int kCodeOffset = -2 * kPointerSize; static const int kSPOffset = -1 * kPointerSize; static const int kCallerFPOffset = 0 * kPointerSize; diff --git a/deps/v8/src/ia32/macro-assembler-ia32.cc b/deps/v8/src/ia32/macro-assembler-ia32.cc index 08c4c0c51b..010433e163 100644 --- a/deps/v8/src/ia32/macro-assembler-ia32.cc +++ b/deps/v8/src/ia32/macro-assembler-ia32.cc @@ -319,7 +319,7 @@ void MacroAssembler::CmpInstanceType(Register map, InstanceType type) { void MacroAssembler::FCmp() { - if (CpuFeatures::IsSupported(CpuFeatures::CMOV)) { + if (CpuFeatures::IsSupported(CMOV)) { fucomip(); ffree(0); fincstp(); @@ -355,10 +355,7 @@ void MacroAssembler::LeaveFrame(StackFrame::Type type) { leave(); } - -void MacroAssembler::EnterExitFrame(StackFrame::Type type) { - ASSERT(type == StackFrame::EXIT || type == StackFrame::EXIT_DEBUG); - +void MacroAssembler::EnterExitFramePrologue(ExitFrame::Mode mode) { // Setup the frame structure on the stack. ASSERT(ExitFrameConstants::kCallerSPDisplacement == +2 * kPointerSize); ASSERT(ExitFrameConstants::kCallerPCOffset == +1 * kPointerSize); @@ -369,23 +366,24 @@ void MacroAssembler::EnterExitFrame(StackFrame::Type type) { // Reserve room for entry stack pointer and push the debug marker. ASSERT(ExitFrameConstants::kSPOffset == -1 * kPointerSize); push(Immediate(0)); // saved entry sp, patched before call - push(Immediate(type == StackFrame::EXIT_DEBUG ? 1 : 0)); + if (mode == ExitFrame::MODE_DEBUG) { + push(Immediate(0)); + } else { + push(Immediate(CodeObject())); + } // Save the frame pointer and the context in top. 
ExternalReference c_entry_fp_address(Top::k_c_entry_fp_address); ExternalReference context_address(Top::k_context_address); mov(Operand::StaticVariable(c_entry_fp_address), ebp); mov(Operand::StaticVariable(context_address), esi); +} - // Setup argc and argv in callee-saved registers. - int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize; - mov(edi, Operand(eax)); - lea(esi, Operand(ebp, eax, times_4, offset)); - +void MacroAssembler::EnterExitFrameEpilogue(ExitFrame::Mode mode, int argc) { #ifdef ENABLE_DEBUGGER_SUPPORT // Save the state of all registers to the stack from the memory // location. This is needed to allow nested break points. - if (type == StackFrame::EXIT_DEBUG) { + if (mode == ExitFrame::MODE_DEBUG) { // TODO(1243899): This should be symmetric to // CopyRegistersFromStackToMemory() but it isn't! esp is assumed // correct here, but computed for the other call. Very error @@ -396,8 +394,8 @@ void MacroAssembler::EnterExitFrame(StackFrame::Type type) { } #endif - // Reserve space for two arguments: argc and argv. - sub(Operand(esp), Immediate(2 * kPointerSize)); + // Reserve space for arguments. + sub(Operand(esp), Immediate(argc * kPointerSize)); // Get the required frame alignment for the OS. static const int kFrameAlignment = OS::ActivationFrameAlignment(); @@ -411,15 +409,39 @@ void MacroAssembler::EnterExitFrame(StackFrame::Type type) { } -void MacroAssembler::LeaveExitFrame(StackFrame::Type type) { +void MacroAssembler::EnterExitFrame(ExitFrame::Mode mode) { + EnterExitFramePrologue(mode); + + // Setup argc and argv in callee-saved registers. 
+ int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize; + mov(edi, Operand(eax)); + lea(esi, Operand(ebp, eax, times_4, offset)); + + EnterExitFrameEpilogue(mode, 2); +} + + +void MacroAssembler::EnterApiExitFrame(ExitFrame::Mode mode, + int stack_space, + int argc) { + EnterExitFramePrologue(mode); + + int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize; + lea(esi, Operand(ebp, (stack_space * kPointerSize) + offset)); + + EnterExitFrameEpilogue(mode, argc); +} + + +void MacroAssembler::LeaveExitFrame(ExitFrame::Mode mode) { #ifdef ENABLE_DEBUGGER_SUPPORT // Restore the memory copy of the registers by digging them out from // the stack. This is needed to allow nested break points. - if (type == StackFrame::EXIT_DEBUG) { + if (mode == ExitFrame::MODE_DEBUG) { // It's okay to clobber register ebx below because we don't need // the function pointer after this. const int kCallerSavedSize = kNumJSCallerSaved * kPointerSize; - int kOffset = ExitFrameConstants::kDebugMarkOffset - kCallerSavedSize; + int kOffset = ExitFrameConstants::kCodeOffset - kCallerSavedSize; lea(ebx, Operand(ebp, kOffset)); CopyRegistersFromStackToMemory(ebx, ecx, kJSCallerSaved); } @@ -931,6 +953,52 @@ void MacroAssembler::TailCallRuntime(const ExternalReference& ext, } +void MacroAssembler::PushHandleScope(Register scratch) { + // Push the number of extensions, smi-tagged so the gc will ignore it. + ExternalReference extensions_address = + ExternalReference::handle_scope_extensions_address(); + mov(scratch, Operand::StaticVariable(extensions_address)); + ASSERT_EQ(0, kSmiTag); + shl(scratch, kSmiTagSize); + push(scratch); + mov(Operand::StaticVariable(extensions_address), Immediate(0)); + // Push next and limit pointers which will be wordsize aligned and + // hence automatically smi tagged. 
+ ExternalReference next_address = + ExternalReference::handle_scope_next_address(); + push(Operand::StaticVariable(next_address)); + ExternalReference limit_address = + ExternalReference::handle_scope_limit_address(); + push(Operand::StaticVariable(limit_address)); +} + + +void MacroAssembler::PopHandleScope(Register saved, Register scratch) { + ExternalReference extensions_address = + ExternalReference::handle_scope_extensions_address(); + Label write_back; + mov(scratch, Operand::StaticVariable(extensions_address)); + cmp(Operand(scratch), Immediate(0)); + j(equal, &write_back); + // Calling a runtime function messes with registers so we save and + // restore any one we're asked not to change + if (saved.is_valid()) push(saved); + CallRuntime(Runtime::kDeleteHandleScopeExtensions, 0); + if (saved.is_valid()) pop(saved); + + bind(&write_back); + ExternalReference limit_address = + ExternalReference::handle_scope_limit_address(); + pop(Operand::StaticVariable(limit_address)); + ExternalReference next_address = + ExternalReference::handle_scope_next_address(); + pop(Operand::StaticVariable(next_address)); + pop(scratch); + shr(scratch, kSmiTagSize); + mov(Operand::StaticVariable(extensions_address), scratch); +} + + void MacroAssembler::JumpToRuntime(const ExternalReference& ext) { // Set the entry point and jump to the C entry runtime stub. mov(ebx, Immediate(ext)); diff --git a/deps/v8/src/ia32/macro-assembler-ia32.h b/deps/v8/src/ia32/macro-assembler-ia32.h index a0a242806b..248aa7776e 100644 --- a/deps/v8/src/ia32/macro-assembler-ia32.h +++ b/deps/v8/src/ia32/macro-assembler-ia32.h @@ -77,16 +77,18 @@ class MacroAssembler: public Assembler { void EnterConstructFrame() { EnterFrame(StackFrame::CONSTRUCT); } void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); } - // Enter specific kind of exit frame; either EXIT or - // EXIT_DEBUG. 
Expects the number of arguments in register eax and + // Enter specific kind of exit frame; either in normal or debug mode. + // Expects the number of arguments in register eax and // sets up the number of arguments in register edi and the pointer // to the first argument in register esi. - void EnterExitFrame(StackFrame::Type type); + void EnterExitFrame(ExitFrame::Mode mode); + + void EnterApiExitFrame(ExitFrame::Mode mode, int stack_space, int argc); // Leave the current exit frame. Expects the return value in // register eax:edx (untouched) and the pointer to the first // argument in register esi. - void LeaveExitFrame(StackFrame::Type type); + void LeaveExitFrame(ExitFrame::Mode mode); // --------------------------------------------------------------------------- @@ -269,6 +271,12 @@ class MacroAssembler: public Assembler { int num_arguments, int result_size); + void PushHandleScope(Register scratch); + + // Pops a handle scope using the specified scratch register and + // ensuring that saved register, it is not no_reg, is left unchanged. + void PopHandleScope(Register saved, Register scratch); + // Jump to a runtime routine. void JumpToRuntime(const ExternalReference& ext); @@ -346,6 +354,9 @@ class MacroAssembler: public Assembler { void EnterFrame(StackFrame::Type type); void LeaveFrame(StackFrame::Type type); + void EnterExitFramePrologue(ExitFrame::Mode mode); + void EnterExitFrameEpilogue(ExitFrame::Mode mode, int argc); + // Allocation support helpers. 
void LoadAllocationTopHelper(Register result, Register result_end, diff --git a/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc b/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc index 76d36a939c..2e13d8aeed 100644 --- a/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc +++ b/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc @@ -598,10 +598,10 @@ Handle<Object> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) { Label stack_limit_hit; Label stack_ok; - ExternalReference stack_guard_limit = - ExternalReference::address_of_stack_guard_limit(); + ExternalReference stack_limit = + ExternalReference::address_of_stack_limit(); __ mov(ecx, esp); - __ sub(ecx, Operand::StaticVariable(stack_guard_limit)); + __ sub(ecx, Operand::StaticVariable(stack_limit)); // Handle it if the stack pointer is already below the stack limit. __ j(below_equal, &stack_limit_hit, not_taken); // Check if there is room for the variable number of registers above @@ -1081,9 +1081,9 @@ void RegExpMacroAssemblerIA32::Pop(Register target) { void RegExpMacroAssemblerIA32::CheckPreemption() { // Check for preemption. 
Label no_preempt; - ExternalReference stack_guard_limit = - ExternalReference::address_of_stack_guard_limit(); - __ cmp(esp, Operand::StaticVariable(stack_guard_limit)); + ExternalReference stack_limit = + ExternalReference::address_of_stack_limit(); + __ cmp(esp, Operand::StaticVariable(stack_limit)); __ j(above, &no_preempt, taken); SafeCall(&check_preempt_label_); diff --git a/deps/v8/src/ia32/register-allocator-ia32.cc b/deps/v8/src/ia32/register-allocator-ia32.cc index 2914960eac..0bad87d082 100644 --- a/deps/v8/src/ia32/register-allocator-ia32.cc +++ b/deps/v8/src/ia32/register-allocator-ia32.cc @@ -42,7 +42,7 @@ void Result::ToRegister() { Result fresh = CodeGeneratorScope::Current()->allocator()->Allocate(); ASSERT(fresh.is_valid()); if (CodeGeneratorScope::Current()->IsUnsafeSmi(handle())) { - CodeGeneratorScope::Current()->LoadUnsafeSmi(fresh.reg(), handle()); + CodeGeneratorScope::Current()->MoveUnsafeSmi(fresh.reg(), handle()); } else { CodeGeneratorScope::Current()->masm()->Set(fresh.reg(), Immediate(handle())); @@ -64,7 +64,7 @@ void Result::ToRegister(Register target) { } else { ASSERT(is_constant()); if (CodeGeneratorScope::Current()->IsUnsafeSmi(handle())) { - CodeGeneratorScope::Current()->LoadUnsafeSmi(fresh.reg(), handle()); + CodeGeneratorScope::Current()->MoveUnsafeSmi(fresh.reg(), handle()); } else { CodeGeneratorScope::Current()->masm()->Set(fresh.reg(), Immediate(handle())); diff --git a/deps/v8/src/ia32/simulator-ia32.h b/deps/v8/src/ia32/simulator-ia32.h index 8fa4287f76..ce7ed0ec9c 100644 --- a/deps/v8/src/ia32/simulator-ia32.h +++ b/deps/v8/src/ia32/simulator-ia32.h @@ -43,6 +43,12 @@ class SimulatorStack : public v8::internal::AllStatic { static inline uintptr_t JsLimitFromCLimit(uintptr_t c_limit) { return c_limit; } + + static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) { + return try_catch_address; + } + + static inline void UnregisterCTryCatch() { } }; // Call the generated regexp code directly. 
The entry function pointer should @@ -50,4 +56,7 @@ class SimulatorStack : public v8::internal::AllStatic { #define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \ entry(p0, p1, p2, p3, p4, p5, p6) +#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \ + reinterpret_cast<TryCatch*>(try_catch_address) + #endif // V8_IA32_SIMULATOR_IA32_H_ diff --git a/deps/v8/src/ia32/stub-cache-ia32.cc b/deps/v8/src/ia32/stub-cache-ia32.cc index ca4e142101..f9f986afea 100644 --- a/deps/v8/src/ia32/stub-cache-ia32.cc +++ b/deps/v8/src/ia32/stub-cache-ia32.cc @@ -240,7 +240,7 @@ void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm, __ mov(eax, FieldOperand(receiver, String::kLengthOffset)); // ecx is also the receiver. __ lea(ecx, Operand(scratch, String::kLongLengthShift)); - __ shr(eax); // ecx is implicit shift register. + __ shr_cl(eax); __ shl(eax, kSmiTagSize); __ ret(0); @@ -776,20 +776,40 @@ void StubCompiler::GenerateLoadCallback(JSObject* object, CheckPrototypes(object, receiver, holder, scratch1, scratch2, name, miss); - // Push the arguments on the JS stack of the caller. - __ pop(scratch2); // remove return address + Handle<AccessorInfo> callback_handle(callback); + + Register other = reg.is(scratch1) ? scratch2 : scratch1; + __ EnterInternalFrame(); + __ PushHandleScope(other); + // Push the stack address where the list of arguments ends + __ mov(other, esp); + __ sub(Operand(other), Immediate(2 * kPointerSize)); + __ push(other); __ push(receiver); // receiver __ push(reg); // holder - __ mov(reg, Immediate(Handle<AccessorInfo>(callback))); // callback data - __ push(reg); - __ push(FieldOperand(reg, AccessorInfo::kDataOffset)); + __ mov(other, Immediate(callback_handle)); + __ push(other); + __ push(FieldOperand(other, AccessorInfo::kDataOffset)); // data __ push(name_reg); // name - __ push(scratch2); // restore return address + // Save a pointer to where we pushed the arguments pointer. 
+ // This will be passed as the const Arguments& to the C++ callback. + __ mov(eax, esp); + __ add(Operand(eax), Immediate(5 * kPointerSize)); + __ mov(ebx, esp); + + // Do call through the api. + ASSERT_EQ(6, ApiGetterEntryStub::kStackSpace); + Address getter_address = v8::ToCData<Address>(callback->getter()); + ApiFunction fun(getter_address); + ApiGetterEntryStub stub(callback_handle, &fun); + __ CallStub(&stub); - // Do tail-call to the runtime system. - ExternalReference load_callback_property = - ExternalReference(IC_Utility(IC::kLoadCallbackProperty)); - __ TailCallRuntime(load_callback_property, 5, 1); + // We need to avoid using eax since that now holds the result. + Register tmp = other.is(eax) ? reg : other; + __ PopHandleScope(eax, tmp); + __ LeaveInternalFrame(); + + __ ret(0); } diff --git a/deps/v8/src/ia32/virtual-frame-ia32.cc b/deps/v8/src/ia32/virtual-frame-ia32.cc index 980cec8eb7..e770cddb15 100644 --- a/deps/v8/src/ia32/virtual-frame-ia32.cc +++ b/deps/v8/src/ia32/virtual-frame-ia32.cc @@ -75,10 +75,7 @@ void VirtualFrame::SyncElementBelowStackPointer(int index) { case FrameElement::CONSTANT: if (cgen()->IsUnsafeSmi(element.handle())) { - Result temp = cgen()->allocator()->Allocate(); - ASSERT(temp.is_valid()); - cgen()->LoadUnsafeSmi(temp.reg(), element.handle()); - __ mov(Operand(ebp, fp_relative(index)), temp.reg()); + cgen()->StoreUnsafeSmiToLocal(fp_relative(index), element.handle()); } else { __ Set(Operand(ebp, fp_relative(index)), Immediate(element.handle())); @@ -127,10 +124,7 @@ void VirtualFrame::SyncElementByPushing(int index) { case FrameElement::CONSTANT: if (cgen()->IsUnsafeSmi(element.handle())) { - Result temp = cgen()->allocator()->Allocate(); - ASSERT(temp.is_valid()); - cgen()->LoadUnsafeSmi(temp.reg(), element.handle()); - __ push(temp.reg()); + cgen()->PushUnsafeSmi(element.handle()); } else { __ push(Immediate(element.handle())); } @@ -161,7 +155,7 @@ void VirtualFrame::SyncRange(int begin, int end) { // on the stack. 
int start = Min(begin, stack_pointer_ + 1); - // Emit normal 'push' instructions for elements above stack pointer + // Emit normal push instructions for elements above stack pointer // and use mov instructions if we are below stack pointer. for (int i = start; i <= end; i++) { if (!elements_[i].is_synced()) { @@ -199,7 +193,7 @@ void VirtualFrame::MakeMergable() { // Emit a move. if (element.is_constant()) { if (cgen()->IsUnsafeSmi(element.handle())) { - cgen()->LoadUnsafeSmi(fresh.reg(), element.handle()); + cgen()->MoveUnsafeSmi(fresh.reg(), element.handle()); } else { __ Set(fresh.reg(), Immediate(element.handle())); } @@ -300,7 +294,7 @@ void VirtualFrame::MergeMoveRegistersToMemory(VirtualFrame* expected) { if (!source.is_synced()) { if (cgen()->IsUnsafeSmi(source.handle())) { esi_caches = i; - cgen()->LoadUnsafeSmi(esi, source.handle()); + cgen()->MoveUnsafeSmi(esi, source.handle()); __ mov(Operand(ebp, fp_relative(i)), esi); } else { __ Set(Operand(ebp, fp_relative(i)), Immediate(source.handle())); @@ -408,7 +402,7 @@ void VirtualFrame::MergeMoveMemoryToRegisters(VirtualFrame* expected) { case FrameElement::CONSTANT: if (cgen()->IsUnsafeSmi(source.handle())) { - cgen()->LoadUnsafeSmi(target_reg, source.handle()); + cgen()->MoveUnsafeSmi(target_reg, source.handle()); } else { __ Set(target_reg, Immediate(source.handle())); } diff --git a/deps/v8/src/ic.cc b/deps/v8/src/ic.cc index c12dba7bb0..2779356c0e 100644 --- a/deps/v8/src/ic.cc +++ b/deps/v8/src/ic.cc @@ -126,7 +126,8 @@ Address IC::OriginalCodeAddress() { // Return the address in the original code. This is the place where // the call which has been overwritten by the DebugBreakXXX resides // and the place where the inline cache system should look. 
- int delta = original_code->instruction_start() - code->instruction_start(); + intptr_t delta = + original_code->instruction_start() - code->instruction_start(); return addr + delta; } #endif diff --git a/deps/v8/src/interpreter-irregexp.cc b/deps/v8/src/interpreter-irregexp.cc index ae914d39df..a904447f37 100644 --- a/deps/v8/src/interpreter-irregexp.cc +++ b/deps/v8/src/interpreter-irregexp.cc @@ -117,17 +117,17 @@ static void TraceInterpreter(const byte* code_base, } -#define BYTECODE(name) \ - case BC_##name: \ - TraceInterpreter(code_base, \ - pc, \ - backtrack_sp - backtrack_stack_base, \ - current, \ - current_char, \ - BC_##name##_LENGTH, \ +#define BYTECODE(name) \ + case BC_##name: \ + TraceInterpreter(code_base, \ + pc, \ + static_cast<int>(backtrack_sp - backtrack_stack_base), \ + current, \ + current_char, \ + BC_##name##_LENGTH, \ #name); #else -#define BYTECODE(name) \ +#define BYTECODE(name) \ case BC_##name: #endif @@ -250,13 +250,14 @@ static bool RawMatch(const byte* code_base, pc += BC_SET_CP_TO_REGISTER_LENGTH; break; BYTECODE(SET_REGISTER_TO_SP) - registers[insn >> BYTECODE_SHIFT] = backtrack_sp - backtrack_stack_base; + registers[insn >> BYTECODE_SHIFT] = + static_cast<int>(backtrack_sp - backtrack_stack_base); pc += BC_SET_REGISTER_TO_SP_LENGTH; break; BYTECODE(SET_SP_TO_REGISTER) backtrack_sp = backtrack_stack_base + registers[insn >> BYTECODE_SHIFT]; backtrack_stack_space = backtrack_stack.max_size() - - (backtrack_sp - backtrack_stack_base); + static_cast<int>(backtrack_sp - backtrack_stack_base); pc += BC_SET_SP_TO_REGISTER_LENGTH; break; BYTECODE(POP_CP) diff --git a/deps/v8/src/jsregexp.cc b/deps/v8/src/jsregexp.cc index c77f32d1e4..04d194419f 100644 --- a/deps/v8/src/jsregexp.cc +++ b/deps/v8/src/jsregexp.cc @@ -2432,16 +2432,19 @@ void Trace::AdvanceCurrentPositionInTrace(int by, RegExpCompiler* compiler) { } -void TextNode::MakeCaseIndependent() { +void TextNode::MakeCaseIndependent(bool is_ascii) { int element_count = 
elms_->length(); for (int i = 0; i < element_count; i++) { TextElement elm = elms_->at(i); if (elm.type == TextElement::CHAR_CLASS) { RegExpCharacterClass* cc = elm.data.u_char_class; + // None of the standard character classses is different in the case + // independent case and it slows us down if we don't know that. + if (cc->is_standard()) continue; ZoneList<CharacterRange>* ranges = cc->ranges(); int range_count = ranges->length(); - for (int i = 0; i < range_count; i++) { - ranges->at(i).AddCaseEquivalents(ranges); + for (int j = 0; j < range_count; j++) { + ranges->at(j).AddCaseEquivalents(ranges, is_ascii); } } } @@ -3912,19 +3915,31 @@ void CharacterRange::Split(ZoneList<CharacterRange>* base, } -void CharacterRange::AddCaseEquivalents(ZoneList<CharacterRange>* ranges) { +static void AddUncanonicals(ZoneList<CharacterRange>* ranges, + int bottom, + int top); + + +void CharacterRange::AddCaseEquivalents(ZoneList<CharacterRange>* ranges, + bool is_ascii) { + uc16 bottom = from(); + uc16 top = to(); + if (is_ascii) { + if (bottom > String::kMaxAsciiCharCode) return; + if (top > String::kMaxAsciiCharCode) top = String::kMaxAsciiCharCode; + } unibrow::uchar chars[unibrow::Ecma262UnCanonicalize::kMaxWidth]; - if (IsSingleton()) { + if (top == bottom) { // If this is a singleton we just expand the one character. - int length = uncanonicalize.get(from(), '\0', chars); + int length = uncanonicalize.get(bottom, '\0', chars); for (int i = 0; i < length; i++) { uc32 chr = chars[i]; - if (chr != from()) { + if (chr != bottom) { ranges->Add(CharacterRange::Singleton(chars[i])); } } - } else if (from() <= kRangeCanonicalizeMax && - to() <= kRangeCanonicalizeMax) { + } else if (bottom <= kRangeCanonicalizeMax && + top <= kRangeCanonicalizeMax) { // If this is a range we expand the characters block by block, // expanding contiguous subranges (blocks) one at a time. // The approach is as follows. 
For a given start character we @@ -3943,14 +3958,14 @@ void CharacterRange::AddCaseEquivalents(ZoneList<CharacterRange>* ranges) { // completely contained in a block we do this for all the blocks // covered by the range. unibrow::uchar range[unibrow::Ecma262UnCanonicalize::kMaxWidth]; - // First, look up the block that contains the 'from' character. - int length = canonrange.get(from(), '\0', range); + // First, look up the block that contains the 'bottom' character. + int length = canonrange.get(bottom, '\0', range); if (length == 0) { - range[0] = from(); + range[0] = bottom; } else { ASSERT_EQ(1, length); } - int pos = from(); + int pos = bottom; // The start of the current block. Note that except for the first // iteration 'start' is always equal to 'pos'. int start; @@ -3961,10 +3976,10 @@ void CharacterRange::AddCaseEquivalents(ZoneList<CharacterRange>* ranges) { } else { start = pos; } - // Then we add the ranges on at a time, incrementing the current + // Then we add the ranges one at a time, incrementing the current // position to be after the last block each time. The position // always points to the start of a block. - while (pos < to()) { + while (pos < top) { length = canonrange.get(start, '\0', range); if (length == 0) { range[0] = start; @@ -3975,20 +3990,122 @@ void CharacterRange::AddCaseEquivalents(ZoneList<CharacterRange>* ranges) { // The start point of a block contains the distance to the end // of the range. int block_end = start + (range[0] & kPayloadMask) - 1; - int end = (block_end > to()) ? to() : block_end; + int end = (block_end > top) ? 
top : block_end; length = uncanonicalize.get(start, '\0', range); for (int i = 0; i < length; i++) { uc32 c = range[i]; uc16 range_from = c + (pos - start); uc16 range_to = c + (end - start); - if (!(from() <= range_from && range_to <= to())) { + if (!(bottom <= range_from && range_to <= top)) { ranges->Add(CharacterRange(range_from, range_to)); } } start = pos = block_end + 1; } } else { - // TODO(plesner) when we've fixed the 2^11 bug in unibrow. + // Unibrow ranges don't work for high characters due to the "2^11 bug". + // Therefore we do something dumber for these ranges. + AddUncanonicals(ranges, bottom, top); + } +} + + +static void AddUncanonicals(ZoneList<CharacterRange>* ranges, + int bottom, + int top) { + unibrow::uchar chars[unibrow::Ecma262UnCanonicalize::kMaxWidth]; + // Zones with no case mappings. There is a DEBUG-mode loop to assert that + // this table is correct. + // 0x0600 - 0x0fff + // 0x1100 - 0x1cff + // 0x2000 - 0x20ff + // 0x2200 - 0x23ff + // 0x2500 - 0x2bff + // 0x2e00 - 0xa5ff + // 0xa800 - 0xfaff + // 0xfc00 - 0xfeff + const int boundary_count = 18; + // The ASCII boundary and the kRangeCanonicalizeMax boundary are also in this + // array. This is to split up big ranges and not because they actually denote + // a case-mapping-free-zone. + ASSERT(CharacterRange::kRangeCanonicalizeMax < 0x600); + const int kFirstRealCaselessZoneIndex = 2; + int boundaries[] = {0x80, CharacterRange::kRangeCanonicalizeMax, + 0x600, 0x1000, 0x1100, 0x1d00, 0x2000, 0x2100, 0x2200, 0x2400, 0x2500, + 0x2c00, 0x2e00, 0xa600, 0xa800, 0xfb00, 0xfc00, 0xff00}; + + // Special ASCII rule from spec can save us some work here. + if (bottom == 0x80 && top == 0xffff) return; + + // We have optimized support for this range. + if (top <= CharacterRange::kRangeCanonicalizeMax) { + CharacterRange range(bottom, top); + range.AddCaseEquivalents(ranges, false); + return; + } + + // Split up very large ranges. This helps remove ranges where there are no + // case mappings. 
+ for (int i = 0; i < boundary_count; i++) { + if (bottom < boundaries[i] && top >= boundaries[i]) { + AddUncanonicals(ranges, bottom, boundaries[i] - 1); + AddUncanonicals(ranges, boundaries[i], top); + return; + } + } + + // If we are completely in a zone with no case mappings then we are done. + // We start at 2 so as not to except the ASCII range from mappings. + for (int i = kFirstRealCaselessZoneIndex; i < boundary_count; i += 2) { + if (bottom >= boundaries[i] && top < boundaries[i + 1]) { +#ifdef DEBUG + for (int j = bottom; j <= top; j++) { + unsigned current_char = j; + int length = uncanonicalize.get(current_char, '\0', chars); + for (int k = 0; k < length; k++) { + ASSERT(chars[k] == current_char); + } + } +#endif + return; + } + } + + // Step through the range finding equivalent characters. + ZoneList<unibrow::uchar> *characters = new ZoneList<unibrow::uchar>(100); + for (int i = bottom; i <= top; i++) { + int length = uncanonicalize.get(i, '\0', chars); + for (int j = 0; j < length; j++) { + uc32 chr = chars[j]; + if (chr != i && (chr < bottom || chr > top)) { + characters->Add(chr); + } + } + } + + // Step through the equivalent characters finding simple ranges and + // adding ranges to the character class. 
+ if (characters->length() > 0) { + int new_from = characters->at(0); + int new_to = new_from; + for (int i = 1; i < characters->length(); i++) { + int chr = characters->at(i); + if (chr == new_to + 1) { + new_to++; + } else { + if (new_to == new_from) { + ranges->Add(CharacterRange::Singleton(new_from)); + } else { + ranges->Add(CharacterRange(new_from, new_to)); + } + new_from = new_to = chr; + } + } + if (new_to == new_from) { + ranges->Add(CharacterRange::Singleton(new_from)); + } else { + ranges->Add(CharacterRange(new_from, new_to)); + } } } @@ -4234,7 +4351,7 @@ void TextNode::CalculateOffsets() { void Analysis::VisitText(TextNode* that) { if (ignore_case_) { - that->MakeCaseIndependent(); + that->MakeCaseIndependent(is_ascii_); } EnsureAnalyzed(that->on_success()); if (!has_failed()) { @@ -4452,7 +4569,7 @@ RegExpEngine::CompilationResult RegExpEngine::Compile(RegExpCompileData* data, } } data->node = node; - Analysis analysis(ignore_case); + Analysis analysis(ignore_case, is_ascii); analysis.EnsureAnalyzed(node); if (analysis.has_failed()) { const char* error_message = analysis.error_message(); diff --git a/deps/v8/src/jsregexp.h b/deps/v8/src/jsregexp.h index 84f8d98ce8..b6811194c0 100644 --- a/deps/v8/src/jsregexp.h +++ b/deps/v8/src/jsregexp.h @@ -200,7 +200,7 @@ class CharacterRange { bool is_valid() { return from_ <= to_; } bool IsEverything(uc16 max) { return from_ == 0 && to_ >= max; } bool IsSingleton() { return (from_ == to_); } - void AddCaseEquivalents(ZoneList<CharacterRange>* ranges); + void AddCaseEquivalents(ZoneList<CharacterRange>* ranges, bool is_ascii); static void Split(ZoneList<CharacterRange>* base, Vector<const uc16> overlay, ZoneList<CharacterRange>** included, @@ -703,7 +703,7 @@ class TextNode: public SeqRegExpNode { int characters_filled_in, bool not_at_start); ZoneList<TextElement>* elements() { return elms_; } - void MakeCaseIndependent(); + void MakeCaseIndependent(bool is_ascii); virtual int GreedyLoopTextLength(); virtual 
TextNode* Clone() { TextNode* result = new TextNode(*this); @@ -1212,8 +1212,10 @@ FOR_EACH_NODE_TYPE(DECLARE_VISIT) // +-------+ +------------+ class Analysis: public NodeVisitor { public: - explicit Analysis(bool ignore_case) - : ignore_case_(ignore_case), error_message_(NULL) { } + Analysis(bool ignore_case, bool is_ascii) + : ignore_case_(ignore_case), + is_ascii_(is_ascii), + error_message_(NULL) { } void EnsureAnalyzed(RegExpNode* node); #define DECLARE_VISIT(Type) \ @@ -1232,6 +1234,7 @@ FOR_EACH_NODE_TYPE(DECLARE_VISIT) } private: bool ignore_case_; + bool is_ascii_; const char* error_message_; DISALLOW_IMPLICIT_CONSTRUCTORS(Analysis); diff --git a/deps/v8/src/list.h b/deps/v8/src/list.h index 19dc7337d3..aff63c3828 100644 --- a/deps/v8/src/list.h +++ b/deps/v8/src/list.h @@ -59,7 +59,9 @@ class List { Initialize(0); } - INLINE(void* operator new(size_t size)) { return P::New(size); } + INLINE(void* operator new(size_t size)) { + return P::New(static_cast<int>(size)); + } INLINE(void operator delete(void* p, size_t)) { return P::Delete(p); } // Returns a reference to the element at index i. This reference is diff --git a/deps/v8/src/log-utils.cc b/deps/v8/src/log-utils.cc index f327a0a04a..fd9560418c 100644 --- a/deps/v8/src/log-utils.cc +++ b/deps/v8/src/log-utils.cc @@ -155,7 +155,7 @@ void Log::OpenMemoryBuffer() { ASSERT(!IsEnabled()); output_buffer_ = new LogDynamicBuffer( kDynamicBufferBlockSize, kMaxDynamicBufferSize, - kDynamicBufferSeal, strlen(kDynamicBufferSeal)); + kDynamicBufferSeal, StrLength(kDynamicBufferSeal)); Write = WriteToMemory; Init(); } @@ -195,7 +195,7 @@ int Log::GetLogLines(int from_pos, char* dest_buf, int max_size) { // Find previous log line boundary. 
char* end_pos = dest_buf + actual_size - 1; while (end_pos >= dest_buf && *end_pos != '\n') --end_pos; - actual_size = end_pos - dest_buf + 1; + actual_size = static_cast<int>(end_pos - dest_buf + 1); ASSERT(actual_size <= max_size); return actual_size; } @@ -352,7 +352,7 @@ void LogMessageBuilder::WriteToLogFile() { void LogMessageBuilder::WriteCStringToLogFile(const char* str) { - const int len = strlen(str); + const int len = StrLength(str); const int written = Log::Write(str, len); if (written != len && write_failure_handler != NULL) { write_failure_handler(); @@ -461,7 +461,7 @@ bool LogRecordCompressor::RetrievePreviousCompressed( --data_ptr; } const intptr_t truncated_len = prev_end - prev_ptr; - const int copy_from_pos = data_ptr - data.start(); + const int copy_from_pos = static_cast<int>(data_ptr - data.start()); // Check if the length of compressed tail is enough. if (truncated_len <= kMaxBackwardReferenceSize && truncated_len <= GetBackwardReferenceSize(distance, copy_from_pos)) { @@ -493,7 +493,7 @@ bool LogRecordCompressor::RetrievePreviousCompressed( prev_record->start() + unchanged_len, best.backref_size + 1); PrintBackwardReference(backref, best.distance, best.copy_from_pos); ASSERT(strlen(backref.start()) - best.backref_size == 0); - prev_record->Truncate(unchanged_len + best.backref_size); + prev_record->Truncate(static_cast<int>(unchanged_len + best.backref_size)); } return true; } diff --git a/deps/v8/src/log-utils.h b/deps/v8/src/log-utils.h index 117f098cc8..3e25b0e757 100644 --- a/deps/v8/src/log-utils.h +++ b/deps/v8/src/log-utils.h @@ -129,9 +129,10 @@ class Log : public AllStatic { // Implementation of writing to a log file. 
static int WriteToFile(const char* msg, int length) { ASSERT(output_handle_ != NULL); - int rv = fwrite(msg, 1, length, output_handle_); - ASSERT(length == rv); - return rv; + size_t rv = fwrite(msg, 1, length, output_handle_); + ASSERT(static_cast<size_t>(length) == rv); + USE(rv); + return length; } // Implementation of writing to a memory buffer. diff --git a/deps/v8/src/log.cc b/deps/v8/src/log.cc index 2e7796a7ac..9acb7f7857 100644 --- a/deps/v8/src/log.cc +++ b/deps/v8/src/log.cc @@ -915,8 +915,9 @@ void Logger::HeapSampleJSRetainersEvent( // Event starts with comma, so we don't have it in the format string. static const char* event_text = "heap-js-ret-item,%s"; // We take placeholder strings into account, but it's OK to be conservative. - static const int event_text_len = strlen(event_text); - const int cons_len = strlen(constructor), event_len = strlen(event); + static const int event_text_len = StrLength(event_text); + const int cons_len = StrLength(constructor); + const int event_len = StrLength(event); int pos = 0; // Retainer lists can be long. We may need to split them into multiple events. do { @@ -1120,6 +1121,48 @@ static int EnumerateCompiledFunctions(Handle<SharedFunctionInfo>* sfis) { } +void Logger::LogCodeObject(Object* object) { + if (FLAG_log_code) { + Code* code_object = Code::cast(object); + LogEventsAndTags tag = Logger::STUB_TAG; + const char* description = "Unknown code from the snapshot"; + switch (code_object->kind()) { + case Code::FUNCTION: + return; // We log this later using LogCompiledFunctions. 
+ case Code::STUB: + description = CodeStub::MajorName(code_object->major_key()); + tag = Logger::STUB_TAG; + break; + case Code::BUILTIN: + description = "A builtin from the snapshot"; + tag = Logger::BUILTIN_TAG; + break; + case Code::KEYED_LOAD_IC: + description = "A keyed load IC from the snapshot"; + tag = Logger::KEYED_LOAD_IC_TAG; + break; + case Code::LOAD_IC: + description = "A load IC from the snapshot"; + tag = Logger::LOAD_IC_TAG; + break; + case Code::STORE_IC: + description = "A store IC from the snapshot"; + tag = Logger::STORE_IC_TAG; + break; + case Code::KEYED_STORE_IC: + description = "A keyed store IC from the snapshot"; + tag = Logger::KEYED_STORE_IC_TAG; + break; + case Code::CALL_IC: + description = "A call IC from the snapshot"; + tag = Logger::CALL_IC_TAG; + break; + } + LOG(CodeCreateEvent(tag, code_object, description)); + } +} + + void Logger::LogCompiledFunctions() { HandleScope scope; const int compiled_funcs_count = EnumerateCompiledFunctions(NULL); diff --git a/deps/v8/src/log.h b/deps/v8/src/log.h index 13d45d2e32..e7931ca42d 100644 --- a/deps/v8/src/log.h +++ b/deps/v8/src/log.h @@ -265,6 +265,8 @@ class Logger { // Logs all compiled functions found in the heap. static void LogCompiledFunctions(); + // Used for logging stubs found in the snapshot. 
+ static void LogCodeObject(Object* code_object); private: diff --git a/deps/v8/src/macros.py b/deps/v8/src/macros.py index ddd2f13bc1..d6a2426c5a 100644 --- a/deps/v8/src/macros.py +++ b/deps/v8/src/macros.py @@ -77,12 +77,13 @@ const kMonthShift = 5; macro IS_NULL(arg) = (arg === null); macro IS_NULL_OR_UNDEFINED(arg) = (arg == null); macro IS_UNDEFINED(arg) = (typeof(arg) === 'undefined'); -macro IS_FUNCTION(arg) = (typeof(arg) === 'function'); macro IS_NUMBER(arg) = (typeof(arg) === 'number'); macro IS_STRING(arg) = (typeof(arg) === 'string'); -macro IS_OBJECT(arg) = (typeof(arg) === 'object'); +macro IS_OBJECT(arg) = (typeof(arg) === 'object' || %_ClassOf(arg) == 'RegExp'); macro IS_BOOLEAN(arg) = (typeof(arg) === 'boolean'); macro IS_ARRAY(arg) = (%_IsArray(arg)); +# IS_FUNCTION uses %_ClassOf rather than typeof so as to exclude regexps. +macro IS_FUNCTION(arg) = (%_ClassOf(arg) === 'Function'); macro IS_REGEXP(arg) = (%_ClassOf(arg) === 'RegExp'); macro IS_DATE(arg) = (%_ClassOf(arg) === 'Date'); macro IS_NUMBER_WRAPPER(arg) = (%_ClassOf(arg) === 'Number'); diff --git a/deps/v8/src/mark-compact.cc b/deps/v8/src/mark-compact.cc index 5a3ab89057..81819b7f67 100644 --- a/deps/v8/src/mark-compact.cc +++ b/deps/v8/src/mark-compact.cc @@ -572,9 +572,8 @@ class SymbolMarkingVisitor : public ObjectVisitor { void MarkCompactCollector::MarkSymbolTable() { // Objects reachable from symbols are marked as live so as to ensure // that if the symbol itself remains alive after GC for any reason, - // and if it is a sliced string or a cons string backed by an - // external string (even indirectly), then the external string does - // not receive a weak reference callback. + // and if it is a cons string backed by an external string (even indirectly), + // then the external string does not receive a weak reference callback. SymbolTable* symbol_table = Heap::raw_unchecked_symbol_table(); // Mark the symbol table itself. 
SetMark(symbol_table); @@ -593,7 +592,7 @@ void MarkCompactCollector::MarkSymbolTable() { void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) { // Mark the heap roots including global variables, stack variables, // etc., and all objects reachable from them. - Heap::IterateStrongRoots(visitor); + Heap::IterateStrongRoots(visitor, VISIT_ONLY_STRONG); // Handle the symbol table specially. MarkSymbolTable(); @@ -1074,7 +1073,7 @@ inline void EncodeForwardingAddressesInRange(Address start, } #endif if (!is_prev_alive) { // Transition from non-live to live. - EncodeFreeRegion(free_start, current - free_start); + EncodeFreeRegion(free_start, static_cast<int>(current - free_start)); is_prev_alive = true; } } else { // Non-live object. @@ -1088,7 +1087,9 @@ inline void EncodeForwardingAddressesInRange(Address start, } // If we ended on a free region, mark it. - if (!is_prev_alive) EncodeFreeRegion(free_start, end - free_start); + if (!is_prev_alive) { + EncodeFreeRegion(free_start, static_cast<int>(end - free_start)); + } } @@ -1169,7 +1170,7 @@ static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) { object->ClearMark(); MarkCompactCollector::tracer()->decrement_marked_count(); if (!is_previous_alive) { // Transition from free to live. - dealloc(free_start, current - free_start); + dealloc(free_start, static_cast<int>(current - free_start)); is_previous_alive = true; } } else { @@ -1189,7 +1190,7 @@ static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) { // If the last region was not live we need to deallocate from // free_start to the allocation top in the page. 
if (!is_previous_alive) { - int free_size = p->AllocationTop() - free_start; + int free_size = static_cast<int>(p->AllocationTop() - free_start); if (free_size > 0) { dealloc(free_start, free_size); } @@ -1455,7 +1456,7 @@ void MarkCompactCollector::UpdatePointers() { state_ = UPDATE_POINTERS; #endif UpdatingVisitor updating_visitor; - Heap::IterateRoots(&updating_visitor); + Heap::IterateRoots(&updating_visitor, VISIT_ONLY_STRONG); GlobalHandles::IterateWeakRoots(&updating_visitor); int live_maps = IterateLiveObjects(Heap::map_space(), diff --git a/deps/v8/src/mirror-delay.js b/deps/v8/src/mirror-delay.js index cde5534321..82fb9c2283 100644 --- a/deps/v8/src/mirror-delay.js +++ b/deps/v8/src/mirror-delay.js @@ -849,6 +849,33 @@ FunctionMirror.prototype.script = function() { /** + * Returns the script source position for the function. Only makes sense + * for functions which has a script defined. + * @return {Number or undefined} in-script position for the function + */ +FunctionMirror.prototype.sourcePosition_ = function() { + // Return script if function is resolved. Otherwise just fall through + // to return undefined. + if (this.resolved()) { + return %FunctionGetScriptSourcePosition(this.value_); + } +}; + + +/** + * Returns the script source location object for the function. Only makes sense + * for functions which has a script defined. + * @return {Location or undefined} in-script location for the function begin + */ +FunctionMirror.prototype.sourceLocation = function() { + if (this.resolved() && this.script()) { + return this.script().locationFromPosition(this.sourcePosition_(), + true); + } +}; + + +/** * Returns objects constructed by this function. * @param {number} opt_max_instances Optional parameter specifying the maximum * number of instances to return. 
@@ -2119,6 +2146,9 @@ JSONProtocolSerializer.prototype.serializeObject_ = function(mirror, content, } if (mirror.script()) { content.script = this.serializeReference(mirror.script()); + content.scriptId = mirror.script().id(); + + serializeLocationFields(mirror.sourceLocation(), content); } } @@ -2151,6 +2181,31 @@ JSONProtocolSerializer.prototype.serializeObject_ = function(mirror, content, /** + * Serialize location information to the following JSON format: + * + * "position":"<position>", + * "line":"<line>", + * "column":"<column>", + * + * @param {SourceLocation} location The location to serialize, may be undefined. + */ +function serializeLocationFields (location, content) { + if (!location) { + return; + } + content.position = location.position; + var line = location.line; + if (!IS_UNDEFINED(line)) { + content.line = line; + } + var column = location.column; + if (!IS_UNDEFINED(column)) { + content.column = column; + } +} + + +/** * Serialize property information to the following JSON format for building the * array of properties. 
* @@ -2218,15 +2273,7 @@ JSONProtocolSerializer.prototype.serializeFrame_ = function(mirror, content) { x[i] = local; } content.locals = x; - content.position = mirror.sourcePosition(); - var line = mirror.sourceLine(); - if (!IS_UNDEFINED(line)) { - content.line = line; - } - var column = mirror.sourceColumn(); - if (!IS_UNDEFINED(column)) { - content.column = column; - } + serializeLocationFields(mirror.sourceLocation(), content); var source_line_text = mirror.sourceLineText(); if (!IS_UNDEFINED(source_line_text)) { content.sourceLineText = source_line_text; diff --git a/deps/v8/src/mksnapshot.cc b/deps/v8/src/mksnapshot.cc index 80789ebbea..eb743f81f8 100644 --- a/deps/v8/src/mksnapshot.cc +++ b/deps/v8/src/mksnapshot.cc @@ -87,57 +87,53 @@ class CounterCollection { // We statically allocate a set of local counters to be used if we // don't want to store the stats in a memory-mapped file static CounterCollection local_counters; -static CounterCollection* counters = &local_counters; typedef std::map<std::string, int*> CounterMap; typedef std::map<std::string, int*>::iterator CounterMapIterator; static CounterMap counter_table_; -// Callback receiver when v8 has a counter to track. -static int* counter_callback(const char* name) { - std::string counter = name; - // See if this counter name is already known. - if (counter_table_.find(counter) != counter_table_.end()) - return counter_table_[counter]; - - Counter* ctr = counters->GetNextCounter(); - if (ctr == NULL) return NULL; - int* ptr = ctr->Bind(name); - counter_table_[counter] = ptr; - return ptr; -} +class CppByteSink : public i::SnapshotByteSink { + public: + explicit CppByteSink(const char* snapshot_file) : bytes_written_(0) { + fp_ = i::OS::FOpen(snapshot_file, "wb"); + if (fp_ == NULL) { + i::PrintF("Unable to write to snapshot file \"%s\"\n", snapshot_file); + exit(1); + } + fprintf(fp_, "// Autogenerated snapshot file. 
Do not edit.\n\n"); + fprintf(fp_, "#include \"v8.h\"\n"); + fprintf(fp_, "#include \"platform.h\"\n\n"); + fprintf(fp_, "#include \"snapshot.h\"\n\n"); + fprintf(fp_, "namespace v8 {\nnamespace internal {\n\n"); + fprintf(fp_, "const byte Snapshot::data_[] = {"); + } -// Write C++ code that defines Snapshot::snapshot_ to contain the snapshot -// to the file given by filename. Only the first size chars are written. -static int WriteInternalSnapshotToFile(const char* filename, - const v8::internal::byte* bytes, - int size) { - FILE* f = i::OS::FOpen(filename, "wb"); - if (f == NULL) { - i::OS::PrintError("Cannot open file %s for reading.\n", filename); - return 0; + virtual ~CppByteSink() { + if (fp_ != NULL) { + fprintf(fp_, "};\n\n"); + fprintf(fp_, "int Snapshot::size_ = %d;\n\n", bytes_written_); + fprintf(fp_, "} } // namespace v8::internal\n"); + fclose(fp_); + } } - fprintf(f, "// Autogenerated snapshot file. Do not edit.\n\n"); - fprintf(f, "#include \"v8.h\"\n"); - fprintf(f, "#include \"platform.h\"\n\n"); - fprintf(f, "#include \"snapshot.h\"\n\n"); - fprintf(f, "namespace v8 {\nnamespace internal {\n\n"); - fprintf(f, "const byte Snapshot::data_[] = {"); - int written = 0; - written += fprintf(f, "0x%x", bytes[0]); - for (int i = 1; i < size; ++i) { - written += fprintf(f, ",0x%x", bytes[i]); - // The following is needed to keep the line length low on Visual C++: - if (i % 512 == 0) fprintf(f, "\n"); + + virtual void Put(int byte, const char* description) { + if (bytes_written_ != 0) { + fprintf(fp_, ","); + } + fprintf(fp_, "%d", byte); + bytes_written_++; + if ((bytes_written_ & 0x3f) == 0) { + fprintf(fp_, "\n"); + } } - fprintf(f, "};\n\n"); - fprintf(f, "int Snapshot::size_ = %d;\n\n", size); - fprintf(f, "} } // namespace v8::internal\n"); - fclose(f); - return written; -} + + private: + FILE* fp_; + int bytes_written_; +}; int main(int argc, char** argv) { @@ -153,34 +149,20 @@ int main(int argc, char** argv) { i::FlagList::PrintHelp(); return 
!i::FLAG_help; } - - v8::V8::SetCounterFunction(counter_callback); - v8::HandleScope scope; - - const int kExtensionCount = 1; - const char* extension_list[kExtensionCount] = { "v8/gc" }; - v8::ExtensionConfiguration extensions(kExtensionCount, extension_list); - i::Serializer::Enable(); - v8::Context::New(&extensions); - + Persistent<Context> context = v8::Context::New(); // Make sure all builtin scripts are cached. { HandleScope scope; for (int i = 0; i < i::Natives::GetBuiltinsCount(); i++) { i::Bootstrapper::NativesSourceLookup(i); } } - // Get rid of unreferenced scripts with a global GC. - i::Heap::CollectAllGarbage(false); - i::Serializer ser; + context.Dispose(); + CppByteSink sink(argv[1]); + i::Serializer ser(&sink); + // This results in a somewhat smaller snapshot, probably because it gets rid + // of some things that are cached between garbage collections. + i::Heap::CollectAllGarbage(true); ser.Serialize(); - v8::internal::byte* bytes; - int len; - ser.Finalize(&bytes, &len); - - WriteInternalSnapshotToFile(argv[1], bytes, len); - - i::DeleteArray(bytes); - return 0; } diff --git a/deps/v8/src/objects-debug.cc b/deps/v8/src/objects-debug.cc index 01881346e1..19c945d987 100644 --- a/deps/v8/src/objects-debug.cc +++ b/deps/v8/src/objects-debug.cc @@ -553,12 +553,6 @@ static const char* TypeToString(InstanceType type) { case SHORT_ASCII_SYMBOL_TYPE: case MEDIUM_ASCII_SYMBOL_TYPE: case LONG_ASCII_SYMBOL_TYPE: return "ASCII_SYMBOL"; - case SHORT_SLICED_SYMBOL_TYPE: - case MEDIUM_SLICED_SYMBOL_TYPE: - case LONG_SLICED_SYMBOL_TYPE: return "SLICED_SYMBOL"; - case SHORT_SLICED_ASCII_SYMBOL_TYPE: - case MEDIUM_SLICED_ASCII_SYMBOL_TYPE: - case LONG_SLICED_ASCII_SYMBOL_TYPE: return "SLICED_ASCII_SYMBOL"; case SHORT_CONS_SYMBOL_TYPE: case MEDIUM_CONS_SYMBOL_TYPE: case LONG_CONS_SYMBOL_TYPE: return "CONS_SYMBOL"; @@ -583,12 +577,6 @@ static const char* TypeToString(InstanceType type) { case SHORT_CONS_ASCII_STRING_TYPE: case MEDIUM_CONS_ASCII_STRING_TYPE: case 
LONG_CONS_ASCII_STRING_TYPE: return "CONS_STRING"; - case SHORT_SLICED_STRING_TYPE: - case MEDIUM_SLICED_STRING_TYPE: - case LONG_SLICED_STRING_TYPE: - case SHORT_SLICED_ASCII_STRING_TYPE: - case MEDIUM_SLICED_ASCII_STRING_TYPE: - case LONG_SLICED_ASCII_STRING_TYPE: return "SLICED_STRING"; case SHORT_EXTERNAL_ASCII_STRING_TYPE: case MEDIUM_EXTERNAL_ASCII_STRING_TYPE: case LONG_EXTERNAL_ASCII_STRING_TYPE: @@ -796,8 +784,6 @@ void SharedFunctionInfo::SharedFunctionInfoPrint() { PrintF("\n - debug info = "); debug_info()->ShortPrint(); PrintF("\n - length = %d", length()); - PrintF("\n - has_only_this_property_assignments = %d", - has_only_this_property_assignments()); PrintF("\n - has_only_simple_this_property_assignments = %d", has_only_simple_this_property_assignments()); PrintF("\n - this_property_assignments = "); @@ -979,6 +965,7 @@ void AccessorInfo::AccessorInfoVerify() { VerifyPointer(name()); VerifyPointer(data()); VerifyPointer(flag()); + VerifyPointer(load_stub_cache()); } void AccessorInfo::AccessorInfoPrint() { @@ -1153,7 +1140,8 @@ void Script::ScriptVerify() { VerifyPointer(data()); VerifyPointer(wrapper()); type()->SmiVerify(); - VerifyPointer(line_ends()); + VerifyPointer(line_ends_fixed_array()); + VerifyPointer(line_ends_js_array()); VerifyPointer(id()); } diff --git a/deps/v8/src/objects-inl.h b/deps/v8/src/objects-inl.h index 2350a35ba8..507a3ab6b1 100644 --- a/deps/v8/src/objects-inl.h +++ b/deps/v8/src/objects-inl.h @@ -163,11 +163,6 @@ bool Object::IsConsString() { } -#ifdef DEBUG -// These are for cast checks. 
If you need one of these in release -// mode you should consider using a StringShape before moving it out -// of the ifdef - bool Object::IsSeqString() { if (!IsString()) return false; return StringShape(String::cast(this)).IsSequential(); @@ -208,15 +203,6 @@ bool Object::IsExternalTwoByteString() { } -bool Object::IsSlicedString() { - if (!IsString()) return false; - return StringShape(String::cast(this)).IsSliced(); -} - - -#endif // DEBUG - - StringShape::StringShape(String* str) : type_(str->map()->instance_type()) { set_valid(); @@ -246,9 +232,6 @@ bool StringShape::IsSymbol() { bool String::IsAsciiRepresentation() { uint32_t type = map()->instance_type(); - if ((type & kStringRepresentationMask) == kSlicedStringTag) { - return SlicedString::cast(this)->buffer()->IsAsciiRepresentation(); - } if ((type & kStringRepresentationMask) == kConsStringTag && ConsString::cast(this)->second()->length() == 0) { return ConsString::cast(this)->first()->IsAsciiRepresentation(); @@ -259,9 +242,7 @@ bool String::IsAsciiRepresentation() { bool String::IsTwoByteRepresentation() { uint32_t type = map()->instance_type(); - if ((type & kStringRepresentationMask) == kSlicedStringTag) { - return SlicedString::cast(this)->buffer()->IsTwoByteRepresentation(); - } else if ((type & kStringRepresentationMask) == kConsStringTag && + if ((type & kStringRepresentationMask) == kConsStringTag && ConsString::cast(this)->second()->length() == 0) { return ConsString::cast(this)->first()->IsTwoByteRepresentation(); } @@ -274,11 +255,6 @@ bool StringShape::IsCons() { } -bool StringShape::IsSliced() { - return (type_ & kStringRepresentationMask) == kSlicedStringTag; -} - - bool StringShape::IsExternal() { return (type_ & kStringRepresentationMask) == kExternalStringTag; } @@ -879,7 +855,7 @@ Failure* Failure::RetryAfterGC(int requested_bytes) { requested = static_cast<intptr_t>( (~static_cast<uintptr_t>(0)) >> (tag_bits + 1)); } - int value = (requested << kSpaceTagSize) | NEW_SPACE; + int value = 
static_cast<int>(requested << kSpaceTagSize) | NEW_SPACE; return Construct(RETRY_AFTER_GC, value); } @@ -1033,9 +1009,9 @@ Address MapWord::DecodeMapAddress(MapSpace* map_space) { int MapWord::DecodeOffset() { // The offset field is represented in the kForwardingOffsetBits // most-significant bits. - int offset = (value_ >> kForwardingOffsetShift) << kObjectAlignmentBits; - ASSERT(0 <= offset && offset < Page::kObjectAreaSize); - return offset; + uintptr_t offset = (value_ >> kForwardingOffsetShift) << kObjectAlignmentBits; + ASSERT(offset < static_cast<uintptr_t>(Page::kObjectAreaSize)); + return static_cast<int>(offset); } @@ -1610,7 +1586,6 @@ CAST_ACCESSOR(SeqString) CAST_ACCESSOR(SeqAsciiString) CAST_ACCESSOR(SeqTwoByteString) CAST_ACCESSOR(ConsString) -CAST_ACCESSOR(SlicedString) CAST_ACCESSOR(ExternalString) CAST_ACCESSOR(ExternalAsciiString) CAST_ACCESSOR(ExternalTwoByteString) @@ -1721,9 +1696,6 @@ uint16_t String::Get(int index) { case kConsStringTag | kAsciiStringTag: case kConsStringTag | kTwoByteStringTag: return ConsString::cast(this)->ConsStringGet(index); - case kSlicedStringTag | kAsciiStringTag: - case kSlicedStringTag | kTwoByteStringTag: - return SlicedString::cast(this)->SlicedStringGet(index); case kExternalStringTag | kAsciiStringTag: return ExternalAsciiString::cast(this)->ExternalAsciiStringGet(index); case kExternalStringTag | kTwoByteStringTag: @@ -1754,11 +1726,6 @@ bool String::IsFlat() { // Only flattened strings have second part empty. 
return second->length() == 0; } - case kSlicedStringTag: { - StringRepresentationTag tag = - StringShape(SlicedString::cast(this)->buffer()).representation_tag(); - return tag == kSeqStringTag || tag == kExternalStringTag; - } default: return true; } @@ -1872,27 +1839,6 @@ void ConsString::set_second(String* value, WriteBarrierMode mode) { } -String* SlicedString::buffer() { - return String::cast(READ_FIELD(this, kBufferOffset)); -} - - -void SlicedString::set_buffer(String* buffer) { - WRITE_FIELD(this, kBufferOffset, buffer); - WRITE_BARRIER(this, kBufferOffset); -} - - -int SlicedString::start() { - return READ_INT_FIELD(this, kStartOffset); -} - - -void SlicedString::set_start(int start) { - WRITE_INT_FIELD(this, kStartOffset, start); -} - - ExternalAsciiString::Resource* ExternalAsciiString::resource() { return *reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset)); } @@ -2436,6 +2382,7 @@ ACCESSORS(AccessorInfo, setter, Object, kSetterOffset) ACCESSORS(AccessorInfo, data, Object, kDataOffset) ACCESSORS(AccessorInfo, name, Object, kNameOffset) ACCESSORS(AccessorInfo, flag, Smi, kFlagOffset) +ACCESSORS(AccessorInfo, load_stub_cache, Object, kLoadStubCacheOffset) ACCESSORS(AccessCheckInfo, named_callback, Object, kNamedCallbackOffset) ACCESSORS(AccessCheckInfo, indexed_callback, Object, kIndexedCallbackOffset) @@ -2494,7 +2441,8 @@ ACCESSORS(Script, context_data, Object, kContextOffset) ACCESSORS(Script, wrapper, Proxy, kWrapperOffset) ACCESSORS(Script, type, Smi, kTypeOffset) ACCESSORS(Script, compilation_type, Smi, kCompilationTypeOffset) -ACCESSORS(Script, line_ends, Object, kLineEndsOffset) +ACCESSORS(Script, line_ends_fixed_array, Object, kLineEndsFixedArrayOffset) +ACCESSORS(Script, line_ends_js_array, Object, kLineEndsJSArrayOffset) ACCESSORS(Script, eval_from_function, Object, kEvalFromFunctionOffset) ACCESSORS(Script, eval_from_instructions_offset, Smi, kEvalFrominstructionsOffsetOffset) @@ -2533,12 +2481,12 @@ 
BOOL_ACCESSORS(SharedFunctionInfo, start_position_and_type, is_expression, BOOL_ACCESSORS(SharedFunctionInfo, start_position_and_type, is_toplevel, kIsTopLevelBit) BOOL_GETTER(SharedFunctionInfo, compiler_hints, - has_only_this_property_assignments, - kHasOnlyThisPropertyAssignments) -BOOL_GETTER(SharedFunctionInfo, compiler_hints, has_only_simple_this_property_assignments, kHasOnlySimpleThisPropertyAssignments) - +BOOL_ACCESSORS(SharedFunctionInfo, + compiler_hints, + try_fast_codegen, + kTryFastCodegen) INT_ACCESSORS(SharedFunctionInfo, length, kLengthOffset) INT_ACCESSORS(SharedFunctionInfo, formal_parameter_count, @@ -3046,6 +2994,43 @@ PropertyAttributes JSObject::GetPropertyAttribute(String* key) { return GetPropertyAttributeWithReceiver(this, key); } +// TODO(504): this may be useful in other places too where JSGlobalProxy +// is used. +Object* JSObject::BypassGlobalProxy() { + if (IsJSGlobalProxy()) { + Object* proto = GetPrototype(); + if (proto->IsNull()) return Heap::undefined_value(); + ASSERT(proto->IsJSGlobalObject()); + return proto; + } + return this; +} + + +bool JSObject::HasHiddenPropertiesObject() { + ASSERT(!IsJSGlobalProxy()); + return GetPropertyAttributePostInterceptor(this, + Heap::hidden_symbol(), + false) != ABSENT; +} + + +Object* JSObject::GetHiddenPropertiesObject() { + ASSERT(!IsJSGlobalProxy()); + PropertyAttributes attributes; + return GetLocalPropertyPostInterceptor(this, + Heap::hidden_symbol(), + &attributes); +} + + +Object* JSObject::SetHiddenPropertiesObject(Object* hidden_obj) { + ASSERT(!IsJSGlobalProxy()); + return SetPropertyPostInterceptor(Heap::hidden_symbol(), + hidden_obj, + DONT_ENUM); +} + bool JSObject::HasElement(uint32_t index) { return HasElementWithReceiver(this, index); diff --git a/deps/v8/src/objects.cc b/deps/v8/src/objects.cc index b14ec5c1a0..5ccacb7860 100644 --- a/deps/v8/src/objects.cc +++ b/deps/v8/src/objects.cc @@ -683,23 +683,6 @@ Object* String::TryFlatten() { #endif switch 
(StringShape(this).representation_tag()) { - case kSlicedStringTag: { - SlicedString* ss = SlicedString::cast(this); - // The SlicedString constructor should ensure that there are no - // SlicedStrings that are constructed directly on top of other - // SlicedStrings. - String* buf = ss->buffer(); - ASSERT(!buf->IsSlicedString()); - Object* ok = buf->TryFlatten(); - if (ok->IsFailure()) return ok; - // Under certain circumstances (TryFlattenIfNotFlat fails in - // String::Slice) we can have a cons string under a slice. - // In this case we need to get the flat string out of the cons! - if (StringShape(String::cast(ok)).IsCons()) { - ss->set_buffer(ConsString::cast(ok)->first()); - } - return this; - } case kConsStringTag: { ConsString* cs = ConsString::cast(this); if (cs->second()->length() == 0) { @@ -1135,8 +1118,14 @@ void HeapObject::IterateBody(InstanceType type, int object_size, case kConsStringTag: reinterpret_cast<ConsString*>(this)->ConsStringIterateBody(v); break; - case kSlicedStringTag: - reinterpret_cast<SlicedString*>(this)->SlicedStringIterateBody(v); + case kExternalStringTag: + if ((type & kStringEncodingMask) == kAsciiStringTag) { + reinterpret_cast<ExternalAsciiString*>(this)-> + ExternalAsciiStringIterateBody(v); + } else { + reinterpret_cast<ExternalTwoByteString*>(this)-> + ExternalTwoByteStringIterateBody(v); + } break; } return; @@ -3562,12 +3551,7 @@ Vector<const char> String::ToAsciiVector() { int length = this->length(); StringRepresentationTag string_tag = StringShape(this).representation_tag(); String* string = this; - if (string_tag == kSlicedStringTag) { - SlicedString* sliced = SlicedString::cast(string); - offset += sliced->start(); - string = sliced->buffer(); - string_tag = StringShape(string).representation_tag(); - } else if (string_tag == kConsStringTag) { + if (string_tag == kConsStringTag) { ConsString* cons = ConsString::cast(string); ASSERT(cons->second()->length() == 0); string = cons->first(); @@ -3593,12 +3577,7 @@ 
Vector<const uc16> String::ToUC16Vector() { int length = this->length(); StringRepresentationTag string_tag = StringShape(this).representation_tag(); String* string = this; - if (string_tag == kSlicedStringTag) { - SlicedString* sliced = SlicedString::cast(string); - offset += sliced->start(); - string = String::cast(sliced->buffer()); - string_tag = StringShape(string).representation_tag(); - } else if (string_tag == kConsStringTag) { + if (string_tag == kConsStringTag) { ConsString* cons = ConsString::cast(string); ASSERT(cons->second()->length() == 0); string = cons->first(); @@ -3689,17 +3668,6 @@ const uc16* String::GetTwoByteData(unsigned start) { case kExternalStringTag: return ExternalTwoByteString::cast(this)-> ExternalTwoByteStringGetData(start); - case kSlicedStringTag: { - SlicedString* sliced_string = SlicedString::cast(this); - String* buffer = sliced_string->buffer(); - if (StringShape(buffer).IsCons()) { - ConsString* cs = ConsString::cast(buffer); - // Flattened string. 
- ASSERT(cs->second()->length() == 0); - buffer = cs->first(); - } - return buffer->GetTwoByteData(start + sliced_string->start()); - } case kConsStringTag: UNREACHABLE(); return NULL; @@ -3854,22 +3822,6 @@ const unibrow::byte* ConsString::ConsStringReadBlock(ReadBlockBuffer* rbb, } -const unibrow::byte* SlicedString::SlicedStringReadBlock(ReadBlockBuffer* rbb, - unsigned* offset_ptr, - unsigned max_chars) { - String* backing = buffer(); - unsigned offset = start() + *offset_ptr; - unsigned length = backing->length(); - if (max_chars > length - offset) { - max_chars = length - offset; - } - const unibrow::byte* answer = - String::ReadBlock(backing, rbb, &offset, max_chars); - *offset_ptr = offset - start(); - return answer; -} - - uint16_t ExternalAsciiString::ExternalAsciiStringGet(int index) { ASSERT(index >= 0 && index < length()); return resource()->data()[index]; @@ -3993,10 +3945,6 @@ const unibrow::byte* String::ReadBlock(String* input, return ConsString::cast(input)->ConsStringReadBlock(rbb, offset_ptr, max_chars); - case kSlicedStringTag: - return SlicedString::cast(input)->SlicedStringReadBlock(rbb, - offset_ptr, - max_chars); case kExternalStringTag: if (input->IsAsciiRepresentation()) { return ExternalAsciiString::cast(input)->ExternalAsciiStringReadBlock( @@ -4139,20 +4087,15 @@ void String::ReadBlockIntoBuffer(String* input, offset_ptr, max_chars); return; - case kSlicedStringTag: - SlicedString::cast(input)->SlicedStringReadBlockIntoBuffer(rbb, - offset_ptr, - max_chars); - return; case kExternalStringTag: if (input->IsAsciiRepresentation()) { - ExternalAsciiString::cast(input)-> - ExternalAsciiStringReadBlockIntoBuffer(rbb, offset_ptr, max_chars); - } else { - ExternalTwoByteString::cast(input)-> - ExternalTwoByteStringReadBlockIntoBuffer(rbb, - offset_ptr, - max_chars); + ExternalAsciiString::cast(input)-> + ExternalAsciiStringReadBlockIntoBuffer(rbb, offset_ptr, max_chars); + } else { + ExternalTwoByteString::cast(input)-> + 
ExternalTwoByteStringReadBlockIntoBuffer(rbb, + offset_ptr, + max_chars); } return; default: @@ -4258,20 +4201,6 @@ void ConsString::ConsStringReadBlockIntoBuffer(ReadBlockBuffer* rbb, } -void SlicedString::SlicedStringReadBlockIntoBuffer(ReadBlockBuffer* rbb, - unsigned* offset_ptr, - unsigned max_chars) { - String* backing = buffer(); - unsigned offset = start() + *offset_ptr; - unsigned length = backing->length(); - if (max_chars > length - offset) { - max_chars = length - offset; - } - String::ReadBlockIntoBuffer(backing, rbb, &offset, max_chars); - *offset_ptr = offset - start(); -} - - void ConsString::ConsStringIterateBody(ObjectVisitor* v) { IteratePointers(v, kFirstOffset, kSecondOffset + kPointerSize); } @@ -4350,15 +4279,6 @@ void String::WriteToFlat(String* src, to - from); return; } - case kAsciiStringTag | kSlicedStringTag: - case kTwoByteStringTag | kSlicedStringTag: { - SlicedString* sliced_string = SlicedString::cast(source); - int start = sliced_string->start(); - from += start; - to += start; - source = String::cast(sliced_string->buffer()); - break; - } case kAsciiStringTag | kConsStringTag: case kTwoByteStringTag | kConsStringTag: { ConsString* cons_string = ConsString::cast(source); @@ -4394,18 +4314,23 @@ void String::WriteToFlat(String* src, } -void SlicedString::SlicedStringIterateBody(ObjectVisitor* v) { - IteratePointer(v, kBufferOffset); +#define FIELD_ADDR(p, offset) \ + (reinterpret_cast<byte*>(p) + offset - kHeapObjectTag) + +void ExternalAsciiString::ExternalAsciiStringIterateBody(ObjectVisitor* v) { + typedef v8::String::ExternalAsciiStringResource Resource; + v->VisitExternalAsciiString( + reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset))); } -uint16_t SlicedString::SlicedStringGet(int index) { - ASSERT(index >= 0 && index < this->length()); - // Delegate to the buffer string. 
- String* underlying = buffer(); - return underlying->Get(start() + index); +void ExternalTwoByteString::ExternalTwoByteStringIterateBody(ObjectVisitor* v) { + typedef v8::String::ExternalStringResource Resource; + v->VisitExternalTwoByteString( + reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset))); } +#undef FIELD_ADDR template <typename IteratorA, typename IteratorB> static inline bool CompareStringContents(IteratorA* ia, IteratorB* ib) { @@ -4705,43 +4630,10 @@ uint32_t String::ComputeLengthAndHashField(unibrow::CharacterStream* buffer, } -Object* String::Slice(int start, int end) { +Object* String::SubString(int start, int end) { if (start == 0 && end == length()) return this; - if (StringShape(this).representation_tag() == kSlicedStringTag) { - // Translate slices of a SlicedString into slices of the - // underlying string buffer. - SlicedString* str = SlicedString::cast(this); - String* buf = str->buffer(); - return Heap::AllocateSlicedString(buf, - str->start() + start, - str->start() + end); - } - Object* result = Heap::AllocateSlicedString(this, start, end); - if (result->IsFailure()) { - return result; - } - // Due to the way we retry after GC on allocation failure we are not allowed - // to fail on allocation after this point. This is the one-allocation rule. - - // Try to flatten a cons string that is under the sliced string. - // This is to avoid memory leaks and possible stack overflows caused by - // building 'towers' of sliced strings on cons strings. - // This may fail due to an allocation failure (when a GC is needed), but it - // will succeed often enough to avoid the problem. We only have to do this - // if Heap::AllocateSlicedString actually returned a SlicedString. It will - // return flat strings for small slices for efficiency reasons. 
- String* answer = String::cast(result); - if (StringShape(answer).IsSliced() && - StringShape(this).representation_tag() == kConsStringTag) { - TryFlatten(); - // If the flatten succeeded we might as well make the sliced string point - // to the flat string rather than the cons string. - String* second = ConsString::cast(this)->second(); - if (second->length() == 0) { - SlicedString::cast(answer)->set_buffer(ConsString::cast(this)->first()); - } - } - return answer; + Object* result = Heap::AllocateSubString(this, start, end); + return result; } @@ -4921,13 +4813,9 @@ int SharedFunctionInfo::CalculateInObjectProperties() { void SharedFunctionInfo::SetThisPropertyAssignmentsInfo( - bool only_this_property_assignments, bool only_simple_this_property_assignments, FixedArray* assignments) { set_compiler_hints(BooleanBit::set(compiler_hints(), - kHasOnlyThisPropertyAssignments, - only_this_property_assignments)); - set_compiler_hints(BooleanBit::set(compiler_hints(), kHasOnlySimpleThisPropertyAssignments, only_simple_this_property_assignments)); set_this_property_assignments(assignments); @@ -4937,9 +4825,6 @@ void SharedFunctionInfo::SetThisPropertyAssignmentsInfo( void SharedFunctionInfo::ClearThisPropertyAssignmentsInfo() { set_compiler_hints(BooleanBit::set(compiler_hints(), - kHasOnlyThisPropertyAssignments, - false)); - set_compiler_hints(BooleanBit::set(compiler_hints(), kHasOnlySimpleThisPropertyAssignments, false)); set_this_property_assignments(Heap::undefined_value()); @@ -4994,7 +4879,7 @@ void SharedFunctionInfo::SourceCodePrint(StringStream* accumulator, return; } - // Get the slice of the source for this function. + // Get the source for the script which this function came from. // Don't use String::cast because we don't want more assertion errors while // we are already creating a stack dump. 
String* script_source = @@ -5083,7 +4968,7 @@ void Code::CodeIterateBody(ObjectVisitor* v) { } -void Code::Relocate(int delta) { +void Code::Relocate(intptr_t delta) { for (RelocIterator it(this, RelocInfo::kApplyMask); !it.done(); it.next()) { it.rinfo()->apply(delta); } @@ -5149,8 +5034,9 @@ int Code::SourcePosition(Address pc) { // Only look at positions after the current pc. if (it.rinfo()->pc() < pc) { // Get position and distance. - int dist = pc - it.rinfo()->pc(); - int pos = it.rinfo()->data(); + + int dist = static_cast<int>(pc - it.rinfo()->pc()); + int pos = static_cast<int>(it.rinfo()->data()); // If this position is closer than the current candidate or if it has the // same distance as the current candidate and the position is higher then // this position is the new candidate. @@ -5177,7 +5063,7 @@ int Code::SourceStatementPosition(Address pc) { RelocIterator it(this, RelocInfo::kPositionMask); while (!it.done()) { if (RelocInfo::IsStatementPosition(it.rinfo()->rmode())) { - int p = it.rinfo()->data(); + int p = static_cast<int>(it.rinfo()->data()); if (statement_position < p && p <= position) { statement_position = p; } @@ -6284,6 +6170,17 @@ Object* JSObject::GetPropertyPostInterceptor(JSObject* receiver, return pt->GetPropertyWithReceiver(receiver, name, attributes); } +Object* JSObject::GetLocalPropertyPostInterceptor( + JSObject* receiver, + String* name, + PropertyAttributes* attributes) { + // Check local property in holder, ignore interceptor. 
+ LookupResult result; + LocalLookupRealNamedProperty(name, &result); + if (!result.IsValid()) return Heap::undefined_value(); + return GetProperty(receiver, &result, name, attributes); +} + Object* JSObject::GetPropertyWithInterceptor( JSObject* receiver, diff --git a/deps/v8/src/objects.h b/deps/v8/src/objects.h index 89cbd44637..6ea0a820c7 100644 --- a/deps/v8/src/objects.h +++ b/deps/v8/src/objects.h @@ -78,7 +78,6 @@ // - SeqAsciiString // - SeqTwoByteString // - ConsString -// - SlicedString // - ExternalString // - ExternalAsciiString // - ExternalTwoByteString @@ -210,7 +209,7 @@ enum PropertyNormalizationMode { // considered TWO_BYTE. It is not mentioned in the name. ASCII encoding is // mentioned explicitly in the name. Likewise, the default representation is // considered sequential. It is not mentioned in the name. The other -// representations (eg, CONS, SLICED, EXTERNAL) are explicitly mentioned. +// representations (eg, CONS, EXTERNAL) are explicitly mentioned. // Finally, the string is either a SYMBOL_TYPE (if it is a symbol) or a // STRING_TYPE (if it is not a symbol). 
// @@ -235,12 +234,6 @@ enum PropertyNormalizationMode { V(SHORT_CONS_ASCII_SYMBOL_TYPE) \ V(MEDIUM_CONS_ASCII_SYMBOL_TYPE) \ V(LONG_CONS_ASCII_SYMBOL_TYPE) \ - V(SHORT_SLICED_SYMBOL_TYPE) \ - V(MEDIUM_SLICED_SYMBOL_TYPE) \ - V(LONG_SLICED_SYMBOL_TYPE) \ - V(SHORT_SLICED_ASCII_SYMBOL_TYPE) \ - V(MEDIUM_SLICED_ASCII_SYMBOL_TYPE) \ - V(LONG_SLICED_ASCII_SYMBOL_TYPE) \ V(SHORT_EXTERNAL_SYMBOL_TYPE) \ V(MEDIUM_EXTERNAL_SYMBOL_TYPE) \ V(LONG_EXTERNAL_SYMBOL_TYPE) \ @@ -259,12 +252,6 @@ enum PropertyNormalizationMode { V(SHORT_CONS_ASCII_STRING_TYPE) \ V(MEDIUM_CONS_ASCII_STRING_TYPE) \ V(LONG_CONS_ASCII_STRING_TYPE) \ - V(SHORT_SLICED_STRING_TYPE) \ - V(MEDIUM_SLICED_STRING_TYPE) \ - V(LONG_SLICED_STRING_TYPE) \ - V(SHORT_SLICED_ASCII_STRING_TYPE) \ - V(MEDIUM_SLICED_ASCII_STRING_TYPE) \ - V(LONG_SLICED_ASCII_STRING_TYPE) \ V(SHORT_EXTERNAL_STRING_TYPE) \ V(MEDIUM_EXTERNAL_STRING_TYPE) \ V(LONG_EXTERNAL_STRING_TYPE) \ @@ -380,30 +367,6 @@ enum PropertyNormalizationMode { ConsString::kSize, \ long_cons_ascii_symbol, \ LongConsAsciiSymbol) \ - V(SHORT_SLICED_SYMBOL_TYPE, \ - SlicedString::kSize, \ - short_sliced_symbol, \ - ShortSlicedSymbol) \ - V(MEDIUM_SLICED_SYMBOL_TYPE, \ - SlicedString::kSize, \ - medium_sliced_symbol, \ - MediumSlicedSymbol) \ - V(LONG_SLICED_SYMBOL_TYPE, \ - SlicedString::kSize, \ - long_sliced_symbol, \ - LongSlicedSymbol) \ - V(SHORT_SLICED_ASCII_SYMBOL_TYPE, \ - SlicedString::kSize, \ - short_sliced_ascii_symbol, \ - ShortSlicedAsciiSymbol) \ - V(MEDIUM_SLICED_ASCII_SYMBOL_TYPE, \ - SlicedString::kSize, \ - medium_sliced_ascii_symbol, \ - MediumSlicedAsciiSymbol) \ - V(LONG_SLICED_ASCII_SYMBOL_TYPE, \ - SlicedString::kSize, \ - long_sliced_ascii_symbol, \ - LongSlicedAsciiSymbol) \ V(SHORT_EXTERNAL_SYMBOL_TYPE, \ ExternalTwoByteString::kSize, \ short_external_symbol, \ @@ -476,30 +439,6 @@ enum PropertyNormalizationMode { ConsString::kSize, \ long_cons_ascii_string, \ LongConsAsciiString) \ - V(SHORT_SLICED_STRING_TYPE, \ - SlicedString::kSize, 
\ - short_sliced_string, \ - ShortSlicedString) \ - V(MEDIUM_SLICED_STRING_TYPE, \ - SlicedString::kSize, \ - medium_sliced_string, \ - MediumSlicedString) \ - V(LONG_SLICED_STRING_TYPE, \ - SlicedString::kSize, \ - long_sliced_string, \ - LongSlicedString) \ - V(SHORT_SLICED_ASCII_STRING_TYPE, \ - SlicedString::kSize, \ - short_sliced_ascii_string, \ - ShortSlicedAsciiString) \ - V(MEDIUM_SLICED_ASCII_STRING_TYPE, \ - SlicedString::kSize, \ - medium_sliced_ascii_string, \ - MediumSlicedAsciiString) \ - V(LONG_SLICED_ASCII_STRING_TYPE, \ - SlicedString::kSize, \ - long_sliced_ascii_string, \ - LongSlicedAsciiString) \ V(SHORT_EXTERNAL_STRING_TYPE, \ ExternalTwoByteString::kSize, \ short_external_string, \ @@ -591,7 +530,6 @@ const uint32_t kStringRepresentationMask = 0x03; enum StringRepresentationTag { kSeqStringTag = 0x0, kConsStringTag = 0x1, - kSlicedStringTag = 0x2, kExternalStringTag = 0x3 }; @@ -627,15 +565,6 @@ enum InstanceType { kMediumStringTag | kAsciiStringTag | kSymbolTag | kConsStringTag, LONG_CONS_ASCII_SYMBOL_TYPE = kLongStringTag | kAsciiStringTag | kSymbolTag | kConsStringTag, - SHORT_SLICED_SYMBOL_TYPE = kShortStringTag | kSymbolTag | kSlicedStringTag, - MEDIUM_SLICED_SYMBOL_TYPE = kMediumStringTag | kSymbolTag | kSlicedStringTag, - LONG_SLICED_SYMBOL_TYPE = kLongStringTag | kSymbolTag | kSlicedStringTag, - SHORT_SLICED_ASCII_SYMBOL_TYPE = - kShortStringTag | kAsciiStringTag | kSymbolTag | kSlicedStringTag, - MEDIUM_SLICED_ASCII_SYMBOL_TYPE = - kMediumStringTag | kAsciiStringTag | kSymbolTag | kSlicedStringTag, - LONG_SLICED_ASCII_SYMBOL_TYPE = - kLongStringTag | kAsciiStringTag | kSymbolTag | kSlicedStringTag, SHORT_EXTERNAL_SYMBOL_TYPE = kShortStringTag | kSymbolTag | kExternalStringTag, MEDIUM_EXTERNAL_SYMBOL_TYPE = @@ -662,15 +591,6 @@ enum InstanceType { kMediumStringTag | kAsciiStringTag | kConsStringTag, LONG_CONS_ASCII_STRING_TYPE = kLongStringTag | kAsciiStringTag | kConsStringTag, - SHORT_SLICED_STRING_TYPE = kShortStringTag | 
kSlicedStringTag, - MEDIUM_SLICED_STRING_TYPE = kMediumStringTag | kSlicedStringTag, - LONG_SLICED_STRING_TYPE = kLongStringTag | kSlicedStringTag, - SHORT_SLICED_ASCII_STRING_TYPE = - kShortStringTag | kAsciiStringTag | kSlicedStringTag, - MEDIUM_SLICED_ASCII_STRING_TYPE = - kMediumStringTag | kAsciiStringTag | kSlicedStringTag, - LONG_SLICED_ASCII_STRING_TYPE = - kLongStringTag | kAsciiStringTag | kSlicedStringTag, SHORT_EXTERNAL_STRING_TYPE = kShortStringTag | kExternalStringTag, MEDIUM_EXTERNAL_STRING_TYPE = kMediumStringTag | kExternalStringTag, LONG_EXTERNAL_STRING_TYPE = kLongStringTag | kExternalStringTag, @@ -790,16 +710,13 @@ class Object BASE_EMBEDDED { inline bool IsHeapNumber(); inline bool IsString(); inline bool IsSymbol(); -#ifdef DEBUG // See objects-inl.h for more details inline bool IsSeqString(); - inline bool IsSlicedString(); inline bool IsExternalString(); inline bool IsExternalTwoByteString(); inline bool IsExternalAsciiString(); inline bool IsSeqTwoByteString(); inline bool IsSeqAsciiString(); -#endif // DEBUG inline bool IsConsString(); inline bool IsNumber(); @@ -1490,6 +1407,9 @@ class JSObject: public HeapObject { Object* GetPropertyPostInterceptor(JSObject* receiver, String* name, PropertyAttributes* attributes); + Object* GetLocalPropertyPostInterceptor(JSObject* receiver, + String* name, + PropertyAttributes* attributes); Object* GetLazyProperty(Object* receiver, LookupResult* result, String* name, @@ -1511,6 +1431,27 @@ class JSObject: public HeapObject { return GetLocalPropertyAttribute(name) != ABSENT; } + // If the receiver is a JSGlobalProxy this method will return its prototype, + // otherwise the result is the receiver itself. + inline Object* BypassGlobalProxy(); + + // Accessors for hidden properties object. + // + // Hidden properties are not local properties of the object itself. + // Instead they are stored on an auxiliary JSObject stored as a local + // property with a special name Heap::hidden_symbol(). 
But if the + // receiver is a JSGlobalProxy then the auxiliary object is a property + // of its prototype. + // + // Has/Get/SetHiddenPropertiesObject methods don't allow the holder to be + // a JSGlobalProxy. Use BypassGlobalProxy method above to get to the real + // holder. + // + // These accessors do not touch interceptors or accessors. + inline bool HasHiddenPropertiesObject(); + inline Object* GetHiddenPropertiesObject(); + inline Object* SetHiddenPropertiesObject(Object* hidden_obj); + Object* DeleteProperty(String* name, DeleteMode mode); Object* DeleteElement(uint32_t index, DeleteMode mode); Object* DeleteLazyProperty(LookupResult* result, @@ -2873,7 +2814,7 @@ class Code: public HeapObject { // Relocate the code by delta bytes. Called to signal that this code // object has been moved by delta bytes. - void Relocate(int delta); + void Relocate(intptr_t delta); // Migrate code described by desc. void CopyFrom(const CodeDesc& desc); @@ -2910,7 +2851,8 @@ class Code: public HeapObject { void CodeVerify(); #endif // Code entry points are aligned to 32 bytes. - static const int kCodeAlignment = 32; + static const int kCodeAlignmentBits = 5; + static const int kCodeAlignment = 1 << kCodeAlignmentBits; static const int kCodeAlignmentMask = kCodeAlignment - 1; // Layout description. @@ -3238,8 +3180,11 @@ class Script: public Struct { // [compilation]: how the the script was compiled. DECL_ACCESSORS(compilation_type, Smi) - // [line_ends]: array of line ends positions. - DECL_ACCESSORS(line_ends, Object) + // [line_ends]: FixedArray of line ends positions. + DECL_ACCESSORS(line_ends_fixed_array, Object) + + // [line_ends]: JSArray of line ends positions. + DECL_ACCESSORS(line_ends_js_array, Object) // [eval_from_function]: for eval scripts the funcion from which eval was // called. 
@@ -3269,8 +3214,16 @@ class Script: public Struct { static const int kWrapperOffset = kContextOffset + kPointerSize; static const int kTypeOffset = kWrapperOffset + kPointerSize; static const int kCompilationTypeOffset = kTypeOffset + kPointerSize; - static const int kLineEndsOffset = kCompilationTypeOffset + kPointerSize; - static const int kIdOffset = kLineEndsOffset + kPointerSize; + // We have the line ends array both in FixedArray form and in JSArray form. + // The FixedArray form is useful when we don't have a context and so can't + // create a JSArray. The JSArray form is useful when we want to see the + // array from JS code (e.g. debug-delay.js) which cannot handle unboxed + // FixedArray objects. + static const int kLineEndsFixedArrayOffset = + kCompilationTypeOffset + kPointerSize; + static const int kLineEndsJSArrayOffset = + kLineEndsFixedArrayOffset + kPointerSize; + static const int kIdOffset = kLineEndsJSArrayOffset + kPointerSize; static const int kEvalFromFunctionOffset = kIdOffset + kPointerSize; static const int kEvalFrominstructionsOffsetOffset = kEvalFromFunctionOffset + kPointerSize; @@ -3371,7 +3324,6 @@ class SharedFunctionInfo: public HeapObject { // Add information on assignments of the form this.x = ...; void SetThisPropertyAssignmentsInfo( - bool has_only_this_property_assignments, bool has_only_simple_this_property_assignments, FixedArray* this_property_assignments); @@ -3379,13 +3331,12 @@ class SharedFunctionInfo: public HeapObject { void ClearThisPropertyAssignmentsInfo(); // Indicate that this function only consists of assignments of the form - // this.x = ...;. - inline bool has_only_this_property_assignments(); - - // Indicate that this function only consists of assignments of the form // this.x = y; where y is either a constant or refers to an argument. 
inline bool has_only_simple_this_property_assignments(); + inline bool try_fast_codegen(); + inline void set_try_fast_codegen(bool flag); + // For functions which only contains this property assignments this provides // access to the names for the properties assigned. DECL_ACCESSORS(this_property_assignments, Object) @@ -3464,8 +3415,8 @@ class SharedFunctionInfo: public HeapObject { static const int kStartPositionMask = ~((1 << kStartPositionShift) - 1); // Bit positions in compiler_hints. - static const int kHasOnlyThisPropertyAssignments = 0; - static const int kHasOnlySimpleThisPropertyAssignments = 1; + static const int kHasOnlySimpleThisPropertyAssignments = 0; + static const int kTryFastCodegen = 1; DISALLOW_IMPLICIT_CONSTRUCTORS(SharedFunctionInfo); }; @@ -3917,7 +3868,6 @@ class StringShape BASE_EMBEDDED { inline bool IsSequential(); inline bool IsExternal(); inline bool IsCons(); - inline bool IsSliced(); inline bool IsExternalAscii(); inline bool IsExternalTwoByte(); inline bool IsSequentialAscii(); @@ -3975,9 +3925,8 @@ class String: public HeapObject { inline uint16_t Get(int index); // Try to flatten the top level ConsString that is hiding behind this - // string. This is a no-op unless the string is a ConsString or a - // SlicedString. Flatten mutates the ConsString and might return a - // failure. + // string. This is a no-op unless the string is a ConsString. Flatten + // mutates the ConsString and might return a failure. Object* TryFlatten(); // Try to flatten the string. Checks first inline to see if it is necessary. @@ -3993,8 +3942,8 @@ class String: public HeapObject { // ascii and two byte string types. bool MarkAsUndetectable(); - // Slice the string and return a substring. - Object* Slice(int from, int to); + // Return a substring. + Object* SubString(int from, int to); // String equality operations. 
inline bool Equals(String* other); @@ -4079,7 +4028,7 @@ class String: public HeapObject { static const unsigned kMaxAsciiCharCodeU = unibrow::Utf8::kMaxOneByteChar; static const int kMaxUC16CharCode = 0xffff; - // Minimum length for a cons or sliced string. + // Minimum length for a cons string. static const int kMinNonFlatLength = 13; // Mask constant for checking if a string has a computed hash code @@ -4152,12 +4101,6 @@ class String: public HeapObject { unsigned remaining; }; - // NOTE: If you call StringInputBuffer routines on strings that are - // too deeply nested trees of cons and slice strings, then this - // routine will overflow the stack. Strings that are merely deeply - // nested trees of cons strings do not have a problem apart from - // performance. - static inline const unibrow::byte* ReadBlock(String* input, ReadBlockBuffer* buffer, unsigned* offset, @@ -4342,56 +4285,6 @@ class ConsString: public String { }; -// The SlicedString class describes string values that are slices of -// some other string. SlicedStrings consist of a reference to an -// underlying heap-allocated string value, a start index, and the -// length field common to all strings. -class SlicedString: public String { - public: - // The underlying string buffer. - inline String* buffer(); - inline void set_buffer(String* buffer); - - // The start index of the slice. - inline int start(); - inline void set_start(int start); - - // Dispatched behavior. - uint16_t SlicedStringGet(int index); - - // Casting. - static inline SlicedString* cast(Object* obj); - - // Garbage collection support. - void SlicedStringIterateBody(ObjectVisitor* v); - - // Layout description -#if V8_HOST_ARCH_64_BIT - // Optimizations expect buffer to be located at same offset as a ConsString's - // first substring. In 64 bit mode we have room for the start offset before - // the buffer. 
- static const int kStartOffset = String::kSize; - static const int kBufferOffset = kStartOffset + kIntSize; - static const int kSize = kBufferOffset + kPointerSize; -#else - static const int kBufferOffset = String::kSize; - static const int kStartOffset = kBufferOffset + kPointerSize; - static const int kSize = kStartOffset + kIntSize; -#endif - - // Support for StringInputBuffer. - inline const unibrow::byte* SlicedStringReadBlock(ReadBlockBuffer* buffer, - unsigned* offset_ptr, - unsigned chars); - inline void SlicedStringReadBlockIntoBuffer(ReadBlockBuffer* buffer, - unsigned* offset_ptr, - unsigned chars); - - private: - DISALLOW_IMPLICIT_CONSTRUCTORS(SlicedString); -}; - - // The ExternalString class describes string values that are backed by // a string resource that lies outside the V8 heap. ExternalStrings // consist of the length field common to all strings, a pointer to the @@ -4433,6 +4326,9 @@ class ExternalAsciiString: public ExternalString { // Casting. static inline ExternalAsciiString* cast(Object* obj); + // Garbage collection support. + void ExternalAsciiStringIterateBody(ObjectVisitor* v); + // Support for StringInputBuffer. const unibrow::byte* ExternalAsciiStringReadBlock(unsigned* remaining, unsigned* offset, @@ -4468,6 +4364,9 @@ class ExternalTwoByteString: public ExternalString { // Casting. static inline ExternalTwoByteString* cast(Object* obj); + // Garbage collection support. + void ExternalTwoByteStringIterateBody(ObjectVisitor* v); + // Support for StringInputBuffer. 
void ExternalTwoByteStringReadBlockIntoBuffer(ReadBlockBuffer* buffer, unsigned* offset_ptr, @@ -4719,6 +4618,7 @@ class AccessorInfo: public Struct { DECL_ACCESSORS(data, Object) DECL_ACCESSORS(name, Object) DECL_ACCESSORS(flag, Smi) + DECL_ACCESSORS(load_stub_cache, Object) inline bool all_can_read(); inline void set_all_can_read(bool value); @@ -4744,7 +4644,8 @@ class AccessorInfo: public Struct { static const int kDataOffset = kSetterOffset + kPointerSize; static const int kNameOffset = kDataOffset + kPointerSize; static const int kFlagOffset = kNameOffset + kPointerSize; - static const int kSize = kFlagOffset + kPointerSize; + static const int kLoadStubCacheOffset = kFlagOffset + kPointerSize; + static const int kSize = kLoadStubCacheOffset + kPointerSize; private: // Bit positions in flag. @@ -5097,6 +4998,12 @@ class ObjectVisitor BASE_EMBEDDED { // Visits a runtime entry in the instruction stream. virtual void VisitRuntimeEntry(RelocInfo* rinfo) {} + // Visits the resource of an ASCII or two-byte string. + virtual void VisitExternalAsciiString( + v8::String::ExternalAsciiStringResource** resource) {} + virtual void VisitExternalTwoByteString( + v8::String::ExternalStringResource** resource) {} + // Visits a debug call target in the instruction stream. 
virtual void VisitDebugTarget(RelocInfo* rinfo); diff --git a/deps/v8/src/parser.cc b/deps/v8/src/parser.cc index 02fcfdc594..c37078ce64 100644 --- a/deps/v8/src/parser.cc +++ b/deps/v8/src/parser.cc @@ -676,17 +676,12 @@ class TemporaryScope BASE_EMBEDDED { int materialized_literal_count() { return materialized_literal_count_; } void SetThisPropertyAssignmentInfo( - bool only_this_property_assignments, bool only_simple_this_property_assignments, Handle<FixedArray> this_property_assignments) { - only_this_property_assignments_ = only_this_property_assignments; only_simple_this_property_assignments_ = only_simple_this_property_assignments; this_property_assignments_ = this_property_assignments; } - bool only_this_property_assignments() { - return only_this_property_assignments_; - } bool only_simple_this_property_assignments() { return only_simple_this_property_assignments_; } @@ -705,7 +700,6 @@ class TemporaryScope BASE_EMBEDDED { // Properties count estimation. int expected_property_count_; - bool only_this_property_assignments_; bool only_simple_this_property_assignments_; Handle<FixedArray> this_property_assignments_; @@ -720,7 +714,6 @@ class TemporaryScope BASE_EMBEDDED { TemporaryScope::TemporaryScope(Parser* parser) : materialized_literal_count_(0), expected_property_count_(0), - only_this_property_assignments_(false), only_simple_this_property_assignments_(false), this_property_assignments_(Factory::empty_fixed_array()), parser_(parser), @@ -1227,7 +1220,6 @@ FunctionLiteral* Parser::ParseProgram(Handle<String> source, body.elements(), temp_scope.materialized_literal_count(), temp_scope.expected_property_count(), - temp_scope.only_this_property_assignments(), temp_scope.only_simple_this_property_assignments(), temp_scope.this_property_assignments(), 0, @@ -1339,7 +1331,7 @@ class ParserFinder { // An InitializationBlockFinder finds and marks sequences of statements of the -// form x.y.z.a = ...; x.y.z.b = ...; etc. +// form expr.a = ...; expr.b = ...; etc. 
class InitializationBlockFinder : public ParserFinder { public: InitializationBlockFinder() @@ -1367,7 +1359,7 @@ class InitializationBlockFinder : public ParserFinder { private: // Returns true if the expressions appear to denote the same object. // In the context of initialization blocks, we only consider expressions - // of the form 'x.y.z'. + // of the form 'expr.x' or expr["x"]. static bool SameObject(Expression* e1, Expression* e2) { VariableProxy* v1 = e1->AsVariableProxy(); VariableProxy* v2 = e2->AsVariableProxy(); @@ -1441,16 +1433,15 @@ class InitializationBlockFinder : public ParserFinder { class ThisNamedPropertyAssigmentFinder : public ParserFinder { public: ThisNamedPropertyAssigmentFinder() - : only_this_property_assignments_(true), - only_simple_this_property_assignments_(true), + : only_simple_this_property_assignments_(true), names_(NULL), assigned_arguments_(NULL), assigned_constants_(NULL) {} void Update(Scope* scope, Statement* stat) { - // Bail out if function already has non this property assignment - // statements. - if (!only_this_property_assignments_) { + // Bail out if function already has property assignment that are + // not simple this property assignments. + if (!only_simple_this_property_assignments_) { return; } @@ -1459,16 +1450,10 @@ class ThisNamedPropertyAssigmentFinder : public ParserFinder { if (IsThisPropertyAssignment(assignment)) { HandleThisPropertyAssignment(scope, assignment); } else { - only_this_property_assignments_ = false; only_simple_this_property_assignments_ = false; } } - // Returns whether only statements of the form this.x = ...; was encountered. - bool only_this_property_assignments() { - return only_this_property_assignments_; - } - // Returns whether only statements of the form this.x = y; where y is either a // constant or a function argument was encountered. 
bool only_simple_this_property_assignments() { @@ -1524,28 +1509,24 @@ class ThisNamedPropertyAssigmentFinder : public ParserFinder { // Constant assigned. Literal* literal = assignment->value()->AsLiteral(); AssignmentFromConstant(key, literal->handle()); + return; } else if (assignment->value()->AsVariableProxy() != NULL) { // Variable assigned. Handle<String> name = assignment->value()->AsVariableProxy()->name(); // Check whether the variable assigned matches an argument name. - int index = -1; for (int i = 0; i < scope->num_parameters(); i++) { if (*scope->parameter(i)->name() == *name) { // Assigned from function argument. - index = i; - break; + AssignmentFromParameter(key, i); + return; } } - if (index != -1) { - AssignmentFromParameter(key, index); - } else { - AssignmentFromSomethingElse(key); - } - } else { - AssignmentFromSomethingElse(key); } } + // It is not a simple "this.x = value;" assignment with a constant + // or parameter value. + AssignmentFromSomethingElse(); } void AssignmentFromParameter(Handle<String> name, int index) { @@ -1562,12 +1543,7 @@ class ThisNamedPropertyAssigmentFinder : public ParserFinder { assigned_constants_->Add(value); } - void AssignmentFromSomethingElse(Handle<String> name) { - EnsureAllocation(); - names_->Add(name); - assigned_arguments_->Add(-1); - assigned_constants_->Add(Factory::undefined_value()); - + void AssignmentFromSomethingElse() { // The this assignment is not a simple one. only_simple_this_property_assignments_ = false; } @@ -1582,7 +1558,6 @@ class ThisNamedPropertyAssigmentFinder : public ParserFinder { } } - bool only_this_property_assignments_; bool only_simple_this_property_assignments_; ZoneStringList* names_; ZoneList<int>* assigned_arguments_; @@ -1623,11 +1598,11 @@ void* Parser::ParseSourceElements(ZoneListWrapper<Statement>* processor, // Propagate the collected information on this property assignments. 
if (top_scope_->is_function_scope()) { - if (this_property_assignment_finder.only_this_property_assignments()) { + bool only_simple_this_property_assignments = + this_property_assignment_finder.only_simple_this_property_assignments(); + if (only_simple_this_property_assignments) { temp_scope_->SetThisPropertyAssignmentInfo( - this_property_assignment_finder.only_this_property_assignments(), - this_property_assignment_finder. - only_simple_this_property_assignments(), + only_simple_this_property_assignments, this_property_assignment_finder.GetThisPropertyAssignments()); } } @@ -2567,6 +2542,12 @@ DoWhileStatement* Parser::ParseDoWhileStatement(ZoneStringList* labels, Statement* body = ParseStatement(NULL, CHECK_OK); Expect(Token::WHILE, CHECK_OK); Expect(Token::LPAREN, CHECK_OK); + + if (loop != NULL) { + int position = scanner().location().beg_pos; + loop->set_condition_position(position); + } + Expression* cond = ParseExpression(true, CHECK_OK); Expect(Token::RPAREN, CHECK_OK); @@ -3624,7 +3605,6 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> var_name, int materialized_literal_count; int expected_property_count; - bool only_this_property_assignments; bool only_simple_this_property_assignments; Handle<FixedArray> this_property_assignments; if (is_lazily_compiled && pre_data() != NULL) { @@ -3634,15 +3614,12 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> var_name, scanner_.SeekForward(end_pos); materialized_literal_count = entry.literal_count(); expected_property_count = entry.property_count(); - only_this_property_assignments = false; only_simple_this_property_assignments = false; this_property_assignments = Factory::empty_fixed_array(); } else { ParseSourceElements(&body, Token::RBRACE, CHECK_OK); materialized_literal_count = temp_scope.materialized_literal_count(); expected_property_count = temp_scope.expected_property_count(); - only_this_property_assignments = - temp_scope.only_this_property_assignments(); 
only_simple_this_property_assignments = temp_scope.only_simple_this_property_assignments(); this_property_assignments = temp_scope.this_property_assignments(); @@ -3664,7 +3641,6 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> var_name, body.elements(), materialized_literal_count, expected_property_count, - only_this_property_assignments, only_simple_this_property_assignments, this_property_assignments, num_parameters, diff --git a/deps/v8/src/platform-freebsd.cc b/deps/v8/src/platform-freebsd.cc index 73d6eeb651..9b452fa9e6 100644 --- a/deps/v8/src/platform-freebsd.cc +++ b/deps/v8/src/platform-freebsd.cc @@ -84,6 +84,11 @@ void OS::Setup() { } +uint64_t OS::CpuFeaturesImpliedByPlatform() { + return 0; // FreeBSD runs on anything. +} + + double OS::nan_value() { return NAN; } diff --git a/deps/v8/src/platform-linux.cc b/deps/v8/src/platform-linux.cc index fe4c31f515..9ce0be011b 100644 --- a/deps/v8/src/platform-linux.cc +++ b/deps/v8/src/platform-linux.cc @@ -84,11 +84,73 @@ void OS::Setup() { } +uint64_t OS::CpuFeaturesImpliedByPlatform() { +#if (defined(__VFP_FP__) && !defined(__SOFTFP__)) + // Here gcc is telling us that we are on an ARM and gcc is assuming that we + // have VFP3 instructions. If gcc can assume it then so can we. + return 1u << VFP3; +#else + return 0; // Linux runs on anything. +#endif +} + + double OS::nan_value() { return NAN; } +#ifdef __arm__ +bool OS::ArmCpuHasFeature(CpuFeature feature) { + const char* search_string = NULL; + const char* file_name = "/proc/cpuinfo"; + // Simple detection of VFP at runtime for Linux. + // It is based on /proc/cpuinfo, which reveals hardware configuration + // to user-space applications. According to ARM (mid 2009), no similar + // facility is universally available on the ARM architectures, + // so it's up to individual OSes to provide such. 
+ // + // This is written as a straight shot one pass parser + // and not using STL string and ifstream because, + // on Linux, it's reading from a (non-mmap-able) + // character special device. + switch (feature) { + case VFP3: + search_string = "vfp"; + break; + default: + UNREACHABLE(); + } + + FILE* f = NULL; + const char* what = search_string; + + if (NULL == (f = fopen(file_name, "r"))) + return false; + + int k; + while (EOF != (k = fgetc(f))) { + if (k == *what) { + ++what; + while ((*what != '\0') && (*what == fgetc(f))) { + ++what; + } + if (*what == '\0') { + fclose(f); + return true; + } else { + what = search_string; + } + } + } + fclose(f); + + // Did not find string in the proc file. + return false; +} +#endif // def __arm__ + + int OS::ActivationFrameAlignment() { #ifdef V8_TARGET_ARCH_ARM // On EABI ARM targets this is required for fp correctness in the @@ -232,7 +294,7 @@ void OS::LogSharedLibraryAddresses() { // This function assumes that the layout of the file is as follows: // hex_start_addr-hex_end_addr rwxp <unused data> [binary_file_name] // If we encounter an unexpected situation we abort scanning further entries. - FILE *fp = fopen("/proc/self/maps", "r"); + FILE* fp = fopen("/proc/self/maps", "r"); if (fp == NULL) return; // Allocate enough room to be able to store a full file name. @@ -603,7 +665,7 @@ typedef uint32_t __sigset_t; typedef struct sigcontext mcontext_t; typedef struct ucontext { uint32_t uc_flags; - struct ucontext *uc_link; + struct ucontext* uc_link; stack_t uc_stack; mcontext_t uc_mcontext; __sigset_t uc_sigmask; diff --git a/deps/v8/src/platform-macos.cc b/deps/v8/src/platform-macos.cc index 0b236a5a93..d79cff14ac 100644 --- a/deps/v8/src/platform-macos.cc +++ b/deps/v8/src/platform-macos.cc @@ -244,6 +244,14 @@ void OS::LogSharedLibraryAddresses() { } +uint64_t OS::CpuFeaturesImpliedByPlatform() { + // MacOSX requires all these to install so we can assume they are present. 
+ // These constants are defined by the CPUid instructions. + const uint64_t one = 1; + return (one << SSE2) | (one << CMOV) | (one << RDTSC) | (one << CPUID); +} + + double OS::nan_value() { return NAN; } diff --git a/deps/v8/src/platform-nullos.cc b/deps/v8/src/platform-nullos.cc index 084880e394..656c317b62 100644 --- a/deps/v8/src/platform-nullos.cc +++ b/deps/v8/src/platform-nullos.cc @@ -150,11 +150,22 @@ int OS::VSNPrintF(char* str, size_t size, const char* format, va_list args) { } +uint64_t OS::CpuFeaturesImpliedByPlatform() { + return 0; +} + + double OS::nan_value() { UNIMPLEMENTED(); return 0; } + +bool OS::ArmCpuHasFeature(CpuFeature feature) { + UNIMPLEMENTED(); +} + + bool OS::IsOutsideAllocatedSpace(void* address) { UNIMPLEMENTED(); return false; diff --git a/deps/v8/src/platform-win32.cc b/deps/v8/src/platform-win32.cc index d1f53194fb..1be4b77f8a 100644 --- a/deps/v8/src/platform-win32.cc +++ b/deps/v8/src/platform-win32.cc @@ -839,7 +839,7 @@ void* OS::Allocate(const size_t requested, size_t* allocated, bool is_executable) { // VirtualAlloc rounds allocated size to page size automatically. - size_t msize = RoundUp(requested, GetPageSize()); + size_t msize = RoundUp(requested, static_cast<int>(GetPageSize())); // Windows XP SP2 allows Data Excution Prevention (DEP). int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE; @@ -852,7 +852,7 @@ void* OS::Allocate(const size_t requested, ASSERT(IsAligned(reinterpret_cast<size_t>(mbase), OS::AllocateAlignment())); *allocated = msize; - UpdateAllocatedSpaceLimits(mbase, msize); + UpdateAllocatedSpaceLimits(mbase, static_cast<int>(msize)); return mbase; } @@ -1316,6 +1316,11 @@ int OS::StackWalk(Vector<OS::StackFrame> frames) { return 0; } #endif // __MINGW32__ +uint64_t OS::CpuFeaturesImpliedByPlatform() { + return 0; // Windows runs on anything. +} + + double OS::nan_value() { #ifdef _MSC_VER // Positive Quiet NaN with no payload (aka. 
Indeterminate) has all bits @@ -1361,7 +1366,7 @@ bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) { return false; } - UpdateAllocatedSpaceLimits(address, size); + UpdateAllocatedSpaceLimits(address, static_cast<int>(size)); return true; } @@ -1689,7 +1694,9 @@ bool Win32Socket::Connect(const char* host, const char* port) { } // Connect. - status = connect(socket_, result->ai_addr, result->ai_addrlen); + status = connect(socket_, + result->ai_addr, + static_cast<int>(result->ai_addrlen)); freeaddrinfo(result); return status == 0; } diff --git a/deps/v8/src/platform.h b/deps/v8/src/platform.h index fefe4b8569..75e557cb44 100644 --- a/deps/v8/src/platform.h +++ b/deps/v8/src/platform.h @@ -247,9 +247,20 @@ class OS { // for. static void LogSharedLibraryAddresses(); + // The return value indicates the CPU features we are sure of because of the + // OS. For example MacOSX doesn't run on any x86 CPUs that don't have SSE2 + // instructions. + // This is a little messy because the interpretation is subject to the cross + // of the CPU and the OS. The bits in the answer correspond to the bit + // positions indicated by the members of the CpuFeature enum from globals.h + static uint64_t CpuFeaturesImpliedByPlatform(); + // Returns the double constant NAN static double nan_value(); + // Support runtime detection of VFP3 on ARM CPUs. + static bool ArmCpuHasFeature(CpuFeature feature); + // Returns the activation frame alignment constraint or zero if // the platform doesn't care. Guaranteed to be a power of two. 
static int ActivationFrameAlignment(); diff --git a/deps/v8/src/prettyprinter.cc b/deps/v8/src/prettyprinter.cc index 10c1ea8717..87da026426 100644 --- a/deps/v8/src/prettyprinter.cc +++ b/deps/v8/src/prettyprinter.cc @@ -1339,9 +1339,6 @@ void JsonAstBuilder::VisitSlot(Slot* expr) { case Slot::LOOKUP: AddAttribute("type", "LOOKUP"); break; - case Slot::GLOBAL: - AddAttribute("type", "GLOBAL"); - break; } AddAttribute("index", expr->index()); } diff --git a/deps/v8/src/regexp-macro-assembler.cc b/deps/v8/src/regexp-macro-assembler.cc index 0d00ceec35..9ae19d723e 100644 --- a/deps/v8/src/regexp-macro-assembler.cc +++ b/deps/v8/src/regexp-macro-assembler.cc @@ -30,13 +30,7 @@ #include "assembler.h" #include "regexp-stack.h" #include "regexp-macro-assembler.h" -#if V8_TARGET_ARCH_ARM -#include "arm/simulator-arm.h" -#elif V8_TARGET_ARCH_IA32 -#include "ia32/simulator-ia32.h" -#elif V8_TARGET_ARCH_X64 -#include "x64/simulator-x64.h" -#endif +#include "simulator.h" namespace v8 { namespace internal { @@ -130,11 +124,6 @@ NativeRegExpMacroAssembler::Result NativeRegExpMacroAssembler::Match( if (StringShape(subject_ptr).IsCons()) { subject_ptr = ConsString::cast(subject_ptr)->first(); - } else if (StringShape(subject_ptr).IsSliced()) { - SlicedString* slice = SlicedString::cast(subject_ptr); - start_offset += slice->start(); - end_offset += slice->start(); - subject_ptr = slice->buffer(); } // Ensure that an underlying string has the same ascii-ness. 
ASSERT(subject_ptr->IsAsciiRepresentation() == is_ascii); diff --git a/deps/v8/src/regexp-stack.cc b/deps/v8/src/regexp-stack.cc index 87a674dba3..7696279a1d 100644 --- a/deps/v8/src/regexp-stack.cc +++ b/deps/v8/src/regexp-stack.cc @@ -81,7 +81,7 @@ Address RegExpStack::EnsureCapacity(size_t size) { if (size > kMaximumStackSize) return NULL; if (size < kMinimumStackSize) size = kMinimumStackSize; if (thread_local_.memory_size_ < size) { - Address new_memory = NewArray<byte>(size); + Address new_memory = NewArray<byte>(static_cast<int>(size)); if (thread_local_.memory_size_ > 0) { // Copy original memory into top of new memory. memcpy(reinterpret_cast<void*>( diff --git a/deps/v8/src/regexp-stack.h b/deps/v8/src/regexp-stack.h index 319ab2894c..fbaa6fbb51 100644 --- a/deps/v8/src/regexp-stack.h +++ b/deps/v8/src/regexp-stack.h @@ -68,7 +68,9 @@ class RegExpStack { static Address EnsureCapacity(size_t size); // Thread local archiving. - static size_t ArchiveSpacePerThread() { return sizeof(thread_local_); } + static int ArchiveSpacePerThread() { + return static_cast<int>(sizeof(thread_local_)); + } static char* ArchiveStack(char* to); static char* RestoreStack(char* from); static void FreeThreadResources() { thread_local_.Free(); } diff --git a/deps/v8/src/runtime.cc b/deps/v8/src/runtime.cc index 8fd62c986c..ccb88851b8 100644 --- a/deps/v8/src/runtime.cc +++ b/deps/v8/src/runtime.cc @@ -1274,7 +1274,9 @@ static Object* CharCodeAt(String* subject, Object* index) { // Flatten the string. If someone wants to get a char at an index // in a cons string, it is likely that more indices will be // accessed. 
- subject->TryFlattenIfNotFlat(); + Object* flat = subject->TryFlatten(); + if (flat->IsFailure()) return flat; + subject = String::cast(flat); if (i >= static_cast<uint32_t>(subject->length())) { return Heap::nan_value(); } @@ -1357,8 +1359,9 @@ class ReplacementStringBuilder { StringBuilderSubstringPosition::encode(from); AddElement(Smi::FromInt(encoded_slice)); } else { - Handle<String> slice = Factory::NewStringSlice(subject_, from, to); - AddElement(*slice); + // Otherwise encode as two smis. + AddElement(Smi::FromInt(-length)); + AddElement(Smi::FromInt(from)); } IncrementCharacterCount(length); } @@ -1642,16 +1645,14 @@ void CompiledReplacement::Compile(Handle<String> replacement, capture_count, subject_length); } - // Find substrings of replacement string and create them as String objects.. + // Find substrings of replacement string and create them as String objects. int substring_index = 0; for (int i = 0, n = parts_.length(); i < n; i++) { int tag = parts_[i].tag; if (tag <= 0) { // A replacement string slice. int from = -tag; int to = parts_[i].data; - replacement_substrings_.Add(Factory::NewStringSlice(replacement, - from, - to)); + replacement_substrings_.Add(Factory::NewSubString(replacement, from, to)); parts_[i].tag = REPLACEMENT_SUBSTRING; parts_[i].data = substring_index; substring_index++; @@ -1750,8 +1751,9 @@ static Object* StringReplaceRegExpWithString(String* subject, int prev = 0; // Number of parts added by compiled replacement plus preceeding string - // and possibly suffix after last match. - const int parts_added_per_loop = compiled_replacement.parts() + 2; + // and possibly suffix after last match. It is possible for compiled + // replacements to use two elements when encoded as two smis. 
+ const int parts_added_per_loop = compiled_replacement.parts() * 2 + 2; bool matched = true; do { ASSERT(last_match_info_handle->HasFastElements()); @@ -2223,8 +2225,8 @@ int Runtime::StringMatch(Handle<String> sub, if (pos == NULL) { return -1; } - return reinterpret_cast<const char*>(pos) - ascii_vector.start() - + start_index; + return static_cast<int>(reinterpret_cast<const char*>(pos) + - ascii_vector.start() + start_index); } return SingleCharIndexOf(sub->ToUC16Vector(), pat->Get(0), start_index); } @@ -2349,7 +2351,7 @@ static Object* Runtime_StringLocaleCompare(Arguments args) { } -static Object* Runtime_StringSlice(Arguments args) { +static Object* Runtime_SubString(Arguments args) { NoHandleAllocation ha; ASSERT(args.length() == 3); @@ -2363,7 +2365,7 @@ static Object* Runtime_StringSlice(Arguments args) { RUNTIME_ASSERT(end >= start); RUNTIME_ASSERT(start >= 0); RUNTIME_ASSERT(end <= value->length()); - return value->Slice(start, end); + return value->SubString(start, end); } @@ -2410,7 +2412,7 @@ static Object* Runtime_StringMatch(Arguments args) { for (int i = 0; i < matches ; i++) { int from = offsets.at(i * 2); int to = offsets.at(i * 2 + 1); - elements->set(i, *Factory::NewStringSlice(subject, from, to)); + elements->set(i, *Factory::NewSubString(subject, from, to)); } Handle<JSArray> result = Factory::NewJSArrayWithElements(elements); result->set_length(Smi::FromInt(matches)); @@ -3385,8 +3387,7 @@ static Object* Runtime_StringParseInt(Arguments args) { NoHandleAllocation ha; CONVERT_CHECKED(String, s, args[0]); - CONVERT_DOUBLE_CHECKED(n, args[1]); - int radix = FastD2I(n); + CONVERT_SMI_CHECKED(radix, args[1]); s->TryFlattenIfNotFlat(); @@ -3611,7 +3612,7 @@ static Object* Runtime_StringTrim(Arguments args) { right--; } } - return s->Slice(left, right); + return s->SubString(left, right); } bool Runtime::IsUpperCaseChar(uint16_t ch) { @@ -3766,9 +3767,21 @@ static inline void StringBuilderConcatHelper(String* special, for (int i = 0; i < 
array_length; i++) { Object* element = fixed_array->get(i); if (element->IsSmi()) { + // Smi encoding of position and length. int encoded_slice = Smi::cast(element)->value(); - int pos = StringBuilderSubstringPosition::decode(encoded_slice); - int len = StringBuilderSubstringLength::decode(encoded_slice); + int pos; + int len; + if (encoded_slice > 0) { + // Position and length encoded in one smi. + pos = StringBuilderSubstringPosition::decode(encoded_slice); + len = StringBuilderSubstringLength::decode(encoded_slice); + } else { + // Position and length encoded in two smis. + Object* obj = fixed_array->get(++i); + ASSERT(obj->IsSmi()); + pos = Smi::cast(obj)->value(); + len = -encoded_slice; + } String::WriteToFlat(special, sink + position, pos, @@ -3789,6 +3802,10 @@ static Object* Runtime_StringBuilderConcat(Arguments args) { ASSERT(args.length() == 2); CONVERT_CHECKED(JSArray, array, args[0]); CONVERT_CHECKED(String, special, args[1]); + + // This assumption is used by the slice encoding in one or two smis. + ASSERT(Smi::kMaxValue >= String::kMaxLength); + int special_length = special->length(); Object* smi_array_length = array->length(); if (!smi_array_length->IsSmi()) { @@ -3816,13 +3833,29 @@ static Object* Runtime_StringBuilderConcat(Arguments args) { for (int i = 0; i < array_length; i++) { Object* elt = fixed_array->get(i); if (elt->IsSmi()) { + // Smi encoding of position and length. int len = Smi::cast(elt)->value(); - int pos = len >> 11; - len &= 0x7ff; - if (pos + len > special_length) { - return Top::Throw(Heap::illegal_argument_symbol()); + if (len > 0) { + // Position and length encoded in one smi. + int pos = len >> 11; + len &= 0x7ff; + if (pos + len > special_length) { + return Top::Throw(Heap::illegal_argument_symbol()); + } + position += len; + } else { + // Position and length encoded in two smis. + position += (-len); + // Get the position and check that it is also a smi. 
+ i++; + if (i >= array_length) { + return Top::Throw(Heap::illegal_argument_symbol()); + } + Object* pos = fixed_array->get(i); + if (!pos->IsSmi()) { + return Top::Throw(Heap::illegal_argument_symbol()); + } } - position += len; } else if (elt->IsString()) { String* element = String::cast(elt); int element_length = element->length(); @@ -4336,8 +4369,6 @@ static Object* Runtime_NewArgumentsFast(Arguments args) { Object* result = Heap::AllocateArgumentsObject(callee, length); if (result->IsFailure()) return result; - ASSERT(Heap::InNewSpace(result)); - // Allocate the elements if needed. if (length > 0) { // Allocate the fixed array. @@ -4350,8 +4381,7 @@ static Object* Runtime_NewArgumentsFast(Arguments args) { for (int i = 0; i < length; i++) { array->set(i, *--parameters, mode); } - JSObject::cast(result)->set_elements(FixedArray::cast(obj), - SKIP_WRITE_BARRIER); + JSObject::cast(result)->set_elements(FixedArray::cast(obj)); } return result; } @@ -4797,6 +4827,12 @@ static Object* Runtime_ReThrow(Arguments args) { } +static Object* Runtime_PromoteScheduledException(Arguments args) { + ASSERT_EQ(0, args.length()); + return Top::PromoteScheduledException(); +} + + static Object* Runtime_ThrowReferenceError(Arguments args) { HandleScope scope; ASSERT(args.length() == 1); @@ -5964,14 +6000,33 @@ static Object* Runtime_DebugLocalPropertyNames(Arguments args) { // Get the property names. jsproto = obj; + int proto_with_hidden_properties = 0; for (int i = 0; i < length; i++) { jsproto->GetLocalPropertyNames(*names, i == 0 ? 0 : local_property_count[i - 1]); + if (!GetHiddenProperties(jsproto, false)->IsUndefined()) { + proto_with_hidden_properties++; + } if (i < length - 1) { jsproto = Handle<JSObject>(JSObject::cast(jsproto->GetPrototype())); } } + // Filter out name of hidden propeties object. 
+ if (proto_with_hidden_properties > 0) { + Handle<FixedArray> old_names = names; + names = Factory::NewFixedArray( + names->length() - proto_with_hidden_properties); + int dest_pos = 0; + for (int i = 0; i < total_property_count; i++) { + Object* name = old_names->get(i); + if (name == Heap::hidden_symbol()) { + continue; + } + names->set(dest_pos++, name); + } + } + DeleteArray(local_property_count); return *Factory::NewJSArrayWithElements(names); } @@ -6778,8 +6833,9 @@ static Object* Runtime_GetCFrames(Arguments args) { // Get the stack walk text for this frame. Handle<String> frame_text; - if (strlen(frames[i].text) > 0) { - Vector<const char> str(frames[i].text, strlen(frames[i].text)); + int frame_text_length = StrLength(frames[i].text); + if (frame_text_length > 0) { + Vector<const char> str(frames[i].text, frame_text_length); frame_text = Factory::NewStringFromAscii(str); } @@ -7246,7 +7302,7 @@ static Object* Runtime_DebugEvaluate(Arguments args) { // function(arguments,__source__) {return eval(__source__);} static const char* source_str = "(function(arguments,__source__){return eval(__source__);})"; - static const int source_str_length = strlen(source_str); + static const int source_str_length = StrLength(source_str); Handle<String> function_source = Factory::NewStringFromAscii(Vector<const char>(source_str, source_str_length)); @@ -7711,7 +7767,7 @@ static Object* Runtime_CollectStackTrace(Arguments args) { Object* fun = frame->function(); Address pc = frame->pc(); Address start = frame->code()->address(); - Smi* offset = Smi::FromInt(pc - start); + Smi* offset = Smi::FromInt(static_cast<int>(pc - start)); FixedArray* elements = FixedArray::cast(result->elements()); if (cursor + 2 < elements->length()) { elements->set(cursor++, recv); @@ -7758,6 +7814,13 @@ static Object* Runtime_Abort(Arguments args) { } +static Object* Runtime_DeleteHandleScopeExtensions(Arguments args) { + ASSERT(args.length() == 0); + HandleScope::DeleteExtensions(); + return 
Heap::undefined_value(); +} + + #ifdef DEBUG // ListNatives is ONLY used by the fuzz-natives.js in debug mode // Exclude the code in release mode. @@ -7770,7 +7833,8 @@ static Object* Runtime_ListNatives(Arguments args) { { \ HandleScope inner; \ Handle<String> name = \ - Factory::NewStringFromAscii(Vector<const char>(#Name, strlen(#Name))); \ + Factory::NewStringFromAscii( \ + Vector<const char>(#Name, StrLength(#Name))); \ Handle<JSArray> pair = Factory::NewJSArray(0); \ SetElement(pair, 0, name); \ SetElement(pair, 1, Handle<Smi>(Smi::FromInt(argc))); \ diff --git a/deps/v8/src/runtime.h b/deps/v8/src/runtime.h index 6b1ce480b5..c05ae6b7b3 100644 --- a/deps/v8/src/runtime.h +++ b/deps/v8/src/runtime.h @@ -149,7 +149,7 @@ namespace internal { F(StringIndexOf, 3, 1) \ F(StringLastIndexOf, 3, 1) \ F(StringLocaleCompare, 2, 1) \ - F(StringSlice, 3, 1) \ + F(SubString, 3, 1) \ F(StringReplaceRegExpWithString, 4, 1) \ F(StringMatch, 3, 1) \ F(StringTrim, 3, 1) \ @@ -234,6 +234,7 @@ namespace internal { F(ReThrow, 1, 1) \ F(ThrowReferenceError, 1, 1) \ F(StackGuard, 1, 1) \ + F(PromoteScheduledException, 0, 1) \ \ /* Contexts */ \ F(NewContext, 1, 1) \ @@ -263,6 +264,8 @@ namespace internal { F(Log, 2, 1) \ /* ES5 */ \ F(LocalKeys, 1, 1) \ + /* Handle scopes */ \ + F(DeleteHandleScopeExtensions, 0, 1) \ \ /* Pseudo functions - handled as macros by parser */ \ F(IS_VAR, 1, 1) diff --git a/deps/v8/src/scanner.cc b/deps/v8/src/scanner.cc index 3dae414f9d..0d3b789f9b 100644 --- a/deps/v8/src/scanner.cc +++ b/deps/v8/src/scanner.cc @@ -49,17 +49,11 @@ StaticResource<Scanner::Utf8Decoder> Scanner::utf8_decoder_; // ---------------------------------------------------------------------------- // UTF8Buffer -UTF8Buffer::UTF8Buffer() { - static const int kInitialCapacity = 1 * KB; - data_ = NewArray<char>(kInitialCapacity); - limit_ = ComputeLimit(data_, kInitialCapacity); - Reset(); - ASSERT(Capacity() == kInitialCapacity && pos() == 0); -} +UTF8Buffer::UTF8Buffer() : 
data_(NULL), limit_(NULL) { } UTF8Buffer::~UTF8Buffer() { - DeleteArray(data_); + if (data_ != NULL) DeleteArray(data_); } @@ -69,7 +63,7 @@ void UTF8Buffer::AddCharSlow(uc32 c) { int old_capacity = Capacity(); int old_position = pos(); int new_capacity = - Min(old_capacity * 2, old_capacity + kCapacityGrowthLimit); + Min(old_capacity * 3, old_capacity + kCapacityGrowthLimit); char* new_data = NewArray<char>(new_capacity); memcpy(new_data, data_, old_position); DeleteArray(data_); @@ -194,11 +188,142 @@ void TwoByteStringUTF16Buffer::SeekForward(int pos) { // ---------------------------------------------------------------------------- +// Keyword Matcher +KeywordMatcher::FirstState KeywordMatcher::first_states_[] = { + { "break", KEYWORD_PREFIX, Token::BREAK }, + { NULL, C, Token::ILLEGAL }, + { NULL, D, Token::ILLEGAL }, + { "else", KEYWORD_PREFIX, Token::ELSE }, + { NULL, F, Token::ILLEGAL }, + { NULL, UNMATCHABLE, Token::ILLEGAL }, + { NULL, UNMATCHABLE, Token::ILLEGAL }, + { NULL, I, Token::ILLEGAL }, + { NULL, UNMATCHABLE, Token::ILLEGAL }, + { NULL, UNMATCHABLE, Token::ILLEGAL }, + { NULL, UNMATCHABLE, Token::ILLEGAL }, + { NULL, UNMATCHABLE, Token::ILLEGAL }, + { NULL, N, Token::ILLEGAL }, + { NULL, UNMATCHABLE, Token::ILLEGAL }, + { NULL, UNMATCHABLE, Token::ILLEGAL }, + { NULL, UNMATCHABLE, Token::ILLEGAL }, + { "return", KEYWORD_PREFIX, Token::RETURN }, + { "switch", KEYWORD_PREFIX, Token::SWITCH }, + { NULL, T, Token::ILLEGAL }, + { NULL, UNMATCHABLE, Token::ILLEGAL }, + { NULL, V, Token::ILLEGAL }, + { NULL, W, Token::ILLEGAL } +}; + + +void KeywordMatcher::Step(uc32 input) { + switch (state_) { + case INITIAL: { + // matching the first character is the only state with significant fanout. + // Match only lower-case letters in range 'b'..'w'. 
+ unsigned int offset = input - kFirstCharRangeMin; + if (offset < kFirstCharRangeLength) { + state_ = first_states_[offset].state; + if (state_ == KEYWORD_PREFIX) { + keyword_ = first_states_[offset].keyword; + counter_ = 1; + keyword_token_ = first_states_[offset].token; + } + return; + } + break; + } + case KEYWORD_PREFIX: + if (keyword_[counter_] == input) { + ASSERT_NE(input, '\0'); + counter_++; + if (keyword_[counter_] == '\0') { + state_ = KEYWORD_MATCHED; + token_ = keyword_token_; + } + return; + } + break; + case KEYWORD_MATCHED: + token_ = Token::IDENTIFIER; + break; + case C: + if (MatchState(input, 'a', CA)) return; + if (MatchState(input, 'o', CO)) return; + break; + case CA: + if (MatchKeywordStart(input, "case", 2, Token::CASE)) return; + if (MatchKeywordStart(input, "catch", 2, Token::CATCH)) return; + break; + case CO: + if (MatchState(input, 'n', CON)) return; + break; + case CON: + if (MatchKeywordStart(input, "const", 3, Token::CONST)) return; + if (MatchKeywordStart(input, "continue", 3, Token::CONTINUE)) return; + break; + case D: + if (MatchState(input, 'e', DE)) return; + if (MatchKeyword(input, 'o', KEYWORD_MATCHED, Token::DO)) return; + break; + case DE: + if (MatchKeywordStart(input, "debugger", 2, Token::DEBUGGER)) return; + if (MatchKeywordStart(input, "default", 2, Token::DEFAULT)) return; + if (MatchKeywordStart(input, "delete", 2, Token::DELETE)) return; + break; + case F: + if (MatchKeywordStart(input, "false", 1, Token::FALSE_LITERAL)) return; + if (MatchKeywordStart(input, "finally", 1, Token::FINALLY)) return; + if (MatchKeywordStart(input, "for", 1, Token::FOR)) return; + if (MatchKeywordStart(input, "function", 1, Token::FUNCTION)) return; + break; + case I: + if (MatchKeyword(input, 'f', KEYWORD_MATCHED, Token::IF)) return; + if (MatchKeyword(input, 'n', IN, Token::IN)) return; + break; + case IN: + token_ = Token::IDENTIFIER; + if (MatchKeywordStart(input, "instanceof", 2, Token::INSTANCEOF)) { + return; + } + break; + case 
N: + if (MatchKeywordStart(input, "native", 1, Token::NATIVE)) return; + if (MatchKeywordStart(input, "new", 1, Token::NEW)) return; + if (MatchKeywordStart(input, "null", 1, Token::NULL_LITERAL)) return; + break; + case T: + if (MatchState(input, 'h', TH)) return; + if (MatchState(input, 'r', TR)) return; + if (MatchKeywordStart(input, "typeof", 1, Token::TYPEOF)) return; + break; + case TH: + if (MatchKeywordStart(input, "this", 2, Token::THIS)) return; + if (MatchKeywordStart(input, "throw", 2, Token::THROW)) return; + break; + case TR: + if (MatchKeywordStart(input, "true", 2, Token::TRUE_LITERAL)) return; + if (MatchKeyword(input, 'y', KEYWORD_MATCHED, Token::TRY)) return; + break; + case V: + if (MatchKeywordStart(input, "var", 1, Token::VAR)) return; + if (MatchKeywordStart(input, "void", 1, Token::VOID)) return; + break; + case W: + if (MatchKeywordStart(input, "while", 1, Token::WHILE)) return; + if (MatchKeywordStart(input, "with", 1, Token::WITH)) return; + break; + default: + UNREACHABLE(); + } + // On fallthrough, it's a failure. + state_ = UNMATCHABLE; +} + + +// ---------------------------------------------------------------------------- // Scanner -Scanner::Scanner(bool pre) : stack_overflow_(false), is_pre_parsing_(pre) { - Token::Initialize(); -} +Scanner::Scanner(bool pre) : stack_overflow_(false), is_pre_parsing_(pre) { } void Scanner::Init(Handle<String> source, unibrow::CharacterStream* stream, @@ -215,12 +340,11 @@ void Scanner::Init(Handle<String> source, unibrow::CharacterStream* stream, position_ = position; - // Reset literals buffer - literals_.Reset(); - // Set c0_ (one character ahead) ASSERT(kCharacterLookaheadBufferSize == 1); Advance(); + // Initializer current_ to not refer to a literal buffer. + current_.literal_buffer = NULL; // Skip initial whitespace allowing HTML comment ends just like // after a newline and scan first token. 
@@ -253,17 +377,23 @@ Token::Value Scanner::Next() { void Scanner::StartLiteral() { - next_.literal_pos = literals_.pos(); + // Use the first buffer unless it's currently in use by the current_ token. + // In most cases we won't have two literals/identifiers in a row, so + // the second buffer won't be used very often and is unlikely to grow much. + UTF8Buffer* free_buffer = + (current_.literal_buffer != &literal_buffer_1_) ? &literal_buffer_1_ + : &literal_buffer_2_; + next_.literal_buffer = free_buffer; + free_buffer->Reset(); } void Scanner::AddChar(uc32 c) { - literals_.AddChar(c); + next_.literal_buffer->AddChar(c); } void Scanner::TerminateLiteral() { - next_.literal_end = literals_.pos(); AddChar(0); } @@ -383,6 +513,7 @@ Token::Value Scanner::ScanHtmlComment() { void Scanner::Scan() { + next_.literal_buffer = NULL; Token::Value token; has_line_terminator_before_next_ = false; do { @@ -855,48 +986,40 @@ uc32 Scanner::ScanIdentifierUnicodeEscape() { Token::Value Scanner::ScanIdentifier() { ASSERT(kIsIdentifierStart.get(c0_)); - bool has_escapes = false; StartLiteral(); + KeywordMatcher keyword_match; + // Scan identifier start character. if (c0_ == '\\') { - has_escapes = true; uc32 c = ScanIdentifierUnicodeEscape(); // Only allow legal identifier start characters. if (!kIsIdentifierStart.get(c)) return Token::ILLEGAL; AddChar(c); + keyword_match.Fail(); } else { AddChar(c0_); + keyword_match.AddChar(c0_); Advance(); } // Scan the rest of the identifier characters. while (kIsIdentifierPart.get(c0_)) { if (c0_ == '\\') { - has_escapes = true; uc32 c = ScanIdentifierUnicodeEscape(); // Only allow legal identifier part characters. if (!kIsIdentifierPart.get(c)) return Token::ILLEGAL; AddChar(c); + keyword_match.Fail(); } else { AddChar(c0_); + keyword_match.AddChar(c0_); Advance(); } } TerminateLiteral(); - // We don't have any 1-letter keywords (this is probably a common case). 
- if ((next_.literal_end - next_.literal_pos) == 1) { - return Token::IDENTIFIER; - } - - // If the identifier contains unicode escapes, it must not be - // resolved to a keyword. - if (has_escapes) { - return Token::IDENTIFIER; - } - - return Token::Lookup(&literals_.data()[next_.literal_pos]); + return keyword_match.token(); } diff --git a/deps/v8/src/scanner.h b/deps/v8/src/scanner.h index a201d0e976..9d7b34e7ca 100644 --- a/deps/v8/src/scanner.h +++ b/deps/v8/src/scanner.h @@ -41,6 +41,7 @@ class UTF8Buffer { ~UTF8Buffer(); void AddChar(uc32 c) { + ASSERT_NOT_NULL(data_); if (cursor_ <= limit_ && static_cast<unsigned>(c) <= unibrow::Utf8::kMaxOneByteChar) { *cursor_++ = static_cast<char>(c); @@ -49,17 +50,30 @@ class UTF8Buffer { } } - void Reset() { cursor_ = data_; } - int pos() const { return cursor_ - data_; } + void Reset() { + if (data_ == NULL) { + data_ = NewArray<char>(kInitialCapacity); + limit_ = ComputeLimit(data_, kInitialCapacity); + } + cursor_ = data_; + } + + int pos() const { + ASSERT_NOT_NULL(data_); + return static_cast<int>(cursor_ - data_); + } + char* data() const { return data_; } private: + static const int kInitialCapacity = 256; char* data_; char* cursor_; char* limit_; int Capacity() const { - return (limit_ - data_) + unibrow::Utf8::kMaxEncodedSize; + ASSERT_NOT_NULL(data_); + return static_cast<int>(limit_ - data_) + unibrow::Utf8::kMaxEncodedSize; } static char* ComputeLimit(char* data, int capacity) { @@ -123,6 +137,121 @@ class TwoByteStringUTF16Buffer: public UTF16Buffer { }; +class KeywordMatcher { +// Incrementally recognize keywords. +// +// Recognized keywords: +// break case catch const* continue debugger* default delete do else +// finally false for function if in instanceof native* new null +// return switch this throw true try typeof var void while with +// +// *: Actually "future reserved keywords". These are the only ones we +// recognized, the remaining are allowed as identifiers. 
+ public: + KeywordMatcher() : state_(INITIAL), token_(Token::IDENTIFIER) {} + + Token::Value token() { return token_; } + + inline void AddChar(uc32 input) { + if (state_ != UNMATCHABLE) { + Step(input); + } + } + + void Fail() { + token_ = Token::IDENTIFIER; + state_ = UNMATCHABLE; + } + + private: + enum State { + UNMATCHABLE, + INITIAL, + KEYWORD_PREFIX, + KEYWORD_MATCHED, + C, + CA, + CO, + CON, + D, + DE, + F, + I, + IN, + N, + T, + TH, + TR, + V, + W + }; + + struct FirstState { + const char* keyword; + State state; + Token::Value token; + }; + + // Range of possible first characters of a keyword. + static const unsigned int kFirstCharRangeMin = 'b'; + static const unsigned int kFirstCharRangeMax = 'w'; + static const unsigned int kFirstCharRangeLength = + kFirstCharRangeMax - kFirstCharRangeMin + 1; + // State map for first keyword character range. + static FirstState first_states_[kFirstCharRangeLength]; + + // Current state. + State state_; + // Token for currently added characters. + Token::Value token_; + + // Matching a specific keyword string (there is only one possible valid + // keyword with the current prefix). + const char* keyword_; + int counter_; + Token::Value keyword_token_; + + // If input equals keyword's character at position, continue matching keyword + // from that position. + inline bool MatchKeywordStart(uc32 input, + const char* keyword, + int position, + Token::Value token_if_match) { + if (input == keyword[position]) { + state_ = KEYWORD_PREFIX; + this->keyword_ = keyword; + this->counter_ = position + 1; + this->keyword_token_ = token_if_match; + return true; + } + return false; + } + + // If input equals match character, transition to new state and return true. 
+ inline bool MatchState(uc32 input, char match, State new_state) { + if (input == match) { + state_ = new_state; + return true; + } + return false; + } + + inline bool MatchKeyword(uc32 input, + char match, + State new_state, + Token::Value keyword_token) { + if (input == match) { // Matched "do". + state_ = new_state; + token_ = keyword_token; + return true; + } + return false; + } + + void Step(uc32 input); +}; + + class Scanner { public: @@ -163,26 +292,30 @@ class Scanner { // token returned by Next()). The string is 0-terminated and in // UTF-8 format; they may contain 0-characters. Literal strings are // collected for identifiers, strings, and numbers. + // These functions only give the correct result if the literal + // was scanned between calls to StartLiteral() and TerminateLiteral(). const char* literal_string() const { - return &literals_.data()[current_.literal_pos]; + return current_.literal_buffer->data(); } int literal_length() const { - return current_.literal_end - current_.literal_pos; - } - - Vector<const char> next_literal() const { - return Vector<const char>(next_literal_string(), next_literal_length()); + // Excluding terminal '\0' added by TerminateLiteral(). + return current_.literal_buffer->pos() - 1; } // Returns the literal string for the next token (the token that // would be returned if Next() were called). const char* next_literal_string() const { - return &literals_.data()[next_.literal_pos]; + return next_.literal_buffer->data(); } // Returns the length of the next token (that would be returned if // Next() were called). 
int next_literal_length() const { - return next_.literal_end - next_.literal_pos; + return next_.literal_buffer->pos() - 1; + } + + Vector<const char> next_literal() const { + return Vector<const char>(next_literal_string(), + next_literal_length()); } // Scans the input as a regular expression pattern, previous @@ -224,7 +357,8 @@ class Scanner { // Buffer to hold literal values (identifiers, strings, numbers) // using 0-terminated UTF-8 encoding. - UTF8Buffer literals_; + UTF8Buffer literal_buffer_1_; + UTF8Buffer literal_buffer_2_; bool stack_overflow_; static StaticResource<Utf8Decoder> utf8_decoder_; @@ -236,7 +370,7 @@ class Scanner { struct TokenDesc { Token::Value token; Location location; - int literal_pos, literal_end; + UTF8Buffer* literal_buffer; }; TokenDesc current_; // desc for current token (as returned by Next()) diff --git a/deps/v8/src/scopeinfo.cc b/deps/v8/src/scopeinfo.cc index 8a237fd0ec..8b989d7a35 100644 --- a/deps/v8/src/scopeinfo.cc +++ b/deps/v8/src/scopeinfo.cc @@ -100,8 +100,7 @@ ScopeInfo<Allocator>::ScopeInfo(Scope* scope) break; case Slot::LOOKUP: - case Slot::GLOBAL: - // these are currently not used + // This is currently not used. 
UNREACHABLE(); break; } @@ -419,7 +418,7 @@ int ScopeInfo<Allocator>::StackSlotIndex(Code* code, String* name) { Object** p0 = StackSlotEntriesAddr(code) + 1; Object** p = p0; while (*p != NULL) { - if (*p == name) return p - p0; + if (*p == name) return static_cast<int>(p - p0); p++; } } @@ -450,7 +449,7 @@ int ScopeInfo<Allocator>::ContextSlotIndex(Code* code, ReadInt(p + 1, &v); Variable::Mode mode_value = static_cast<Variable::Mode>(v); if (mode != NULL) *mode = mode_value; - result = ((p - p0) >> 1) + Context::MIN_CONTEXT_SLOTS; + result = static_cast<int>((p - p0) >> 1) + Context::MIN_CONTEXT_SLOTS; ContextSlotCache::Update(code, name, mode_value, result); return result; } @@ -482,7 +481,7 @@ int ScopeInfo<Allocator>::ParameterIndex(Code* code, String* name) { p = p0 + n; while (p > p0) { p--; - if (*p == name) return p - p0; + if (*p == name) return static_cast<int>(p - p0); } } return -1; diff --git a/deps/v8/src/scopes.cc b/deps/v8/src/scopes.cc index 25873fac1d..7da06cdbc0 100644 --- a/deps/v8/src/scopes.cc +++ b/deps/v8/src/scopes.cc @@ -42,7 +42,7 @@ class ZoneAllocator: public Allocator { /* nothing to do */ virtual ~ZoneAllocator() {} - virtual void* New(size_t size) { return Zone::New(size); } + virtual void* New(size_t size) { return Zone::New(static_cast<int>(size)); } /* ignored - Zone is freed in one fell swoop */ virtual void Delete(void* p) {} @@ -540,11 +540,11 @@ Variable* Scope::NonLocal(Handle<String> name, Variable::Mode mode) { // Lookup a variable starting with this scope. The result is either -// the statically resolved (local!) variable belonging to an outer scope, -// or NULL. It may be NULL because a) we couldn't find a variable, or b) -// because the variable is just a guess (and may be shadowed by another -// variable that is introduced dynamically via an 'eval' call or a 'with' -// statement). +// the statically resolved variable belonging to an outer scope, or +// NULL. 
It may be NULL because a) we couldn't find a variable, or b) +// because the variable is just a guess (and may be shadowed by +// another variable that is introduced dynamically via an 'eval' call +// or a 'with' statement). Variable* Scope::LookupRecursive(Handle<String> name, bool inner_lookup, Variable** invalidated_local) { @@ -598,9 +598,11 @@ Variable* Scope::LookupRecursive(Handle<String> name, if (inner_lookup) var->is_accessed_from_inner_scope_ = true; - // If the variable we have found is just a guess, invalidate the result. + // If the variable we have found is just a guess, invalidate the + // result. If the found variable is local, record that fact so we + // can generate fast code to get it if it is not shadowed by eval. if (guess) { - *invalidated_local = var; + if (!var->is_global()) *invalidated_local = var; var = NULL; } diff --git a/deps/v8/src/serialize.cc b/deps/v8/src/serialize.cc index 6eedeef37b..de87022e00 100644 --- a/deps/v8/src/serialize.cc +++ b/deps/v8/src/serialize.cc @@ -39,353 +39,11 @@ #include "stub-cache.h" #include "v8threads.h" #include "top.h" +#include "bootstrapper.h" namespace v8 { namespace internal { -// 32-bit encoding: a RelativeAddress must be able to fit in a -// pointer: it is encoded as an Address with (from LS to MS bits): -// - 2 bits identifying this as a HeapObject. 
-// - 4 bits to encode the AllocationSpace (including special values for -// code and fixed arrays in LO space) -// - 27 bits identifying a word in the space, in one of three formats: -// - paged spaces: 16 bits of page number, 11 bits of word offset in page -// - NEW space: 27 bits of word offset -// - LO space: 27 bits of page number - -const int kSpaceShift = kHeapObjectTagSize; -const int kSpaceBits = 4; -const int kSpaceMask = (1 << kSpaceBits) - 1; - -const int kOffsetShift = kSpaceShift + kSpaceBits; -const int kOffsetBits = 11; -const int kOffsetMask = (1 << kOffsetBits) - 1; - -const int kPageShift = kOffsetShift + kOffsetBits; -const int kPageBits = 32 - (kOffsetBits + kSpaceBits + kHeapObjectTagSize); -const int kPageMask = (1 << kPageBits) - 1; - -const int kPageAndOffsetShift = kOffsetShift; -const int kPageAndOffsetBits = kPageBits + kOffsetBits; -const int kPageAndOffsetMask = (1 << kPageAndOffsetBits) - 1; - -// These values are special allocation space tags used for -// serialization. -// Mark the pages executable on platforms that support it. -const int kLargeCode = LAST_SPACE + 1; -// Allocate extra remembered-set bits. 
-const int kLargeFixedArray = LAST_SPACE + 2; - - -static inline AllocationSpace GetSpace(Address addr) { - const intptr_t encoded = reinterpret_cast<intptr_t>(addr); - int space_number = (static_cast<int>(encoded >> kSpaceShift) & kSpaceMask); - if (space_number > LAST_SPACE) space_number = LO_SPACE; - return static_cast<AllocationSpace>(space_number); -} - - -static inline bool IsLargeExecutableObject(Address addr) { - const intptr_t encoded = reinterpret_cast<intptr_t>(addr); - const int space_number = - (static_cast<int>(encoded >> kSpaceShift) & kSpaceMask); - return (space_number == kLargeCode); -} - - -static inline bool IsLargeFixedArray(Address addr) { - const intptr_t encoded = reinterpret_cast<intptr_t>(addr); - const int space_number = - (static_cast<int>(encoded >> kSpaceShift) & kSpaceMask); - return (space_number == kLargeFixedArray); -} - - -static inline int PageIndex(Address addr) { - const intptr_t encoded = reinterpret_cast<intptr_t>(addr); - return static_cast<int>(encoded >> kPageShift) & kPageMask; -} - - -static inline int PageOffset(Address addr) { - const intptr_t encoded = reinterpret_cast<intptr_t>(addr); - const int offset = static_cast<int>(encoded >> kOffsetShift) & kOffsetMask; - return offset << kObjectAlignmentBits; -} - - -static inline int NewSpaceOffset(Address addr) { - const intptr_t encoded = reinterpret_cast<intptr_t>(addr); - const int page_offset = - static_cast<int>(encoded >> kPageAndOffsetShift) & kPageAndOffsetMask; - return page_offset << kObjectAlignmentBits; -} - - -static inline int LargeObjectIndex(Address addr) { - const intptr_t encoded = reinterpret_cast<intptr_t>(addr); - return static_cast<int>(encoded >> kPageAndOffsetShift) & kPageAndOffsetMask; -} - - -// A RelativeAddress encodes a heap address that is independent of -// the actual memory addresses in real heap. The general case (for the -// OLD, CODE and MAP spaces) is as a (space id, page number, page offset) -// triple. 
The NEW space has page number == 0, because there are no -// pages. The LARGE_OBJECT space has page offset = 0, since there is -// exactly one object per page. RelativeAddresses are encodable as -// Addresses, so that they can replace the map() pointers of -// HeapObjects. The encoded Addresses are also encoded as HeapObjects -// and allow for marking (is_marked() see mark(), clear_mark()...) as -// used by the Mark-Compact collector. - -class RelativeAddress { - public: - RelativeAddress(AllocationSpace space, - int page_index, - int page_offset) - : space_(space), page_index_(page_index), page_offset_(page_offset) { - // Assert that the space encoding (plus the two pseudo-spaces for - // special large objects) fits in the available bits. - ASSERT(((LAST_SPACE + 2) & ~kSpaceMask) == 0); - ASSERT(space <= LAST_SPACE && space >= 0); - } - - // Return the encoding of 'this' as an Address. Decode with constructor. - Address Encode() const; - - AllocationSpace space() const { - if (space_ > LAST_SPACE) return LO_SPACE; - return static_cast<AllocationSpace>(space_); - } - int page_index() const { return page_index_; } - int page_offset() const { return page_offset_; } - - bool in_paged_space() const { - return space_ == CODE_SPACE || - space_ == OLD_POINTER_SPACE || - space_ == OLD_DATA_SPACE || - space_ == MAP_SPACE || - space_ == CELL_SPACE; - } - - void next_address(int offset) { page_offset_ += offset; } - void next_page(int init_offset = 0) { - page_index_++; - page_offset_ = init_offset; - } - -#ifdef DEBUG - void Verify(); -#endif - - void set_to_large_code_object() { - ASSERT(space_ == LO_SPACE); - space_ = kLargeCode; - } - void set_to_large_fixed_array() { - ASSERT(space_ == LO_SPACE); - space_ = kLargeFixedArray; - } - - - private: - int space_; - int page_index_; - int page_offset_; -}; - - -Address RelativeAddress::Encode() const { - ASSERT(page_index_ >= 0); - int word_offset = 0; - int result = 0; - switch (space_) { - case MAP_SPACE: - case CELL_SPACE: - 
case OLD_POINTER_SPACE: - case OLD_DATA_SPACE: - case CODE_SPACE: - ASSERT_EQ(0, page_index_ & ~kPageMask); - word_offset = page_offset_ >> kObjectAlignmentBits; - ASSERT_EQ(0, word_offset & ~kOffsetMask); - result = (page_index_ << kPageShift) | (word_offset << kOffsetShift); - break; - case NEW_SPACE: - ASSERT_EQ(0, page_index_); - word_offset = page_offset_ >> kObjectAlignmentBits; - ASSERT_EQ(0, word_offset & ~kPageAndOffsetMask); - result = word_offset << kPageAndOffsetShift; - break; - case LO_SPACE: - case kLargeCode: - case kLargeFixedArray: - ASSERT_EQ(0, page_offset_); - ASSERT_EQ(0, page_index_ & ~kPageAndOffsetMask); - result = page_index_ << kPageAndOffsetShift; - break; - } - // OR in AllocationSpace and kHeapObjectTag - ASSERT_EQ(0, space_ & ~kSpaceMask); - result |= (space_ << kSpaceShift) | kHeapObjectTag; - return reinterpret_cast<Address>(result); -} - - -#ifdef DEBUG -void RelativeAddress::Verify() { - ASSERT(page_offset_ >= 0 && page_index_ >= 0); - switch (space_) { - case MAP_SPACE: - case CELL_SPACE: - case OLD_POINTER_SPACE: - case OLD_DATA_SPACE: - case CODE_SPACE: - ASSERT(Page::kObjectStartOffset <= page_offset_ && - page_offset_ <= Page::kPageSize); - break; - case NEW_SPACE: - ASSERT(page_index_ == 0); - break; - case LO_SPACE: - case kLargeCode: - case kLargeFixedArray: - ASSERT(page_offset_ == 0); - break; - } -} -#endif - -enum GCTreatment { - DataObject, // Object that cannot contain a reference to new space. - PointerObject, // Object that can contain a reference to new space. - CodeObject // Object that contains executable code. -}; - -// A SimulatedHeapSpace simulates the allocation of objects in a page in -// the heap. It uses linear allocation - that is, it doesn't simulate the -// use of a free list. This simulated -// allocation must exactly match that done by Heap. - -class SimulatedHeapSpace { - public: - // The default constructor initializes to an invalid state. 
- SimulatedHeapSpace(): current_(LAST_SPACE, -1, -1) {} - - // Sets 'this' to the first address in 'space' that would be - // returned by allocation in an empty heap. - void InitEmptyHeap(AllocationSpace space); - - // Sets 'this' to the next address in 'space' that would be returned - // by allocation in the current heap. Intended only for testing - // serialization and deserialization in the current address space. - void InitCurrentHeap(AllocationSpace space); - - // Returns the RelativeAddress where the next - // object of 'size' bytes will be allocated, and updates 'this' to - // point to the next free address beyond that object. - RelativeAddress Allocate(int size, GCTreatment special_gc_treatment); - - private: - RelativeAddress current_; -}; - - -void SimulatedHeapSpace::InitEmptyHeap(AllocationSpace space) { - switch (space) { - case MAP_SPACE: - case CELL_SPACE: - case OLD_POINTER_SPACE: - case OLD_DATA_SPACE: - case CODE_SPACE: - current_ = RelativeAddress(space, 0, Page::kObjectStartOffset); - break; - case NEW_SPACE: - case LO_SPACE: - current_ = RelativeAddress(space, 0, 0); - break; - } -} - - -void SimulatedHeapSpace::InitCurrentHeap(AllocationSpace space) { - switch (space) { - case MAP_SPACE: - case CELL_SPACE: - case OLD_POINTER_SPACE: - case OLD_DATA_SPACE: - case CODE_SPACE: { - PagedSpace* ps; - if (space == MAP_SPACE) { - ps = Heap::map_space(); - } else if (space == CELL_SPACE) { - ps = Heap::cell_space(); - } else if (space == OLD_POINTER_SPACE) { - ps = Heap::old_pointer_space(); - } else if (space == OLD_DATA_SPACE) { - ps = Heap::old_data_space(); - } else { - ASSERT(space == CODE_SPACE); - ps = Heap::code_space(); - } - Address top = ps->top(); - Page* top_page = Page::FromAllocationTop(top); - int page_index = 0; - PageIterator it(ps, PageIterator::PAGES_IN_USE); - while (it.has_next()) { - if (it.next() == top_page) break; - page_index++; - } - current_ = RelativeAddress(space, - page_index, - top_page->Offset(top)); - break; - } - 
case NEW_SPACE: - current_ = RelativeAddress(space, - 0, - Heap::NewSpaceTop() - Heap::NewSpaceStart()); - break; - case LO_SPACE: - int page_index = 0; - for (LargeObjectIterator it(Heap::lo_space()); it.has_next(); it.next()) { - page_index++; - } - current_ = RelativeAddress(space, page_index, 0); - break; - } -} - - -RelativeAddress SimulatedHeapSpace::Allocate(int size, - GCTreatment special_gc_treatment) { -#ifdef DEBUG - current_.Verify(); -#endif - int alloc_size = OBJECT_SIZE_ALIGN(size); - if (current_.in_paged_space() && - current_.page_offset() + alloc_size > Page::kPageSize) { - ASSERT(alloc_size <= Page::kMaxHeapObjectSize); - current_.next_page(Page::kObjectStartOffset); - } - RelativeAddress result = current_; - if (current_.space() == LO_SPACE) { - current_.next_page(); - if (special_gc_treatment == CodeObject) { - result.set_to_large_code_object(); - } else if (special_gc_treatment == PointerObject) { - result.set_to_large_fixed_array(); - } - } else { - current_.next_address(alloc_size); - } -#ifdef DEBUG - current_.Verify(); - result.Verify(); -#endif - return result; -} - // ----------------------------------------------------------------------------- // Coding of external references. 
@@ -489,12 +147,12 @@ void ExternalReferenceTable::Add(Address address, TypeCode type, uint16_t id, const char* name) { - CHECK_NE(NULL, address); + ASSERT_NE(NULL, address); ExternalReferenceEntry entry; entry.address = address; entry.code = EncodeExternal(type, id); entry.name = name; - CHECK_NE(0, entry.code); + ASSERT_NE(0, entry.code); refs_.Add(entry); if (id > max_id_[type]) max_id_[type] = id; } @@ -575,7 +233,7 @@ void ExternalReferenceTable::PopulateTable() { Debug::k_debug_break_return_address << kDebugIdShift, "Debug::debug_break_return_address()"); const char* debug_register_format = "Debug::register_address(%i)"; - size_t dr_format_length = strlen(debug_register_format); + int dr_format_length = StrLength(debug_register_format); for (int i = 0; i < kNumJSCallerSaved; ++i) { Vector<char> name = Vector<char>::New(dr_format_length + 1); OS::SNPrintF(name, debug_register_format, i); @@ -623,11 +281,11 @@ void ExternalReferenceTable::PopulateTable() { #undef C }; - size_t top_format_length = strlen(top_address_format) - 2; + int top_format_length = StrLength(top_address_format) - 2; for (uint16_t i = 0; i < Top::k_top_address_count; ++i) { const char* address_name = AddressNames[i]; Vector<char> name = - Vector<char>::New(top_format_length + strlen(address_name) + 1); + Vector<char>::New(top_format_length + StrLength(address_name) + 1); const char* chars = name.start(); OS::SNPrintF(name, top_address_format, address_name); Add(Top::get_address_from_id((Top::AddressId)i), TOP_ADDRESS, i, chars); @@ -688,76 +346,80 @@ void ExternalReferenceTable::PopulateTable() { UNCLASSIFIED, 3, "Heap::roots_address()"); - Add(ExternalReference::address_of_stack_guard_limit().address(), + Add(ExternalReference::address_of_stack_limit().address(), UNCLASSIFIED, 4, "StackGuard::address_of_jslimit()"); - Add(ExternalReference::address_of_regexp_stack_limit().address(), + Add(ExternalReference::address_of_real_stack_limit().address(), UNCLASSIFIED, 5, + 
"StackGuard::address_of_real_jslimit()"); + Add(ExternalReference::address_of_regexp_stack_limit().address(), + UNCLASSIFIED, + 6, "RegExpStack::limit_address()"); Add(ExternalReference::new_space_start().address(), UNCLASSIFIED, - 6, + 7, "Heap::NewSpaceStart()"); Add(ExternalReference::heap_always_allocate_scope_depth().address(), UNCLASSIFIED, - 7, + 8, "Heap::always_allocate_scope_depth()"); Add(ExternalReference::new_space_allocation_limit_address().address(), UNCLASSIFIED, - 8, + 9, "Heap::NewSpaceAllocationLimitAddress()"); Add(ExternalReference::new_space_allocation_top_address().address(), UNCLASSIFIED, - 9, + 10, "Heap::NewSpaceAllocationTopAddress()"); #ifdef ENABLE_DEBUGGER_SUPPORT Add(ExternalReference::debug_break().address(), UNCLASSIFIED, - 10, + 11, "Debug::Break()"); Add(ExternalReference::debug_step_in_fp_address().address(), UNCLASSIFIED, - 11, + 12, "Debug::step_in_fp_addr()"); #endif Add(ExternalReference::double_fp_operation(Token::ADD).address(), UNCLASSIFIED, - 12, + 13, "add_two_doubles"); Add(ExternalReference::double_fp_operation(Token::SUB).address(), UNCLASSIFIED, - 13, + 14, "sub_two_doubles"); Add(ExternalReference::double_fp_operation(Token::MUL).address(), UNCLASSIFIED, - 14, + 15, "mul_two_doubles"); Add(ExternalReference::double_fp_operation(Token::DIV).address(), UNCLASSIFIED, - 15, + 16, "div_two_doubles"); Add(ExternalReference::double_fp_operation(Token::MOD).address(), UNCLASSIFIED, - 16, + 17, "mod_two_doubles"); Add(ExternalReference::compare_doubles().address(), UNCLASSIFIED, - 17, + 18, "compare_doubles"); #ifdef V8_NATIVE_REGEXP Add(ExternalReference::re_case_insensitive_compare_uc16().address(), UNCLASSIFIED, - 18, + 19, "NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16()"); Add(ExternalReference::re_check_stack_guard_state().address(), UNCLASSIFIED, - 19, + 20, "RegExpMacroAssembler*::CheckStackGuardState()"); Add(ExternalReference::re_grow_stack().address(), UNCLASSIFIED, - 20, + 21, 
"NativeRegExpMacroAssembler::GrowStack()"); #endif } @@ -823,1053 +485,90 @@ ExternalReferenceDecoder::~ExternalReferenceDecoder() { } -//------------------------------------------------------------------------------ -// Implementation of Serializer - - -// Helper class to write the bytes of the serialized heap. - -class SnapshotWriter { - public: - SnapshotWriter() { - len_ = 0; - max_ = 8 << 10; // 8K initial size - str_ = NewArray<byte>(max_); - } - - ~SnapshotWriter() { - DeleteArray(str_); - } - - void GetBytes(byte** str, int* len) { - *str = NewArray<byte>(len_); - memcpy(*str, str_, len_); - *len = len_; - } - - void Reserve(int bytes, int pos); - - void PutC(char c) { - InsertC(c, len_); - } - - void PutInt(int i) { - InsertInt(i, len_); - } - - void PutAddress(Address p) { - PutBytes(reinterpret_cast<byte*>(&p), sizeof(p)); - } - - void PutBytes(const byte* a, int size) { - InsertBytes(a, len_, size); - } - - void PutString(const char* s) { - InsertString(s, len_); - } - - int InsertC(char c, int pos) { - Reserve(1, pos); - str_[pos] = c; - len_++; - return pos + 1; - } - - int InsertInt(int i, int pos) { - return InsertBytes(reinterpret_cast<byte*>(&i), pos, sizeof(i)); - } - - int InsertBytes(const byte* a, int pos, int size) { - Reserve(size, pos); - memcpy(&str_[pos], a, size); - len_ += size; - return pos + size; - } - - int InsertString(const char* s, int pos); - - int length() { return len_; } - - Address position() { return reinterpret_cast<Address>(&str_[len_]); } - - private: - byte* str_; // the snapshot - int len_; // the current length of str_ - int max_; // the allocated size of str_ -}; - - -void SnapshotWriter::Reserve(int bytes, int pos) { - CHECK(0 <= pos && pos <= len_); - while (len_ + bytes >= max_) { - max_ *= 2; - byte* old = str_; - str_ = NewArray<byte>(max_); - memcpy(str_, old, len_); - DeleteArray(old); - } - if (pos < len_) { - byte* old = str_; - str_ = NewArray<byte>(max_); - memcpy(str_, old, pos); - memcpy(str_ + pos + 
bytes, old + pos, len_ - pos); - DeleteArray(old); - } -} - -int SnapshotWriter::InsertString(const char* s, int pos) { - int size = strlen(s); - pos = InsertC('[', pos); - pos = InsertInt(size, pos); - pos = InsertC(']', pos); - return InsertBytes(reinterpret_cast<const byte*>(s), pos, size); -} - - -class ReferenceUpdater: public ObjectVisitor { - public: - ReferenceUpdater(HeapObject* obj, Serializer* serializer) - : obj_address_(obj->address()), - serializer_(serializer), - reference_encoder_(serializer->reference_encoder_), - offsets_(8), - addresses_(8), - offsets_32_bit_(0), - data_32_bit_(0) { - } - - virtual void VisitPointers(Object** start, Object** end) { - for (Object** p = start; p < end; ++p) { - if ((*p)->IsHeapObject()) { - offsets_.Add(reinterpret_cast<Address>(p) - obj_address_); - Address a = serializer_->GetSavedAddress(HeapObject::cast(*p)); - addresses_.Add(a); - } - } - } - - virtual void VisitCodeTarget(RelocInfo* rinfo) { - ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode())); - Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address()); - Address encoded_target = serializer_->GetSavedAddress(target); - // All calls and jumps are to code objects that encode into 32 bits. - offsets_32_bit_.Add(rinfo->target_address_address() - obj_address_); - uint32_t small_target = - static_cast<uint32_t>(reinterpret_cast<uintptr_t>(encoded_target)); - ASSERT(reinterpret_cast<uintptr_t>(encoded_target) == small_target); - data_32_bit_.Add(small_target); - } - - - virtual void VisitExternalReferences(Address* start, Address* end) { - for (Address* p = start; p < end; ++p) { - uint32_t code = reference_encoder_->Encode(*p); - CHECK(*p == NULL ? 
code == 0 : code != 0); - offsets_.Add(reinterpret_cast<Address>(p) - obj_address_); - addresses_.Add(reinterpret_cast<Address>(code)); - } - } - - virtual void VisitRuntimeEntry(RelocInfo* rinfo) { - Address target = rinfo->target_address(); - uint32_t encoding = reference_encoder_->Encode(target); - CHECK(target == NULL ? encoding == 0 : encoding != 0); - offsets_.Add(rinfo->target_address_address() - obj_address_); - addresses_.Add(reinterpret_cast<Address>(encoding)); - } - - void Update(Address start_address) { - for (int i = 0; i < offsets_.length(); i++) { - memcpy(start_address + offsets_[i], &addresses_[i], sizeof(Address)); - } - for (int i = 0; i < offsets_32_bit_.length(); i++) { - memcpy(start_address + offsets_32_bit_[i], &data_32_bit_[i], - sizeof(uint32_t)); - } - } - - private: - Address obj_address_; - Serializer* serializer_; - ExternalReferenceEncoder* reference_encoder_; - List<int> offsets_; - List<Address> addresses_; - // Some updates are 32-bit even on a 64-bit platform. - // We keep a separate list of them on 64-bit platforms. - List<int> offsets_32_bit_; - List<uint32_t> data_32_bit_; -}; - - -// Helper functions for a map of encoded heap object addresses. 
-static uint32_t HeapObjectHash(HeapObject* key) { - uint32_t low32bits = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(key)); - return low32bits >> 2; -} - - -static bool MatchHeapObject(void* key1, void* key2) { - return key1 == key2; -} - - -Serializer::Serializer() - : global_handles_(4), - saved_addresses_(MatchHeapObject) { - root_ = true; - roots_ = 0; - objects_ = 0; - reference_encoder_ = NULL; - writer_ = new SnapshotWriter(); - for (int i = 0; i <= LAST_SPACE; i++) { - allocator_[i] = new SimulatedHeapSpace(); - } -} - - -Serializer::~Serializer() { - for (int i = 0; i <= LAST_SPACE; i++) { - delete allocator_[i]; - } - if (reference_encoder_) delete reference_encoder_; - delete writer_; -} - - bool Serializer::serialization_enabled_ = false; +bool Serializer::too_late_to_enable_now_ = false; -#ifdef DEBUG -static const int kMaxTagLength = 32; - -void Serializer::Synchronize(const char* tag) { - if (FLAG_debug_serialization) { - int length = strlen(tag); - ASSERT(length <= kMaxTagLength); - writer_->PutC('S'); - writer_->PutInt(length); - writer_->PutBytes(reinterpret_cast<const byte*>(tag), length); - } -} -#endif - - -void Serializer::InitializeAllocators() { - for (int i = 0; i <= LAST_SPACE; i++) { - allocator_[i]->InitEmptyHeap(static_cast<AllocationSpace>(i)); - } -} - - -bool Serializer::IsVisited(HeapObject* obj) { - HashMap::Entry* entry = - saved_addresses_.Lookup(obj, HeapObjectHash(obj), false); - return entry != NULL; -} - - -Address Serializer::GetSavedAddress(HeapObject* obj) { - HashMap::Entry* entry = - saved_addresses_.Lookup(obj, HeapObjectHash(obj), false); - ASSERT(entry != NULL); - return reinterpret_cast<Address>(entry->value); -} - - -void Serializer::SaveAddress(HeapObject* obj, Address addr) { - HashMap::Entry* entry = - saved_addresses_.Lookup(obj, HeapObjectHash(obj), true); - entry->value = addr; -} - - -void Serializer::Serialize() { - // No active threads. 
- CHECK_EQ(NULL, ThreadState::FirstInUse()); - // No active or weak handles. - CHECK(HandleScopeImplementer::instance()->blocks()->is_empty()); - CHECK_EQ(0, GlobalHandles::NumberOfWeakHandles()); - // We need a counter function during serialization to resolve the - // references to counters in the code on the heap. - CHECK(StatsTable::HasCounterFunction()); - CHECK(enabled()); - InitializeAllocators(); - reference_encoder_ = new ExternalReferenceEncoder(); - PutHeader(); - Heap::IterateRoots(this); - PutLog(); - PutContextStack(); - Disable(); -} - - -void Serializer::Finalize(byte** str, int* len) { - writer_->GetBytes(str, len); -} - - -// Serialize objects by writing them into the stream. - -void Serializer::VisitPointers(Object** start, Object** end) { - bool root = root_; - root_ = false; - for (Object** p = start; p < end; ++p) { - bool serialized; - Address a = Encode(*p, &serialized); - if (root) { - roots_++; - // If the object was not just serialized, - // write its encoded address instead. 
- if (!serialized) PutEncodedAddress(a); - } - } - root_ = root; -} - - -void Serializer::VisitCodeTarget(RelocInfo* rinfo) { - ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode())); - Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address()); - bool serialized; - Encode(target, &serialized); -} - - -class GlobalHandlesRetriever: public ObjectVisitor { - public: - explicit GlobalHandlesRetriever(List<Object**>* handles) - : global_handles_(handles) {} - - virtual void VisitPointers(Object** start, Object** end) { - for (; start != end; ++start) { - global_handles_->Add(start); - } - } - - private: - List<Object**>* global_handles_; -}; - - -void Serializer::PutFlags() { - writer_->PutC('F'); - List<const char*>* argv = FlagList::argv(); - writer_->PutInt(argv->length()); - writer_->PutC('['); - for (int i = 0; i < argv->length(); i++) { - if (i > 0) writer_->PutC('|'); - writer_->PutString((*argv)[i]); - DeleteArray((*argv)[i]); - } - writer_->PutC(']'); - flags_end_ = writer_->length(); - delete argv; -} - - -void Serializer::PutHeader() { - PutFlags(); - writer_->PutC('D'); -#ifdef DEBUG - writer_->PutC(FLAG_debug_serialization ? '1' : '0'); -#else - writer_->PutC('0'); -#endif -#ifdef V8_NATIVE_REGEXP - writer_->PutC('N'); -#else // Interpreted regexp - writer_->PutC('I'); -#endif - // Write sizes of paged memory spaces. Allocate extra space for the old - // and code spaces, because objects in new space will be promoted to them. - writer_->PutC('S'); - writer_->PutC('['); - writer_->PutInt(Heap::old_pointer_space()->Size() + - Heap::new_space()->Size()); - writer_->PutC('|'); - writer_->PutInt(Heap::old_data_space()->Size() + Heap::new_space()->Size()); - writer_->PutC('|'); - writer_->PutInt(Heap::code_space()->Size() + Heap::new_space()->Size()); - writer_->PutC('|'); - writer_->PutInt(Heap::map_space()->Size()); - writer_->PutC('|'); - writer_->PutInt(Heap::cell_space()->Size()); - writer_->PutC(']'); - // Write global handles. 
- writer_->PutC('G'); - writer_->PutC('['); - GlobalHandlesRetriever ghr(&global_handles_); - GlobalHandles::IterateRoots(&ghr); - for (int i = 0; i < global_handles_.length(); i++) { - writer_->PutC('N'); - } - writer_->PutC(']'); -} - - -void Serializer::PutLog() { -#ifdef ENABLE_LOGGING_AND_PROFILING - if (FLAG_log_code) { - Logger::TearDown(); - int pos = writer_->InsertC('L', flags_end_); - bool exists; - Vector<const char> log = ReadFile(FLAG_logfile, &exists); - writer_->InsertString(log.start(), pos); - log.Dispose(); - } -#endif -} - - -static int IndexOf(const List<Object**>& list, Object** element) { - for (int i = 0; i < list.length(); i++) { - if (list[i] == element) return i; - } - return -1; -} - - -void Serializer::PutGlobalHandleStack(const List<Handle<Object> >& stack) { - writer_->PutC('['); - writer_->PutInt(stack.length()); - for (int i = stack.length() - 1; i >= 0; i--) { - writer_->PutC('|'); - int gh_index = IndexOf(global_handles_, stack[i].location()); - CHECK_GE(gh_index, 0); - writer_->PutInt(gh_index); - } - writer_->PutC(']'); -} - - -void Serializer::PutContextStack() { - List<Context*> contexts(2); - while (HandleScopeImplementer::instance()->HasSavedContexts()) { - Context* context = - HandleScopeImplementer::instance()->RestoreContext(); - contexts.Add(context); - } - for (int i = contexts.length() - 1; i >= 0; i--) { - HandleScopeImplementer::instance()->SaveContext(contexts[i]); - } - writer_->PutC('C'); - writer_->PutC('['); - writer_->PutInt(contexts.length()); - if (!contexts.is_empty()) { - Object** start = reinterpret_cast<Object**>(&contexts.first()); - VisitPointers(start, start + contexts.length()); - } - writer_->PutC(']'); -} - -void Serializer::PutEncodedAddress(Address addr) { - writer_->PutC('P'); - writer_->PutAddress(addr); -} - - -Address Serializer::Encode(Object* o, bool* serialized) { - *serialized = false; - if (o->IsSmi()) { - return reinterpret_cast<Address>(o); - } else { - HeapObject* obj = 
HeapObject::cast(o); - if (IsVisited(obj)) { - return GetSavedAddress(obj); - } else { - // First visit: serialize the object. - *serialized = true; - return PutObject(obj); - } - } -} - - -Address Serializer::PutObject(HeapObject* obj) { - Map* map = obj->map(); - InstanceType type = map->instance_type(); - int size = obj->SizeFromMap(map); - - // Simulate the allocation of obj to predict where it will be - // allocated during deserialization. - Address addr = Allocate(obj).Encode(); - - SaveAddress(obj, addr); - - if (type == CODE_TYPE) { - LOG(CodeMoveEvent(obj->address(), addr)); - } - - // Write out the object prologue: type, size, and simulated address of obj. - writer_->PutC('['); - CHECK_EQ(0, static_cast<int>(size & kObjectAlignmentMask)); - writer_->PutInt(type); - writer_->PutInt(size >> kObjectAlignmentBits); - PutEncodedAddress(addr); // encodes AllocationSpace - - // Visit all the pointers in the object other than the map. This - // will recursively serialize any as-yet-unvisited objects. - obj->Iterate(this); - - // Mark end of recursively embedded objects, start of object body. - writer_->PutC('|'); - // Write out the raw contents of the object. No compression, but - // fast to deserialize. - writer_->PutBytes(obj->address(), size); - // Update pointers and external references in the written object. - ReferenceUpdater updater(obj, this); - obj->Iterate(&updater); - updater.Update(writer_->position() - size); - -#ifdef DEBUG - if (FLAG_debug_serialization) { - // Write out the object epilogue to catch synchronization errors. - PutEncodedAddress(addr); - writer_->PutC(']'); - } -#endif - - objects_++; - return addr; -} - - -RelativeAddress Serializer::Allocate(HeapObject* obj) { - // Find out which AllocationSpace 'obj' is in. 
- AllocationSpace s; - bool found = false; - for (int i = FIRST_SPACE; !found && i <= LAST_SPACE; i++) { - s = static_cast<AllocationSpace>(i); - found = Heap::InSpace(obj, s); - } - CHECK(found); - int size = obj->Size(); - if (s == NEW_SPACE) { - if (size > Heap::MaxObjectSizeInPagedSpace()) { - s = LO_SPACE; - } else { - OldSpace* space = Heap::TargetSpace(obj); - ASSERT(space == Heap::old_pointer_space() || - space == Heap::old_data_space()); - s = (space == Heap::old_pointer_space()) ? - OLD_POINTER_SPACE : - OLD_DATA_SPACE; - } - } - GCTreatment gc_treatment = DataObject; - if (obj->IsFixedArray()) gc_treatment = PointerObject; - else if (obj->IsCode()) gc_treatment = CodeObject; - return allocator_[s]->Allocate(size, gc_treatment); -} - - -//------------------------------------------------------------------------------ -// Implementation of Deserializer - - -static const int kInitArraySize = 32; - - -Deserializer::Deserializer(const byte* str, int len) - : reader_(str, len), - map_pages_(kInitArraySize), - cell_pages_(kInitArraySize), - old_pointer_pages_(kInitArraySize), - old_data_pages_(kInitArraySize), - code_pages_(kInitArraySize), - large_objects_(kInitArraySize), - global_handles_(4) { - root_ = true; - roots_ = 0; - objects_ = 0; - reference_decoder_ = NULL; -#ifdef DEBUG - expect_debug_information_ = false; -#endif -} - - -Deserializer::~Deserializer() { - if (reference_decoder_) delete reference_decoder_; -} - - -void Deserializer::ExpectEncodedAddress(Address expected) { - Address a = GetEncodedAddress(); - USE(a); - ASSERT(a == expected); -} - - -#ifdef DEBUG -void Deserializer::Synchronize(const char* tag) { - if (expect_debug_information_) { - char buf[kMaxTagLength]; - reader_.ExpectC('S'); - int length = reader_.GetInt(); - ASSERT(length <= kMaxTagLength); - reader_.GetBytes(reinterpret_cast<Address>(buf), length); - ASSERT_EQ(strlen(tag), length); - ASSERT(strncmp(tag, buf, length) == 0); - } -} -#endif - - -class NoGlobalHandlesChecker : 
public ObjectVisitor { - public: - virtual void VisitPointers(Object** start, Object** end) { - ASSERT(false); - } -}; - - -class GlobalHandleDestroyer : public ObjectVisitor { - void VisitPointers(Object**start, Object**end) { - while (start < end) { - GlobalHandles::Destroy(start++); - } - } -}; - - -void Deserializer::Deserialize() { - // No global handles. - NoGlobalHandlesChecker checker; - GlobalHandles::IterateRoots(&checker); - // No active threads. - ASSERT_EQ(NULL, ThreadState::FirstInUse()); - // No active handles. - ASSERT(HandleScopeImplementer::instance()->blocks()->is_empty()); - reference_decoder_ = new ExternalReferenceDecoder(); - // By setting linear allocation only, we forbid the use of free list - // allocation which is not predicted by SimulatedAddress. - GetHeader(); - Heap::IterateRoots(this); - GetContextStack(); - // Any global handles that have been set up by deserialization are leaked - // since noone is keeping track of them. So we discard them now. - GlobalHandleDestroyer destroyer; - GlobalHandles::IterateRoots(&destroyer); -} - - -void Deserializer::VisitPointers(Object** start, Object** end) { - bool root = root_; - root_ = false; - for (Object** p = start; p < end; ++p) { - if (root) { - roots_++; - // Read the next object or pointer from the stream - // pointer in the stream. - int c = reader_.GetC(); - if (c == '[') { - *p = GetObject(); // embedded object - } else { - ASSERT(c == 'P'); // pointer to previously serialized object - *p = Resolve(reader_.GetAddress()); - } - } else { - // A pointer internal to a HeapObject that we've already - // read: resolve it to a true address (or Smi) - *p = Resolve(reinterpret_cast<Address>(*p)); - } - } - root_ = root; -} - - -void Deserializer::VisitCodeTarget(RelocInfo* rinfo) { - ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode())); - // On all platforms, the encoded code object address is only 32 bits. 
- Address encoded_address = reinterpret_cast<Address>(Memory::uint32_at( - reinterpret_cast<Address>(rinfo->target_object_address()))); - Code* target_object = reinterpret_cast<Code*>(Resolve(encoded_address)); - rinfo->set_target_address(target_object->instruction_start()); -} - - -void Deserializer::VisitExternalReferences(Address* start, Address* end) { - for (Address* p = start; p < end; ++p) { - uint32_t code = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(*p)); - *p = reference_decoder_->Decode(code); - } -} - - -void Deserializer::VisitRuntimeEntry(RelocInfo* rinfo) { - uint32_t* pc = reinterpret_cast<uint32_t*>(rinfo->target_address_address()); - uint32_t encoding = *pc; - Address target = reference_decoder_->Decode(encoding); - rinfo->set_target_address(target); -} - - -void Deserializer::GetFlags() { - reader_.ExpectC('F'); - int argc = reader_.GetInt() + 1; - char** argv = NewArray<char*>(argc); - reader_.ExpectC('['); - for (int i = 1; i < argc; i++) { - if (i > 1) reader_.ExpectC('|'); - argv[i] = reader_.GetString(); - } - reader_.ExpectC(']'); - has_log_ = false; - for (int i = 1; i < argc; i++) { - if (strcmp("--log_code", argv[i]) == 0) { - has_log_ = true; - } else if (strcmp("--nouse_ic", argv[i]) == 0) { - FLAG_use_ic = false; - } else if (strcmp("--debug_code", argv[i]) == 0) { - FLAG_debug_code = true; - } else if (strcmp("--nolazy", argv[i]) == 0) { - FLAG_lazy = false; - } - DeleteArray(argv[i]); - } - - DeleteArray(argv); -} - - -void Deserializer::GetLog() { - if (has_log_) { - reader_.ExpectC('L'); - char* snapshot_log = reader_.GetString(); -#ifdef ENABLE_LOGGING_AND_PROFILING - if (FLAG_log_code) { - LOG(Preamble(snapshot_log)); - } -#endif - DeleteArray(snapshot_log); - } -} - - -static void InitPagedSpace(PagedSpace* space, - int capacity, - List<Page*>* page_list) { - if (!space->EnsureCapacity(capacity)) { - V8::FatalProcessOutOfMemory("InitPagedSpace"); - } - PageIterator it(space, PageIterator::ALL_PAGES); - while 
(it.has_next()) page_list->Add(it.next()); -} - - -void Deserializer::GetHeader() { - reader_.ExpectC('D'); -#ifdef DEBUG - expect_debug_information_ = reader_.GetC() == '1'; -#else - // In release mode, don't attempt to read a snapshot containing - // synchronization tags. - if (reader_.GetC() != '0') FATAL("Snapshot contains synchronization tags."); -#endif -#ifdef V8_NATIVE_REGEXP - reader_.ExpectC('N'); -#else // Interpreted regexp. - reader_.ExpectC('I'); -#endif - // Ensure sufficient capacity in paged memory spaces to avoid growth - // during deserialization. - reader_.ExpectC('S'); - reader_.ExpectC('['); - InitPagedSpace(Heap::old_pointer_space(), - reader_.GetInt(), - &old_pointer_pages_); - reader_.ExpectC('|'); - InitPagedSpace(Heap::old_data_space(), reader_.GetInt(), &old_data_pages_); - reader_.ExpectC('|'); - InitPagedSpace(Heap::code_space(), reader_.GetInt(), &code_pages_); - reader_.ExpectC('|'); - InitPagedSpace(Heap::map_space(), reader_.GetInt(), &map_pages_); - reader_.ExpectC('|'); - InitPagedSpace(Heap::cell_space(), reader_.GetInt(), &cell_pages_); - reader_.ExpectC(']'); - // Create placeholders for global handles later to be fill during - // IterateRoots. 
- reader_.ExpectC('G'); - reader_.ExpectC('['); - int c = reader_.GetC(); - while (c != ']') { - ASSERT(c == 'N'); - global_handles_.Add(GlobalHandles::Create(NULL).location()); - c = reader_.GetC(); - } -} - - -void Deserializer::GetGlobalHandleStack(List<Handle<Object> >* stack) { - reader_.ExpectC('['); - int length = reader_.GetInt(); - for (int i = 0; i < length; i++) { - reader_.ExpectC('|'); - int gh_index = reader_.GetInt(); - stack->Add(global_handles_[gh_index]); - } - reader_.ExpectC(']'); -} - - -void Deserializer::GetContextStack() { - reader_.ExpectC('C'); - CHECK_EQ(reader_.GetC(), '['); - int count = reader_.GetInt(); - List<Context*> entered_contexts(count); - if (count > 0) { - Object** start = reinterpret_cast<Object**>(&entered_contexts.first()); - VisitPointers(start, start + count); - } - reader_.ExpectC(']'); - for (int i = 0; i < count; i++) { - HandleScopeImplementer::instance()->SaveContext(entered_contexts[i]); - } -} - - -Address Deserializer::GetEncodedAddress() { - reader_.ExpectC('P'); - return reader_.GetAddress(); -} - - -Object* Deserializer::GetObject() { - // Read the prologue: type, size and encoded address. - InstanceType type = static_cast<InstanceType>(reader_.GetInt()); - int size = reader_.GetInt() << kObjectAlignmentBits; - Address a = GetEncodedAddress(); - - // Get a raw object of the right size in the right space. - AllocationSpace space = GetSpace(a); - Object* o; - if (IsLargeExecutableObject(a)) { - o = Heap::lo_space()->AllocateRawCode(size); - } else if (IsLargeFixedArray(a)) { - o = Heap::lo_space()->AllocateRawFixedArray(size); - } else { - AllocationSpace retry_space = (space == NEW_SPACE) - ? Heap::TargetSpaceId(type) - : space; - o = Heap::AllocateRaw(size, space, retry_space); - } - ASSERT(!o->IsFailure()); - // Check that the simulation of heap allocation was correct. - ASSERT(o == Resolve(a)); - - // Read any recursively embedded objects. 
- int c = reader_.GetC(); - while (c == '[') { - GetObject(); - c = reader_.GetC(); - } - ASSERT(c == '|'); - - HeapObject* obj = reinterpret_cast<HeapObject*>(o); - // Read the uninterpreted contents of the object after the map - reader_.GetBytes(obj->address(), size); -#ifdef DEBUG - if (expect_debug_information_) { - // Read in the epilogue to check that we're still synchronized - ExpectEncodedAddress(a); - reader_.ExpectC(']'); - } -#endif - - // Resolve the encoded pointers we just read in. - // Same as obj->Iterate(this), but doesn't rely on the map pointer being set. - VisitPointer(reinterpret_cast<Object**>(obj->address())); - obj->IterateBody(type, size, this); - - if (type == CODE_TYPE) { - LOG(CodeMoveEvent(a, obj->address())); - } - objects_++; - return o; -} - - -static inline Object* ResolvePaged(int page_index, - int page_offset, - PagedSpace* space, - List<Page*>* page_list) { - ASSERT(page_index < page_list->length()); - Address address = (*page_list)[page_index]->OffsetToAddress(page_offset); - return HeapObject::FromAddress(address); -} - - -template<typename T> -void ConcatReversed(List<T>* target, const List<T>& source) { - for (int i = source.length() - 1; i >= 0; i--) { - target->Add(source[i]); - } -} - - -Object* Deserializer::Resolve(Address encoded) { - Object* o = reinterpret_cast<Object*>(encoded); - if (o->IsSmi()) return o; - - // Encoded addresses of HeapObjects always have 'HeapObject' tags. - ASSERT(o->IsHeapObject()); - switch (GetSpace(encoded)) { - // For Map space and Old space, we cache the known Pages in map_pages, - // old_pointer_pages and old_data_pages. Even though MapSpace keeps a list - // of page addresses, we don't rely on it since GetObject uses AllocateRaw, - // and that appears not to update the page list. 
- case MAP_SPACE: - return ResolvePaged(PageIndex(encoded), PageOffset(encoded), - Heap::map_space(), &map_pages_); - case CELL_SPACE: - return ResolvePaged(PageIndex(encoded), PageOffset(encoded), - Heap::cell_space(), &cell_pages_); - case OLD_POINTER_SPACE: - return ResolvePaged(PageIndex(encoded), PageOffset(encoded), - Heap::old_pointer_space(), &old_pointer_pages_); - case OLD_DATA_SPACE: - return ResolvePaged(PageIndex(encoded), PageOffset(encoded), - Heap::old_data_space(), &old_data_pages_); - case CODE_SPACE: - return ResolvePaged(PageIndex(encoded), PageOffset(encoded), - Heap::code_space(), &code_pages_); - case NEW_SPACE: - return HeapObject::FromAddress(Heap::NewSpaceStart() + - NewSpaceOffset(encoded)); - case LO_SPACE: - // Cache the known large_objects, allocated one per 'page' - int index = LargeObjectIndex(encoded); - if (index >= large_objects_.length()) { - int new_object_count = - Heap::lo_space()->PageCount() - large_objects_.length(); - List<Object*> new_objects(new_object_count); - LargeObjectIterator it(Heap::lo_space()); - for (int i = 0; i < new_object_count; i++) { - new_objects.Add(it.next()); - } -#ifdef DEBUG - for (int i = large_objects_.length() - 1; i >= 0; i--) { - ASSERT(it.next() == large_objects_[i]); - } -#endif - ConcatReversed(&large_objects_, new_objects); - ASSERT(index < large_objects_.length()); - } - return large_objects_[index]; // s.page_offset() is ignored. - } - UNREACHABLE(); - return NULL; -} - - -Deserializer2::Deserializer2(SnapshotByteSource* source) +Deserializer::Deserializer(SnapshotByteSource* source) : source_(source), external_reference_decoder_(NULL) { - for (int i = 0; i <= LAST_SPACE; i++) { - fullness_[i] = 0; - } } // This routine both allocates a new object, and also keeps // track of where objects have been allocated so that we can // fix back references when deserializing. 
-Address Deserializer2::Allocate(int space_index, int size) { - HeapObject* new_object; - int old_fullness = CurrentAllocationAddress(space_index); - // When we start a new page we need to record its location. - bool record_page = (old_fullness == 0); - if (SpaceIsPaged(space_index)) { - PagedSpace* space; - switch (space_index) { - case OLD_DATA_SPACE: space = Heap::old_data_space(); break; - case OLD_POINTER_SPACE: space = Heap::old_pointer_space(); break; - case MAP_SPACE: space = Heap::map_space(); break; - case CODE_SPACE: space = Heap::code_space(); break; - case CELL_SPACE: space = Heap::cell_space(); break; - default: UNREACHABLE(); space = NULL; break; - } - ASSERT(size <= Page::kPageSize - Page::kObjectStartOffset); - int current_page = old_fullness >> Page::kPageSizeBits; - int new_fullness = old_fullness + size; - int new_page = new_fullness >> Page::kPageSizeBits; - // What is our new position within the current page. - int intra_page_offset = new_fullness - current_page * Page::kPageSize; - if (intra_page_offset > Page::kPageSize - Page::kObjectStartOffset) { - // This object will not fit in a page and we have to move to the next. 
- new_page = current_page + 1; - old_fullness = new_page << Page::kPageSizeBits; - new_fullness = old_fullness + size; - record_page = true; +Address Deserializer::Allocate(int space_index, Space* space, int size) { + Address address; + if (!SpaceIsLarge(space_index)) { + ASSERT(!SpaceIsPaged(space_index) || + size <= Page::kPageSize - Page::kObjectStartOffset); + Object* new_allocation; + if (space_index == NEW_SPACE) { + new_allocation = reinterpret_cast<NewSpace*>(space)->AllocateRaw(size); + } else { + new_allocation = reinterpret_cast<PagedSpace*>(space)->AllocateRaw(size); } - fullness_[space_index] = new_fullness; - Object* new_allocation = space->AllocateRaw(size); - new_object = HeapObject::cast(new_allocation); + HeapObject* new_object = HeapObject::cast(new_allocation); ASSERT(!new_object->IsFailure()); - ASSERT((reinterpret_cast<intptr_t>(new_object->address()) & - Page::kPageAlignmentMask) == - (old_fullness & Page::kPageAlignmentMask) + - Page::kObjectStartOffset); - } else if (SpaceIsLarge(space_index)) { + address = new_object->address(); + high_water_[space_index] = address + size; + } else { + ASSERT(SpaceIsLarge(space_index)); ASSERT(size > Page::kPageSize - Page::kObjectStartOffset); - fullness_[LO_SPACE]++; - LargeObjectSpace* lo_space = Heap::lo_space(); + LargeObjectSpace* lo_space = reinterpret_cast<LargeObjectSpace*>(space); Object* new_allocation; if (space_index == kLargeData) { new_allocation = lo_space->AllocateRaw(size); } else if (space_index == kLargeFixedArray) { new_allocation = lo_space->AllocateRawFixedArray(size); } else { - ASSERT(space_index == kLargeCode); + ASSERT_EQ(kLargeCode, space_index); new_allocation = lo_space->AllocateRawCode(size); } ASSERT(!new_allocation->IsFailure()); - new_object = HeapObject::cast(new_allocation); - record_page = true; - // The page recording below records all large objects in the same space. 
- space_index = LO_SPACE; - } else { - ASSERT(space_index == NEW_SPACE); - Object* new_allocation = Heap::new_space()->AllocateRaw(size); - fullness_[space_index] += size; - ASSERT(!new_allocation->IsFailure()); - new_object = HeapObject::cast(new_allocation); - } - Address address = new_object->address(); - if (record_page) { - pages_[space_index].Add(address); + HeapObject* new_object = HeapObject::cast(new_allocation); + // Record all large objects in the same space. + address = new_object->address(); + high_water_[LO_SPACE] = address + size; } + last_object_address_ = address; return address; } // This returns the address of an object that has been described in the // snapshot as being offset bytes back in a particular space. -HeapObject* Deserializer2::GetAddress(int space) { +HeapObject* Deserializer::GetAddressFromEnd(int space) { + int offset = source_->GetInt(); + ASSERT(!SpaceIsLarge(space)); + offset <<= kObjectAlignmentBits; + return HeapObject::FromAddress(high_water_[space] - offset); +} + + +// This returns the address of an object that has been described in the +// snapshot as being offset bytes into a particular space. +HeapObject* Deserializer::GetAddressFromStart(int space) { int offset = source_->GetInt(); if (SpaceIsLarge(space)) { // Large spaces have one object per 'page'. - return HeapObject::FromAddress( - pages_[LO_SPACE][fullness_[LO_SPACE] - offset]); + return HeapObject::FromAddress(pages_[LO_SPACE][offset]); } offset <<= kObjectAlignmentBits; if (space == NEW_SPACE) { // New space has only one space - numbered 0. 
- return HeapObject::FromAddress( - pages_[space][0] + fullness_[space] - offset); + return HeapObject::FromAddress(pages_[space][0] + offset); } ASSERT(SpaceIsPaged(space)); - int virtual_address = fullness_[space] - offset; - int page_of_pointee = (virtual_address) >> Page::kPageSizeBits; + int page_of_pointee = offset >> Page::kPageSizeBits; Address object_address = pages_[space][page_of_pointee] + - (virtual_address & Page::kPageAlignmentMask); + (offset & Page::kPageAlignmentMask); return HeapObject::FromAddress(object_address); } -void Deserializer2::Deserialize() { +void Deserializer::Deserialize() { // Don't GC while deserializing - just expand the heap. AlwaysAllocateScope always_allocate; // Don't use the free lists while deserializing. @@ -1878,9 +577,9 @@ void Deserializer2::Deserialize() { ASSERT_EQ(NULL, ThreadState::FirstInUse()); // No active handles. ASSERT(HandleScopeImplementer::instance()->blocks()->is_empty()); - ASSERT(external_reference_decoder_ == NULL); + ASSERT_EQ(NULL, external_reference_decoder_); external_reference_decoder_ = new ExternalReferenceDecoder(); - Heap::IterateRoots(this); + Heap::IterateRoots(this, VISIT_ONLY_STRONG); ASSERT(source_->AtEOF()); delete external_reference_decoder_; external_reference_decoder_ = NULL; @@ -1888,20 +587,11 @@ void Deserializer2::Deserialize() { // This is called on the roots. It is the driver of the deserialization -// process. -void Deserializer2::VisitPointers(Object** start, Object** end) { - for (Object** current = start; current < end; current++) { - DataType data = static_cast<DataType>(source_->Get()); - if (data == SMI_SERIALIZATION) { - *current = Smi::FromInt(source_->GetInt() - kSmiBias); - } else if (data == BACKREF_SERIALIZATION) { - int space = source_->Get(); - *current = GetAddress(space); - } else { - ASSERT(data == OBJECT_SERIALIZATION); - ReadObject(current); - } - } +// process. It is also called on the body of each function. 
+void Deserializer::VisitPointers(Object** start, Object** end) { + // The space must be new space. Any other space would cause ReadChunk to try + // to update the remembered using NULL as the address. + ReadChunk(start, end, NEW_SPACE, NULL); } @@ -1911,42 +601,106 @@ void Deserializer2::VisitPointers(Object** start, Object** end) { // written very late, which means the ByteArray map is not set up by the // time we need to use it to mark the space at the end of a page free (by // making it into a byte array). -bool Deserializer2::ReadObject(Object** write_back) { - int space = source_->Get(); +void Deserializer::ReadObject(int space_number, + Space* space, + Object** write_back) { int size = source_->GetInt() << kObjectAlignmentBits; - Address address = Allocate(space, size); + Address address = Allocate(space_number, space, size); *write_back = HeapObject::FromAddress(address); Object** current = reinterpret_cast<Object**>(address); Object** limit = current + (size >> kPointerSizeLog2); + ReadChunk(current, limit, space_number, address); +} + + +#define ONE_CASE_PER_SPACE(base_tag) \ + case (base_tag) + NEW_SPACE: /* NOLINT */ \ + case (base_tag) + OLD_POINTER_SPACE: /* NOLINT */ \ + case (base_tag) + OLD_DATA_SPACE: /* NOLINT */ \ + case (base_tag) + CODE_SPACE: /* NOLINT */ \ + case (base_tag) + MAP_SPACE: /* NOLINT */ \ + case (base_tag) + CELL_SPACE: /* NOLINT */ \ + case (base_tag) + kLargeData: /* NOLINT */ \ + case (base_tag) + kLargeCode: /* NOLINT */ \ + case (base_tag) + kLargeFixedArray: /* NOLINT */ + + +void Deserializer::ReadChunk(Object** current, + Object** limit, + int space, + Address address) { while (current < limit) { - DataType data = static_cast<DataType>(source_->Get()); + int data = source_->Get(); switch (data) { - case SMI_SERIALIZATION: - *current++ = Smi::FromInt(source_->GetInt() - kSmiBias); - break; +#define RAW_CASE(index, size) \ + case RAW_DATA_SERIALIZATION + index: { \ + byte* raw_data_out = reinterpret_cast<byte*>(current); \ 
+ source_->CopyRaw(raw_data_out, size); \ + current = reinterpret_cast<Object**>(raw_data_out + size); \ + break; \ + } + COMMON_RAW_LENGTHS(RAW_CASE) +#undef RAW_CASE case RAW_DATA_SERIALIZATION: { int size = source_->GetInt(); byte* raw_data_out = reinterpret_cast<byte*>(current); - for (int j = 0; j < size; j++) { - *raw_data_out++ = source_->Get(); - } - current = reinterpret_cast<Object**>(raw_data_out); + source_->CopyRaw(raw_data_out, size); + current = reinterpret_cast<Object**>(raw_data_out + size); break; } - case OBJECT_SERIALIZATION: { - // Recurse to unpack an object that is forward-referenced from here. - bool in_new_space = ReadObject(current); - if (in_new_space && space != NEW_SPACE) { - Heap::RecordWrite(address, - reinterpret_cast<Address>(current) - address); + case OBJECT_SERIALIZATION + NEW_SPACE: { + ReadObject(NEW_SPACE, Heap::new_space(), current); + if (space != NEW_SPACE) { + Heap::RecordWrite(address, static_cast<int>( + reinterpret_cast<Address>(current) - address)); } current++; break; } - case CODE_OBJECT_SERIALIZATION: { + case OBJECT_SERIALIZATION + OLD_DATA_SPACE: + ReadObject(OLD_DATA_SPACE, Heap::old_data_space(), current++); + break; + case OBJECT_SERIALIZATION + OLD_POINTER_SPACE: + ReadObject(OLD_POINTER_SPACE, Heap::old_pointer_space(), current++); + break; + case OBJECT_SERIALIZATION + MAP_SPACE: + ReadObject(MAP_SPACE, Heap::map_space(), current++); + break; + case OBJECT_SERIALIZATION + CODE_SPACE: + ReadObject(CODE_SPACE, Heap::code_space(), current++); + Logger::LogCodeObject(current[-1]); + break; + case OBJECT_SERIALIZATION + CELL_SPACE: + ReadObject(CELL_SPACE, Heap::cell_space(), current++); + break; + case OBJECT_SERIALIZATION + kLargeData: + ReadObject(kLargeData, Heap::lo_space(), current++); + break; + case OBJECT_SERIALIZATION + kLargeCode: + ReadObject(kLargeCode, Heap::lo_space(), current++); + Logger::LogCodeObject(current[-1]); + break; + case OBJECT_SERIALIZATION + kLargeFixedArray: + 
ReadObject(kLargeFixedArray, Heap::lo_space(), current++); + break; + case CODE_OBJECT_SERIALIZATION + kLargeCode: { + Object* new_code_object = NULL; + ReadObject(kLargeCode, Heap::lo_space(), &new_code_object); + Code* code_object = reinterpret_cast<Code*>(new_code_object); + Logger::LogCodeObject(code_object); + // Setting a branch/call to another code object from code. + Address location_of_branch_data = reinterpret_cast<Address>(current); + Assembler::set_target_at(location_of_branch_data, + code_object->instruction_start()); + location_of_branch_data += Assembler::kCallTargetSize; + current = reinterpret_cast<Object**>(location_of_branch_data); + break; + } + case CODE_OBJECT_SERIALIZATION + CODE_SPACE: { Object* new_code_object = NULL; - ReadObject(&new_code_object); + ReadObject(CODE_SPACE, Heap::code_space(), &new_code_object); Code* code_object = reinterpret_cast<Code*>(new_code_object); + Logger::LogCodeObject(code_object); // Setting a branch/call to another code object from code. Address location_of_branch_data = reinterpret_cast<Address>(current); Assembler::set_target_at(location_of_branch_data, @@ -1955,21 +709,42 @@ bool Deserializer2::ReadObject(Object** write_back) { current = reinterpret_cast<Object**>(location_of_branch_data); break; } - case BACKREF_SERIALIZATION: { + ONE_CASE_PER_SPACE(BACKREF_SERIALIZATION) { // Write a backreference to an object we unpacked earlier. - int backref_space = source_->Get(); + int backref_space = (data & kSpaceMask); if (backref_space == NEW_SPACE && space != NEW_SPACE) { - Heap::RecordWrite(address, - reinterpret_cast<Address>(current) - address); + Heap::RecordWrite(address, static_cast<int>( + reinterpret_cast<Address>(current) - address)); + } + *current++ = GetAddressFromEnd(backref_space); + break; + } + ONE_CASE_PER_SPACE(REFERENCE_SERIALIZATION) { + // Write a reference to an object we unpacked earlier. 
+ int reference_space = (data & kSpaceMask); + if (reference_space == NEW_SPACE && space != NEW_SPACE) { + Heap::RecordWrite(address, static_cast<int>( + reinterpret_cast<Address>(current) - address)); } - *current++ = GetAddress(backref_space); + *current++ = GetAddressFromStart(reference_space); break; } - case CODE_BACKREF_SERIALIZATION: { - int backref_space = source_->Get(); +#define COMMON_REFS_CASE(index, reference_space, address) \ + case REFERENCE_SERIALIZATION + index: { \ + ASSERT(SpaceIsPaged(reference_space)); \ + Address object_address = \ + pages_[reference_space][0] + (address << kObjectAlignmentBits); \ + *current++ = HeapObject::FromAddress(object_address); \ + break; \ + } + COMMON_REFERENCE_PATTERNS(COMMON_REFS_CASE) +#undef COMMON_REFS_CASE + ONE_CASE_PER_SPACE(CODE_BACKREF_SERIALIZATION) { + int backref_space = (data & kSpaceMask); // Can't use Code::cast because heap is not set up yet and assertions // will fail. - Code* code_object = reinterpret_cast<Code*>(GetAddress(backref_space)); + Code* code_object = + reinterpret_cast<Code*>(GetAddressFromEnd(backref_space)); // Setting a branch/call to previously decoded code object from code. Address location_of_branch_data = reinterpret_cast<Address>(current); Assembler::set_target_at(location_of_branch_data, @@ -1977,39 +752,74 @@ bool Deserializer2::ReadObject(Object** write_back) { location_of_branch_data += Assembler::kCallTargetSize; current = reinterpret_cast<Object**>(location_of_branch_data); break; - } + } + ONE_CASE_PER_SPACE(CODE_REFERENCE_SERIALIZATION) { + int backref_space = (data & kSpaceMask); + // Can't use Code::cast because heap is not set up yet and assertions + // will fail. + Code* code_object = + reinterpret_cast<Code*>(GetAddressFromStart(backref_space)); + // Setting a branch/call to previously decoded code object from code. 
+ Address location_of_branch_data = reinterpret_cast<Address>(current); + Assembler::set_target_at(location_of_branch_data, + code_object->instruction_start()); + location_of_branch_data += Assembler::kCallTargetSize; + current = reinterpret_cast<Object**>(location_of_branch_data); + break; + } case EXTERNAL_REFERENCE_SERIALIZATION: { int reference_id = source_->GetInt(); Address address = external_reference_decoder_->Decode(reference_id); *current++ = reinterpret_cast<Object*>(address); break; } + case EXTERNAL_BRANCH_TARGET_SERIALIZATION: { + int reference_id = source_->GetInt(); + Address address = external_reference_decoder_->Decode(reference_id); + Address location_of_branch_data = reinterpret_cast<Address>(current); + Assembler::set_external_target_at(location_of_branch_data, address); + location_of_branch_data += Assembler::kExternalTargetSize; + current = reinterpret_cast<Object**>(location_of_branch_data); + break; + } + case START_NEW_PAGE_SERIALIZATION: { + int space = source_->Get(); + pages_[space].Add(last_object_address_); + break; + } + case NATIVES_STRING_RESOURCE: { + int index = source_->Get(); + Vector<const char> source_vector = Natives::GetScriptSource(index); + NativesExternalStringResource* resource = + new NativesExternalStringResource(source_vector.start()); + *current++ = reinterpret_cast<Object*>(resource); + break; + } default: UNREACHABLE(); } } - ASSERT(current == limit); - return space == NEW_SPACE; + ASSERT_EQ(current, limit); } void SnapshotByteSink::PutInt(uintptr_t integer, const char* description) { const int max_shift = ((kPointerSize * kBitsPerByte) / 7) * 7; for (int shift = max_shift; shift > 0; shift -= 7) { - if (integer >= 1u << shift) { - Put(((integer >> shift) & 0x7f) | 0x80, "intpart"); + if (integer >= static_cast<uintptr_t>(1u) << shift) { + Put(((integer >> shift) & 0x7f) | 0x80, "IntPart"); } } - Put(integer & 0x7f, "intlastpart"); + PutSection(integer & 0x7f, "IntLastPart"); } #ifdef DEBUG -void 
Deserializer2::Synchronize(const char* tag) { +void Deserializer::Synchronize(const char* tag) { int data = source_->Get(); // If this assert fails then that indicates that you have a mismatch between // the number of GC roots when serializing and deserializing. - ASSERT(data == SYNCHRONIZE); + ASSERT_EQ(SYNCHRONIZE, data); do { int character = source_->Get(); if (character == 0) break; @@ -2023,18 +833,18 @@ void Deserializer2::Synchronize(const char* tag) { } -void Serializer2::Synchronize(const char* tag) { +void Serializer::Synchronize(const char* tag) { sink_->Put(SYNCHRONIZE, tag); int character; do { character = *tag++; - sink_->Put(character, "tagcharacter"); + sink_->PutSection(character, "TagCharacter"); } while (character != 0); } #endif -Serializer2::Serializer2(SnapshotByteSink* sink) +Serializer::Serializer(SnapshotByteSink* sink) : sink_(sink), current_root_index_(0), external_reference_encoder_(NULL) { @@ -2044,123 +854,183 @@ Serializer2::Serializer2(SnapshotByteSink* sink) } -void Serializer2::Serialize() { +void Serializer::Serialize() { // No active threads. CHECK_EQ(NULL, ThreadState::FirstInUse()); // No active or weak handles. CHECK(HandleScopeImplementer::instance()->blocks()->is_empty()); CHECK_EQ(0, GlobalHandles::NumberOfWeakHandles()); - ASSERT(external_reference_encoder_ == NULL); + CHECK_EQ(NULL, external_reference_encoder_); + // We don't support serializing installed extensions. 
+ for (RegisteredExtension* ext = RegisteredExtension::first_extension(); + ext != NULL; + ext = ext->next()) { + CHECK_NE(v8::INSTALLED, ext->state()); + } external_reference_encoder_ = new ExternalReferenceEncoder(); - Heap::IterateRoots(this); + Heap::IterateRoots(this, VISIT_ONLY_STRONG); delete external_reference_encoder_; external_reference_encoder_ = NULL; } -void Serializer2::VisitPointers(Object** start, Object** end) { +void Serializer::VisitPointers(Object** start, Object** end) { for (Object** current = start; current < end; current++) { - SerializeObject(*current, TAGGED_REPRESENTATION); + if ((*current)->IsSmi()) { + sink_->Put(RAW_DATA_SERIALIZATION, "RawData"); + sink_->PutInt(kPointerSize, "length"); + for (int i = 0; i < kPointerSize; i++) { + sink_->Put(reinterpret_cast<byte*>(current)[i], "Byte"); + } + } else { + SerializeObject(*current, TAGGED_REPRESENTATION); + } } } -void Serializer2::SerializeObject( +void Serializer::SerializeObject( Object* o, ReferenceRepresentation reference_representation) { - if (o->IsHeapObject()) { - HeapObject* heap_object = HeapObject::cast(o); - MapWord map_word = heap_object->map_word(); - if (map_word.IsSerializationAddress()) { - int space = SpaceOfAlreadySerializedObject(heap_object); - int offset = - CurrentAllocationAddress(space) - map_word.ToSerializationAddress(); - // If we are actually dealing with real offsets (and not a numbering of - // all objects) then we should shift out the bits that are always 0. 
- if (!SpaceIsLarge(space)) offset >>= kObjectAlignmentBits; - if (reference_representation == CODE_TARGET_REPRESENTATION) { - sink_->Put(CODE_BACKREF_SERIALIZATION, "BackRefCodeSerialization"); + CHECK(o->IsHeapObject()); + HeapObject* heap_object = HeapObject::cast(o); + MapWord map_word = heap_object->map_word(); + if (map_word.IsSerializationAddress()) { + int space = SpaceOfAlreadySerializedObject(heap_object); + int address = map_word.ToSerializationAddress(); + int offset = CurrentAllocationAddress(space) - address; + bool from_start = true; + if (SpaceIsPaged(space)) { + if ((CurrentAllocationAddress(space) >> Page::kPageSizeBits) == + (address >> Page::kPageSizeBits)) { + from_start = false; + address = offset; + } + } else if (space == NEW_SPACE) { + if (offset < address) { + from_start = false; + address = offset; + } + } + // If we are actually dealing with real offsets (and not a numbering of + // all objects) then we should shift out the bits that are always 0. + if (!SpaceIsLarge(space)) address >>= kObjectAlignmentBits; + if (reference_representation == CODE_TARGET_REPRESENTATION) { + if (from_start) { + sink_->Put(CODE_REFERENCE_SERIALIZATION + space, "RefCodeSer"); + sink_->PutInt(address, "address"); } else { - ASSERT(reference_representation == TAGGED_REPRESENTATION); - sink_->Put(BACKREF_SERIALIZATION, "BackRefSerialization"); + sink_->Put(CODE_BACKREF_SERIALIZATION + space, "BackRefCodeSer"); + sink_->PutInt(address, "address"); } - sink_->Put(space, "space"); - sink_->PutInt(offset, "offset"); } else { - // Object has not yet been serialized. Serialize it here. 
- ObjectSerializer serializer(this, - heap_object, - sink_, - reference_representation); - serializer.Serialize(); + CHECK_EQ(TAGGED_REPRESENTATION, reference_representation); + if (from_start) { +#define COMMON_REFS_CASE(tag, common_space, common_offset) \ + if (space == common_space && address == common_offset) { \ + sink_->PutSection(tag + REFERENCE_SERIALIZATION, "RefSer"); \ + } else /* NOLINT */ + COMMON_REFERENCE_PATTERNS(COMMON_REFS_CASE) +#undef COMMON_REFS_CASE + { /* NOLINT */ + sink_->Put(REFERENCE_SERIALIZATION + space, "RefSer"); + sink_->PutInt(address, "address"); + } + } else { + sink_->Put(BACKREF_SERIALIZATION + space, "BackRefSer"); + sink_->PutInt(address, "address"); + } } } else { - // Serialize a Smi. - unsigned int value = Smi::cast(o)->value() + kSmiBias; - sink_->Put(SMI_SERIALIZATION, "SmiSerialization"); - sink_->PutInt(value, "smi"); + // Object has not yet been serialized. Serialize it here. + ObjectSerializer serializer(this, + heap_object, + sink_, + reference_representation); + serializer.Serialize(); } } -void Serializer2::ObjectSerializer::Serialize() { - int space = Serializer2::SpaceOfObject(object_); + +void Serializer::ObjectSerializer::Serialize() { + int space = Serializer::SpaceOfObject(object_); int size = object_->Size(); if (reference_representation_ == TAGGED_REPRESENTATION) { - sink_->Put(OBJECT_SERIALIZATION, "ObjectSerialization"); + sink_->Put(OBJECT_SERIALIZATION + space, "ObjectSerialization"); } else { - ASSERT(reference_representation_ == CODE_TARGET_REPRESENTATION); - sink_->Put(CODE_OBJECT_SERIALIZATION, "ObjectSerialization"); + CHECK_EQ(CODE_TARGET_REPRESENTATION, reference_representation_); + sink_->Put(CODE_OBJECT_SERIALIZATION + space, "ObjectSerialization"); } - sink_->Put(space, "space"); sink_->PutInt(size >> kObjectAlignmentBits, "Size in words"); // Get the map before overwriting it. Map* map = object_->map(); // Mark this object as already serialized. 
- object_->set_map_word( - MapWord::FromSerializationAddress(serializer_->Allocate(space, size))); + bool start_new_page; + object_->set_map_word(MapWord::FromSerializationAddress( + serializer_->Allocate(space, size, &start_new_page))); + if (start_new_page) { + sink_->Put(START_NEW_PAGE_SERIALIZATION, "NewPage"); + sink_->PutSection(space, "NewPageSpace"); + } // Serialize the map (first word of the object). serializer_->SerializeObject(map, TAGGED_REPRESENTATION); // Serialize the rest of the object. - ASSERT(bytes_processed_so_far_ == 0); + CHECK_EQ(0, bytes_processed_so_far_); bytes_processed_so_far_ = kPointerSize; object_->IterateBody(map->instance_type(), size, this); OutputRawData(object_->address() + size); } -void Serializer2::ObjectSerializer::VisitPointers(Object** start, - Object** end) { - Address pointers_start = reinterpret_cast<Address>(start); - OutputRawData(pointers_start); +void Serializer::ObjectSerializer::VisitPointers(Object** start, + Object** end) { + Object** current = start; + while (current < end) { + while (current < end && (*current)->IsSmi()) current++; + if (current < end) OutputRawData(reinterpret_cast<Address>(current)); - for (Object** current = start; current < end; current++) { - serializer_->SerializeObject(*current, TAGGED_REPRESENTATION); + while (current < end && !(*current)->IsSmi()) { + serializer_->SerializeObject(*current, TAGGED_REPRESENTATION); + bytes_processed_so_far_ += kPointerSize; + current++; + } } - bytes_processed_so_far_ += (end - start) * kPointerSize; } -void Serializer2::ObjectSerializer::VisitExternalReferences(Address* start, - Address* end) { +void Serializer::ObjectSerializer::VisitExternalReferences(Address* start, + Address* end) { Address references_start = reinterpret_cast<Address>(start); OutputRawData(references_start); for (Address* current = start; current < end; current++) { - sink_->Put(EXTERNAL_REFERENCE_SERIALIZATION, "External reference"); + sink_->Put(EXTERNAL_REFERENCE_SERIALIZATION, 
"ExternalReference"); int reference_id = serializer_->EncodeExternalReference(*current); sink_->PutInt(reference_id, "reference id"); } - bytes_processed_so_far_ += (end - start) * kPointerSize; + bytes_processed_so_far_ += static_cast<int>((end - start) * kPointerSize); } -void Serializer2::ObjectSerializer::VisitCodeTarget(RelocInfo* rinfo) { - ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode())); +void Serializer::ObjectSerializer::VisitRuntimeEntry(RelocInfo* rinfo) { + Address target_start = rinfo->target_address_address(); + OutputRawData(target_start); + Address target = rinfo->target_address(); + uint32_t encoding = serializer_->EncodeExternalReference(target); + CHECK(target == NULL ? encoding == 0 : encoding != 0); + sink_->Put(EXTERNAL_BRANCH_TARGET_SERIALIZATION, "ExternalReference"); + sink_->PutInt(encoding, "reference id"); + bytes_processed_so_far_ += Assembler::kExternalTargetSize; +} + + +void Serializer::ObjectSerializer::VisitCodeTarget(RelocInfo* rinfo) { + CHECK(RelocInfo::IsCodeTarget(rinfo->rmode())); Address target_start = rinfo->target_address_address(); OutputRawData(target_start); Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address()); @@ -2169,26 +1039,62 @@ void Serializer2::ObjectSerializer::VisitCodeTarget(RelocInfo* rinfo) { } -void Serializer2::ObjectSerializer::OutputRawData(Address up_to) { +void Serializer::ObjectSerializer::VisitExternalAsciiString( + v8::String::ExternalAsciiStringResource** resource_pointer) { + Address references_start = reinterpret_cast<Address>(resource_pointer); + OutputRawData(references_start); + for (int i = 0; i < Natives::GetBuiltinsCount(); i++) { + // Use raw_unchecked when maps are munged. + Object* source = Heap::raw_unchecked_natives_source_cache()->get(i); + if (!source->IsUndefined()) { + // Don't use cast when maps are munged. 
+ ExternalAsciiString* string = + reinterpret_cast<ExternalAsciiString*>(source); + typedef v8::String::ExternalAsciiStringResource Resource; + Resource* resource = string->resource(); + if (resource == *resource_pointer) { + sink_->Put(NATIVES_STRING_RESOURCE, "NativesStringResource"); + sink_->PutSection(i, "NativesStringResourceEnd"); + bytes_processed_so_far_ += sizeof(resource); + return; + } + } + } + // One of the strings in the natives cache should match the resource. We + // can't serialize any other kinds of external strings. + UNREACHABLE(); +} + + +void Serializer::ObjectSerializer::OutputRawData(Address up_to) { Address object_start = object_->address(); - int up_to_offset = up_to - object_start; + int up_to_offset = static_cast<int>(up_to - object_start); int skipped = up_to_offset - bytes_processed_so_far_; // This assert will fail if the reloc info gives us the target_address_address // locations in a non-ascending order. Luckily that doesn't happen. ASSERT(skipped >= 0); if (skipped != 0) { - sink_->Put(RAW_DATA_SERIALIZATION, "raw data"); - sink_->PutInt(skipped, "length"); + Address base = object_start + bytes_processed_so_far_; +#define RAW_CASE(index, length) \ + if (skipped == length) { \ + sink_->PutSection(RAW_DATA_SERIALIZATION + index, "RawDataFixed"); \ + } else /* NOLINT */ + COMMON_RAW_LENGTHS(RAW_CASE) +#undef RAW_CASE + { /* NOLINT */ + sink_->Put(RAW_DATA_SERIALIZATION, "RawData"); + sink_->PutInt(skipped, "length"); + } for (int i = 0; i < skipped; i++) { - unsigned int data = object_start[bytes_processed_so_far_ + i]; - sink_->Put(data, "byte"); + unsigned int data = base[i]; + sink_->PutSection(data, "Byte"); } + bytes_processed_so_far_ += skipped; } - bytes_processed_so_far_ += skipped; } -int Serializer2::SpaceOfObject(HeapObject* object) { +int Serializer::SpaceOfObject(HeapObject* object) { for (int i = FIRST_SPACE; i <= LAST_SPACE; i++) { AllocationSpace s = static_cast<AllocationSpace>(i); if (Heap::InSpace(object, s)) { @@ 
-2209,7 +1115,7 @@ int Serializer2::SpaceOfObject(HeapObject* object) { } -int Serializer2::SpaceOfAlreadySerializedObject(HeapObject* object) { +int Serializer::SpaceOfAlreadySerializedObject(HeapObject* object) { for (int i = FIRST_SPACE; i <= LAST_SPACE; i++) { AllocationSpace s = static_cast<AllocationSpace>(i); if (Heap::InSpace(object, s)) { @@ -2221,13 +1127,18 @@ int Serializer2::SpaceOfAlreadySerializedObject(HeapObject* object) { } -int Serializer2::Allocate(int space, int size) { - ASSERT(space >= 0 && space < kNumberOfSpaces); +int Serializer::Allocate(int space, int size, bool* new_page) { + CHECK(space >= 0 && space < kNumberOfSpaces); if (SpaceIsLarge(space)) { // In large object space we merely number the objects instead of trying to // determine some sort of address. + *new_page = true; return fullness_[LO_SPACE]++; } + *new_page = false; + if (fullness_[space] == 0) { + *new_page = true; + } if (SpaceIsPaged(space)) { // Paged spaces are a little special. We encode their addresses as if the // pages were all contiguous and each page were filled up in the range @@ -2235,10 +1146,11 @@ int Serializer2::Allocate(int space, int size) { // and allocation does not start at offset 0 in the page, but this scheme // means the deserializer can get the page number quickly by shifting the // serialized address. - ASSERT(IsPowerOf2(Page::kPageSize)); + CHECK(IsPowerOf2(Page::kPageSize)); int used_in_this_page = (fullness_[space] & (Page::kPageSize - 1)); - ASSERT(size <= Page::kObjectAreaSize); + CHECK(size <= Page::kObjectAreaSize); if (used_in_this_page + size > Page::kObjectAreaSize) { + *new_page = true; fullness_[space] = RoundUp(fullness_[space], Page::kPageSize); } } diff --git a/deps/v8/src/serialize.h b/deps/v8/src/serialize.h index cefff78cac..96bd751da9 100644 --- a/deps/v8/src/serialize.h +++ b/deps/v8/src/serialize.h @@ -1,4 +1,4 @@ -// Copyright 2006-2008 the V8 project authors. All rights reserved. +// Copyright 2006-2009 the V8 project authors. 
All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -108,249 +108,6 @@ class ExternalReferenceDecoder { }; -// A Serializer recursively visits objects to construct a serialized -// representation of the Heap stored in a string. Serialization is -// destructive. We use a similar mechanism to the GC to ensure that -// each object is visited once, namely, we modify the map pointer of -// each visited object to contain the relative address in the -// appropriate space where that object will be allocated when the heap -// is deserialized. - - -// Helper classes defined in serialize.cc. -class RelativeAddress; -class SimulatedHeapSpace; -class SnapshotWriter; -class ReferenceUpdater; - - -class Serializer: public ObjectVisitor { - public: - Serializer(); - - virtual ~Serializer(); - - // Serialize the current state of the heap. This operation destroys the - // heap contents and the contents of the roots into the heap. - void Serialize(); - - // Returns the serialized buffer. Ownership is transferred to the - // caller. Only the destructor and getters may be called after this call. - void Finalize(byte** str, int* len); - - int roots() { return roots_; } - int objects() { return objects_; } - -#ifdef DEBUG - // insert "tag" into the serialized stream - virtual void Synchronize(const char* tag); -#endif - - static bool enabled() { return serialization_enabled_; } - - static void Enable() { serialization_enabled_ = true; } - static void Disable() { serialization_enabled_ = false; } - - private: - friend class ReferenceUpdater; - - virtual void VisitPointers(Object** start, Object** end); - virtual void VisitCodeTarget(RelocInfo* rinfo); - bool IsVisited(HeapObject* obj); - - Address GetSavedAddress(HeapObject* obj); - - void SaveAddress(HeapObject* obj, Address addr); - - void PutEncodedAddress(Address addr); - // Write the global flags into the file. 
- void PutFlags(); - // Write global information into the header of the file. - void PutHeader(); - // Write the contents of the log into the file. - void PutLog(); - // Serialize 'obj', and return its encoded RelativeAddress. - Address PutObject(HeapObject* obj); - // Write a stack of handles to the file bottom first. - void PutGlobalHandleStack(const List<Handle<Object> >& stack); - // Write the context stack into the file. - void PutContextStack(); - - // Return the encoded RelativeAddress where this object will be - // allocated on deserialization. On the first visit of 'o', - // serialize its contents. On return, *serialized will be true iff - // 'o' has just been serialized. - Address Encode(Object* o, bool* serialized); - - // Simulate the allocation of 'obj', returning the address where it will - // be allocated on deserialization - RelativeAddress Allocate(HeapObject* obj); - - void InitializeAllocators(); - - SnapshotWriter* writer_; - bool root_; // serializing a root? - int roots_; // number of roots visited - int objects_; // number of objects serialized - - static bool serialization_enabled_; - - int flags_end_; // The position right after the flags. - - // An array of per-space SimulatedHeapSpaces used as memory allocators. - SimulatedHeapSpace* allocator_[LAST_SPACE+1]; - // A list of global handles at serialization time. - List<Object**> global_handles_; - - ExternalReferenceEncoder* reference_encoder_; - - HashMap saved_addresses_; - - DISALLOW_COPY_AND_ASSIGN(Serializer); -}; - -// Helper class to read the bytes of the serialized heap. 
- -class SnapshotReader { - public: - SnapshotReader(const byte* str, int len): str_(str), end_(str + len) {} - - void ExpectC(char expected) { - int c = GetC(); - USE(c); - ASSERT(c == expected); - } - - int GetC() { - if (str_ >= end_) return EOF; - return *str_++; - } - - int GetInt() { - int result; - GetBytes(reinterpret_cast<Address>(&result), sizeof(result)); - return result; - } - - Address GetAddress() { - Address result; - GetBytes(reinterpret_cast<Address>(&result), sizeof(result)); - return result; - } - - void GetBytes(Address a, int size) { - ASSERT(str_ + size <= end_); - memcpy(a, str_, size); - str_ += size; - } - - char* GetString() { - ExpectC('['); - int size = GetInt(); - ExpectC(']'); - char* s = NewArray<char>(size + 1); - GetBytes(reinterpret_cast<Address>(s), size); - s[size] = 0; - return s; - } - - private: - const byte* str_; - const byte* end_; -}; - - -// A Deserializer reads a snapshot and reconstructs the Object graph it defines. - - -// TODO(erikcorry): Get rid of this superclass when we are using the new -// snapshot code exclusively. -class GenericDeserializer: public ObjectVisitor { - public: - virtual void GetLog() = 0; - virtual void Deserialize() = 0; -}; - - -// TODO(erikcorry): Get rid of this class. -class Deserializer: public GenericDeserializer { - public: - // Create a deserializer. The snapshot is held in str and has size len. - Deserializer(const byte* str, int len); - - virtual ~Deserializer(); - - // Read the flags from the header of the file, and set those that - // should be inherited from the snapshot. - void GetFlags(); - - // Read saved profiling information from the file and log it if required. - void GetLog(); - - // Deserialize the snapshot into an empty heap. 
- void Deserialize(); - - int roots() { return roots_; } - int objects() { return objects_; } - -#ifdef DEBUG - // Check for the presence of "tag" in the serialized stream - virtual void Synchronize(const char* tag); -#endif - - private: - virtual void VisitPointers(Object** start, Object** end); - virtual void VisitCodeTarget(RelocInfo* rinfo); - virtual void VisitExternalReferences(Address* start, Address* end); - virtual void VisitRuntimeEntry(RelocInfo* rinfo); - - Address GetEncodedAddress(); - - // Read other global information (except flags) from the header of the file. - void GetHeader(); - // Read a stack of handles from the file bottom first. - void GetGlobalHandleStack(List<Handle<Object> >* stack); - // Read the context stack from the file. - void GetContextStack(); - - Object* GetObject(); - - // Get the encoded address. In debug mode we make sure - // it matches the given expectations. - void ExpectEncodedAddress(Address expected); - - // Given an encoded address (the result of - // RelativeAddress::Encode), return the object to which it points, - // which will be either an Smi or a HeapObject in the current heap. - Object* Resolve(Address encoded_address); - - SnapshotReader reader_; - bool root_; // Deserializing a root? - int roots_; // number of roots visited - int objects_; // number of objects serialized - - bool has_log_; // The file has log information. - - // Resolve caches the following: - List<Page*> map_pages_; // All pages in the map space. - List<Page*> cell_pages_; // All pages in the cell space. - List<Page*> old_pointer_pages_; // All pages in the old pointer space. - List<Page*> old_data_pages_; // All pages in the old data space. - List<Page*> code_pages_; // All pages in the code space. - List<Object*> large_objects_; // All known large objects. - // A list of global handles at deserialization time. 
- List<Object**> global_handles_; - - ExternalReferenceDecoder* reference_decoder_; - -#ifdef DEBUG - bool expect_debug_information_; -#endif - - DISALLOW_COPY_AND_ASSIGN(Deserializer); -}; - - class SnapshotByteSource { public: SnapshotByteSource(const byte* array, int length) @@ -363,13 +120,18 @@ class SnapshotByteSource { return data_[position_++]; } + void CopyRaw(byte* to, int number_of_bytes) { + memcpy(to, data_ + position_, number_of_bytes); + position_ += number_of_bytes; + } + int GetInt() { // A little unwind to catch the really small ints. int snapshot_byte = Get(); if ((snapshot_byte & 0x80) == 0) { return snapshot_byte; } - uintptr_t accumulator = (snapshot_byte & 0x7f) << 7; + int accumulator = (snapshot_byte & 0x7f) << 7; while (true) { snapshot_byte = Get(); if ((snapshot_byte & 0x80) == 0) { @@ -392,30 +154,72 @@ class SnapshotByteSource { }; -// The SerDes class is a common superclass for Serializer2 and Deserializer2 +// It is very common to have a reference to the object at word 10 in space 2, +// the object at word 5 in space 2 and the object at word 28 in space 4. This +// only works for objects in the first page of a space. +#define COMMON_REFERENCE_PATTERNS(f) \ + f(kNumberOfSpaces, 2, 10) \ + f(kNumberOfSpaces + 1, 2, 5) \ + f(kNumberOfSpaces + 2, 4, 28) \ + f(kNumberOfSpaces + 3, 2, 21) \ + f(kNumberOfSpaces + 4, 2, 98) \ + f(kNumberOfSpaces + 5, 2, 67) \ + f(kNumberOfSpaces + 6, 4, 132) + +#define COMMON_RAW_LENGTHS(f) \ + f(1, 1) \ + f(2, 2) \ + f(3, 3) \ + f(4, 4) \ + f(5, 5) \ + f(6, 6) \ + f(7, 7) \ + f(8, 8) \ + f(9, 12) \ + f(10, 16) \ + f(11, 20) \ + f(12, 24) \ + f(13, 28) \ + f(14, 32) \ + f(15, 36) + +// The SerDes class is a common superclass for Serializer and Deserializer // which is used to store common constants and methods used by both. -// TODO(erikcorry): This should inherit from ObjectVisitor. 
-class SerDes: public GenericDeserializer { +class SerDes: public ObjectVisitor { protected: enum DataType { - SMI_SERIALIZATION, - RAW_DATA_SERIALIZATION, - OBJECT_SERIALIZATION, - CODE_OBJECT_SERIALIZATION, - BACKREF_SERIALIZATION, - CODE_BACKREF_SERIALIZATION, - EXTERNAL_REFERENCE_SERIALIZATION, - SYNCHRONIZE + RAW_DATA_SERIALIZATION = 0, + // And 15 common raw lengths. + OBJECT_SERIALIZATION = 16, + // One variant per space. + CODE_OBJECT_SERIALIZATION = 25, + // One per space (only code spaces in use). + EXTERNAL_REFERENCE_SERIALIZATION = 34, + EXTERNAL_BRANCH_TARGET_SERIALIZATION = 35, + SYNCHRONIZE = 36, + START_NEW_PAGE_SERIALIZATION = 37, + NATIVES_STRING_RESOURCE = 38, + // Free: 39-47. + BACKREF_SERIALIZATION = 48, + // One per space, must be kSpaceMask aligned. + // Free: 57-63. + REFERENCE_SERIALIZATION = 64, + // One per space and common references. Must be kSpaceMask aligned. + CODE_BACKREF_SERIALIZATION = 80, + // One per space, must be kSpaceMask aligned. + // Free: 89-95. + CODE_REFERENCE_SERIALIZATION = 96 + // One per space, must be kSpaceMask aligned. + // Free: 105-255. }; - // Our Smi encoding is much more efficient for small positive integers than it - // is for negative numbers so we add a bias before encoding and subtract it - // after encoding so that popular small negative Smis are efficiently encoded. - static const int kSmiBias = 16; static const int kLargeData = LAST_SPACE; static const int kLargeCode = kLargeData + 1; static const int kLargeFixedArray = kLargeCode + 1; static const int kNumberOfSpaces = kLargeFixedArray + 1; + // A bitmask for getting the space out of an instruction. 
+ static const int kSpaceMask = 15; + static inline bool SpaceIsLarge(int space) { return space >= kLargeData; } static inline bool SpaceIsPaged(int space) { return space >= FIRST_PAGED_SPACE && space <= LAST_PAGED_SPACE; @@ -425,16 +229,15 @@ class SerDes: public GenericDeserializer { // A Deserializer reads a snapshot and reconstructs the Object graph it defines. -class Deserializer2: public SerDes { +class Deserializer: public SerDes { public: // Create a deserializer from a snapshot byte source. - explicit Deserializer2(SnapshotByteSource* source); + explicit Deserializer(SnapshotByteSource* source); - virtual ~Deserializer2() { } + virtual ~Deserializer() { } // Deserialize the snapshot into an empty heap. void Deserialize(); - void GetLog() { } // TODO(erikcorry): Get rid of this. #ifdef DEBUG virtual void Synchronize(const char* tag); #endif @@ -450,17 +253,11 @@ class Deserializer2: public SerDes { UNREACHABLE(); } - int CurrentAllocationAddress(int space) { - // The three different kinds of large objects have different tags in the - // snapshot so the deserializer knows which kind of object to allocate, - // but they share a fullness_ entry. - if (SpaceIsLarge(space)) space = LO_SPACE; - return fullness_[space]; - } - - HeapObject* GetAddress(int space); - Address Allocate(int space, int size); - bool ReadObject(Object** write_back); + void ReadChunk(Object** start, Object** end, int space, Address address); + HeapObject* GetAddressFromStart(int space); + inline HeapObject* GetAddressFromEnd(int space); + Address Allocate(int space_number, Space* space, int size); + void ReadObject(int space_number, Space* space, Object** write_back); // Keep track of the pages in the paged spaces. 
// (In large object space we are keeping track of individual objects @@ -470,13 +267,15 @@ class Deserializer2: public SerDes { SnapshotByteSource* source_; ExternalReferenceDecoder* external_reference_decoder_; - // Keep track of the fullness of each space in order to generate - // relative addresses for back references. Large objects are - // just numbered sequentially since relative addresses make no - // sense in large object space. - int fullness_[LAST_SPACE + 1]; + // This is the address of the next object that will be allocated in each + // space. It is used to calculate the addresses of back-references. + Address high_water_[LAST_SPACE + 1]; + // This is the address of the most recent object that was allocated. It + // is used to set the location of the new page when we encounter a + // START_NEW_PAGE_SERIALIZATION tag. + Address last_object_address_; - DISALLOW_COPY_AND_ASSIGN(Deserializer2); + DISALLOW_COPY_AND_ASSIGN(Deserializer); }; @@ -484,19 +283,33 @@ class SnapshotByteSink { public: virtual ~SnapshotByteSink() { } virtual void Put(int byte, const char* description) = 0; + virtual void PutSection(int byte, const char* description) { + Put(byte, description); + } void PutInt(uintptr_t integer, const char* description); }; -class Serializer2 : public SerDes { +class Serializer : public SerDes { public: - explicit Serializer2(SnapshotByteSink* sink); + explicit Serializer(SnapshotByteSink* sink); // Serialize the current state of the heap. This operation destroys the // heap contents. void Serialize(); void VisitPointers(Object** start, Object** end); - void GetLog() { } // TODO(erikcorry): Get rid of this. - void Deserialize() { } // TODO(erikcorry): Get rid of this. 
+ + static void Enable() { + if (!serialization_enabled_) { + ASSERT(!too_late_to_enable_now_); + } + serialization_enabled_ = true; + } + + static void Disable() { serialization_enabled_ = false; } + // Call this when you have made use of the fact that there is no serialization + // going on. + static void TooLateToEnableNow() { too_late_to_enable_now_ = true; } + static bool enabled() { return serialization_enabled_; } #ifdef DEBUG virtual void Synchronize(const char* tag); #endif @@ -508,7 +321,7 @@ class Serializer2 : public SerDes { }; class ObjectSerializer : public ObjectVisitor { public: - ObjectSerializer(Serializer2* serializer, + ObjectSerializer(Serializer* serializer, Object* o, SnapshotByteSink* sink, ReferenceRepresentation representation) @@ -521,11 +334,20 @@ class Serializer2 : public SerDes { void VisitPointers(Object** start, Object** end); void VisitExternalReferences(Address* start, Address* end); void VisitCodeTarget(RelocInfo* target); + void VisitRuntimeEntry(RelocInfo* reloc); + // Used for seralizing the external strings that hold the natives source. + void VisitExternalAsciiString( + v8::String::ExternalAsciiStringResource** resource); + // We can't serialize a heap with external two byte strings. + void VisitExternalTwoByteString( + v8::String::ExternalStringResource** resource) { + UNREACHABLE(); + } private: void OutputRawData(Address up_to); - Serializer2* serializer_; + Serializer* serializer_; HeapObject* object_; SnapshotByteSink* sink_; ReferenceRepresentation reference_representation_; @@ -543,7 +365,7 @@ class Serializer2 : public SerDes { // for all large objects since you can't check the type of the object // once the map has been used for the serialization address. 
static int SpaceOfAlreadySerializedObject(HeapObject* object); - int Allocate(int space, int size); + int Allocate(int space, int size, bool* new_page_started); int CurrentAllocationAddress(int space) { if (SpaceIsLarge(space)) space = LO_SPACE; return fullness_[space]; @@ -560,11 +382,14 @@ class Serializer2 : public SerDes { SnapshotByteSink* sink_; int current_root_index_; ExternalReferenceEncoder* external_reference_encoder_; + static bool serialization_enabled_; + // Did we already make use of the fact that serialization was not enabled? + static bool too_late_to_enable_now_; friend class ObjectSerializer; - friend class Deserializer2; + friend class Deserializer; - DISALLOW_COPY_AND_ASSIGN(Serializer2); + DISALLOW_COPY_AND_ASSIGN(Serializer); }; } } // namespace v8::internal diff --git a/deps/v8/src/simulator.h b/deps/v8/src/simulator.h new file mode 100644 index 0000000000..6f8cd5a282 --- /dev/null +++ b/deps/v8/src/simulator.h @@ -0,0 +1,41 @@ +// Copyright 2009 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef V8_SIMULATOR_H_ +#define V8_SIMULATOR_H_ + +#if V8_TARGET_ARCH_IA32 +#include "ia32/simulator-ia32.h" +#elif V8_TARGET_ARCH_X64 +#include "x64/simulator-x64.h" +#elif V8_TARGET_ARCH_ARM +#include "arm/simulator-arm.h" +#else +#error Unsupported target architecture. +#endif + +#endif // V8_SIMULATOR_H_ diff --git a/deps/v8/src/snapshot-common.cc b/deps/v8/src/snapshot-common.cc index b258a15c01..c01baad79a 100644 --- a/deps/v8/src/snapshot-common.cc +++ b/deps/v8/src/snapshot-common.cc @@ -38,15 +38,8 @@ namespace v8 { namespace internal { bool Snapshot::Deserialize(const byte* content, int len) { - Deserializer des(content, len); - des.GetFlags(); - return V8::Initialize(&des); -} - - -bool Snapshot::Deserialize2(const byte* content, int len) { SnapshotByteSource source(content, len); - Deserializer2 deserializer(&source); + Deserializer deserializer(&source); return V8::Initialize(&deserializer); } @@ -56,44 +49,17 @@ bool Snapshot::Initialize(const char* snapshot_file) { int len; byte* str = ReadBytes(snapshot_file, &len); if (!str) return false; - bool result = Deserialize(str, len); + Deserialize(str, len); DeleteArray(str); - return result; + return true; } else if (size_ > 0) { - return Deserialize(data_, size_); + Deserialize(data_, size_); + return true; } return false; } -bool Snapshot::Initialize2(const char* snapshot_file) { - if (snapshot_file) { - int len; - byte* str = ReadBytes(snapshot_file, &len); - if 
(!str) return false; - Deserialize2(str, len); - DeleteArray(str); - } else if (size_ > 0) { - Deserialize2(data_, size_); - } - return true; -} - - -bool Snapshot::WriteToFile(const char* snapshot_file) { - Serializer ser; - ser.Serialize(); - byte* str; - int len; - ser.Finalize(&str, &len); - - int written = WriteBytes(snapshot_file, str, len); - - DeleteArray(str); - return written == len; -} - - class FileByteSink : public SnapshotByteSink { public: explicit FileByteSink(const char* snapshot_file) { @@ -119,9 +85,9 @@ class FileByteSink : public SnapshotByteSink { }; -bool Snapshot::WriteToFile2(const char* snapshot_file) { +bool Snapshot::WriteToFile(const char* snapshot_file) { FileByteSink file(snapshot_file); - Serializer2 ser(&file); + Serializer ser(&file); ser.Serialize(); return true; } diff --git a/deps/v8/src/snapshot.h b/deps/v8/src/snapshot.h index a3a3867d05..88ba8db30e 100644 --- a/deps/v8/src/snapshot.h +++ b/deps/v8/src/snapshot.h @@ -37,7 +37,6 @@ class Snapshot { // NULL, use the internal snapshot instead. Returns false if no snapshot // could be found. static bool Initialize(const char* snapshot_file = NULL); - static bool Initialize2(const char* snapshot_file = NULL); // Returns whether or not the snapshot is enabled. static bool IsEnabled() { return size_ != 0; } @@ -45,14 +44,12 @@ class Snapshot { // Write snapshot to the given file. Returns true if snapshot was written // successfully. 
static bool WriteToFile(const char* snapshot_file); - static bool WriteToFile2(const char* snapshot_file); private: static const byte data_[]; static int size_; static bool Deserialize(const byte* content, int len); - static bool Deserialize2(const byte* content, int len); DISALLOW_IMPLICIT_CONSTRUCTORS(Snapshot); }; diff --git a/deps/v8/src/spaces.cc b/deps/v8/src/spaces.cc index c69579a9d8..f3b6b9f639 100644 --- a/deps/v8/src/spaces.cc +++ b/deps/v8/src/spaces.cc @@ -354,7 +354,7 @@ void* MemoryAllocator::AllocateRawMemory(const size_t requested, } else { mem = OS::Allocate(requested, allocated, (executable == EXECUTABLE)); } - int alloced = *allocated; + int alloced = static_cast<int>(*allocated); size_ += alloced; Counters::memory_allocated.Increment(alloced); return mem; @@ -367,8 +367,8 @@ void MemoryAllocator::FreeRawMemory(void* mem, size_t length) { } else { OS::Free(mem, length); } - Counters::memory_allocated.Decrement(length); - size_ -= length; + Counters::memory_allocated.Decrement(static_cast<int>(length)); + size_ -= static_cast<int>(length); ASSERT(size_ >= 0); } @@ -387,7 +387,7 @@ void* MemoryAllocator::ReserveInitialChunk(const size_t requested) { // We are sure that we have mapped a block of requested addresses. ASSERT(initial_chunk_->size() == requested); LOG(NewEvent("InitialChunk", initial_chunk_->address(), requested)); - size_ += requested; + size_ += static_cast<int>(requested); return initial_chunk_->address(); } @@ -397,8 +397,8 @@ static int PagesInChunk(Address start, size_t size) { // and the last page ends on the last page-aligned address before // start+size. Page::kPageSize is a power of two so we can divide by // shifting. 
- return (RoundDown(start + size, Page::kPageSize) - - RoundUp(start, Page::kPageSize)) >> Page::kPageSizeBits; + return static_cast<int>((RoundDown(start + size, Page::kPageSize) + - RoundUp(start, Page::kPageSize)) >> Page::kPageSizeBits); } @@ -412,7 +412,7 @@ Page* MemoryAllocator::AllocatePages(int requested_pages, int* allocated_pages, if (size_ + static_cast<int>(chunk_size) > capacity_) { // Request as many pages as we can. chunk_size = capacity_ - size_; - requested_pages = chunk_size >> Page::kPageSizeBits; + requested_pages = static_cast<int>(chunk_size >> Page::kPageSizeBits); if (requested_pages <= 0) return Page::FromAddress(NULL); } @@ -445,7 +445,7 @@ Page* MemoryAllocator::CommitPages(Address start, size_t size, if (!initial_chunk_->Commit(start, size, owner->executable() == EXECUTABLE)) { return Page::FromAddress(NULL); } - Counters::memory_allocated.Increment(size); + Counters::memory_allocated.Increment(static_cast<int>(size)); // So long as we correctly overestimated the number of chunks we should not // run out of chunk ids. @@ -466,7 +466,7 @@ bool MemoryAllocator::CommitBlock(Address start, ASSERT(InInitialChunk(start + size - 1)); if (!initial_chunk_->Commit(start, size, executable)) return false; - Counters::memory_allocated.Increment(size); + Counters::memory_allocated.Increment(static_cast<int>(size)); return true; } @@ -478,7 +478,7 @@ bool MemoryAllocator::UncommitBlock(Address start, size_t size) { ASSERT(InInitialChunk(start + size - 1)); if (!initial_chunk_->Uncommit(start, size)) return false; - Counters::memory_allocated.Decrement(size); + Counters::memory_allocated.Decrement(static_cast<int>(size)); return true; } @@ -558,7 +558,7 @@ void MemoryAllocator::DeleteChunk(int chunk_id) { // TODO(1240712): VirtualMemory::Uncommit has a return value which // is ignored here. 
initial_chunk_->Uncommit(c.address(), c.size()); - Counters::memory_allocated.Decrement(c.size()); + Counters::memory_allocated.Decrement(static_cast<int>(c.size())); } else { LOG(DeleteEvent("PagedChunk", c.address())); FreeRawMemory(c.address(), c.size()); @@ -1096,7 +1096,8 @@ void NewSpace::Grow() { void NewSpace::Shrink() { int new_capacity = Max(InitialCapacity(), 2 * Size()); - int rounded_new_capacity = RoundUp(new_capacity, OS::AllocateAlignment()); + int rounded_new_capacity = + RoundUp(new_capacity, static_cast<int>(OS::AllocateAlignment())); if (rounded_new_capacity < Capacity() && to_space_.ShrinkTo(rounded_new_capacity)) { // Only shrink from space if we managed to shrink to space. @@ -1234,7 +1235,7 @@ void SemiSpace::TearDown() { bool SemiSpace::Grow() { // Double the semispace size but only up to maximum capacity. int maximum_extra = maximum_capacity_ - capacity_; - int extra = Min(RoundUp(capacity_, OS::AllocateAlignment()), + int extra = Min(RoundUp(capacity_, static_cast<int>(OS::AllocateAlignment())), maximum_extra); if (!MemoryAllocator::CommitBlock(high(), extra, executable())) { return false; @@ -1797,12 +1798,14 @@ void OldSpace::MCCommitRelocationInfo() { while (it.has_next()) { Page* p = it.next(); // Space below the relocation pointer is allocated. - computed_size += p->mc_relocation_top - p->ObjectAreaStart(); + computed_size += + static_cast<int>(p->mc_relocation_top - p->ObjectAreaStart()); if (it.has_next()) { // Free the space at the top of the page. We cannot use // p->mc_relocation_top after the call to Free (because Free will clear // remembered set bits). 
- int extra_size = p->ObjectAreaEnd() - p->mc_relocation_top; + int extra_size = + static_cast<int>(p->ObjectAreaEnd() - p->mc_relocation_top); if (extra_size > 0) { int wasted_bytes = free_list_.Free(p->mc_relocation_top, extra_size); // The bytes we have just "freed" to add to the free list were @@ -1868,7 +1871,8 @@ HeapObject* OldSpace::AllocateInNextPage(Page* current_page, int size_in_bytes) { ASSERT(current_page->next_page()->is_valid()); // Add the block at the top of this page to the free list. - int free_size = current_page->ObjectAreaEnd() - allocation_info_.top; + int free_size = + static_cast<int>(current_page->ObjectAreaEnd() - allocation_info_.top); if (free_size > 0) { int wasted_bytes = free_list_.Free(allocation_info_.top, free_size); accounting_stats_.WasteBytes(wasted_bytes); @@ -1968,7 +1972,7 @@ static void CollectCommentStatistics(RelocIterator* it) { if (it->rinfo()->rmode() == RelocInfo::COMMENT) { const char* const txt = reinterpret_cast<const char*>(it->rinfo()->data()); - flat_delta += it->rinfo()->pc() - prev_pc; + flat_delta += static_cast<int>(it->rinfo()->pc() - prev_pc); if (txt[0] == ']') break; // End of nested comment // A new comment CollectCommentStatistics(it); @@ -1996,7 +2000,7 @@ void PagedSpace::CollectCodeStatistics() { const byte* prev_pc = code->instruction_start(); while (!it.done()) { if (it.rinfo()->rmode() == RelocInfo::COMMENT) { - delta += it.rinfo()->pc() - prev_pc; + delta += static_cast<int>(it.rinfo()->pc() - prev_pc); CollectCommentStatistics(&it); prev_pc = it.rinfo()->pc(); } @@ -2005,7 +2009,7 @@ void PagedSpace::CollectCodeStatistics() { ASSERT(code->instruction_start() <= prev_pc && prev_pc <= code->relocation_start()); - delta += code->relocation_start() - prev_pc; + delta += static_cast<int>(code->relocation_start() - prev_pc); EnterComment("NoComment", delta); } } @@ -2034,7 +2038,8 @@ void OldSpace::ReportStatistics() { int rset = Memory::int_at(rset_addr); if (rset != 0) { // Bits were set - int 
intoff = rset_addr - p->address() - Page::kRSetOffset; + int intoff = + static_cast<int>(rset_addr - p->address() - Page::kRSetOffset); int bitoff = 0; for (; bitoff < kBitsPerInt; ++bitoff) { if ((rset & (1 << bitoff)) != 0) { @@ -2211,9 +2216,10 @@ void FixedSpace::MCCommitRelocationInfo() { while (it.has_next()) { Page* page = it.next(); Address page_top = page->AllocationTop(); - computed_size += page_top - page->ObjectAreaStart(); + computed_size += static_cast<int>(page_top - page->ObjectAreaStart()); if (it.has_next()) { - accounting_stats_.WasteBytes(page->ObjectAreaEnd() - page_top); + accounting_stats_.WasteBytes( + static_cast<int>(page->ObjectAreaEnd() - page_top)); } } @@ -2299,7 +2305,8 @@ void FixedSpace::ReportStatistics() { int rset = Memory::int_at(rset_addr); if (rset != 0) { // Bits were set - int intoff = rset_addr - p->address() - Page::kRSetOffset; + int intoff = + static_cast<int>(rset_addr - p->address() - Page::kRSetOffset); int bitoff = 0; for (; bitoff < kBitsPerInt; ++bitoff) { if ((rset & (1 << bitoff)) != 0) { @@ -2420,7 +2427,7 @@ LargeObjectChunk* LargeObjectChunk::New(int size_in_bytes, int LargeObjectChunk::ChunkSizeFor(int size_in_bytes) { - int os_alignment = OS::AllocateAlignment(); + int os_alignment = static_cast<int>(OS::AllocateAlignment()); if (os_alignment < Page::kPageSize) size_in_bytes += (Page::kPageSize - os_alignment); return size_in_bytes + Page::kObjectStartOffset; @@ -2499,7 +2506,7 @@ Object* LargeObjectSpace::AllocateRawInternal(int requested_size, return Failure::RetryAfterGC(requested_size, identity()); } - size_ += chunk_size; + size_ += static_cast<int>(chunk_size); page_count_++; chunk->set_next(first_chunk_); chunk->set_size(chunk_size); @@ -2650,7 +2657,7 @@ void LargeObjectSpace::FreeUnmarkedObjects() { if (object->IsCode()) { LOG(CodeDeleteEvent(object->address())); } - size_ -= chunk_size; + size_ -= static_cast<int>(chunk_size); page_count_--; MemoryAllocator::FreeRawMemory(chunk_address, 
chunk_size); LOG(DeleteEvent("LargeObjectChunk", chunk_address)); diff --git a/deps/v8/src/spaces.h b/deps/v8/src/spaces.h index 9e1d873c99..75b992ffee 100644 --- a/deps/v8/src/spaces.h +++ b/deps/v8/src/spaces.h @@ -172,7 +172,7 @@ class Page { // Returns the offset of a given address to this page. INLINE(int Offset(Address a)) { - int offset = a - address(); + int offset = static_cast<int>(a - address()); ASSERT_PAGE_OFFSET(offset); return offset; } @@ -1116,7 +1116,9 @@ class SemiSpace : public Space { } // The offset of an address from the beginning of the space. - int SpaceOffsetForAddress(Address addr) { return addr - low(); } + int SpaceOffsetForAddress(Address addr) { + return static_cast<int>(addr - low()); + } // If we don't have this here then SemiSpace will be abstract. However // it should never be called. @@ -1255,7 +1257,7 @@ class NewSpace : public Space { } // Return the allocated bytes in the active semispace. - virtual int Size() { return top() - bottom(); } + virtual int Size() { return static_cast<int>(top() - bottom()); } // Return the current capacity of a semispace. int Capacity() { diff --git a/deps/v8/src/string.js b/deps/v8/src/string.js index d2d6e969df..4f9957a63c 100644 --- a/deps/v8/src/string.js +++ b/deps/v8/src/string.js @@ -1,4 +1,4 @@ -// Copyright 2006-2008 the V8 project authors. All rights reserved. +// Copyright 2006-2009 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -180,7 +180,7 @@ function SubString(string, start, end) { } return %CharFromCode(char_code); } - return %StringSlice(string, start, end); + return %SubString(string, start, end); } @@ -380,12 +380,19 @@ function StringReplaceRegExpWithFunction(subject, regexp, replace) { // Unfortunately, that means this code is nearly duplicated, here and in // jsregexp.cc. 
if (regexp.global) { + var numberOfCaptures = NUMBER_OF_CAPTURES(matchInfo) >> 1; var previous = 0; do { - result.addSpecialSlice(previous, matchInfo[CAPTURE0]); var startOfMatch = matchInfo[CAPTURE0]; + result.addSpecialSlice(previous, startOfMatch); previous = matchInfo[CAPTURE1]; - result.add(ApplyReplacementFunction(replace, matchInfo, subject)); + if (numberOfCaptures == 1) { + var match = SubString(subject, startOfMatch, previous); + // Don't call directly to avoid exposing the built-in global object. + result.add(replace.call(null, match, startOfMatch, subject)); + } else { + result.add(ApplyReplacementFunction(replace, matchInfo, subject)); + } // Can't use matchInfo any more from here, since the function could // overwrite it. // Continue with the next match. @@ -810,10 +817,13 @@ ReplaceResultBuilder.prototype.addSpecialSlice = function(start, end) { var len = end - start; if (len == 0) return; var elements = this.elements; - if (start >= 0 && len >= 0 && start < 0x80000 && len < 0x800) { + if (start < 0x80000 && len < 0x800) { elements[elements.length] = (start << 11) + len; } else { - elements[elements.length] = SubString(this.special_string, start, end); + // 0 < len <= String::kMaxLength and Smi::kMaxValue >= String::kMaxLength, + // so -len is a smi. 
+ elements[elements.length] = -len; + elements[elements.length] = start; } } diff --git a/deps/v8/src/stub-cache.cc b/deps/v8/src/stub-cache.cc index e10dc61b2c..a399e4563e 100644 --- a/deps/v8/src/stub-cache.cc +++ b/deps/v8/src/stub-cache.cc @@ -735,11 +735,16 @@ Handle<Code> ComputeCallMiss(int argc) { Object* LoadCallbackProperty(Arguments args) { + ASSERT(args[0]->IsJSObject()); + ASSERT(args[1]->IsJSObject()); AccessorInfo* callback = AccessorInfo::cast(args[2]); Address getter_address = v8::ToCData<Address>(callback->getter()); v8::AccessorGetter fun = FUNCTION_CAST<v8::AccessorGetter>(getter_address); ASSERT(fun != NULL); - v8::AccessorInfo info(args.arguments()); + CustomArguments custom_args(callback->data(), + JSObject::cast(args[0]), + JSObject::cast(args[1])); + v8::AccessorInfo info(custom_args.end()); HandleScope scope; v8::Handle<v8::Value> result; { diff --git a/deps/v8/src/token.cc b/deps/v8/src/token.cc index bb42cead4b..0a4ad4c1ad 100644 --- a/deps/v8/src/token.cc +++ b/deps/v8/src/token.cc @@ -55,109 +55,4 @@ int8_t Token::precedence_[NUM_TOKENS] = { #undef T -// A perfect (0 collision) hash table of keyword token values. - -// larger N will reduce the number of collisions (power of 2 for fast %) -const unsigned int N = 128; -// make this small since we have <= 256 tokens -static uint8_t Hashtable[N]; -static bool IsInitialized = false; - - -static unsigned int Hash(const char* s) { - // The following constants have been found using trial-and-error. If the - // keyword set changes, they may have to be recomputed (make them flags - // and play with the flag values). Increasing N is the simplest way to - // reduce the number of collisions. 
- - // we must use at least 4 or more chars ('const' and 'continue' share - // 'con') - const unsigned int L = 5; - // smaller S tend to reduce the number of collisions - const unsigned int S = 4; - // make this a prime, or at least an odd number - const unsigned int M = 3; - - unsigned int h = 0; - for (unsigned int i = 0; s[i] != '\0' && i < L; i++) { - h += (h << S) + s[i]; - } - // unsigned int % by a power of 2 (otherwise this will not be a bit mask) - return h * M % N; -} - - -Token::Value Token::Lookup(const char* str) { - ASSERT(IsInitialized); - Value k = static_cast<Value>(Hashtable[Hash(str)]); - const char* s = string_[k]; - ASSERT(s != NULL || k == IDENTIFIER); - if (s == NULL || strcmp(s, str) == 0) { - return k; - } - return IDENTIFIER; -} - - -#ifdef DEBUG -// We need this function because C++ doesn't allow the expression -// NULL == NULL, which is a result of macro expansion below. What -// the hell? -static bool IsNull(const char* s) { - return s == NULL; -} -#endif - - -void Token::Initialize() { - if (IsInitialized) return; - - // A list of all keywords, terminated by ILLEGAL. -#define T(name, string, precedence) name, - static Value keyword[] = { - TOKEN_LIST(IGNORE_TOKEN, T, IGNORE_TOKEN) - ILLEGAL - }; -#undef T - - // Assert that the keyword array contains the 25 keywords, 3 future - // reserved words (const, debugger, and native), and the 3 named literals - // defined by ECMA-262 standard. - ASSERT(ARRAY_SIZE(keyword) == 25 + 3 + 3 + 1); // +1 for ILLEGAL sentinel - - // Initialize Hashtable. - ASSERT(NUM_TOKENS <= 256); // Hashtable contains uint8_t elements - for (unsigned int i = 0; i < N; i++) { - Hashtable[i] = IDENTIFIER; - } - - // Insert all keywords into Hashtable. 
- int collisions = 0; - for (int i = 0; keyword[i] != ILLEGAL; i++) { - Value k = keyword[i]; - unsigned int h = Hash(string_[k]); - if (Hashtable[h] != IDENTIFIER) collisions++; - Hashtable[h] = k; - } - - if (collisions > 0) { - PrintF("%d collisions in keyword hashtable\n", collisions); - FATAL("Fix keyword lookup!"); - } - - IsInitialized = true; - - // Verify hash table. -#define T(name, string, precedence) \ - ASSERT(IsNull(string) || Lookup(string) == IDENTIFIER); - -#define K(name, string, precedence) \ - ASSERT(Lookup(string) == name); - - TOKEN_LIST(T, K, IGNORE_TOKEN) - -#undef K -#undef T -} - } } // namespace v8::internal diff --git a/deps/v8/src/token.h b/deps/v8/src/token.h index 4d4df63458..a60704cd07 100644 --- a/deps/v8/src/token.h +++ b/deps/v8/src/token.h @@ -260,15 +260,6 @@ class Token { return precedence_[tok]; } - // Returns the keyword value if str is a keyword; - // returns IDENTIFIER otherwise. The class must - // have been initialized. - static Value Lookup(const char* str); - - // Must be called once to initialize the class. - // Multiple calls are ignored. 
- static void Initialize(); - private: #ifdef DEBUG static const char* name_[NUM_TOKENS]; diff --git a/deps/v8/src/top.cc b/deps/v8/src/top.cc index bb2dea4d2d..0274838588 100644 --- a/deps/v8/src/top.cc +++ b/deps/v8/src/top.cc @@ -31,8 +31,9 @@ #include "bootstrapper.h" #include "debug.h" #include "execution.h" -#include "string-stream.h" #include "platform.h" +#include "simulator.h" +#include "string-stream.h" namespace v8 { namespace internal { @@ -50,6 +51,30 @@ Address top_addresses[] = { NULL }; + +v8::TryCatch* ThreadLocalTop::TryCatchHandler() { + return TRY_CATCH_FROM_ADDRESS(try_catch_handler_address()); +} + + +void ThreadLocalTop::Initialize() { + c_entry_fp_ = 0; + handler_ = 0; +#ifdef ENABLE_LOGGING_AND_PROFILING + js_entry_sp_ = 0; +#endif + stack_is_cooked_ = false; + try_catch_handler_address_ = NULL; + context_ = NULL; + int id = ThreadManager::CurrentId(); + thread_id_ = (id == 0) ? ThreadManager::kInvalidId : id; + external_caught_exception_ = false; + failed_access_check_callback_ = NULL; + save_context_ = NULL; + catcher_ = NULL; +} + + Address Top::get_address_from_id(Top::AddressId id) { return top_addresses[id]; } @@ -70,9 +95,9 @@ void Top::Iterate(ObjectVisitor* v, ThreadLocalTop* thread) { v->VisitPointer(bit_cast<Object**, Context**>(&(thread->context_))); v->VisitPointer(&(thread->scheduled_exception_)); - for (v8::TryCatch* block = thread->try_catch_handler_; + for (v8::TryCatch* block = thread->TryCatchHandler(); block != NULL; - block = block->next_) { + block = TRY_CATCH_FROM_ADDRESS(block->next_)) { v->VisitPointer(bit_cast<Object**, void**>(&(block->exception_))); v->VisitPointer(bit_cast<Object**, void**>(&(block->message_))); } @@ -91,23 +116,10 @@ void Top::Iterate(ObjectVisitor* v) { void Top::InitializeThreadLocal() { - thread_local_.c_entry_fp_ = 0; - thread_local_.handler_ = 0; -#ifdef ENABLE_LOGGING_AND_PROFILING - thread_local_.js_entry_sp_ = 0; -#endif - thread_local_.stack_is_cooked_ = false; - 
thread_local_.try_catch_handler_ = NULL; - thread_local_.context_ = NULL; - int id = ThreadManager::CurrentId(); - thread_local_.thread_id_ = (id == 0) ? ThreadManager::kInvalidId : id; - thread_local_.external_caught_exception_ = false; - thread_local_.failed_access_check_callback_ = NULL; + thread_local_.Initialize(); clear_pending_exception(); clear_pending_message(); clear_scheduled_exception(); - thread_local_.save_context_ = NULL; - thread_local_.catcher_ = NULL; } @@ -254,46 +266,24 @@ void Top::TearDown() { } -// There are cases where the C stack is separated from JS stack (ARM simulator). -// To figure out the order of top-most JS try-catch handler and the top-most C -// try-catch handler, the C try-catch handler keeps a reference to the top-most -// JS try_catch handler when it was created. -// -// Here is a picture to explain the idea: -// Top::thread_local_.handler_ Top::thread_local_.try_catch_handler_ -// -// | | -// v v -// -// | JS handler | | C try_catch handler | -// | next |--+ +-------- | js_handler_ | -// | | | next_ |--+ -// | | | -// | JS handler |--+ <---------+ | -// | next | -// -// If the top-most JS try-catch handler is not equal to -// Top::thread_local_.try_catch_handler_.js_handler_, it means the JS handler -// is on the top. Otherwise, it means the C try-catch handler is on the top. -// void Top::RegisterTryCatchHandler(v8::TryCatch* that) { - StackHandler* handler = - reinterpret_cast<StackHandler*>(thread_local_.handler_); - - // Find the top-most try-catch handler. - while (handler != NULL && !handler->is_try_catch()) { - handler = handler->next(); - } - - that->js_handler_ = handler; // casted to void* - thread_local_.try_catch_handler_ = that; + // The ARM simulator has a separate JS stack. We therefore register + // the C++ try catch handler with the simulator and get back an + // address that can be used for comparisons with addresses into the + // JS stack. 
When running without the simulator, the address + // returned will be the address of the C++ try catch handler itself. + Address address = reinterpret_cast<Address>( + SimulatorStack::RegisterCTryCatch(reinterpret_cast<uintptr_t>(that))); + thread_local_.set_try_catch_handler_address(address); } void Top::UnregisterTryCatchHandler(v8::TryCatch* that) { - ASSERT(thread_local_.try_catch_handler_ == that); - thread_local_.try_catch_handler_ = that->next_; + ASSERT(thread_local_.TryCatchHandler() == that); + thread_local_.set_try_catch_handler_address( + reinterpret_cast<Address>(that->next_)); thread_local_.catcher_ = NULL; + SimulatorStack::UnregisterCTryCatch(); } @@ -725,20 +715,18 @@ bool Top::ShouldReturnException(bool* is_caught_externally, // Get the address of the external handler so we can compare the address to // determine which one is closer to the top of the stack. - v8::TryCatch* try_catch = thread_local_.try_catch_handler_; + Address external_handler_address = thread_local_.try_catch_handler_address(); // The exception has been externally caught if and only if there is // an external handler which is on top of the top-most try-catch // handler. - // - // See comments in RegisterTryCatchHandler for details. - *is_caught_externally = try_catch != NULL && - (handler == NULL || handler == try_catch->js_handler_ || + *is_caught_externally = external_handler_address != NULL && + (handler == NULL || handler->address() > external_handler_address || !catchable_by_javascript); if (*is_caught_externally) { // Only report the exception if the external handler is verbose. - return thread_local_.try_catch_handler_->is_verbose_; + return thread_local_.TryCatchHandler()->is_verbose_; } else { // Report the exception if it isn't caught by JavaScript code. 
return handler == NULL; @@ -775,7 +763,7 @@ void Top::DoThrow(Object* exception, MessageLocation potential_computed_location; bool try_catch_needs_message = is_caught_externally && - thread_local_.try_catch_handler_->capture_message_; + thread_local_.TryCatchHandler()->capture_message_; if (report_exception || try_catch_needs_message) { if (location == NULL) { // If no location was specified we use a computed one instead @@ -806,7 +794,7 @@ void Top::DoThrow(Object* exception, } if (is_caught_externally) { - thread_local_.catcher_ = thread_local_.try_catch_handler_; + thread_local_.catcher_ = thread_local_.TryCatchHandler(); } // NOTE: Notifying the debugger or generating the message @@ -830,15 +818,15 @@ void Top::ReportPendingMessages() { } else if (thread_local_.pending_exception_ == Heap::termination_exception()) { if (external_caught) { - thread_local_.try_catch_handler_->can_continue_ = false; - thread_local_.try_catch_handler_->exception_ = Heap::null_value(); + thread_local_.TryCatchHandler()->can_continue_ = false; + thread_local_.TryCatchHandler()->exception_ = Heap::null_value(); } } else { Handle<Object> exception(pending_exception()); thread_local_.external_caught_exception_ = false; if (external_caught) { - thread_local_.try_catch_handler_->can_continue_ = true; - thread_local_.try_catch_handler_->exception_ = + thread_local_.TryCatchHandler()->can_continue_ = true; + thread_local_.TryCatchHandler()->exception_ = thread_local_.pending_exception_; if (!thread_local_.pending_message_obj_->IsTheHole()) { try_catch_handler()->message_ = thread_local_.pending_message_obj_; @@ -892,9 +880,9 @@ bool Top::OptionalRescheduleException(bool is_bottom_call) { // If the exception is externally caught, clear it if there are no // JavaScript frames on the way to the C++ frame that has the // external handler. 
- ASSERT(thread_local_.try_catch_handler_ != NULL); + ASSERT(thread_local_.try_catch_handler_address() != NULL); Address external_handler_address = - reinterpret_cast<Address>(thread_local_.try_catch_handler_); + thread_local_.try_catch_handler_address(); JavaScriptFrameIterator it; if (it.done() || (it.frame()->sp() > external_handler_address)) { clear_exception = true; @@ -941,6 +929,19 @@ Handle<Context> Top::global_context() { Handle<Context> Top::GetCallingGlobalContext() { JavaScriptFrameIterator it; +#ifdef ENABLE_DEBUGGER_SUPPORT + if (Debug::InDebugger()) { + while (!it.done()) { + JavaScriptFrame* frame = it.frame(); + Context* context = Context::cast(frame->context()); + if (context->global_context() == *Debug::debug_context()) { + it.Advance(); + } else { + break; + } + } + } +#endif // ENABLE_DEBUGGER_SUPPORT if (it.done()) return Handle<Context>::null(); JavaScriptFrame* frame = it.frame(); Context* context = Context::cast(frame->context()); diff --git a/deps/v8/src/top.h b/deps/v8/src/top.h index ae94f08e3c..8780844b0d 100644 --- a/deps/v8/src/top.h +++ b/deps/v8/src/top.h @@ -43,6 +43,41 @@ class SaveContext; // Forward declaration. class ThreadLocalTop BASE_EMBEDDED { public: + // Initialize the thread data. + void Initialize(); + + // Get the top C++ try catch handler or NULL if none are registered. + // + // This method is not guarenteed to return an address that can be + // used for comparison with addresses into the JS stack. If such an + // address is needed, use try_catch_handler_address. + v8::TryCatch* TryCatchHandler(); + + // Get the address of the top C++ try catch handler or NULL if + // none are registered. + // + // This method always returns an address that can be compared to + // pointers into the JavaScript stack. When running on actual + // hardware, try_catch_handler_address and TryCatchHandler return + // the same pointer. 
When running on a simulator with a separate JS + // stack, try_catch_handler_address returns a JS stack address that + // corresponds to the place on the JS stack where the C++ handler + // would have been if the stack were not separate. + inline Address try_catch_handler_address() { + return try_catch_handler_address_; + } + + // Set the address of the top C++ try catch handler. + inline void set_try_catch_handler_address(Address address) { + try_catch_handler_address_ = address; + } + + void Free() { + ASSERT(!has_pending_message_); + ASSERT(!external_caught_exception_); + ASSERT(try_catch_handler_address_ == NULL); + } + // The context where the current execution method is created and for variable // lookups. Context* context_; @@ -59,7 +94,6 @@ class ThreadLocalTop BASE_EMBEDDED { // unify them later. Object* scheduled_exception_; bool external_caught_exception_; - v8::TryCatch* try_catch_handler_; SaveContext* save_context_; v8::TryCatch* catcher_; @@ -79,14 +113,11 @@ class ThreadLocalTop BASE_EMBEDDED { // Call back function to report unsafe JS accesses. v8::FailedAccessCheckCallback failed_access_check_callback_; - void Free() { - ASSERT(!has_pending_message_); - ASSERT(!external_caught_exception_); - ASSERT(try_catch_handler_ == NULL); - } + private: + Address try_catch_handler_address_; }; -#define TOP_ADDRESS_LIST(C) \ +#define TOP_ADDRESS_LIST(C) \ C(handler_address) \ C(c_entry_fp_address) \ C(context_address) \ @@ -157,7 +188,10 @@ class Top { thread_local_.pending_message_script_ = NULL; } static v8::TryCatch* try_catch_handler() { - return thread_local_.try_catch_handler_; + return thread_local_.TryCatchHandler(); + } + static Address try_catch_handler_address() { + return thread_local_.try_catch_handler_address(); } // This method is called by the api after operations that may throw // exceptions. 
If an exception was thrown and not handled by an external @@ -170,6 +204,10 @@ class Top { return &thread_local_.external_caught_exception_; } + static Object** scheduled_exception_address() { + return &thread_local_.scheduled_exception_; + } + static Object* scheduled_exception() { ASSERT(has_scheduled_exception()); return thread_local_.scheduled_exception_; @@ -185,7 +223,7 @@ class Top { thread_local_.external_caught_exception_ = has_pending_exception() && (thread_local_.catcher_ != NULL) && - (thread_local_.try_catch_handler_ == thread_local_.catcher_); + (try_catch_handler() == thread_local_.catcher_); } // Tells whether the current context has experienced an out of memory diff --git a/deps/v8/src/utils.cc b/deps/v8/src/utils.cc index 3c684b8199..ce5aceda3a 100644 --- a/deps/v8/src/utils.cc +++ b/deps/v8/src/utils.cc @@ -129,7 +129,7 @@ char* ReadLine(const char* prompt) { } return NULL; } - int len = strlen(line_buf); + int len = StrLength(line_buf); if (len > 1 && line_buf[len - 2] == '\\' && line_buf[len - 1] == '\n') { @@ -184,7 +184,7 @@ char* ReadCharsFromFile(const char* filename, char* result = NewArray<char>(*size + extra_space); for (int i = 0; i < *size;) { - int read = fread(&result[i], 1, *size - i, file); + int read = static_cast<int>(fread(&result[i], 1, *size - i, file)); if (read <= 0) { fclose(file); DeleteArray(result); @@ -221,7 +221,7 @@ Vector<const char> ReadFile(const char* filename, int WriteCharsToFile(const char* str, int size, FILE* f) { int total = 0; while (total < size) { - int write = fwrite(str, 1, size - total, f); + int write = static_cast<int>(fwrite(str, 1, size - total, f)); if (write == 0) { return total; } @@ -265,7 +265,7 @@ StringBuilder::StringBuilder(int size) { void StringBuilder::AddString(const char* s) { - AddSubstring(s, strlen(s)); + AddSubstring(s, StrLength(s)); } diff --git a/deps/v8/src/utils.h b/deps/v8/src/utils.h index f4a0598c20..c271ae17f7 100644 --- a/deps/v8/src/utils.h +++ b/deps/v8/src/utils.h @@ 
-66,7 +66,7 @@ static inline intptr_t OffsetFrom(T x) { // integral types. template <typename T> static inline T AddressFrom(intptr_t x) { - return static_cast<T>(0) + x; + return static_cast<T>(static_cast<T>(0) + x); } @@ -137,6 +137,13 @@ static T Min(T a, T b) { } +inline int StrLength(const char* string) { + size_t length = strlen(string); + ASSERT(length == static_cast<size_t>(static_cast<int>(length))); + return static_cast<int>(length); +} + + // ---------------------------------------------------------------------------- // BitField is a help template for encoding and decode bitfield with // unsigned content. @@ -449,15 +456,15 @@ class ScopedVector : public Vector<T> { inline Vector<const char> CStrVector(const char* data) { - return Vector<const char>(data, static_cast<int>(strlen(data))); + return Vector<const char>(data, StrLength(data)); } inline Vector<char> MutableCStrVector(char* data) { - return Vector<char>(data, static_cast<int>(strlen(data))); + return Vector<char>(data, StrLength(data)); } inline Vector<char> MutableCStrVector(char* data, int max) { - int length = static_cast<int>(strlen(data)); + int length = StrLength(data); return Vector<char>(data, (length < max) ? 
length : max); } diff --git a/deps/v8/src/v8.cc b/deps/v8/src/v8.cc index fe21b3ba6f..d172742531 100644 --- a/deps/v8/src/v8.cc +++ b/deps/v8/src/v8.cc @@ -30,12 +30,10 @@ #include "bootstrapper.h" #include "debug.h" #include "serialize.h" +#include "simulator.h" #include "stub-cache.h" #include "oprofile-agent.h" - -#if V8_TARGET_ARCH_ARM -#include "arm/simulator-arm.h" -#endif +#include "log.h" namespace v8 { namespace internal { @@ -45,7 +43,7 @@ bool V8::has_been_setup_ = false; bool V8::has_been_disposed_ = false; bool V8::has_fatal_error_ = false; -bool V8::Initialize(GenericDeserializer *des) { +bool V8::Initialize(Deserializer *des) { bool create_heap_objects = des == NULL; if (has_been_disposed_ || has_fatal_error_) return false; if (IsRunning()) return true; @@ -61,7 +59,6 @@ bool V8::Initialize(GenericDeserializer *des) { // Enable logging before setting up the heap Logger::Setup(); - if (des) des->GetLog(); // Setup the platform OS support. OS::Setup(); @@ -108,7 +105,7 @@ bool V8::Initialize(GenericDeserializer *des) { // Deserializing may put strange things in the root array's copy of the // stack guard. - Heap::SetStackLimit(StackGuard::jslimit()); + Heap::SetStackLimits(); // Setup the CPU support. Must be done after heap setup and after // any deserialization because we have to have the initial heap @@ -117,6 +114,11 @@ bool V8::Initialize(GenericDeserializer *des) { OProfileAgent::Initialize(); + if (FLAG_log_code) { + HandleScope scope; + Logger::LogCompiledFunctions(); + } + return true; } diff --git a/deps/v8/src/v8.h b/deps/v8/src/v8.h index 6c5546c631..b3624c5d54 100644 --- a/deps/v8/src/v8.h +++ b/deps/v8/src/v8.h @@ -72,6 +72,8 @@ namespace v8 { namespace internal { +class Deserializer; + class V8 : public AllStatic { public: // Global actions. @@ -80,7 +82,7 @@ class V8 : public AllStatic { // created from scratch. If a non-null Deserializer is given, the // initial state is created by reading the deserialized data into an // empty heap. 
- static bool Initialize(GenericDeserializer* des); + static bool Initialize(Deserializer* des); static void TearDown(); static bool IsRunning() { return is_running_; } // To be dead you have to have lived diff --git a/deps/v8/src/v8natives.js b/deps/v8/src/v8natives.js index 2fecee80c0..8f9adcbb8d 100644 --- a/deps/v8/src/v8natives.js +++ b/deps/v8/src/v8natives.js @@ -95,8 +95,8 @@ function GlobalParseInt(string, radix) { // they make parseInt on a string 1.4% slower (274ns vs 270ns). if (%_IsSmi(string)) return string; if (IS_NUMBER(string) && - ((string < -0.01 && -1e9 < string) || - (0.01 < string && string < 1e9))) { + ((0.01 < string && string < 1e9) || + (-1e9 < string && string < -0.01))) { // Truncate number. return string | 0; } @@ -196,10 +196,7 @@ $Object.prototype.constructor = $Object; // ECMA-262 - 15.2.4.2 function ObjectToString() { - var c = %_ClassOf(this); - // Hide Arguments from the outside. - if (c === 'Arguments') c = 'Object'; - return "[object " + c + "]"; + return "[object " + %_ClassOf(this) + "]"; } diff --git a/deps/v8/src/version.cc b/deps/v8/src/version.cc index 944cd4e1c2..b448032a6a 100644 --- a/deps/v8/src/version.cc +++ b/deps/v8/src/version.cc @@ -32,9 +32,9 @@ // These macros define the version number for the current version. // NOTE these macros are used by the SCons build script so their names // cannot be changed without changing the SCons build script. 
-#define MAJOR_VERSION 1 -#define MINOR_VERSION 3 -#define BUILD_NUMBER 18 +#define MAJOR_VERSION 2 +#define MINOR_VERSION 0 +#define BUILD_NUMBER 0 #define PATCH_LEVEL 0 #define CANDIDATE_VERSION false diff --git a/deps/v8/src/x64/assembler-x64-inl.h b/deps/v8/src/x64/assembler-x64-inl.h index 8f078ff236..9c7f9b618d 100644 --- a/deps/v8/src/x64/assembler-x64-inl.h +++ b/deps/v8/src/x64/assembler-x64-inl.h @@ -176,7 +176,7 @@ Address Assembler::target_address_at(Address pc) { void Assembler::set_target_address_at(Address pc, Address target) { - Memory::int32_at(pc) = target - pc - 4; + Memory::int32_at(pc) = static_cast<int32_t>(target - pc - 4); CPU::FlushICache(pc, sizeof(int32_t)); } @@ -191,13 +191,13 @@ Handle<Object> Assembler::code_target_object_handle_at(Address pc) { void RelocInfo::apply(intptr_t delta) { if (IsInternalReference(rmode_)) { // absolute code pointer inside code object moves with the code object. - Memory::Address_at(pc_) += delta; + Memory::Address_at(pc_) += static_cast<int32_t>(delta); } else if (IsCodeTarget(rmode_)) { - Memory::int32_at(pc_) -= delta; + Memory::int32_at(pc_) -= static_cast<int32_t>(delta); } else if (rmode_ == JS_RETURN && IsPatchedReturnSequence()) { // Special handling of js_return when a break point is set (call // instruction has been inserted). 
- Memory::int32_at(pc_ + 1) -= delta; // relocate entry + Memory::int32_at(pc_ + 1) -= static_cast<int32_t>(delta); // relocate entry } } diff --git a/deps/v8/src/x64/assembler-x64.cc b/deps/v8/src/x64/assembler-x64.cc index 61e8753618..2d524eaf4b 100644 --- a/deps/v8/src/x64/assembler-x64.cc +++ b/deps/v8/src/x64/assembler-x64.cc @@ -80,11 +80,15 @@ XMMRegister xmm15 = { 15 }; // fpu, tsc, cx8, cmov, mmx, sse, sse2, fxsr, syscall uint64_t CpuFeatures::supported_ = kDefaultCpuFeatures; uint64_t CpuFeatures::enabled_ = 0; +uint64_t CpuFeatures::found_by_runtime_probing_ = 0; void CpuFeatures::Probe() { ASSERT(Heap::HasBeenSetup()); ASSERT(supported_ == kDefaultCpuFeatures); - if (Serializer::enabled()) return; // No features if we might serialize. + if (Serializer::enabled()) { + supported_ |= OS::CpuFeaturesImpliedByPlatform(); + return; // No features if we might serialize. + } Assembler assm(NULL, 0); Label cpuid, done; @@ -160,6 +164,11 @@ void CpuFeatures::Probe() { typedef uint64_t (*F0)(); F0 probe = FUNCTION_CAST<F0>(Code::cast(code)->entry()); supported_ = probe(); + found_by_runtime_probing_ = supported_; + found_by_runtime_probing_ &= ~kDefaultCpuFeatures; + uint64_t os_guarantees = OS::CpuFeaturesImpliedByPlatform(); + supported_ |= os_guarantees; + found_by_runtime_probing_ &= ~os_guarantees; // SSE2 and CMOV must be available on an X64 CPU. ASSERT(IsSupported(CPUID)); ASSERT(IsSupported(SSE2)); @@ -337,7 +346,8 @@ void Assembler::GetCode(CodeDesc* desc) { desc->buffer_size = buffer_size_; desc->instr_size = pc_offset(); ASSERT(desc->instr_size > 0); // Zero-size code objects upset the system. 
- desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos(); + desc->reloc_size = + static_cast<int>((buffer_ + buffer_size_) - reloc_info_writer.pos()); desc->origin = this; Counters::reloc_info_size.Increment(desc->reloc_size); @@ -400,7 +410,8 @@ void Assembler::GrowBuffer() { // setup new buffer desc.buffer = NewArray<byte>(desc.buffer_size); desc.instr_size = pc_offset(); - desc.reloc_size = (buffer_ + buffer_size_) - (reloc_info_writer.pos()); + desc.reloc_size = + static_cast<int>((buffer_ + buffer_size_) - (reloc_info_writer.pos())); // Clear the buffer in debug mode. Use 'int3' instructions to make // sure to get into problems if we ever run uninitialized code. @@ -887,7 +898,7 @@ void Assembler::cmpb_al(Immediate imm8) { void Assembler::cpuid() { - ASSERT(CpuFeatures::IsEnabled(CpuFeatures::CPUID)); + ASSERT(CpuFeatures::IsEnabled(CPUID)); EnsureSpace ensure_space(this); last_pc_ = pc_; emit(0x0F); @@ -2045,7 +2056,7 @@ void Assembler::fistp_s(const Operand& adr) { void Assembler::fisttp_s(const Operand& adr) { - ASSERT(CpuFeatures::IsEnabled(CpuFeatures::SSE3)); + ASSERT(CpuFeatures::IsEnabled(SSE3)); EnsureSpace ensure_space(this); last_pc_ = pc_; emit_optional_rex_32(adr); diff --git a/deps/v8/src/x64/assembler-x64.h b/deps/v8/src/x64/assembler-x64.h index 617f092bb4..50f4e0e454 100644 --- a/deps/v8/src/x64/assembler-x64.h +++ b/deps/v8/src/x64/assembler-x64.h @@ -37,6 +37,8 @@ #ifndef V8_X64_ASSEMBLER_X64_H_ #define V8_X64_ASSEMBLER_X64_H_ +#include "serialize.h" + namespace v8 { namespace internal { @@ -362,20 +364,11 @@ class Operand BASE_EMBEDDED { // } class CpuFeatures : public AllStatic { public: - // Feature flags bit positions. They are mostly based on the CPUID spec. - // (We assign CPUID itself to one of the currently reserved bits -- - // feel free to change this if needed.) - enum Feature { SSE3 = 32, - SSE2 = 26, - CMOV = 15, - RDTSC = 4, - CPUID = 10, - SAHF = 0}; // Detect features of the target CPU. 
Set safe defaults if the serializer // is enabled (snapshots must be portable). static void Probe(); // Check whether a feature is supported by the target CPU. - static bool IsSupported(Feature f) { + static bool IsSupported(CpuFeature f) { if (f == SSE2 && !FLAG_enable_sse2) return false; if (f == SSE3 && !FLAG_enable_sse3) return false; if (f == CMOV && !FLAG_enable_cmov) return false; @@ -384,33 +377,35 @@ class CpuFeatures : public AllStatic { return (supported_ & (V8_UINT64_C(1) << f)) != 0; } // Check whether a feature is currently enabled. - static bool IsEnabled(Feature f) { + static bool IsEnabled(CpuFeature f) { return (enabled_ & (V8_UINT64_C(1) << f)) != 0; } // Enable a specified feature within a scope. class Scope BASE_EMBEDDED { #ifdef DEBUG public: - explicit Scope(Feature f) { + explicit Scope(CpuFeature f) { + uint64_t mask = (V8_UINT64_C(1) << f); ASSERT(CpuFeatures::IsSupported(f)); + ASSERT(!Serializer::enabled() || (found_by_runtime_probing_ & mask) == 0); old_enabled_ = CpuFeatures::enabled_; - CpuFeatures::enabled_ |= (V8_UINT64_C(1) << f); + CpuFeatures::enabled_ |= mask; } ~Scope() { CpuFeatures::enabled_ = old_enabled_; } private: uint64_t old_enabled_; #else public: - explicit Scope(Feature f) {} + explicit Scope(CpuFeature f) {} #endif }; private: // Safe defaults include SSE2 and CMOV for X64. It is always available, if // anyone checks, but they shouldn't need to check. - static const uint64_t kDefaultCpuFeatures = - (1 << CpuFeatures::SSE2 | 1 << CpuFeatures::CMOV); + static const uint64_t kDefaultCpuFeatures = (1 << SSE2 | 1 << CMOV); static uint64_t supported_; static uint64_t enabled_; + static uint64_t found_by_runtime_probing_; }; @@ -458,14 +453,25 @@ class Assembler : public Malloced { // the relative displacements stored in the code. 
static inline Address target_address_at(Address pc); static inline void set_target_address_at(Address pc, Address target); + // This sets the branch destination (which is in the instruction on x64). + // This is for calls and branches within generated code. inline static void set_target_at(Address instruction_payload, Address target) { set_target_address_at(instruction_payload, target); } + + // This sets the branch destination (which is a load instruction on x64). + // This is for calls and branches to runtime code. + inline static void set_external_target_at(Address instruction_payload, + Address target) { + *reinterpret_cast<Address*>(instruction_payload) = target; + } + inline Handle<Object> code_target_object_handle_at(Address pc); // Number of bytes taken up by the branch target in the code. - static const int kCallTargetSize = 4; // Use 32-bit displacement. + static const int kCallTargetSize = 4; // Use 32-bit displacement. + static const int kExternalTargetSize = 8; // Use 64-bit absolute. // Distance between the address of the code target in the call instruction // and the return address pushed on the stack. static const int kCallTargetAddressOffset = 4; // Use 32-bit displacement. @@ -836,12 +842,12 @@ class Assembler : public Malloced { } // Shifts dst right, duplicating sign bit, by cl % 64 bits. - void sar(Register dst) { + void sar_cl(Register dst) { shift(dst, 0x7); } // Shifts dst right, duplicating sign bit, by cl % 64 bits. 
- void sarl(Register dst) { + void sarl_cl(Register dst) { shift_32(dst, 0x7); } @@ -849,11 +855,11 @@ class Assembler : public Malloced { shift(dst, shift_amount, 0x4); } - void shl(Register dst) { + void shl_cl(Register dst) { shift(dst, 0x4); } - void shll(Register dst) { + void shll_cl(Register dst) { shift_32(dst, 0x4); } @@ -865,11 +871,11 @@ class Assembler : public Malloced { shift(dst, shift_amount, 0x5); } - void shr(Register dst) { + void shr_cl(Register dst) { shift(dst, 0x5); } - void shrl(Register dst) { + void shrl_cl(Register dst) { shift_32(dst, 0x5); } @@ -1120,7 +1126,7 @@ class Assembler : public Malloced { void RecordStatementPosition(int pos); void WriteRecordedPositions(); - int pc_offset() const { return pc_ - buffer_; } + int pc_offset() const { return static_cast<int>(pc_ - buffer_); } int current_statement_position() const { return current_statement_position_; } int current_position() const { return current_position_; } @@ -1132,7 +1138,9 @@ class Assembler : public Malloced { } // Get the number of bytes available in the buffer. - inline int available_space() const { return reloc_info_writer.pos() - pc_; } + inline int available_space() const { + return static_cast<int>(reloc_info_writer.pos() - pc_); + } // Avoid overflows for displacements etc. static const int kMaximalBufferSize = 512*MB; diff --git a/deps/v8/src/x64/builtins-x64.cc b/deps/v8/src/x64/builtins-x64.cc index 8590365a17..f444d2cf85 100644 --- a/deps/v8/src/x64/builtins-x64.cc +++ b/deps/v8/src/x64/builtins-x64.cc @@ -320,42 +320,23 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) { __ push(Operand(rbp, kArgumentsOffset)); __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION); - // Check the stack for overflow or a break request. - // We need to catch preemptions right here, otherwise an unlucky preemption - // could show up as a failed apply. 
- Label retry_preemption; - Label no_preemption; - __ bind(&retry_preemption); - ExternalReference stack_guard_limit = - ExternalReference::address_of_stack_guard_limit(); - __ movq(kScratchRegister, stack_guard_limit); - __ movq(rcx, rsp); - __ subq(rcx, Operand(kScratchRegister, 0)); - // rcx contains the difference between the stack limit and the stack top. - // We use it below to check that there is enough room for the arguments. - __ j(above, &no_preemption); - - // Preemption! - // Because runtime functions always remove the receiver from the stack, we - // have to fake one to avoid underflowing the stack. - __ push(rax); - __ Push(Smi::FromInt(0)); - - // Do call to runtime routine. - __ CallRuntime(Runtime::kStackGuard, 1); - __ pop(rax); - __ jmp(&retry_preemption); - - __ bind(&no_preemption); - + // Check the stack for overflow. We are not trying need to catch + // interruptions (e.g. debug break and preemption) here, so the "real stack + // limit" is checked. Label okay; + __ LoadRoot(kScratchRegister, Heap::kRealStackLimitRootIndex); + __ movq(rcx, rsp); + // Make rcx the space we have left. The stack might already be overflowed + // here which will cause rcx to become negative. + __ subq(rcx, kScratchRegister); // Make rdx the space we need for the array when it is unrolled onto the // stack. __ PositiveSmiTimesPowerOfTwoToInteger64(rdx, rax, kPointerSizeLog2); + // Check if the arguments will overflow the stack. __ cmpq(rcx, rdx); - __ j(greater, &okay); + __ j(greater, &okay); // Signed comparison. - // Too bad: Out of stack space. + // Out of stack space. 
__ push(Operand(rbp, kFunctionOffset)); __ push(rax); __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION); diff --git a/deps/v8/src/x64/codegen-x64.cc b/deps/v8/src/x64/codegen-x64.cc index 95f30d842d..e2296d9bd9 100644 --- a/deps/v8/src/x64/codegen-x64.cc +++ b/deps/v8/src/x64/codegen-x64.cc @@ -29,6 +29,7 @@ #include "bootstrapper.h" #include "codegen-inl.h" +#include "compiler.h" #include "debug.h" #include "ic-inl.h" #include "parser.h" @@ -74,7 +75,6 @@ void DeferredCode::RestoreRegisters() { CodeGenState::CodeGenState(CodeGenerator* owner) : owner_(owner), - typeof_state_(NOT_INSIDE_TYPEOF), destination_(NULL), previous_(NULL) { owner_->set_state(this); @@ -82,10 +82,8 @@ CodeGenState::CodeGenState(CodeGenerator* owner) CodeGenState::CodeGenState(CodeGenerator* owner, - TypeofState typeof_state, ControlDestination* destination) : owner_(owner), - typeof_state_(typeof_state), destination_(destination), previous_(owner->state()) { owner_->set_state(this); @@ -643,27 +641,6 @@ void DeferredReferenceSetKeyedValue::Generate() { } -class CallFunctionStub: public CodeStub { - public: - CallFunctionStub(int argc, InLoopFlag in_loop) - : argc_(argc), in_loop_(in_loop) { } - - void Generate(MacroAssembler* masm); - - private: - int argc_; - InLoopFlag in_loop_; - -#ifdef DEBUG - void Print() { PrintF("CallFunctionStub (args %d)\n", argc_); } -#endif - - Major MajorKey() { return CallFunction; } - int MinorKey() { return argc_; } - InLoopFlag InLoop() { return in_loop_; } -}; - - void CodeGenerator::CallApplyLazy(Property* apply, Expression* receiver, VariableProxy* arguments, @@ -676,7 +653,7 @@ void CodeGenerator::CallApplyLazy(Property* apply, // Load the apply function onto the stack. This will usually // give us a megamorphic load site. Not super, but it works. 
Reference ref(this, apply); - ref.GetValue(NOT_INSIDE_TYPEOF); + ref.GetValue(); ASSERT(ref.type() == Reference::NAMED); // Load the receiver and the existing arguments object onto the @@ -1001,7 +978,7 @@ void CodeGenerator::VisitIfStatement(IfStatement* node) { JumpTarget then; JumpTarget else_; ControlDestination dest(&then, &else_, true); - LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &dest, true); + LoadCondition(node->condition(), &dest, true); if (dest.false_was_fall_through()) { // The else target was bound, so we compile the else part first. @@ -1028,7 +1005,7 @@ void CodeGenerator::VisitIfStatement(IfStatement* node) { ASSERT(!has_else_stm); JumpTarget then; ControlDestination dest(&then, &exit, true); - LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &dest, true); + LoadCondition(node->condition(), &dest, true); if (dest.false_was_fall_through()) { // The exit label was bound. We may have dangling jumps to the @@ -1048,7 +1025,7 @@ void CodeGenerator::VisitIfStatement(IfStatement* node) { ASSERT(!has_then_stm); JumpTarget else_; ControlDestination dest(&exit, &else_, false); - LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &dest, true); + LoadCondition(node->condition(), &dest, true); if (dest.true_was_fall_through()) { // The exit label was bound. We may have dangling jumps to the @@ -1070,7 +1047,7 @@ void CodeGenerator::VisitIfStatement(IfStatement* node) { // or control flow effect). LoadCondition is called without // forcing control flow. ControlDestination dest(&exit, &exit, true); - LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &dest, false); + LoadCondition(node->condition(), &dest, false); if (!dest.is_used()) { // We got a value on the frame rather than (or in addition to) // control flow. 
@@ -1341,8 +1318,10 @@ void CodeGenerator::VisitDoWhileStatement(DoWhileStatement* node) { node->continue_target()->Bind(); } if (has_valid_frame()) { + Comment cmnt(masm_, "[ DoWhileCondition"); + CodeForDoWhileConditionPosition(node); ControlDestination dest(&body, node->break_target(), false); - LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true); + LoadCondition(node->cond(), &dest, true); } if (node->break_target()->is_linked()) { node->break_target()->Bind(); @@ -1399,7 +1378,7 @@ void CodeGenerator::VisitWhileStatement(WhileStatement* node) { // Compile the test with the body as the true target and preferred // fall-through and with the break target as the false target. ControlDestination dest(&body, node->break_target(), true); - LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true); + LoadCondition(node->cond(), &dest, true); if (dest.false_was_fall_through()) { // If we got the break target as fall-through, the test may have @@ -1446,7 +1425,7 @@ void CodeGenerator::VisitWhileStatement(WhileStatement* node) { // The break target is the fall-through (body is a backward // jump from here and thus an invalid fall-through). ControlDestination dest(&body, node->break_target(), false); - LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true); + LoadCondition(node->cond(), &dest, true); } } else { // If we have chosen not to recompile the test at the @@ -1538,7 +1517,7 @@ void CodeGenerator::VisitForStatement(ForStatement* node) { // Compile the test with the body as the true target and preferred // fall-through and with the break target as the false target. 
ControlDestination dest(&body, node->break_target(), true); - LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true); + LoadCondition(node->cond(), &dest, true); if (dest.false_was_fall_through()) { // If we got the break target as fall-through, the test may have @@ -1608,7 +1587,7 @@ void CodeGenerator::VisitForStatement(ForStatement* node) { // The break target is the fall-through (body is a backward // jump from here). ControlDestination dest(&body, node->break_target(), false); - LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &dest, true); + LoadCondition(node->cond(), &dest, true); } } else { // Otherwise, jump back to the test at the top. @@ -2188,7 +2167,8 @@ void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) { Comment cmnt(masm_, "[ FunctionLiteral"); // Build the function boilerplate and instantiate it. - Handle<JSFunction> boilerplate = BuildBoilerplate(node); + Handle<JSFunction> boilerplate = + Compiler::BuildBoilerplate(node, script_, this); // Check for stack-overflow exception. if (HasStackOverflow()) return; InstantiateBoilerplate(boilerplate); @@ -2208,25 +2188,25 @@ void CodeGenerator::VisitConditional(Conditional* node) { JumpTarget else_; JumpTarget exit; ControlDestination dest(&then, &else_, true); - LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &dest, true); + LoadCondition(node->condition(), &dest, true); if (dest.false_was_fall_through()) { // The else target was bound, so we compile the else part first. - Load(node->else_expression(), typeof_state()); + Load(node->else_expression()); if (then.is_linked()) { exit.Jump(); then.Bind(); - Load(node->then_expression(), typeof_state()); + Load(node->then_expression()); } } else { // The then target was bound, so we compile the then part first. 
- Load(node->then_expression(), typeof_state()); + Load(node->then_expression()); if (else_.is_linked()) { exit.Jump(); else_.Bind(); - Load(node->else_expression(), typeof_state()); + Load(node->else_expression()); } } @@ -2236,7 +2216,7 @@ void CodeGenerator::VisitConditional(Conditional* node) { void CodeGenerator::VisitSlot(Slot* node) { Comment cmnt(masm_, "[ Slot"); - LoadFromSlotCheckForArguments(node, typeof_state()); + LoadFromSlotCheckForArguments(node, NOT_INSIDE_TYPEOF); } @@ -2249,7 +2229,7 @@ void CodeGenerator::VisitVariableProxy(VariableProxy* node) { } else { ASSERT(var->is_global()); Reference ref(this, node); - ref.GetValue(typeof_state()); + ref.GetValue(); } } @@ -2640,9 +2620,9 @@ void CodeGenerator::VisitAssignment(Assignment* node) { // the target, with an implicit promise that it will be written to again // before it is read. if (literal != NULL || (right_var != NULL && right_var != var)) { - target.TakeValue(NOT_INSIDE_TYPEOF); + target.TakeValue(); } else { - target.GetValue(NOT_INSIDE_TYPEOF); + target.GetValue(); } Load(node->value()); GenericBinaryOperation(node->binary_op(), @@ -2690,7 +2670,7 @@ void CodeGenerator::VisitThrow(Throw* node) { void CodeGenerator::VisitProperty(Property* node) { Comment cmnt(masm_, "[ Property"); Reference property(this, node); - property.GetValue(typeof_state()); + property.GetValue(); } @@ -2876,7 +2856,7 @@ void CodeGenerator::VisitCall(Call* node) { // Load the function to call from the property through a reference. Reference ref(this, property); - ref.GetValue(NOT_INSIDE_TYPEOF); + ref.GetValue(); // Pass receiver to called function. 
if (property->is_synthetic()) { @@ -2982,9 +2962,6 @@ void CodeGenerator::VisitCallRuntime(CallRuntime* node) { void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) { - // Note that because of NOT and an optimization in comparison of a typeof - // expression to a literal string, this function can fail to leave a value - // on top of the frame or in the cc register. Comment cmnt(masm_, "[ UnaryOperation"); Token::Value op = node->op(); @@ -2993,7 +2970,7 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) { // Swap the true and false targets but keep the same actual label // as the fall through. destination()->Invert(); - LoadCondition(node->expression(), NOT_INSIDE_TYPEOF, destination(), true); + LoadCondition(node->expression(), destination(), true); // Swap the labels back. destination()->Invert(); @@ -3233,7 +3210,7 @@ void CodeGenerator::VisitCountOperation(CountOperation* node) { if (!is_postfix) frame_->Push(Smi::FromInt(0)); return; } - target.TakeValue(NOT_INSIDE_TYPEOF); + target.TakeValue(); Result new_value = frame_->Pop(); new_value.ToRegister(); @@ -3291,9 +3268,6 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) { // TODO(X64): This code was copied verbatim from codegen-ia32. // Either find a reason to change it or move it to a shared location. - // Note that due to an optimization in comparison operations (typeof - // compared to a string literal), we can evaluate a binary expression such - // as AND or OR and not leave a value on the frame or in the cc register. 
Comment cmnt(masm_, "[ BinaryOperation"); Token::Value op = node->op(); @@ -3309,7 +3283,7 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) { if (op == Token::AND) { JumpTarget is_true; ControlDestination dest(&is_true, destination()->false_target(), true); - LoadCondition(node->left(), NOT_INSIDE_TYPEOF, &dest, false); + LoadCondition(node->left(), &dest, false); if (dest.false_was_fall_through()) { // The current false target was used as the fall-through. If @@ -3328,7 +3302,7 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) { is_true.Bind(); // The left subexpression compiled to control flow, so the // right one is free to do so as well. - LoadCondition(node->right(), NOT_INSIDE_TYPEOF, destination(), false); + LoadCondition(node->right(), destination(), false); } else { // We have actually just jumped to or bound the current false // target but the current control destination is not marked as @@ -3339,7 +3313,7 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) { } else if (dest.is_used()) { // The left subexpression compiled to control flow (and is_true // was just bound), so the right is free to do so as well. - LoadCondition(node->right(), NOT_INSIDE_TYPEOF, destination(), false); + LoadCondition(node->right(), destination(), false); } else { // We have a materialized value on the frame, so we exit with @@ -3372,7 +3346,7 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) { } else if (op == Token::OR) { JumpTarget is_false; ControlDestination dest(destination()->true_target(), &is_false, false); - LoadCondition(node->left(), NOT_INSIDE_TYPEOF, &dest, false); + LoadCondition(node->left(), &dest, false); if (dest.true_was_fall_through()) { // The current true target was used as the fall-through. 
If @@ -3391,7 +3365,7 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) { is_false.Bind(); // The left subexpression compiled to control flow, so the // right one is free to do so as well. - LoadCondition(node->right(), NOT_INSIDE_TYPEOF, destination(), false); + LoadCondition(node->right(), destination(), false); } else { // We have just jumped to or bound the current true target but // the current control destination is not marked as used. @@ -3401,7 +3375,7 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) { } else if (dest.is_used()) { // The left subexpression compiled to control flow (and is_false // was just bound), so the right is free to do so as well. - LoadCondition(node->right(), NOT_INSIDE_TYPEOF, destination(), false); + LoadCondition(node->right(), destination(), false); } else { // We have a materialized value on the frame, so we exit with @@ -3523,6 +3497,9 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) { destination()->false_target()->Branch(is_smi); frame_->Spill(answer.reg()); __ CmpObjectType(answer.reg(), JS_FUNCTION_TYPE, answer.reg()); + destination()->true_target()->Branch(equal); + // Regular expressions are callable so typeof == 'function'. + __ CmpInstanceType(answer.reg(), JS_REGEXP_TYPE); answer.Unuse(); destination()->Split(equal); @@ -3532,9 +3509,11 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) { __ CompareRoot(answer.reg(), Heap::kNullValueRootIndex); destination()->true_target()->Branch(equal); + // Regular expressions are typeof == 'function', not 'object'. + __ CmpObjectType(answer.reg(), JS_REGEXP_TYPE, kScratchRegister); + destination()->false_target()->Branch(equal); + // It can be an undetectable object. 
- __ movq(kScratchRegister, - FieldOperand(answer.reg(), HeapObject::kMapOffset)); __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset), Immediate(1 << Map::kIsUndetectable)); destination()->false_target()->Branch(not_zero); @@ -3679,7 +3658,6 @@ void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) { Label slow_case; Label end; Label not_a_flat_string; - Label a_cons_string; Label try_again_with_new_string; Label ascii_string; Label got_char_code; @@ -3758,7 +3736,7 @@ void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) { __ addq(rcx, Immediate(String::kLongLengthShift)); // Fetch the length field into the temporary register. __ movl(temp.reg(), FieldOperand(object.reg(), String::kLengthOffset)); - __ shrl(temp.reg()); // The shift amount in ecx is implicit operand. + __ shrl_cl(temp.reg()); // Check for index out of range. __ cmpl(index.reg(), temp.reg()); __ j(greater_equal, &slow_case); @@ -3767,10 +3745,11 @@ void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) { __ movzxbl(temp.reg(), FieldOperand(temp.reg(), Map::kInstanceTypeOffset)); // We need special handling for non-flat strings. - ASSERT(kSeqStringTag == 0); + ASSERT_EQ(0, kSeqStringTag); __ testb(temp.reg(), Immediate(kStringRepresentationMask)); __ j(not_zero, ¬_a_flat_string); // Check for 1-byte or 2-byte string. + ASSERT_EQ(0, kTwoByteStringTag); __ testb(temp.reg(), Immediate(kStringEncodingMask)); __ j(not_zero, &ascii_string); @@ -3797,21 +3776,16 @@ void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) { __ bind(¬_a_flat_string); __ and_(temp.reg(), Immediate(kStringRepresentationMask)); __ cmpb(temp.reg(), Immediate(kConsStringTag)); - __ j(equal, &a_cons_string); - __ cmpb(temp.reg(), Immediate(kSlicedStringTag)); __ j(not_equal, &slow_case); - // SlicedString. - // Add the offset to the index and trigger the slow case on overflow. 
- __ addl(index.reg(), FieldOperand(object.reg(), SlicedString::kStartOffset)); - __ j(overflow, &slow_case); - // Getting the underlying string is done by running the cons string code. - // ConsString. - __ bind(&a_cons_string); - // Get the first of the two strings. Both sliced and cons strings - // store their source string at the same offset. - ASSERT(SlicedString::kBufferOffset == ConsString::kFirstOffset); + // Check that the right hand side is the empty string (ie if this is really a + // flat string in a cons string). If that is not the case we would rather go + // to the runtime system now, to flatten the string. + __ movq(temp.reg(), FieldOperand(object.reg(), ConsString::kSecondOffset)); + __ CompareRoot(temp.reg(), Heap::kEmptyStringRootIndex); + __ j(not_equal, &slow_case); + // Get the first of the two strings. __ movq(object.reg(), FieldOperand(object.reg(), ConsString::kFirstOffset)); __ jmp(&try_again_with_new_string); @@ -4122,18 +4096,17 @@ void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) { // ----------------------------------------------------------------------------- // CodeGenerator implementation of Expressions -void CodeGenerator::LoadAndSpill(Expression* expression, - TypeofState typeof_state) { +void CodeGenerator::LoadAndSpill(Expression* expression) { // TODO(x64): No architecture specific code. Move to shared location. 
ASSERT(in_spilled_code()); set_in_spilled_code(false); - Load(expression, typeof_state); + Load(expression); frame_->SpillAll(); set_in_spilled_code(true); } -void CodeGenerator::Load(Expression* x, TypeofState typeof_state) { +void CodeGenerator::Load(Expression* expr) { #ifdef DEBUG int original_height = frame_->height(); #endif @@ -4141,7 +4114,7 @@ void CodeGenerator::Load(Expression* x, TypeofState typeof_state) { JumpTarget true_target; JumpTarget false_target; ControlDestination dest(&true_target, &false_target, true); - LoadCondition(x, typeof_state, &dest, false); + LoadCondition(expr, &dest, false); if (dest.false_was_fall_through()) { // The false target was just bound. @@ -4201,13 +4174,12 @@ void CodeGenerator::Load(Expression* x, TypeofState typeof_state) { // partially compiled) into control flow to the control destination. // If force_control is true, control flow is forced. void CodeGenerator::LoadCondition(Expression* x, - TypeofState typeof_state, ControlDestination* dest, bool force_control) { ASSERT(!in_spilled_code()); int original_height = frame_->height(); - { CodeGenState new_state(this, typeof_state, dest); + { CodeGenState new_state(this, dest); Visit(x); // If we hit a stack overflow, we may not have actually visited @@ -4835,23 +4807,25 @@ Result CodeGenerator::StoreArgumentsObject(bool initial) { } -// TODO(1241834): Get rid of this function in favor of just using Load, now -// that we have the INSIDE_TYPEOF typeof state. => Need to handle global -// variables w/o reference errors elsewhere. -void CodeGenerator::LoadTypeofExpression(Expression* x) { - Variable* variable = x->AsVariableProxy()->AsVariable(); +void CodeGenerator::LoadTypeofExpression(Expression* expr) { + // Special handling of identifiers as subexpressions of typeof. + Variable* variable = expr->AsVariableProxy()->AsVariable(); if (variable != NULL && !variable->is_this() && variable->is_global()) { - // NOTE: This is somewhat nasty. 
We force the compiler to load - // the variable as if through '<global>.<variable>' to make sure we - // do not get reference errors. + // For a global variable we build the property reference + // <global>.<variable> and perform a (regular non-contextual) property + // load to make sure we do not get reference errors. Slot global(variable, Slot::CONTEXT, Context::GLOBAL_INDEX); Literal key(variable->name()); - // TODO(1241834): Fetch the position from the variable instead of using - // no position. Property property(&global, &key, RelocInfo::kNoPosition); - Load(&property); + Reference ref(this, &property); + ref.GetValue(); + } else if (variable != NULL && variable->slot() != NULL) { + // For a variable that rewrites to a slot, we signal it is the immediate + // subexpression of a typeof. + LoadFromSlotCheckForArguments(variable->slot(), INSIDE_TYPEOF); } else { - Load(x, INSIDE_TYPEOF); + // Anything else can be handled normally. + Load(expr); } } @@ -5746,7 +5720,7 @@ Handle<String> Reference::GetName() { } -void Reference::GetValue(TypeofState typeof_state) { +void Reference::GetValue() { ASSERT(!cgen_->in_spilled_code()); ASSERT(cgen_->HasValidEntryRegisters()); ASSERT(!is_illegal()); @@ -5763,17 +5737,11 @@ void Reference::GetValue(TypeofState typeof_state) { Comment cmnt(masm, "[ Load from Slot"); Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot(); ASSERT(slot != NULL); - cgen_->LoadFromSlotCheckForArguments(slot, typeof_state); + cgen_->LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF); break; } case NAMED: { - // TODO(1241834): Make sure that it is safe to ignore the - // distinction between expressions in a typeof and not in a - // typeof. If there is a chance that reference errors can be - // thrown below, we must distinguish between the two kinds of - // loads (typeof expression loads must not throw a reference - // error). 
Variable* var = expression_->AsVariableProxy()->AsVariable(); bool is_global = var != NULL; ASSERT(!is_global || var->is_global()); @@ -5855,8 +5823,6 @@ void Reference::GetValue(TypeofState typeof_state) { } case KEYED: { - // TODO(1241834): Make sure that this it is safe to ignore the - // distinction between expressions in a typeof and not in a typeof. Comment cmnt(masm, "[ Load from keyed Property"); Variable* var = expression_->AsVariableProxy()->AsVariable(); bool is_global = var != NULL; @@ -5978,7 +5944,7 @@ void Reference::GetValue(TypeofState typeof_state) { } -void Reference::TakeValue(TypeofState typeof_state) { +void Reference::TakeValue() { // TODO(X64): This function is completely architecture independent. Move // it somewhere shared. @@ -5987,7 +5953,7 @@ void Reference::TakeValue(TypeofState typeof_state) { ASSERT(!cgen_->in_spilled_code()); ASSERT(!is_illegal()); if (type_ != SLOT) { - GetValue(typeof_state); + GetValue(); return; } @@ -5997,7 +5963,7 @@ void Reference::TakeValue(TypeofState typeof_state) { slot->type() == Slot::CONTEXT || slot->var()->mode() == Variable::CONST || slot->is_arguments()) { - GetValue(typeof_state); + GetValue(); return; } @@ -6601,11 +6567,11 @@ void InstanceofStub::Generate(MacroAssembler* masm) { __ jmp(&loop); __ bind(&is_instance); - __ xor_(rax, rax); + __ xorl(rax, rax); __ ret(2 * kPointerSize); __ bind(&is_not_instance); - __ Move(rax, Smi::FromInt(1)); + __ movl(rax, Immediate(1)); __ ret(2 * kPointerSize); // Slow-case: Go through the JavaScript implementation. @@ -6771,7 +6737,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm, Label* throw_normal_exception, Label* throw_termination_exception, Label* throw_out_of_memory_exception, - StackFrame::Type frame_type, + ExitFrame::Mode mode, bool do_gc, bool always_allocate_scope) { // rax: result parameter for PerformGC, if any. 
@@ -6854,7 +6820,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm, __ j(zero, &failure_returned); // Exit the JavaScript to C++ exit frame. - __ LeaveExitFrame(frame_type, result_size_); + __ LeaveExitFrame(mode, result_size_); __ ret(0); // Handling of failure. @@ -6984,12 +6950,12 @@ void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) { // this by performing a garbage collection and retrying the // builtin once. - StackFrame::Type frame_type = is_debug_break ? - StackFrame::EXIT_DEBUG : - StackFrame::EXIT; + ExitFrame::Mode mode = is_debug_break ? + ExitFrame::MODE_DEBUG : + ExitFrame::MODE_NORMAL; // Enter the exit frame that transitions from JavaScript to C++. - __ EnterExitFrame(frame_type, result_size_); + __ EnterExitFrame(mode, result_size_); // rax: Holds the context at this point, but should not be used. // On entry to code generated by GenerateCore, it must hold @@ -7012,7 +6978,7 @@ void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) { &throw_normal_exception, &throw_termination_exception, &throw_out_of_memory_exception, - frame_type, + mode, false, false); @@ -7021,7 +6987,7 @@ void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) { &throw_normal_exception, &throw_termination_exception, &throw_out_of_memory_exception, - frame_type, + mode, true, false); @@ -7032,7 +6998,7 @@ void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) { &throw_normal_exception, &throw_termination_exception, &throw_out_of_memory_exception, - frame_type, + mode, true, true); @@ -7047,6 +7013,11 @@ void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) { } +void ApiGetterEntryStub::Generate(MacroAssembler* masm) { + UNREACHABLE(); +} + + void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { Label invoke, exit; #ifdef ENABLE_LOGGING_AND_PROFILING @@ -7604,7 +7575,7 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) { if (use_sse3_) { // Truncate 
the operands to 32-bit integers and check for // exceptions in doing so. - CpuFeatures::Scope scope(CpuFeatures::SSE3); + CpuFeatures::Scope scope(SSE3); __ fisttp_s(Operand(rsp, 0 * kPointerSize)); __ fisttp_s(Operand(rsp, 1 * kPointerSize)); __ fnstsw_ax(); @@ -7633,9 +7604,9 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) { case Token::BIT_OR: __ orl(rax, rcx); break; case Token::BIT_AND: __ andl(rax, rcx); break; case Token::BIT_XOR: __ xorl(rax, rcx); break; - case Token::SAR: __ sarl(rax); break; - case Token::SHL: __ shll(rax); break; - case Token::SHR: __ shrl(rax); break; + case Token::SAR: __ sarl_cl(rax); break; + case Token::SHL: __ shll_cl(rax); break; + case Token::SHR: __ shrl_cl(rax); break; default: UNREACHABLE(); } if (op_ == Token::SHR) { @@ -7797,7 +7768,7 @@ ModuloFunction CreateModuloFunction() { &actual_size, true)); CHECK(buffer); - Assembler masm(buffer, actual_size); + Assembler masm(buffer, static_cast<int>(actual_size)); // Generated code is put into a fixed, unmovable, buffer, and not into // the V8 heap. We can't, and don't, refer to any relocatable addresses // (e.g. the JavaScript nan-object). diff --git a/deps/v8/src/x64/codegen-x64.h b/deps/v8/src/x64/codegen-x64.h index 0721d5228f..0301daf3d7 100644 --- a/deps/v8/src/x64/codegen-x64.h +++ b/deps/v8/src/x64/codegen-x64.h @@ -77,12 +77,12 @@ class Reference BASE_EMBEDDED { // Generate code to push the value of the reference on top of the // expression stack. The reference is expected to be already on top of // the expression stack, and it is left in place with its value above it. - void GetValue(TypeofState typeof_state); + void GetValue(); // Like GetValue except that the slot is expected to be written to before // being read from again. Thae value of the reference may be invalidated, // causing subsequent attempts to read it to fail. 
- void TakeValue(TypeofState typeof_state); + void TakeValue(); // Generate code to store the value on top of the expression stack in the // reference. The reference is expected to be immediately below the value @@ -241,28 +241,20 @@ class CodeGenState BASE_EMBEDDED { explicit CodeGenState(CodeGenerator* owner); // Create a code generator state based on a code generator's current - // state. The new state may or may not be inside a typeof, and has its - // own control destination. - CodeGenState(CodeGenerator* owner, - TypeofState typeof_state, - ControlDestination* destination); + // state. The new state has its own control destination. + CodeGenState(CodeGenerator* owner, ControlDestination* destination); // Destroy a code generator state and restore the owning code generator's // previous state. ~CodeGenState(); // Accessors for the state. - TypeofState typeof_state() const { return typeof_state_; } ControlDestination* destination() const { return destination_; } private: // The owning code generator. CodeGenerator* owner_; - // A flag indicating whether we are compiling the immediate subexpression - // of a typeof expression. - TypeofState typeof_state_; - // A control destination in case the expression has a control-flow // effect. 
ControlDestination* destination_; @@ -307,17 +299,12 @@ class CodeGenerator: public AstVisitor { static bool ShouldGenerateLog(Expression* type); #endif - static void SetFunctionInfo(Handle<JSFunction> fun, - FunctionLiteral* lit, - bool is_toplevel, - Handle<Script> script); - static void RecordPositions(MacroAssembler* masm, int pos); // Accessors MacroAssembler* masm() { return masm_; } - VirtualFrame* frame() const { return frame_; } + Handle<Script> script() { return script_; } bool has_valid_frame() const { return frame_ != NULL; } @@ -353,7 +340,6 @@ class CodeGenerator: public AstVisitor { bool is_eval() { return is_eval_; } // State - TypeofState typeof_state() const { return state_->typeof_state(); } ControlDestination* destination() const { return state_->destination(); } // Track loop nesting level. @@ -414,18 +400,16 @@ class CodeGenerator: public AstVisitor { } void LoadCondition(Expression* x, - TypeofState typeof_state, ControlDestination* destination, bool force_control); - void Load(Expression* x, TypeofState typeof_state = NOT_INSIDE_TYPEOF); + void Load(Expression* expr); void LoadGlobal(); void LoadGlobalReceiver(); // Generate code to push the value of an expression on top of the frame // and then spill the frame fully to memory. This function is used // temporarily while the code generator is being transformed. - void LoadAndSpill(Expression* expression, - TypeofState typeof_state = NOT_INSIDE_TYPEOF); + void LoadAndSpill(Expression* expression); // Read a value from a slot and leave it on top of the expression stack. 
void LoadFromSlot(Slot* slot, TypeofState typeof_state); @@ -511,8 +495,6 @@ class CodeGenerator: public AstVisitor { static bool PatchInlineRuntimeEntry(Handle<String> name, const InlineRuntimeLUT& new_entry, InlineRuntimeLUT* old_entry); - static Handle<Code> ComputeLazyCompile(int argc); - Handle<JSFunction> BuildBoilerplate(FunctionLiteral* node); void ProcessDeclarations(ZoneList<Declaration*>* declarations); static Handle<Code> ComputeCallInitialize(int argc, InLoopFlag in_loop); @@ -574,6 +556,7 @@ class CodeGenerator: public AstVisitor { void CodeForFunctionPosition(FunctionLiteral* fun); void CodeForReturnPosition(FunctionLiteral* fun); void CodeForStatementPosition(Statement* node); + void CodeForDoWhileConditionPosition(DoWhileStatement* stmt); void CodeForSourcePosition(int pos); #ifdef DEBUG @@ -633,6 +616,25 @@ class CodeGenerator: public AstVisitor { // times by generated code to perform common tasks, often the slow // case of a JavaScript operation. They are all subclasses of CodeStub, // which is declared in code-stubs.h. 
+class CallFunctionStub: public CodeStub { + public: + CallFunctionStub(int argc, InLoopFlag in_loop) + : argc_(argc), in_loop_(in_loop) { } + + void Generate(MacroAssembler* masm); + + private: + int argc_; + InLoopFlag in_loop_; + +#ifdef DEBUG + void Print() { PrintF("CallFunctionStub (args %d)\n", argc_); } +#endif + + Major MajorKey() { return CallFunction; } + int MinorKey() { return argc_; } + InLoopFlag InLoop() { return in_loop_; } +}; class ToBooleanStub: public CodeStub { @@ -664,7 +666,7 @@ class GenericBinaryOpStub: public CodeStub { flags_(flags), args_in_registers_(false), args_reversed_(false) { - use_sse3_ = CpuFeatures::IsSupported(CpuFeatures::SSE3); + use_sse3_ = CpuFeatures::IsSupported(SSE3); ASSERT(OpBits::is_valid(Token::NUM_TOKENS)); } diff --git a/deps/v8/src/x64/disasm-x64.cc b/deps/v8/src/x64/disasm-x64.cc index 9fd581df39..0b43e766e6 100644 --- a/deps/v8/src/x64/disasm-x64.cc +++ b/deps/v8/src/x64/disasm-x64.cc @@ -1069,7 +1069,7 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) { } else { UnimplementedInstruction(); } - return current - data; + return static_cast<int>(current - data); } @@ -1474,7 +1474,7 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer, tmp_buffer_[tmp_buffer_pos_] = '\0'; } - int instr_len = data - instr; + int instr_len = static_cast<int>(data - instr); ASSERT(instr_len > 0); // Ensure progress. 
int outp = 0; @@ -1586,7 +1586,7 @@ void Disassembler::Disassemble(FILE* f, byte* begin, byte* end) { for (byte* bp = prev_pc; bp < pc; bp++) { fprintf(f, "%02x", *bp); } - for (int i = 6 - (pc - prev_pc); i >= 0; i--) { + for (int i = 6 - static_cast<int>(pc - prev_pc); i >= 0; i--) { fprintf(f, " "); } fprintf(f, " %s\n", buffer.start()); diff --git a/deps/v8/src/x64/fast-codegen-x64.cc b/deps/v8/src/x64/fast-codegen-x64.cc index b938119cd3..bb85ef5d69 100644 --- a/deps/v8/src/x64/fast-codegen-x64.cc +++ b/deps/v8/src/x64/fast-codegen-x64.cc @@ -28,6 +28,7 @@ #include "v8.h" #include "codegen-inl.h" +#include "compiler.h" #include "debug.h" #include "fast-codegen.h" #include "parser.h" @@ -61,9 +62,65 @@ void FastCodeGenerator::Generate(FunctionLiteral* fun) { { Comment cmnt(masm_, "[ Allocate locals"); int locals_count = fun->scope()->num_stack_slots(); - for (int i = 0; i < locals_count; i++) { - __ PushRoot(Heap::kUndefinedValueRootIndex); + if (locals_count <= 1) { + if (locals_count > 0) { + __ PushRoot(Heap::kUndefinedValueRootIndex); + } + } else { + __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex); + for (int i = 0; i < locals_count; i++) { + __ push(rdx); + } + } + } + + bool function_in_register = true; + + Variable* arguments = fun->scope()->arguments()->AsVariable(); + if (arguments != NULL) { + // Function uses arguments object. + Comment cmnt(masm_, "[ Allocate arguments object"); + __ push(rdi); + // The receiver is just before the parameters on the caller's stack. + __ lea(rdx, Operand(rbp, StandardFrameConstants::kCallerSPOffset + + fun->num_parameters() * kPointerSize)); + __ push(rdx); + __ Push(Smi::FromInt(fun->num_parameters())); + // Arguments to ArgumentsAccessStub: + // function, receiver address, parameter count. + // The stub will rewrite receiver and parameter count if the previous + // stack frame was an arguments adapter frame. 
+ ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT); + __ CallStub(&stub); + // Store new arguments object in both "arguments" and ".arguments" slots. + __ movq(Operand(rbp, SlotOffset(arguments->slot())), rax); + Slot* dot_arguments_slot = + fun->scope()->arguments_shadow()->AsVariable()->slot(); + __ movq(Operand(rbp, SlotOffset(dot_arguments_slot)), rax); + function_in_register = false; + } + + // Possibly allocate a local context. + if (fun->scope()->num_heap_slots() > 0) { + Comment cmnt(masm_, "[ Allocate local context"); + if (function_in_register) { + // Argument to NewContext is the function, still in rdi. + __ push(rdi); + } else { + // Argument to NewContext is the function, no longer in rdi. + __ push(Operand(rbp, JavaScriptFrameConstants::kFunctionOffset)); } + __ CallRuntime(Runtime::kNewContext, 1); + // Context is returned in both rax and rsi. It replaces the context + // passed to us. It's saved in the stack and kept live in rsi. + __ movq(Operand(rbp, StandardFrameConstants::kContextOffset), rsi); +#ifdef DEBUG + // Assert we do not have to copy any parameters into the context. + for (int i = 0, len = fun->scope()->num_parameters(); i < len; i++) { + Slot* slot = fun->scope()->parameter(i)->slot(); + ASSERT(slot != NULL && slot->type() != Slot::CONTEXT); + } +#endif } { Comment cmnt(masm_, "[ Stack check"); @@ -84,25 +141,41 @@ void FastCodeGenerator::Generate(FunctionLiteral* fun) { } { Comment cmnt(masm_, "[ Body"); + ASSERT(loop_depth() == 0); VisitStatements(fun->body()); + ASSERT(loop_depth() == 0); } { Comment cmnt(masm_, "[ return <undefined>;"); - // Emit a 'return undefined' in case control fell off the end of the - // body. + // Emit a 'return undefined' in case control fell off the end of the body. 
__ LoadRoot(rax, Heap::kUndefinedValueRootIndex); - SetReturnPosition(fun); + EmitReturnSequence(function_->end_position()); + } +} + + +void FastCodeGenerator::EmitReturnSequence(int position) { + Comment cmnt(masm_, "[ Return sequence"); + if (return_label_.is_bound()) { + __ jmp(&return_label_); + } else { + __ bind(&return_label_); if (FLAG_trace) { __ push(rax); __ CallRuntime(Runtime::kTraceExit, 1); } +#ifdef DEBUG + // Add a label for checking the size of the code used for returning. + Label check_exit_codesize; + masm_->bind(&check_exit_codesize); +#endif + CodeGenerator::RecordPositions(masm_, position); __ RecordJSReturn(); - // Do not use the leave instruction here because it is too short to // patch with the code required by the debugger. __ movq(rsp, rbp); __ pop(rbp); - __ ret((fun->scope()->num_parameters() + 1) * kPointerSize); + __ ret((function_->scope()->num_parameters() + 1) * kPointerSize); #ifdef ENABLE_DEBUGGER_SUPPORT // Add padding that will be overwritten by a debugger breakpoint. We // have just generated "movq rsp, rbp; pop rbp; ret k" with length 7 @@ -111,59 +184,229 @@ void FastCodeGenerator::Generate(FunctionLiteral* fun) { for (int i = 0; i < kPadding; ++i) { masm_->int3(); } + // Check that the size of the code used for returning matches what is + // expected by the debugger. 
+ ASSERT_EQ(Debug::kX64JSReturnSequenceLength, + masm_->SizeOfCodeGeneratedSince(&check_exit_codesize)); #endif } } -void FastCodeGenerator::Move(Location destination, Slot* source) { - switch (destination.type()) { - case Location::kUninitialized: +void FastCodeGenerator::Move(Expression::Context context, Register source) { + switch (context) { + case Expression::kUninitialized: UNREACHABLE(); - case Location::kEffect: + case Expression::kEffect: break; - case Location::kValue: - __ push(Operand(rbp, SlotOffset(source))); + case Expression::kValue: + __ push(source); break; + case Expression::kTest: + TestAndBranch(source, true_label_, false_label_); + break; + case Expression::kValueTest: { + Label discard; + __ push(source); + TestAndBranch(source, true_label_, &discard); + __ bind(&discard); + __ addq(rsp, Immediate(kPointerSize)); + __ jmp(false_label_); + break; + } + case Expression::kTestValue: { + Label discard; + __ push(source); + TestAndBranch(source, &discard, false_label_); + __ bind(&discard); + __ addq(rsp, Immediate(kPointerSize)); + __ jmp(true_label_); + break; + } } } -void FastCodeGenerator::Move(Location destination, Literal* expr) { - switch (destination.type()) { - case Location::kUninitialized: +void FastCodeGenerator::Move(Expression::Context context, Slot* source) { + switch (context) { + case Expression::kUninitialized: UNREACHABLE(); - case Location::kEffect: + case Expression::kEffect: break; - case Location::kValue: - __ Push(expr->handle()); + case Expression::kValue: + __ push(Operand(rbp, SlotOffset(source))); + break; + case Expression::kTest: // Fall through. + case Expression::kValueTest: // Fall through. + case Expression::kTestValue: + __ movq(rax, Operand(rbp, SlotOffset(source))); + Move(context, rax); break; } } -void FastCodeGenerator::Move(Slot* destination, Location source) { - switch (source.type()) { - case Location::kUninitialized: // Fall through. 
- case Location::kEffect: +void FastCodeGenerator::Move(Expression::Context context, Literal* expr) { + switch (context) { + case Expression::kUninitialized: UNREACHABLE(); - case Location::kValue: - __ pop(Operand(rbp, SlotOffset(destination))); + case Expression::kEffect: + break; + case Expression::kValue: + __ Push(expr->handle()); + break; + case Expression::kTest: // Fall through. + case Expression::kValueTest: // Fall through. + case Expression::kTestValue: + __ Move(rax, expr->handle()); + Move(context, rax); break; } } -void FastCodeGenerator::DropAndMove(Location destination, Register source) { - switch (destination.type()) { - case Location::kUninitialized: +void FastCodeGenerator::DropAndMove(Expression::Context context, + Register source) { + switch (context) { + case Expression::kUninitialized: UNREACHABLE(); - case Location::kEffect: + case Expression::kEffect: + __ addq(rsp, Immediate(kPointerSize)); + break; + case Expression::kValue: + __ movq(Operand(rsp, 0), source); + break; + case Expression::kTest: + ASSERT(!source.is(rsp)); + __ addq(rsp, Immediate(kPointerSize)); + TestAndBranch(source, true_label_, false_label_); + break; + case Expression::kValueTest: { + Label discard; + __ movq(Operand(rsp, 0), source); + TestAndBranch(source, true_label_, &discard); + __ bind(&discard); __ addq(rsp, Immediate(kPointerSize)); + __ jmp(false_label_); break; - case Location::kValue: + } + case Expression::kTestValue: { + Label discard; __ movq(Operand(rsp, 0), source); + TestAndBranch(source, &discard, false_label_); + __ bind(&discard); + __ addq(rsp, Immediate(kPointerSize)); + __ jmp(true_label_); + break; + } + } +} + + +void FastCodeGenerator::TestAndBranch(Register source, + Label* true_label, + Label* false_label) { + ASSERT_NE(NULL, true_label); + ASSERT_NE(NULL, false_label); + // Use the shared ToBoolean stub to compile the value in the register into + // control flow to the code generator's true and false labels. 
Perform + // the fast checks assumed by the stub. + + // The undefined value is false. + __ CompareRoot(source, Heap::kUndefinedValueRootIndex); + __ j(equal, false_label); + __ CompareRoot(source, Heap::kTrueValueRootIndex); // True is true. + __ j(equal, true_label); + __ CompareRoot(source, Heap::kFalseValueRootIndex); // False is false. + __ j(equal, false_label); + ASSERT_EQ(0, kSmiTag); + __ SmiCompare(source, Smi::FromInt(0)); // The smi zero is false. + __ j(equal, false_label); + Condition is_smi = masm_->CheckSmi(source); // All other smis are true. + __ j(is_smi, true_label); + + // Call the stub for all other cases. + __ push(source); + ToBooleanStub stub; + __ CallStub(&stub); + __ testq(rax, rax); // The stub returns nonzero for true. + __ j(not_zero, true_label); + __ jmp(false_label); +} + + +void FastCodeGenerator::VisitDeclaration(Declaration* decl) { + Comment cmnt(masm_, "[ Declaration"); + Variable* var = decl->proxy()->var(); + ASSERT(var != NULL); // Must have been resolved. + Slot* slot = var->slot(); + ASSERT(slot != NULL); // No global declarations here. + + // We have 3 cases for slots: LOOKUP, LOCAL, CONTEXT. + switch (slot->type()) { + case Slot::LOOKUP: { + __ push(rsi); + __ Push(var->name()); + // Declaration nodes are always introduced in one of two modes. + ASSERT(decl->mode() == Variable::VAR || decl->mode() == Variable::CONST); + PropertyAttributes attr = decl->mode() == Variable::VAR ? + NONE : READ_ONLY; + __ Push(Smi::FromInt(attr)); + // Push initial value, if any. + // Note: For variables we must not push an initial value (such as + // 'undefined') because we may have a (legal) redeclaration and we + // must not destroy the current value. + if (decl->mode() == Variable::CONST) { + __ Push(Factory::the_hole_value()); + } else if (decl->fun() != NULL) { + Visit(decl->fun()); + } else { + __ Push(Smi::FromInt(0)); // no initial value! 
+ } + __ CallRuntime(Runtime::kDeclareContextSlot, 4); + break; + } + case Slot::LOCAL: + if (decl->mode() == Variable::CONST) { + __ Move(Operand(rbp, SlotOffset(var->slot())), + Factory::the_hole_value()); + } else if (decl->fun() != NULL) { + Visit(decl->fun()); + __ pop(Operand(rbp, SlotOffset(var->slot()))); + } + break; + case Slot::CONTEXT: + // The variable in the decl always resides in the current context. + ASSERT(function_->scope()->ContextChainLength(slot->var()->scope()) == 0); + if (decl->mode() == Variable::CONST) { + __ Move(rax, Factory::the_hole_value()); + if (FLAG_debug_code) { + // Check if we have the correct context pointer. + __ movq(rbx, CodeGenerator::ContextOperand( + rsi, Context::FCONTEXT_INDEX)); + __ cmpq(rbx, rsi); + __ Check(equal, "Unexpected declaration in current context."); + } + __ movq(CodeGenerator::ContextOperand(rsi, slot->index()), rax); + // No write barrier since the_hole_value is in old space. + ASSERT(Heap::InNewSpace(*Factory::the_hole_value())); + } else if (decl->fun() != NULL) { + Visit(decl->fun()); + __ pop(rax); + if (FLAG_debug_code) { + // Check if we have the correct context pointer. + __ movq(rbx, CodeGenerator::ContextOperand( + rsi, Context::FCONTEXT_INDEX)); + __ cmpq(rbx, rsi); + __ Check(equal, "Unexpected declaration in current context."); + } + __ movq(CodeGenerator::ContextOperand(rsi, slot->index()), rax); + int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize; + __ RecordWrite(rsi, offset, rax, rcx); + } break; + default: + UNREACHABLE(); } } @@ -180,36 +423,15 @@ void FastCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) { void FastCodeGenerator::VisitReturnStatement(ReturnStatement* stmt) { Comment cmnt(masm_, "[ ReturnStatement"); - SetStatementPosition(stmt); Expression* expr = stmt->expression(); - // Complete the statement based on the type of the subexpression. 
if (expr->AsLiteral() != NULL) { __ Move(rax, expr->AsLiteral()->handle()); } else { Visit(expr); - Move(rax, expr->location()); - } - - if (FLAG_trace) { - __ push(rax); - __ CallRuntime(Runtime::kTraceExit, 1); - } - - __ RecordJSReturn(); - // Do not use the leave instruction here because it is too short to - // patch with the code required by the debugger. - __ movq(rsp, rbp); - __ pop(rbp); - __ ret((function_->scope()->num_parameters() + 1) * kPointerSize); -#ifdef ENABLE_DEBUGGER_SUPPORT - // Add padding that will be overwritten by a debugger breakpoint. We - // have just generated "movq rsp, rbp; pop rbp; ret k" with length 7 - // (3 + 1 + 3). - const int kPadding = Debug::kX64JSReturnSequenceLength - 7; - for (int i = 0; i < kPadding; ++i) { - masm_->int3(); + ASSERT_EQ(Expression::kValue, expr->context()); + __ pop(rax); } -#endif + EmitReturnSequence(stmt->statement_pos()); } @@ -217,7 +439,8 @@ void FastCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) { Comment cmnt(masm_, "[ FunctionLiteral"); // Build the function boilerplate and instantiate it. - Handle<JSFunction> boilerplate = BuildBoilerplate(expr); + Handle<JSFunction> boilerplate = + Compiler::BuildBoilerplate(expr, script_, this); if (HasStackOverflow()) return; ASSERT(boilerplate->IsBoilerplate()); @@ -226,7 +449,7 @@ void FastCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) { __ push(rsi); __ Push(boilerplate); __ CallRuntime(Runtime::kNewClosure, 2); - Move(expr->location(), rax); + Move(expr->context(), rax); } @@ -234,6 +457,7 @@ void FastCodeGenerator::VisitVariableProxy(VariableProxy* expr) { Comment cmnt(masm_, "[ VariableProxy"); Expression* rewrite = expr->var()->rewrite(); if (rewrite == NULL) { + ASSERT(expr->var()->is_global()); Comment cmnt(masm_, "Global variable"); // Use inline caching. Variable name is passed in rcx and the global // object on the stack. 
@@ -241,20 +465,67 @@ void FastCodeGenerator::VisitVariableProxy(VariableProxy* expr) { __ Move(rcx, expr->name()); Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize)); __ Call(ic, RelocInfo::CODE_TARGET_CONTEXT); - // A test rax instruction following the call is used by the IC to // indicate that the inobject property case was inlined. Ensure there // is no test rax instruction here. - DropAndMove(expr->location(), rax); + __ nop(); + + DropAndMove(expr->context(), rax); + } else if (rewrite->AsSlot() != NULL) { + Slot* slot = rewrite->AsSlot(); + switch (slot->type()) { + case Slot::LOCAL: + case Slot::PARAMETER: { + Comment cmnt(masm_, "Stack slot"); + Move(expr->context(), slot); + break; + } + + case Slot::CONTEXT: { + Comment cmnt(masm_, "Context slot"); + int chain_length = + function_->scope()->ContextChainLength(slot->var()->scope()); + if (chain_length > 0) { + // Move up the chain of contexts to the context containing the slot. + __ movq(rax, + Operand(rsi, Context::SlotOffset(Context::CLOSURE_INDEX))); + // Load the function context (which is the incoming, outer context). + __ movq(rax, FieldOperand(rax, JSFunction::kContextOffset)); + for (int i = 1; i < chain_length; i++) { + __ movq(rax, + Operand(rax, Context::SlotOffset(Context::CLOSURE_INDEX))); + __ movq(rax, FieldOperand(rax, JSFunction::kContextOffset)); + } + // The context may be an intermediate context, not a function context. + __ movq(rax, + Operand(rax, Context::SlotOffset(Context::FCONTEXT_INDEX))); + } else { // Slot is in the current function context. + // The context may be an intermediate context, not a function context. 
+          __ movq(rax,
+                  Operand(rsi, Context::SlotOffset(Context::FCONTEXT_INDEX)));
+        }
+        __ movq(rax, Operand(rax, Context::SlotOffset(slot->index())));
+        Move(expr->context(), rax);
+        break;
+      }
+
+      case Slot::LOOKUP:
+        UNREACHABLE();
+        break;
+    }
   } else {
-    Comment cmnt(masm_, "Stack slot");
-    Move(expr->location(), rewrite->AsSlot());
+    // The parameter variable has been rewritten into an explicit access to
+    // the arguments object.
+    Property* property = rewrite->AsProperty();
+    ASSERT_NOT_NULL(property);
+    ASSERT_EQ(expr->context(), property->context());
+    Visit(property);
   }
 }
 
 
 void FastCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
-  Comment cmnt(masm_, "[ RegExp Literal");
+  Comment cmnt(masm_, "[ RegExpLiteral");
   Label done;
   // Registers will be used as follows:
   // rdi = JS function.
@@ -276,7 +547,7 @@ void FastCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
   __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
   // Label done:
   __ bind(&done);
-  Move(expr->location(), rax);
+  Move(expr->context(), rax);
 }
 
 
@@ -329,7 +600,7 @@ void FastCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
       case ObjectLiteral::Property::COMPUTED:
         if (key->handle()->IsSymbol()) {
           Visit(value);
-          ASSERT(value->location().is_value());
+          ASSERT_EQ(Expression::kValue, value->context());
           __ pop(rax);
           __ Move(rcx, key->handle());
           Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
@@ -341,9 +612,9 @@ void FastCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
       case ObjectLiteral::Property::PROTOTYPE:
         __ push(rax);
         Visit(key);
-        ASSERT(key->location().is_value());
+        ASSERT_EQ(Expression::kValue, key->context());
         Visit(value);
-        ASSERT(value->location().is_value());
+        ASSERT_EQ(Expression::kValue, value->context());
         __ CallRuntime(Runtime::kSetProperty, 3);
         __ movq(rax, Operand(rsp, 0));  // Restore result into rax.
break; @@ -351,27 +622,49 @@ void FastCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) { case ObjectLiteral::Property::GETTER: __ push(rax); Visit(key); - ASSERT(key->location().is_value()); + ASSERT_EQ(Expression::kValue, key->context()); __ Push(property->kind() == ObjectLiteral::Property::SETTER ? Smi::FromInt(1) : Smi::FromInt(0)); Visit(value); - ASSERT(value->location().is_value()); + ASSERT_EQ(Expression::kValue, value->context()); __ CallRuntime(Runtime::kDefineAccessor, 4); __ movq(rax, Operand(rsp, 0)); // Restore result into rax. break; default: UNREACHABLE(); } } - switch (expr->location().type()) { - case Location::kUninitialized: + switch (expr->context()) { + case Expression::kUninitialized: UNREACHABLE(); - case Location::kEffect: + case Expression::kEffect: if (result_saved) __ addq(rsp, Immediate(kPointerSize)); break; - case Location::kValue: + case Expression::kValue: + if (!result_saved) __ push(rax); + break; + case Expression::kTest: + if (result_saved) __ pop(rax); + TestAndBranch(rax, true_label_, false_label_); + break; + case Expression::kValueTest: { + Label discard; + if (!result_saved) __ push(rax); + TestAndBranch(rax, true_label_, &discard); + __ bind(&discard); + __ addq(rsp, Immediate(kPointerSize)); + __ jmp(false_label_); + break; + } + case Expression::kTestValue: { + Label discard; if (!result_saved) __ push(rax); + TestAndBranch(rax, &discard, false_label_); + __ bind(&discard); + __ addq(rsp, Immediate(kPointerSize)); + __ jmp(true_label_); break; + } } } @@ -424,7 +717,7 @@ void FastCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) { result_saved = true; } Visit(subexpr); - ASSERT(subexpr->location().is_value()); + ASSERT_EQ(Expression::kValue, subexpr->context()); // Store the subexpression value in the array's elements. __ pop(rax); // Subexpression value. 
@@ -437,81 +730,218 @@ void FastCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) { __ RecordWrite(rbx, offset, rax, rcx); } - switch (expr->location().type()) { - case Location::kUninitialized: + switch (expr->context()) { + case Expression::kUninitialized: UNREACHABLE(); - case Location::kEffect: + case Expression::kEffect: if (result_saved) __ addq(rsp, Immediate(kPointerSize)); break; - case Location::kValue: + case Expression::kValue: + if (!result_saved) __ push(rax); + break; + case Expression::kTest: + if (result_saved) __ pop(rax); + TestAndBranch(rax, true_label_, false_label_); + break; + case Expression::kValueTest: { + Label discard; + if (!result_saved) __ push(rax); + TestAndBranch(rax, true_label_, &discard); + __ bind(&discard); + __ addq(rsp, Immediate(kPointerSize)); + __ jmp(false_label_); + break; + } + case Expression::kTestValue: { + Label discard; if (!result_saved) __ push(rax); + TestAndBranch(rax, &discard, false_label_); + __ bind(&discard); + __ addq(rsp, Immediate(kPointerSize)); + __ jmp(true_label_); break; + } } } -void FastCodeGenerator::VisitAssignment(Assignment* expr) { - Comment cmnt(masm_, "[ Assignment"); - ASSERT(expr->op() == Token::ASSIGN || expr->op() == Token::INIT_VAR); - - // Left-hand side can only be a global or a (parameter or local) slot. +void FastCodeGenerator::EmitVariableAssignment(Assignment* expr) { Variable* var = expr->target()->AsVariableProxy()->AsVariable(); ASSERT(var != NULL); - ASSERT(var->is_global() || var->slot() != NULL); - Expression* rhs = expr->value(); - Location destination = expr->location(); if (var->is_global()) { - // Assignment to a global variable, use inline caching. Right-hand-side - // value is passed in rax, variable name in rcx, and the global object - // on the stack. - - // Code for the right-hand-side expression depends on its type. 
- if (rhs->AsLiteral() != NULL) { - __ Move(rax, rhs->AsLiteral()->handle()); - } else { - ASSERT(rhs->location().is_value()); - Visit(rhs); - __ pop(rax); - } + // Assignment to a global variable. Use inline caching for the + // assignment. Right-hand-side value is passed in rax, variable name in + // rcx, and the global object on the stack. + __ pop(rax); __ Move(rcx, var->name()); __ push(CodeGenerator::GlobalObject()); Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize)); __ Call(ic, RelocInfo::CODE_TARGET); // Overwrite the global object on the stack with the result if needed. - DropAndMove(expr->location(), rax); + DropAndMove(expr->context(), rax); + } else { - // Local or parameter assignment. - - // Code for the right-hand-side expression depends on its type. - if (rhs->AsLiteral() != NULL) { - // Two cases: 'temp <- (var = constant)', or 'var = constant' with a - // discarded result. Always perform the assignment. - __ Move(kScratchRegister, rhs->AsLiteral()->handle()); - __ movq(Operand(rbp, SlotOffset(var->slot())), kScratchRegister); - Move(expr->location(), kScratchRegister); - } else { - ASSERT(rhs->location().is_value()); - Visit(rhs); - switch (expr->location().type()) { - case Location::kUninitialized: - UNREACHABLE(); - case Location::kEffect: - // Case 'var = temp'. Discard right-hand-side temporary. - Move(var->slot(), rhs->location()); - break; - case Location::kValue: - // Case 'temp1 <- (var = temp0)'. Preserve right-hand-side - // temporary on the stack. - __ movq(kScratchRegister, Operand(rsp, 0)); - __ movq(Operand(rbp, SlotOffset(var->slot())), kScratchRegister); - break; + Slot* slot = var->slot(); + ASSERT_NOT_NULL(slot); // Variables rewritten as properties not handled. + switch (slot->type()) { + case Slot::LOCAL: + case Slot::PARAMETER: { + switch (expr->context()) { + case Expression::kUninitialized: + UNREACHABLE(); + case Expression::kEffect: + // Perform assignment and discard value. 
+ __ pop(Operand(rbp, SlotOffset(var->slot()))); + break; + case Expression::kValue: + // Perform assignment and preserve value. + __ movq(rax, Operand(rsp, 0)); + __ movq(Operand(rbp, SlotOffset(var->slot())), rax); + break; + case Expression::kTest: + // Perform assignment and test (and discard) value. + __ pop(rax); + __ movq(Operand(rbp, SlotOffset(var->slot())), rax); + TestAndBranch(rax, true_label_, false_label_); + break; + case Expression::kValueTest: { + Label discard; + __ movq(rax, Operand(rsp, 0)); + __ movq(Operand(rbp, SlotOffset(var->slot())), rax); + TestAndBranch(rax, true_label_, &discard); + __ bind(&discard); + __ addq(rsp, Immediate(kPointerSize)); + __ jmp(false_label_); + break; + } + case Expression::kTestValue: { + Label discard; + __ movq(rax, Operand(rsp, 0)); + __ movq(Operand(rbp, SlotOffset(var->slot())), rax); + TestAndBranch(rax, &discard, false_label_); + __ bind(&discard); + __ addq(rsp, Immediate(kPointerSize)); + __ jmp(true_label_); + break; + } + } + break; } + + case Slot::CONTEXT: { + int chain_length = + function_->scope()->ContextChainLength(slot->var()->scope()); + if (chain_length > 0) { + // Move up the context chain to the context containing the slot. + __ movq(rax, + Operand(rsi, Context::SlotOffset(Context::CLOSURE_INDEX))); + // Load the function context (which is the incoming, outer context). + __ movq(rax, FieldOperand(rax, JSFunction::kContextOffset)); + for (int i = 1; i < chain_length; i++) { + __ movq(rax, + Operand(rax, Context::SlotOffset(Context::CLOSURE_INDEX))); + __ movq(rax, FieldOperand(rax, JSFunction::kContextOffset)); + } + } else { // Slot is in the current context. Generate optimized code. + __ movq(rax, rsi); // RecordWrite destroys the object register. 
+ } + if (FLAG_debug_code) { + __ cmpq(rax, + Operand(rax, Context::SlotOffset(Context::FCONTEXT_INDEX))); + __ Check(equal, "Context Slot chain length wrong."); + } + __ pop(rcx); + __ movq(Operand(rax, Context::SlotOffset(slot->index())), rcx); + + // RecordWrite may destroy all its register arguments. + if (expr->context() == Expression::kValue) { + __ push(rcx); + } else if (expr->context() != Expression::kEffect) { + __ movq(rdx, rcx); + } + int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize; + __ RecordWrite(rax, offset, rcx, rbx); + if (expr->context() != Expression::kEffect && + expr->context() != Expression::kValue) { + Move(expr->context(), rdx); + } + break; + } + + case Slot::LOOKUP: + UNREACHABLE(); + break; } } } +void FastCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) { + // Assignment to a property, using a named store IC. + Property* prop = expr->target()->AsProperty(); + ASSERT(prop != NULL); + ASSERT(prop->key()->AsLiteral() != NULL); + + // If the assignment starts a block of assignments to the same object, + // change to slow case to avoid the quadratic behavior of repeatedly + // adding fast properties. + if (expr->starts_initialization_block()) { + __ push(Operand(rsp, kPointerSize)); // Receiver is under value. + __ CallRuntime(Runtime::kToSlowProperties, 1); + } + + __ pop(rax); + __ Move(rcx, prop->key()->AsLiteral()->handle()); + Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize)); + __ Call(ic, RelocInfo::CODE_TARGET); + + // If the assignment ends an initialization block, revert to fast case. + if (expr->ends_initialization_block()) { + __ push(rax); // Result of assignment, saved even if not needed. + __ push(Operand(rsp, kPointerSize)); // Receiver is under value. 
+    __ CallRuntime(Runtime::kToFastProperties, 1);
+    __ pop(rax);
+  }
+
+  DropAndMove(expr->context(), rax);
+}
+
+
+void FastCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
+  // Assignment to a property, using a keyed store IC.
+
+  // If the assignment starts a block of assignments to the same object,
+  // change to slow case to avoid the quadratic behavior of repeatedly
+  // adding fast properties.
+  if (expr->starts_initialization_block()) {
+    // Receiver is under the key and value.
+    __ push(Operand(rsp, 2 * kPointerSize));
+    __ CallRuntime(Runtime::kToSlowProperties, 1);
+  }
+
+  __ pop(rax);
+  Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+  __ Call(ic, RelocInfo::CODE_TARGET);
+  // This nop signals to the IC that there is no inlined code at the call
+  // site for it to patch.
+  __ nop();
+
+  // If the assignment ends an initialization block, revert to fast case.
+  if (expr->ends_initialization_block()) {
+    __ push(rax);  // Result of assignment, saved even if not needed.
+    // Receiver is under the key and value.
+    __ push(Operand(rsp, 2 * kPointerSize));
+    __ CallRuntime(Runtime::kToFastProperties, 1);
+    __ pop(rax);
+  }
+
+  // Receiver and key are still on stack.
+  __ addq(rsp, Immediate(2 * kPointerSize));
+  Move(expr->context(), rax);
+}
+
+
 void FastCodeGenerator::VisitProperty(Property* expr) {
   Comment cmnt(masm_, "[ Property");
   Expression* key = expr->key();
@@ -523,6 +953,7 @@ void FastCodeGenerator::VisitProperty(Property* expr) {
 
   // Evaluate receiver.
   Visit(expr->obj());
+
   if (key->AsLiteral() != NULL && key->AsLiteral()->handle()->IsSymbol() &&
       !String::cast(*(key->AsLiteral()->handle()))->AsArrayIndex(&dummy)) {
     // Do a NAMED property load.
@@ -530,7 +961,7 @@ void FastCodeGenerator::VisitProperty(Property* expr) { __ Move(rcx, key->AsLiteral()->handle()); Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize)); __ call(ic, RelocInfo::CODE_TARGET); - // By emitting a nop we make sure that we do not have a "test eax,..." + // By emitting a nop we make sure that we do not have a "test rax,..." // instruction after the call it is treated specially by the LoadIC code. __ nop(); } else { @@ -538,79 +969,154 @@ void FastCodeGenerator::VisitProperty(Property* expr) { Visit(expr->key()); Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize)); __ call(ic, RelocInfo::CODE_TARGET); - // By emitting a nop we make sure that we do not have a "test ..." + // By emitting a nop we make sure that we do not have a "test rax,..." // instruction after the call it is treated specially by the LoadIC code. __ nop(); // Drop key left on the stack by IC. __ addq(rsp, Immediate(kPointerSize)); } - switch (expr->location().type()) { - case Location::kUninitialized: - UNREACHABLE(); - case Location::kValue: - __ movq(Operand(rsp, 0), rax); - break; - case Location::kEffect: - __ addq(rsp, Immediate(kPointerSize)); - break; - } + DropAndMove(expr->context(), rax); } -void FastCodeGenerator::VisitCall(Call* expr) { - Expression* fun = expr->expression(); +void FastCodeGenerator::EmitCallWithIC(Call* expr, RelocInfo::Mode reloc_info) { + // Code common for calls using the IC. ZoneList<Expression*>* args = expr->arguments(); - Variable* var = fun->AsVariableProxy()->AsVariable(); - ASSERT(var != NULL && !var->is_this() && var->is_global()); - ASSERT(!var->is_possibly_eval()); - - __ Push(var->name()); - // Push global object (receiver). 
- __ push(CodeGenerator::GlobalObject()); int arg_count = args->length(); for (int i = 0; i < arg_count; i++) { Visit(args->at(i)); - ASSERT(args->at(i)->location().is_value()); + ASSERT_EQ(Expression::kValue, args->at(i)->context()); } - // Record source position for debugger + // Record source position for debugger. SetSourcePosition(expr->position()); // Call the IC initialization code. Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count, NOT_IN_LOOP); - __ call(ic, RelocInfo::CODE_TARGET_CONTEXT); + __ call(ic, reloc_info); // Restore context register. __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset)); // Discard the function left on TOS. - DropAndMove(expr->location(), rax); + DropAndMove(expr->context(), rax); } -void FastCodeGenerator::VisitCallNew(CallNew* node) { +void FastCodeGenerator::EmitCallWithStub(Call* expr) { + // Code common for calls using the call stub. + ZoneList<Expression*>* args = expr->arguments(); + int arg_count = args->length(); + for (int i = 0; i < arg_count; i++) { + Visit(args->at(i)); + } + // Record source position for debugger. + SetSourcePosition(expr->position()); + CallFunctionStub stub(arg_count, NOT_IN_LOOP); + __ CallStub(&stub); + // Restore context register. + __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset)); + // Discard the function left on TOS. + DropAndMove(expr->context(), rax); +} + + +void FastCodeGenerator::VisitCall(Call* expr) { + Comment cmnt(masm_, "[ Call"); + Expression* fun = expr->expression(); + Variable* var = fun->AsVariableProxy()->AsVariable(); + + if (var != NULL && var->is_possibly_eval()) { + // Call to the identifier 'eval'. + UNREACHABLE(); + } else if (var != NULL && !var->is_this() && var->is_global()) { + // Call to a global variable. + __ Push(var->name()); + // Push global object as receiver for the call IC lookup. 
+    __ push(CodeGenerator::GlobalObject());
+    EmitCallWithIC(expr, RelocInfo::CODE_TARGET_CONTEXT);
+  } else if (var != NULL && var->slot() != NULL &&
+             var->slot()->type() == Slot::LOOKUP) {
+    // Call to a lookup slot.
+    UNREACHABLE();
+  } else if (fun->AsProperty() != NULL) {
+    // Call to an object property.
+    Property* prop = fun->AsProperty();
+    Literal* key = prop->key()->AsLiteral();
+    if (key != NULL && key->handle()->IsSymbol()) {
+      // Call to a named property, use call IC.
+      __ Push(key->handle());
+      Visit(prop->obj());
+      EmitCallWithIC(expr, RelocInfo::CODE_TARGET);
+    } else {
+      // Call to a keyed property, use keyed load IC followed by function
+      // call.
+      Visit(prop->obj());
+      Visit(prop->key());
+      // Record source code position for IC call.
+      SetSourcePosition(prop->position());
+      Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+      __ call(ic, RelocInfo::CODE_TARGET);
+      // By emitting a nop we make sure that we do not have a "test rax,..."
+      // instruction after the call it is treated specially by the LoadIC code.
+      __ nop();
+      // Drop key left on the stack by IC.
+      __ addq(rsp, Immediate(kPointerSize));
+      // Pop receiver.
+      __ pop(rbx);
+      // Push result (function).
+      __ push(rax);
+      // Push receiver object on stack.
+      if (prop->is_synthetic()) {
+        __ push(CodeGenerator::GlobalObject());
+      } else {
+        __ push(rbx);
+      }
+      EmitCallWithStub(expr);
+    }
+  } else {
+    // Call to some other expression.  If the expression is an anonymous
+    // function literal not called in a loop, mark it as one that should
+    // also use the fast code generator.
+    FunctionLiteral* lit = fun->AsFunctionLiteral();
+    if (lit != NULL &&
+        lit->name()->Equals(Heap::empty_string()) &&
+        loop_depth() == 0) {
+      lit->set_try_fast_codegen(true);
+    }
+    Visit(fun);
+    // Load global receiver object.
+    __ movq(rbx, CodeGenerator::GlobalObject());
+    __ push(FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
+    // Emit function call.
+ EmitCallWithStub(expr); + } +} + + +void FastCodeGenerator::VisitCallNew(CallNew* expr) { Comment cmnt(masm_, "[ CallNew"); // According to ECMA-262, section 11.2.2, page 44, the function // expression in new calls must be evaluated before the // arguments. // Push function on the stack. - Visit(node->expression()); - ASSERT(node->expression()->location().is_value()); + Visit(expr->expression()); + ASSERT_EQ(Expression::kValue, expr->expression()->context()); // If location is value, already on the stack, // Push global object (receiver). __ push(CodeGenerator::GlobalObject()); // Push the arguments ("left-to-right") on the stack. - ZoneList<Expression*>* args = node->arguments(); + ZoneList<Expression*>* args = expr->arguments(); int arg_count = args->length(); for (int i = 0; i < arg_count; i++) { Visit(args->at(i)); - ASSERT(args->at(i)->location().is_value()); + ASSERT_EQ(Expression::kValue, args->at(i)->context()); // If location is value, it is already on the stack, // so nothing to do here. } // Call the construct call builtin that handles allocation and // constructor invocation. - SetSourcePosition(node->position()); + SetSourcePosition(expr->position()); // Load function, arg_count into rdi and rax. __ Set(rax, arg_count); @@ -621,7 +1127,7 @@ void FastCodeGenerator::VisitCallNew(CallNew* node) { __ Call(construct_builtin, RelocInfo::CONSTRUCT_CALL); // Replace function on TOS with result in rax, or pop it. 
- DropAndMove(node->location(), rax); + DropAndMove(expr->context(), rax); } @@ -636,19 +1142,220 @@ void FastCodeGenerator::VisitCallRuntime(CallRuntime* expr) { int arg_count = args->length(); for (int i = 0; i < arg_count; i++) { Visit(args->at(i)); - ASSERT(args->at(i)->location().is_value()); + ASSERT_EQ(Expression::kValue, args->at(i)->context()); } __ CallRuntime(function, arg_count); - Move(expr->location(), rax); + Move(expr->context(), rax); +} + +void FastCodeGenerator::VisitCountOperation(CountOperation* expr) { + Comment cmnt(masm_, "[ CountOperation"); + VariableProxy* proxy = expr->expression()->AsVariableProxy(); + ASSERT(proxy->AsVariable() != NULL); + ASSERT(proxy->AsVariable()->is_global()); + + Visit(proxy); + __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION); + + switch (expr->context()) { + case Expression::kUninitialized: + UNREACHABLE(); + case Expression::kValue: // Fall through + case Expression::kTest: // Fall through + case Expression::kTestValue: // Fall through + case Expression::kValueTest: + // Duplicate the result on the stack. + __ push(rax); + break; + case Expression::kEffect: + // Do not save result. + break; + } + // Call runtime for +1/-1. + __ push(rax); + __ Push(Smi::FromInt(1)); + if (expr->op() == Token::INC) { + __ CallRuntime(Runtime::kNumberAdd, 2); + } else { + __ CallRuntime(Runtime::kNumberSub, 2); + } + // Call Store IC. + __ Move(rcx, proxy->AsVariable()->name()); + __ push(CodeGenerator::GlobalObject()); + Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize)); + __ call(ic, RelocInfo::CODE_TARGET); + // Restore up stack after store IC + __ addq(rsp, Immediate(kPointerSize)); + + switch (expr->context()) { + case Expression::kUninitialized: + UNREACHABLE(); + case Expression::kEffect: // Fall through + case Expression::kValue: + // Do nothing. Result in either on the stack for value context + // or discarded for effect context. 
+ break; + case Expression::kTest: + __ pop(rax); + TestAndBranch(rax, true_label_, false_label_); + break; + case Expression::kValueTest: { + Label discard; + __ movq(rax, Operand(rsp, 0)); + TestAndBranch(rax, true_label_, &discard); + __ bind(&discard); + __ addq(rsp, Immediate(kPointerSize)); + __ jmp(false_label_); + break; + } + case Expression::kTestValue: { + Label discard; + __ movq(rax, Operand(rsp, 0)); + TestAndBranch(rax, &discard, false_label_); + __ bind(&discard); + __ addq(rsp, Immediate(kPointerSize)); + __ jmp(true_label_); + break; + } + } +} + + +void FastCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) { + switch (expr->op()) { + case Token::VOID: { + Comment cmnt(masm_, "[ UnaryOperation (VOID)"); + Visit(expr->expression()); + ASSERT_EQ(Expression::kEffect, expr->expression()->context()); + switch (expr->context()) { + case Expression::kUninitialized: + UNREACHABLE(); + break; + case Expression::kEffect: + break; + case Expression::kValue: + __ PushRoot(Heap::kUndefinedValueRootIndex); + break; + case Expression::kTestValue: + // Value is false so it's needed. + __ PushRoot(Heap::kUndefinedValueRootIndex); + // Fall through. + case Expression::kTest: // Fall through. 
+ case Expression::kValueTest: + __ jmp(false_label_); + break; + } + break; + } + + case Token::NOT: { + Comment cmnt(masm_, "[ UnaryOperation (NOT)"); + ASSERT_EQ(Expression::kTest, expr->expression()->context()); + + Label push_true; + Label push_false; + Label done; + Label* saved_true = true_label_; + Label* saved_false = false_label_; + switch (expr->context()) { + case Expression::kUninitialized: + UNREACHABLE(); + break; + + case Expression::kValue: + true_label_ = &push_false; + false_label_ = &push_true; + Visit(expr->expression()); + __ bind(&push_true); + __ PushRoot(Heap::kTrueValueRootIndex); + __ jmp(&done); + __ bind(&push_false); + __ PushRoot(Heap::kFalseValueRootIndex); + __ bind(&done); + break; + + case Expression::kEffect: + true_label_ = &done; + false_label_ = &done; + Visit(expr->expression()); + __ bind(&done); + break; + + case Expression::kTest: + true_label_ = saved_false; + false_label_ = saved_true; + Visit(expr->expression()); + break; + + case Expression::kValueTest: + true_label_ = saved_false; + false_label_ = &push_true; + Visit(expr->expression()); + __ bind(&push_true); + __ PushRoot(Heap::kTrueValueRootIndex); + __ jmp(saved_true); + break; + + case Expression::kTestValue: + true_label_ = &push_false; + false_label_ = saved_true; + Visit(expr->expression()); + __ bind(&push_false); + __ PushRoot(Heap::kFalseValueRootIndex); + __ jmp(saved_false); + break; + } + true_label_ = saved_true; + false_label_ = saved_false; + break; + } + + case Token::TYPEOF: { + Comment cmnt(masm_, "[ UnaryOperation (TYPEOF)"); + ASSERT_EQ(Expression::kValue, expr->expression()->context()); + + VariableProxy* proxy = expr->expression()->AsVariableProxy(); + if (proxy != NULL && + !proxy->var()->is_this() && + proxy->var()->is_global()) { + Comment cmnt(masm_, "Global variable"); + __ push(CodeGenerator::GlobalObject()); + __ Move(rcx, proxy->name()); + Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize)); + // Use a regular load, not a 
contextual load, to avoid a reference + // error. + __ Call(ic, RelocInfo::CODE_TARGET); + __ movq(Operand(rsp, 0), rax); + } else if (proxy != NULL && + proxy->var()->slot() != NULL && + proxy->var()->slot()->type() == Slot::LOOKUP) { + __ push(rsi); + __ Push(proxy->name()); + __ CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2); + __ push(rax); + } else { + // This expression cannot throw a reference error at the top level. + Visit(expr->expression()); + } + + __ CallRuntime(Runtime::kTypeof, 1); + Move(expr->context(), rax); + break; + } + + default: + UNREACHABLE(); + } } void FastCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) { + Comment cmnt(masm_, "[ BinaryOperation"); switch (expr->op()) { case Token::COMMA: - ASSERT(expr->left()->location().is_effect()); - ASSERT_EQ(expr->right()->location().type(), expr->location().type()); + ASSERT_EQ(Expression::kEffect, expr->left()->context()); + ASSERT_EQ(expr->context(), expr->right()->context()); Visit(expr->left()); Visit(expr->right()); break; @@ -669,8 +1376,8 @@ void FastCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) { case Token::SHL: case Token::SHR: case Token::SAR: { - ASSERT(expr->left()->location().is_value()); - ASSERT(expr->right()->location().is_value()); + ASSERT_EQ(Expression::kValue, expr->left()->context()); + ASSERT_EQ(Expression::kValue, expr->right()->context()); Visit(expr->left()); Visit(expr->right()); @@ -678,7 +1385,7 @@ void FastCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) { NO_OVERWRITE, NO_GENERIC_BINARY_FLAGS); __ CallStub(&stub); - Move(expr->location(), rax); + Move(expr->context(), rax); break; } @@ -688,93 +1395,163 @@ void FastCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) { } -void FastCodeGenerator::EmitLogicalOperation(BinaryOperation* expr) { - // Compile a short-circuited boolean operation in a non-test context. 
+void FastCodeGenerator::VisitCompareOperation(CompareOperation* expr) { + Comment cmnt(masm_, "[ CompareOperation"); + ASSERT_EQ(Expression::kValue, expr->left()->context()); + ASSERT_EQ(Expression::kValue, expr->right()->context()); + Visit(expr->left()); + Visit(expr->right()); - // Compile (e0 || e1) as if it were - // (let (temp = e0) temp ? temp : e1). - // Compile (e0 && e1) as if it were - // (let (temp = e0) !temp ? temp : e1). + // Convert current context to test context: Pre-test code. + Label push_true; + Label push_false; + Label done; + Label* saved_true = true_label_; + Label* saved_false = false_label_; + switch (expr->context()) { + case Expression::kUninitialized: + UNREACHABLE(); + break; - Label eval_right, done; - Label *left_true, *left_false; // Where to branch to if lhs has that value. - if (expr->op() == Token::OR) { - left_true = &done; - left_false = &eval_right; - } else { - left_true = &eval_right; - left_false = &done; - } - Location destination = expr->location(); - Expression* left = expr->left(); - Expression* right = expr->right(); - - // Use the shared ToBoolean stub to find the boolean value of the - // left-hand subexpression. Load the value into rax to perform some - // inlined checks assumed by the stub. - - // Compile the left-hand value into rax. Put it on the stack if we may - // need it as the value of the whole expression. - if (left->AsLiteral() != NULL) { - __ Move(rax, left->AsLiteral()->handle()); - if (destination.is_value()) __ push(rax); - } else { - Visit(left); - ASSERT(left->location().is_value()); - switch (destination.type()) { - case Location::kUninitialized: - UNREACHABLE(); - case Location::kEffect: - // Pop the left-hand value into rax because we will not need it as the - // final result. - __ pop(rax); - break; - case Location::kValue: - // Copy the left-hand value into rax because we may need it as the - // final result. 
- __ movq(rax, Operand(rsp, 0)); - break; - } - } - // The left-hand value is in rax. It is also on the stack iff the - // destination location is value. + case Expression::kValue: + true_label_ = &push_true; + false_label_ = &push_false; + break; - // Perform fast checks assumed by the stub. - // The undefined value is false. - __ CompareRoot(rax, Heap::kUndefinedValueRootIndex); - __ j(equal, left_false); - __ CompareRoot(rax, Heap::kTrueValueRootIndex); // True is true. - __ j(equal, left_true); - __ CompareRoot(rax, Heap::kFalseValueRootIndex); // False is false. - __ j(equal, left_false); - ASSERT(kSmiTag == 0); - __ SmiCompare(rax, Smi::FromInt(0)); // The smi zero is false. - __ j(equal, left_false); - Condition is_smi = masm_->CheckSmi(rax); // All other smis are true. - __ j(is_smi, left_true); + case Expression::kEffect: + true_label_ = &done; + false_label_ = &done; + break; - // Call the stub for all other cases. - __ push(rax); - ToBooleanStub stub; - __ CallStub(&stub); - __ testq(rax, rax); // The stub returns nonzero for true. - if (expr->op() == Token::OR) { - __ j(not_zero, &done); - } else { - __ j(zero, &done); + case Expression::kTest: + break; + + case Expression::kValueTest: + true_label_ = &push_true; + break; + + case Expression::kTestValue: + false_label_ = &push_false; + break; } + // Convert current context to test context: End pre-test code. - __ bind(&eval_right); - // Discard the left-hand value if present on the stack. - if (destination.is_value()) { - __ addq(rsp, Immediate(kPointerSize)); + switch (expr->op()) { + case Token::IN: { + __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION); + __ CompareRoot(rax, Heap::kTrueValueRootIndex); + __ j(equal, true_label_); + __ jmp(false_label_); + break; + } + + case Token::INSTANCEOF: { + InstanceofStub stub; + __ CallStub(&stub); + __ testq(rax, rax); + __ j(zero, true_label_); // The stub returns 0 for true. 
+ __ jmp(false_label_); + break; + } + + default: { + Condition cc = no_condition; + bool strict = false; + switch (expr->op()) { + case Token::EQ_STRICT: + strict = true; + // Fall through + case Token::EQ: + cc = equal; + __ pop(rax); + __ pop(rdx); + break; + case Token::LT: + cc = less; + __ pop(rax); + __ pop(rdx); + break; + case Token::GT: + // Reverse left and right sizes to obtain ECMA-262 conversion order. + cc = less; + __ pop(rdx); + __ pop(rax); + break; + case Token::LTE: + // Reverse left and right sizes to obtain ECMA-262 conversion order. + cc = greater_equal; + __ pop(rdx); + __ pop(rax); + break; + case Token::GTE: + cc = greater_equal; + __ pop(rax); + __ pop(rdx); + break; + case Token::IN: + case Token::INSTANCEOF: + default: + UNREACHABLE(); + } + + // The comparison stub expects the smi vs. smi case to be handled + // before it is called. + Label slow_case; + __ JumpIfNotBothSmi(rax, rdx, &slow_case); + __ SmiCompare(rdx, rax); + __ j(cc, true_label_); + __ jmp(false_label_); + + __ bind(&slow_case); + CompareStub stub(cc, strict); + __ CallStub(&stub); + __ testq(rax, rax); + __ j(cc, true_label_); + __ jmp(false_label_); + } } - // Save or discard the right-hand value as needed. - Visit(right); - ASSERT_EQ(destination.type(), right->location().type()); - __ bind(&done); + // Convert current context to test context: Post-test code. 
+ switch (expr->context()) { + case Expression::kUninitialized: + UNREACHABLE(); + break; + + case Expression::kValue: + __ bind(&push_true); + __ PushRoot(Heap::kTrueValueRootIndex); + __ jmp(&done); + __ bind(&push_false); + __ PushRoot(Heap::kFalseValueRootIndex); + __ bind(&done); + break; + + case Expression::kEffect: + __ bind(&done); + break; + + case Expression::kTest: + break; + + case Expression::kValueTest: + __ bind(&push_true); + __ PushRoot(Heap::kTrueValueRootIndex); + __ jmp(saved_true); + break; + + case Expression::kTestValue: + __ bind(&push_false); + __ PushRoot(Heap::kFalseValueRootIndex); + __ jmp(saved_false); + break; + } + true_label_ = saved_true; + false_label_ = saved_false; + // Convert current context to test context: End post-test code. } +#undef __ + + } } // namespace v8::internal diff --git a/deps/v8/src/x64/frames-x64.cc b/deps/v8/src/x64/frames-x64.cc index fe224ad998..6a0527cf6d 100644 --- a/deps/v8/src/x64/frames-x64.cc +++ b/deps/v8/src/x64/frames-x64.cc @@ -57,11 +57,7 @@ StackFrame::Type ExitFrame::GetStateForFramePointer(Address fp, State* state) { state->sp = sp; state->pc_address = reinterpret_cast<Address*>(sp - 1 * kPointerSize); // Determine frame type. - if (Memory::Address_at(fp + ExitFrameConstants::kDebugMarkOffset) != 0) { - return EXIT_DEBUG; - } else { - return EXIT; - } + return EXIT; } int JavaScriptFrame::GetProvidedParametersCount() const { @@ -69,10 +65,10 @@ int JavaScriptFrame::GetProvidedParametersCount() const { } -void ExitFrame::Iterate(ObjectVisitor* a) const { - // Exit frames on X64 do not contain any pointers. The arguments - // are traversed as part of the expression stack of the calling - // frame. +void ExitFrame::Iterate(ObjectVisitor* v) const { + v->VisitPointer(&code_slot()); + // The arguments are traversed as part of the expression stack of + // the calling frame. 
} byte* InternalFrame::GetCallerStackPointer() const { diff --git a/deps/v8/src/x64/frames-x64.h b/deps/v8/src/x64/frames-x64.h index eefaa0aeb5..a92b248d88 100644 --- a/deps/v8/src/x64/frames-x64.h +++ b/deps/v8/src/x64/frames-x64.h @@ -63,7 +63,7 @@ class EntryFrameConstants : public AllStatic { class ExitFrameConstants : public AllStatic { public: - static const int kDebugMarkOffset = -2 * kPointerSize; + static const int kCodeOffset = -2 * kPointerSize; static const int kSPOffset = -1 * kPointerSize; static const int kCallerFPOffset = +0 * kPointerSize; diff --git a/deps/v8/src/x64/ic-x64.cc b/deps/v8/src/x64/ic-x64.cc index 2812df1561..1642a0490e 100644 --- a/deps/v8/src/x64/ic-x64.cc +++ b/deps/v8/src/x64/ic-x64.cc @@ -313,7 +313,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { __ cmpl(rax, FieldOperand(rcx, PixelArray::kLengthOffset)); __ j(above_equal, &slow); __ movq(rcx, FieldOperand(rcx, PixelArray::kExternalPointerOffset)); - __ movb(rax, Operand(rcx, rax, times_1, 0)); + __ movzxbq(rax, Operand(rcx, rax, times_1, 0)); __ Integer32ToSmi(rax, rax); __ ret(0); @@ -790,6 +790,8 @@ void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm, // top of FPU stack: value if (array_type == kExternalFloatArray) { __ fstp_s(Operand(rcx, rbx, times_4, 0)); + __ movq(rax, rdx); // Return the original value. + __ ret(0); } else { // Need to perform float-to-int conversion. // Test the top of the FP stack for NaN. 
diff --git a/deps/v8/src/x64/macro-assembler-x64.cc b/deps/v8/src/x64/macro-assembler-x64.cc index b2f69bb7a9..9dea616718 100644 --- a/deps/v8/src/x64/macro-assembler-x64.cc +++ b/deps/v8/src/x64/macro-assembler-x64.cc @@ -286,7 +286,7 @@ void MacroAssembler::Abort(const char* msg) { movq(kScratchRegister, p0, RelocInfo::NONE); push(kScratchRegister); movq(kScratchRegister, - reinterpret_cast<intptr_t>(Smi::FromInt(p1 - p0)), + reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(p1 - p0))), RelocInfo::NONE); push(kScratchRegister); CallRuntime(Runtime::kAbort, 2); @@ -402,9 +402,9 @@ void MacroAssembler::Set(Register dst, int64_t x) { if (x == 0) { xor_(dst, dst); } else if (is_int32(x)) { - movq(dst, Immediate(x)); + movq(dst, Immediate(static_cast<int32_t>(x))); } else if (is_uint32(x)) { - movl(dst, Immediate(x)); + movl(dst, Immediate(static_cast<uint32_t>(x))); } else { movq(dst, x, RelocInfo::NONE); } @@ -416,9 +416,9 @@ void MacroAssembler::Set(const Operand& dst, int64_t x) { xor_(kScratchRegister, kScratchRegister); movq(dst, kScratchRegister); } else if (is_int32(x)) { - movq(dst, Immediate(x)); + movq(dst, Immediate(static_cast<int32_t>(x))); } else if (is_uint32(x)) { - movl(dst, Immediate(x)); + movl(dst, Immediate(static_cast<uint32_t>(x))); } else { movq(kScratchRegister, x, RelocInfo::NONE); movq(dst, kScratchRegister); @@ -1078,7 +1078,7 @@ void MacroAssembler::SmiShiftLeft(Register dst, SmiToInteger32(rcx, src2); // Shift amount specified by lower 5 bits, not six as the shl opcode. and_(rcx, Immediate(0x1f)); - shl(dst); + shl_cl(dst); } @@ -1099,7 +1099,7 @@ void MacroAssembler::SmiShiftLogicalRight(Register dst, } SmiToInteger32(rcx, src2); orl(rcx, Immediate(kSmiShift)); - shr(dst); // Shift is rcx modulo 0x1f + 32. + shr_cl(dst); // Shift is rcx modulo 0x1f + 32. 
shl(dst, Immediate(kSmiShift)); testq(dst, dst); if (src1.is(rcx) || src2.is(rcx)) { @@ -1135,7 +1135,7 @@ void MacroAssembler::SmiShiftArithmeticRight(Register dst, } SmiToInteger32(rcx, src2); orl(rcx, Immediate(kSmiShift)); - sar(dst); // Shift 32 + original rcx & 0x1f. + sar_cl(dst); // Shift 32 + original rcx & 0x1f. shl(dst, Immediate(kSmiShift)); if (src1.is(rcx)) { movq(src1, kScratchRegister); @@ -1787,9 +1787,7 @@ void MacroAssembler::LeaveFrame(StackFrame::Type type) { } -void MacroAssembler::EnterExitFrame(StackFrame::Type type, int result_size) { - ASSERT(type == StackFrame::EXIT || type == StackFrame::EXIT_DEBUG); - +void MacroAssembler::EnterExitFrame(ExitFrame::Mode mode, int result_size) { // Setup the frame structure on the stack. // All constants are relative to the frame pointer of the exit frame. ASSERT(ExitFrameConstants::kCallerSPDisplacement == +2 * kPointerSize); @@ -1801,7 +1799,12 @@ void MacroAssembler::EnterExitFrame(StackFrame::Type type, int result_size) { // Reserve room for entry stack pointer and push the debug marker. ASSERT(ExitFrameConstants::kSPOffset == -1 * kPointerSize); push(Immediate(0)); // saved entry sp, patched before call - push(Immediate(type == StackFrame::EXIT_DEBUG ? 1 : 0)); + if (mode == ExitFrame::MODE_DEBUG) { + push(Immediate(0)); + } else { + movq(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT); + push(kScratchRegister); + } // Save the frame pointer and the context in top. ExternalReference c_entry_fp_address(Top::k_c_entry_fp_address); @@ -1821,7 +1824,7 @@ void MacroAssembler::EnterExitFrame(StackFrame::Type type, int result_size) { #ifdef ENABLE_DEBUGGER_SUPPORT // Save the state of all registers to the stack from the memory // location. This is needed to allow nested break points. - if (type == StackFrame::EXIT_DEBUG) { + if (mode == ExitFrame::MODE_DEBUG) { // TODO(1243899): This should be symmetric to // CopyRegistersFromStackToMemory() but it isn't! 
esp is assumed // correct here, but computed for the other call. Very error @@ -1860,17 +1863,17 @@ void MacroAssembler::EnterExitFrame(StackFrame::Type type, int result_size) { } -void MacroAssembler::LeaveExitFrame(StackFrame::Type type, int result_size) { +void MacroAssembler::LeaveExitFrame(ExitFrame::Mode mode, int result_size) { // Registers: // r15 : argv #ifdef ENABLE_DEBUGGER_SUPPORT // Restore the memory copy of the registers by digging them out from // the stack. This is needed to allow nested break points. - if (type == StackFrame::EXIT_DEBUG) { + if (mode == ExitFrame::MODE_DEBUG) { // It's okay to clobber register rbx below because we don't need // the function pointer after this. const int kCallerSavedSize = kNumJSCallerSaved * kPointerSize; - int kOffset = ExitFrameConstants::kDebugMarkOffset - kCallerSavedSize; + int kOffset = ExitFrameConstants::kCodeOffset - kCallerSavedSize; lea(rbx, Operand(rbp, kOffset)); CopyRegistersFromStackToMemory(rbx, rcx, kJSCallerSaved); } diff --git a/deps/v8/src/x64/macro-assembler-x64.h b/deps/v8/src/x64/macro-assembler-x64.h index 4c2f35bd9c..11cdfc3c4c 100644 --- a/deps/v8/src/x64/macro-assembler-x64.h +++ b/deps/v8/src/x64/macro-assembler-x64.h @@ -106,16 +106,16 @@ class MacroAssembler: public Assembler { void EnterConstructFrame() { EnterFrame(StackFrame::CONSTRUCT); } void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); } - // Enter specific kind of exit frame; either EXIT or - // EXIT_DEBUG. Expects the number of arguments in register rax and + // Enter specific kind of exit frame; either in normal or + // debug mode. Expects the number of arguments in register rax and // sets up the number of arguments in register rdi and the pointer // to the first argument in register rsi. - void EnterExitFrame(StackFrame::Type type, int result_size = 1); + void EnterExitFrame(ExitFrame::Mode mode, int result_size = 1); // Leave the current exit frame. 
Expects/provides the return value in // register rax:rdx (untouched) and the pointer to the first // argument in register rsi. - void LeaveExitFrame(StackFrame::Type type, int result_size = 1); + void LeaveExitFrame(ExitFrame::Mode mode, int result_size = 1); // --------------------------------------------------------------------------- diff --git a/deps/v8/src/x64/regexp-macro-assembler-x64.cc b/deps/v8/src/x64/regexp-macro-assembler-x64.cc index 88636f843e..639f5e95b6 100644 --- a/deps/v8/src/x64/regexp-macro-assembler-x64.cc +++ b/deps/v8/src/x64/regexp-macro-assembler-x64.cc @@ -643,10 +643,10 @@ Handle<Object> RegExpMacroAssemblerX64::GetCode(Handle<String> source) { Label stack_limit_hit; Label stack_ok; - ExternalReference stack_guard_limit = - ExternalReference::address_of_stack_guard_limit(); + ExternalReference stack_limit = + ExternalReference::address_of_stack_limit(); __ movq(rcx, rsp); - __ movq(kScratchRegister, stack_guard_limit); + __ movq(kScratchRegister, stack_limit); __ subq(rcx, Operand(kScratchRegister, 0)); // Handle it if the stack pointer is already below the stack limit. __ j(below_equal, &stack_limit_hit); @@ -1079,7 +1079,7 @@ int RegExpMacroAssemblerX64::CheckStackGuardState(Address* return_address, // If there is a difference, update the object pointer and start and end // addresses in the RegExp stack frame to match the new value. const byte* end_address = frame_entry<const byte* >(re_frame, kInputEnd); - int byte_length = end_address - start_address; + int byte_length = static_cast<int>(end_address - start_address); frame_entry<const String*>(re_frame, kInputString) = *subject; frame_entry<const byte*>(re_frame, kInputStart) = new_address; frame_entry<const byte*>(re_frame, kInputEnd) = new_address + byte_length; @@ -1196,9 +1196,9 @@ void RegExpMacroAssemblerX64::Drop() { void RegExpMacroAssemblerX64::CheckPreemption() { // Check for preemption. 
Label no_preempt; - ExternalReference stack_guard_limit = - ExternalReference::address_of_stack_guard_limit(); - __ load_rax(stack_guard_limit); + ExternalReference stack_limit = + ExternalReference::address_of_stack_limit(); + __ load_rax(stack_limit); __ cmpq(rsp, rax); __ j(above, &no_preempt); diff --git a/deps/v8/src/x64/simulator-x64.h b/deps/v8/src/x64/simulator-x64.h index 998c9095e7..c4f3a85af5 100644 --- a/deps/v8/src/x64/simulator-x64.h +++ b/deps/v8/src/x64/simulator-x64.h @@ -44,6 +44,12 @@ class SimulatorStack : public v8::internal::AllStatic { static inline uintptr_t JsLimitFromCLimit(uintptr_t c_limit) { return c_limit; } + + static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) { + return try_catch_address; + } + + static inline void UnregisterCTryCatch() { } }; // Call the generated regexp code directly. The entry function pointer should @@ -51,4 +57,7 @@ class SimulatorStack : public v8::internal::AllStatic { #define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \ entry(p0, p1, p2, p3, p4, p5, p6) +#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \ + reinterpret_cast<TryCatch*>(try_catch_address) + #endif // V8_X64_SIMULATOR_X64_H_ diff --git a/deps/v8/src/x64/stub-cache-x64.cc b/deps/v8/src/x64/stub-cache-x64.cc index 58a3e0f6a2..584fd2b214 100644 --- a/deps/v8/src/x64/stub-cache-x64.cc +++ b/deps/v8/src/x64/stub-cache-x64.cc @@ -327,7 +327,7 @@ void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm, __ movl(rax, FieldOperand(receiver, String::kLengthOffset)); // rcx is also the receiver. __ lea(rcx, Operand(scratch, String::kLongLengthShift)); - __ shr(rax); // rcx is implicit shift register. 
+ __ shr_cl(rax); __ Integer32ToSmi(rax, rax); __ ret(0); diff --git a/deps/v8/src/x64/virtual-frame-x64.cc b/deps/v8/src/x64/virtual-frame-x64.cc index 781efd14b9..fe65d34a08 100644 --- a/deps/v8/src/x64/virtual-frame-x64.cc +++ b/deps/v8/src/x64/virtual-frame-x64.cc @@ -893,16 +893,15 @@ void VirtualFrame::SyncRange(int begin, int end) { // on the stack. int start = Min(begin, stack_pointer_ + 1); - // Emit normal 'push' instructions for elements above stack pointer - // and use mov instructions if we are below stack pointer. + // If positive we have to adjust the stack pointer. + int delta = end - stack_pointer_; + if (delta > 0) { + stack_pointer_ = end; + __ subq(rsp, Immediate(delta * kPointerSize)); + } + for (int i = start; i <= end; i++) { - if (!elements_[i].is_synced()) { - if (i <= stack_pointer_) { - SyncElementBelowStackPointer(i); - } else { - SyncElementByPushing(i); - } - } + if (!elements_[i].is_synced()) SyncElementBelowStackPointer(i); } } diff --git a/deps/v8/src/zone.h b/deps/v8/src/zone.h index 4e4f1d7224..0d006dddf2 100644 --- a/deps/v8/src/zone.h +++ b/deps/v8/src/zone.h @@ -118,7 +118,7 @@ class Zone { class ZoneObject { public: // Allocate a new ZoneObject of 'size' bytes in the Zone. 
- void* operator new(size_t size) { return Zone::New(size); } + void* operator new(size_t size) { return Zone::New(static_cast<int>(size)); } // Ideally, the delete operator should be private instead of // public, but unfortunately the compiler sometimes synthesizes diff --git a/deps/v8/test/cctest/SConscript b/deps/v8/test/cctest/SConscript index f041041c11..e6c81d80e4 100644 --- a/deps/v8/test/cctest/SConscript +++ b/deps/v8/test/cctest/SConscript @@ -34,6 +34,7 @@ Import('context object_files') SOURCES = { 'all': [ + 'test-accessors.cc', 'test-alloc.cc', 'test-api.cc', 'test-ast.cc', @@ -51,6 +52,7 @@ SOURCES = { 'test-log.cc', 'test-log-utils.cc', 'test-mark-compact.cc', + 'test-parsing.cc', 'test-regexp.cc', 'test-serialize.cc', 'test-sockets.cc', diff --git a/deps/v8/test/cctest/cctest.cc b/deps/v8/test/cctest/cctest.cc index 82a33e6da5..f638ed480f 100644 --- a/deps/v8/test/cctest/cctest.cc +++ b/deps/v8/test/cctest/cctest.cc @@ -121,3 +121,6 @@ int main(int argc, char* argv[]) { v8::V8::Dispose(); return 0; } + +RegisterThreadedTest *RegisterThreadedTest::first_ = NULL; +int RegisterThreadedTest::count_ = 0; diff --git a/deps/v8/test/cctest/cctest.h b/deps/v8/test/cctest/cctest.h index a95645e010..404b692b27 100644 --- a/deps/v8/test/cctest/cctest.h +++ b/deps/v8/test/cctest/cctest.h @@ -28,6 +28,8 @@ #ifndef CCTEST_H_ #define CCTEST_H_ +#include "v8.h" + #ifndef TEST #define TEST(Name) \ static void Test##Name(); \ @@ -72,4 +74,138 @@ class CcTest { CcTest* prev_; }; +// Switches between all the Api tests using the threading support. +// In order to get a surprising but repeatable pattern of thread +// switching it has extra semaphores to control the order in which +// the tests alternate, not relying solely on the big V8 lock. +// +// A test is augmented with calls to ApiTestFuzzer::Fuzz() in its +// callbacks. This will have no effect when we are not running the +// thread fuzzing test. 
In the thread fuzzing test it will +// pseudorandomly select a successor thread and switch execution +// to that thread, suspending the current test. +class ApiTestFuzzer: public v8::internal::Thread { + public: + void CallTest(); + explicit ApiTestFuzzer(int num) + : test_number_(num), + gate_(v8::internal::OS::CreateSemaphore(0)), + active_(true) { + } + ~ApiTestFuzzer() { delete gate_; } + + // The ApiTestFuzzer is also a Thread, so it has a Run method. + virtual void Run(); + + enum PartOfTest { FIRST_PART, SECOND_PART }; + + static void Setup(PartOfTest part); + static void RunAllTests(); + static void TearDown(); + // This method switches threads if we are running the Threading test. + // Otherwise it does nothing. + static void Fuzz(); + private: + static bool fuzzing_; + static int tests_being_run_; + static int current_; + static int active_tests_; + static bool NextThread(); + int test_number_; + v8::internal::Semaphore* gate_; + bool active_; + void ContextSwitch(); + static int GetNextTestNumber(); + static v8::internal::Semaphore* all_tests_done_; +}; + + +#define THREADED_TEST(Name) \ + static void Test##Name(); \ + RegisterThreadedTest register_##Name(Test##Name, #Name); \ + /* */ TEST(Name) + + +class RegisterThreadedTest { + public: + explicit RegisterThreadedTest(CcTest::TestFunction* callback, + const char* name) + : fuzzer_(NULL), callback_(callback), name_(name) { + prev_ = first_; + first_ = this; + count_++; + } + static int count() { return count_; } + static RegisterThreadedTest* nth(int i) { + CHECK(i < count()); + RegisterThreadedTest* current = first_; + while (i > 0) { + i--; + current = current->prev_; + } + return current; + } + CcTest::TestFunction* callback() { return callback_; } + ApiTestFuzzer* fuzzer_; + const char* name() { return name_; } + + private: + static RegisterThreadedTest* first_; + static int count_; + CcTest::TestFunction* callback_; + RegisterThreadedTest* prev_; + const char* name_; +}; + + +// A LocalContext 
holds a reference to a v8::Context. +class LocalContext { + public: + LocalContext(v8::ExtensionConfiguration* extensions = 0, + v8::Handle<v8::ObjectTemplate> global_template = + v8::Handle<v8::ObjectTemplate>(), + v8::Handle<v8::Value> global_object = v8::Handle<v8::Value>()) + : context_(v8::Context::New(extensions, global_template, global_object)) { + context_->Enter(); + } + + virtual ~LocalContext() { + context_->Exit(); + context_.Dispose(); + } + + v8::Context* operator->() { return *context_; } + v8::Context* operator*() { return *context_; } + bool IsReady() { return !context_.IsEmpty(); } + + v8::Local<v8::Context> local() { + return v8::Local<v8::Context>::New(context_); + } + + private: + v8::Persistent<v8::Context> context_; +}; + + +static inline v8::Local<v8::Value> v8_num(double x) { + return v8::Number::New(x); +} + + +static inline v8::Local<v8::String> v8_str(const char* x) { + return v8::String::New(x); +} + + +static inline v8::Local<v8::Script> v8_compile(const char* x) { + return v8::Script::Compile(v8_str(x)); +} + + +// Helper function that compiles and runs the source. +static inline v8::Local<v8::Value> CompileRun(const char* source) { + return v8::Script::Compile(v8::String::New(source))->Run(); +} + + #endif // ifndef CCTEST_H_ diff --git a/deps/v8/test/cctest/cctest.status b/deps/v8/test/cctest/cctest.status index 6ce241ff1e..a143cbdab2 100644 --- a/deps/v8/test/cctest/cctest.status +++ b/deps/v8/test/cctest/cctest.status @@ -33,12 +33,6 @@ test-debug/DebuggerAgent: PASS, (PASS || FAIL) if $system == linux # BUG(382): Weird test. Can't guarantee that it never times out. test-api/ApplyInterruption: PASS || TIMEOUT -# This is about to go away anyway since new snapshot code is on the way. -test-serialize/Deserialize: FAIL -test-serialize/DeserializeAndRunScript: FAIL -test-serialize/DeserializeNatives: FAIL -test-serialize/DeserializeExtensions: FAIL - # These tests always fail. They are here to test test.py. 
If # they don't fail then test.py has failed. test-serialize/TestThatAlwaysFails: FAIL @@ -47,13 +41,6 @@ test-serialize/DependentTestThatAlwaysFails: FAIL [ $arch == arm ] -# New serialization doesn't work on ARM yet. -test-serialize/Deserialize2: SKIP -test-serialize/DeserializeAndRunScript2: SKIP - -# BUG(113): Test seems flaky on ARM. -test-spaces/LargeObjectSpace: PASS || FAIL - # BUG(240): Test seems flaky on ARM. test-api/RegExpInterruption: SKIP @@ -65,11 +52,3 @@ test-api/OutOfMemoryNested: SKIP # BUG(355): Test crashes on ARM. test-log/ProfLazyMode: SKIP - -[ $simulator == arm ] - -# BUG(271): During exception propagation, we compare pointers into the -# stack. These tests fail on the ARM simulator because the C++ and -# the JavaScript stacks are separate. -test-api/ExceptionOrder: FAIL -test-api/TryCatchInTryFinally: FAIL diff --git a/deps/v8/test/cctest/test-accessors.cc b/deps/v8/test/cctest/test-accessors.cc new file mode 100644 index 0000000000..25f5c39518 --- /dev/null +++ b/deps/v8/test/cctest/test-accessors.cc @@ -0,0 +1,450 @@ +// Copyright 2009 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#include <stdlib.h> + +#include "v8.h" + +#include "api.h" +#include "cctest.h" +#include "frames-inl.h" +#include "string-stream.h" + +using ::v8::ObjectTemplate; +using ::v8::Value; +using ::v8::Context; +using ::v8::Local; +using ::v8::String; +using ::v8::Script; +using ::v8::Function; +using ::v8::AccessorInfo; +using ::v8::Extension; + +namespace i = ::v8::internal; + +static v8::Handle<Value> handle_property(Local<String> name, + const AccessorInfo&) { + ApiTestFuzzer::Fuzz(); + return v8_num(900); +} + + +THREADED_TEST(PropertyHandler) { + v8::HandleScope scope; + Local<v8::FunctionTemplate> fun_templ = v8::FunctionTemplate::New(); + fun_templ->InstanceTemplate()->SetAccessor(v8_str("foo"), handle_property); + LocalContext env; + Local<Function> fun = fun_templ->GetFunction(); + env->Global()->Set(v8_str("Fun"), fun); + Local<Script> getter = v8_compile("var obj = new Fun(); obj.foo;"); + CHECK_EQ(900, getter->Run()->Int32Value()); + Local<Script> setter = v8_compile("obj.foo = 901;"); + CHECK_EQ(901, setter->Run()->Int32Value()); +} + + +static v8::Handle<Value> GetIntValue(Local<String> property, + const AccessorInfo& info) { + ApiTestFuzzer::Fuzz(); + int* value = 
+ static_cast<int*>(v8::Handle<v8::External>::Cast(info.Data())->Value()); + return v8_num(*value); +} + + +static void SetIntValue(Local<String> property, + Local<Value> value, + const AccessorInfo& info) { + int* field = + static_cast<int*>(v8::Handle<v8::External>::Cast(info.Data())->Value()); + *field = value->Int32Value(); +} + +int foo, bar, baz; + +THREADED_TEST(GlobalVariableAccess) { + foo = 0; + bar = -4; + baz = 10; + v8::HandleScope scope; + v8::Handle<v8::FunctionTemplate> templ = v8::FunctionTemplate::New(); + templ->InstanceTemplate()->SetAccessor(v8_str("foo"), + GetIntValue, + SetIntValue, + v8::External::New(&foo)); + templ->InstanceTemplate()->SetAccessor(v8_str("bar"), + GetIntValue, + SetIntValue, + v8::External::New(&bar)); + templ->InstanceTemplate()->SetAccessor(v8_str("baz"), + GetIntValue, + SetIntValue, + v8::External::New(&baz)); + LocalContext env(0, templ->InstanceTemplate()); + v8_compile("foo = (++bar) + baz")->Run(); + CHECK_EQ(bar, -3); + CHECK_EQ(foo, 7); +} + + +static int x_register = 0; +static v8::Handle<v8::Object> x_receiver; +static v8::Handle<v8::Object> x_holder; + + +static v8::Handle<Value> XGetter(Local<String> name, const AccessorInfo& info) { + ApiTestFuzzer::Fuzz(); + CHECK_EQ(x_receiver, info.This()); + CHECK_EQ(x_holder, info.Holder()); + return v8_num(x_register); +} + + +static void XSetter(Local<String> name, + Local<Value> value, + const AccessorInfo& info) { + CHECK_EQ(x_holder, info.This()); + CHECK_EQ(x_holder, info.Holder()); + x_register = value->Int32Value(); +} + + +THREADED_TEST(AccessorIC) { + v8::HandleScope scope; + v8::Handle<v8::ObjectTemplate> obj = ObjectTemplate::New(); + obj->SetAccessor(v8_str("x"), XGetter, XSetter); + LocalContext context; + x_holder = obj->NewInstance(); + context->Global()->Set(v8_str("holder"), x_holder); + x_receiver = v8::Object::New(); + context->Global()->Set(v8_str("obj"), x_receiver); + v8::Handle<v8::Array> array = v8::Handle<v8::Array>::Cast(CompileRun( + 
"obj.__proto__ = holder;" + "var result = [];" + "for (var i = 0; i < 10; i++) {" + " holder.x = i;" + " result.push(obj.x);" + "}" + "result")); + CHECK_EQ(10, array->Length()); + for (int i = 0; i < 10; i++) { + v8::Handle<Value> entry = array->Get(v8::Integer::New(i)); + CHECK_EQ(v8::Integer::New(i), entry); + } +} + + +static v8::Handle<Value> AccessorProhibitsOverwritingGetter( + Local<String> name, + const AccessorInfo& info) { + ApiTestFuzzer::Fuzz(); + return v8::True(); +} + + +THREADED_TEST(AccessorProhibitsOverwriting) { + v8::HandleScope scope; + LocalContext context; + Local<ObjectTemplate> templ = ObjectTemplate::New(); + templ->SetAccessor(v8_str("x"), + AccessorProhibitsOverwritingGetter, + 0, + v8::Handle<Value>(), + v8::PROHIBITS_OVERWRITING, + v8::ReadOnly); + Local<v8::Object> instance = templ->NewInstance(); + context->Global()->Set(v8_str("obj"), instance); + Local<Value> value = CompileRun( + "obj.__defineGetter__('x', function() { return false; });" + "obj.x"); + CHECK(value->BooleanValue()); + value = CompileRun( + "var setter_called = false;" + "obj.__defineSetter__('x', function() { setter_called = true; });" + "obj.x = 42;" + "setter_called"); + CHECK(!value->BooleanValue()); + value = CompileRun( + "obj2 = {};" + "obj2.__proto__ = obj;" + "obj2.__defineGetter__('x', function() { return false; });" + "obj2.x"); + CHECK(value->BooleanValue()); + value = CompileRun( + "var setter_called = false;" + "obj2 = {};" + "obj2.__proto__ = obj;" + "obj2.__defineSetter__('x', function() { setter_called = true; });" + "obj2.x = 42;" + "setter_called"); + CHECK(!value->BooleanValue()); +} + + +template <int C> +static v8::Handle<Value> HandleAllocatingGetter(Local<String> name, + const AccessorInfo& info) { + ApiTestFuzzer::Fuzz(); + for (int i = 0; i < C; i++) + v8::String::New("foo"); + return v8::String::New("foo"); +} + + +THREADED_TEST(HandleScopePop) { + v8::HandleScope scope; + v8::Handle<v8::ObjectTemplate> obj = ObjectTemplate::New(); + 
obj->SetAccessor(v8_str("one"), HandleAllocatingGetter<1>); + obj->SetAccessor(v8_str("many"), HandleAllocatingGetter<1024>); + LocalContext context; + v8::Handle<v8::Object> inst = obj->NewInstance(); + context->Global()->Set(v8::String::New("obj"), inst); + int count_before = i::HandleScope::NumberOfHandles(); + { + v8::HandleScope scope; + CompileRun( + "for (var i = 0; i < 1000; i++) {" + " obj.one;" + " obj.many;" + "}"); + } + int count_after = i::HandleScope::NumberOfHandles(); + CHECK_EQ(count_before, count_after); +} + +static v8::Handle<Value> CheckAccessorArgsCorrect(Local<String> name, + const AccessorInfo& info) { + CHECK(info.This() == info.Holder()); + CHECK(info.Data()->Equals(v8::String::New("data"))); + ApiTestFuzzer::Fuzz(); + CHECK(info.This() == info.Holder()); + CHECK(info.Data()->Equals(v8::String::New("data"))); + i::Heap::CollectAllGarbage(true); + CHECK(info.This() == info.Holder()); + CHECK(info.Data()->Equals(v8::String::New("data"))); + return v8::Integer::New(17); +} + +THREADED_TEST(DirectCall) { + v8::HandleScope scope; + v8::Handle<v8::ObjectTemplate> obj = ObjectTemplate::New(); + obj->SetAccessor(v8_str("xxx"), + CheckAccessorArgsCorrect, + NULL, + v8::String::New("data")); + LocalContext context; + v8::Handle<v8::Object> inst = obj->NewInstance(); + context->Global()->Set(v8::String::New("obj"), inst); + Local<Script> scr = v8::Script::Compile(v8::String::New("obj.xxx")); + for (int i = 0; i < 10; i++) { + Local<Value> result = scr->Run(); + CHECK(!result.IsEmpty()); + CHECK_EQ(17, result->Int32Value()); + } +} + +static v8::Handle<Value> EmptyGetter(Local<String> name, + const AccessorInfo& info) { + CheckAccessorArgsCorrect(name, info); + ApiTestFuzzer::Fuzz(); + CheckAccessorArgsCorrect(name, info); + return v8::Handle<v8::Value>(); +} + +THREADED_TEST(EmptyResult) { + v8::HandleScope scope; + v8::Handle<v8::ObjectTemplate> obj = ObjectTemplate::New(); + obj->SetAccessor(v8_str("xxx"), EmptyGetter, NULL, 
v8::String::New("data")); + LocalContext context; + v8::Handle<v8::Object> inst = obj->NewInstance(); + context->Global()->Set(v8::String::New("obj"), inst); + Local<Script> scr = v8::Script::Compile(v8::String::New("obj.xxx")); + for (int i = 0; i < 10; i++) { + Local<Value> result = scr->Run(); + CHECK(result == v8::Undefined()); + } +} + + +THREADED_TEST(NoReuseRegress) { + // Check that the IC generated for the one test doesn't get reused + // for the other. + v8::HandleScope scope; + { + v8::Handle<v8::ObjectTemplate> obj = ObjectTemplate::New(); + obj->SetAccessor(v8_str("xxx"), EmptyGetter, NULL, v8::String::New("data")); + LocalContext context; + v8::Handle<v8::Object> inst = obj->NewInstance(); + context->Global()->Set(v8::String::New("obj"), inst); + Local<Script> scr = v8::Script::Compile(v8::String::New("obj.xxx")); + for (int i = 0; i < 2; i++) { + Local<Value> result = scr->Run(); + CHECK(result == v8::Undefined()); + } + } + { + v8::Handle<v8::ObjectTemplate> obj = ObjectTemplate::New(); + obj->SetAccessor(v8_str("xxx"), + CheckAccessorArgsCorrect, + NULL, + v8::String::New("data")); + LocalContext context; + v8::Handle<v8::Object> inst = obj->NewInstance(); + context->Global()->Set(v8::String::New("obj"), inst); + Local<Script> scr = v8::Script::Compile(v8::String::New("obj.xxx")); + for (int i = 0; i < 10; i++) { + Local<Value> result = scr->Run(); + CHECK(!result.IsEmpty()); + CHECK_EQ(17, result->Int32Value()); + } + } +} + +static v8::Handle<Value> ThrowingGetAccessor(Local<String> name, + const AccessorInfo& info) { + ApiTestFuzzer::Fuzz(); + return v8::ThrowException(v8_str("g")); +} + + +static void ThrowingSetAccessor(Local<String> name, + Local<Value> value, + const AccessorInfo& info) { + v8::ThrowException(value); +} + + +THREADED_TEST(Regress1054726) { + v8::HandleScope scope; + v8::Handle<v8::ObjectTemplate> obj = ObjectTemplate::New(); + obj->SetAccessor(v8_str("x"), + ThrowingGetAccessor, + ThrowingSetAccessor, + Local<Value>()); + + 
LocalContext env; + env->Global()->Set(v8_str("obj"), obj->NewInstance()); + + // Use the throwing property setter/getter in a loop to force + // the accessor ICs to be initialized. + v8::Handle<Value> result; + result = Script::Compile(v8_str( + "var result = '';" + "for (var i = 0; i < 5; i++) {" + " try { obj.x; } catch (e) { result += e; }" + "}; result"))->Run(); + CHECK_EQ(v8_str("ggggg"), result); + + result = Script::Compile(String::New( + "var result = '';" + "for (var i = 0; i < 5; i++) {" + " try { obj.x = i; } catch (e) { result += e; }" + "}; result"))->Run(); + CHECK_EQ(v8_str("01234"), result); +} + + +static v8::Handle<Value> AllocGetter(Local<String> name, + const AccessorInfo& info) { + ApiTestFuzzer::Fuzz(); + return v8::Array::New(1000); +} + + +THREADED_TEST(Gc) { + v8::HandleScope scope; + v8::Handle<v8::ObjectTemplate> obj = ObjectTemplate::New(); + obj->SetAccessor(v8_str("xxx"), AllocGetter); + LocalContext env; + env->Global()->Set(v8_str("obj"), obj->NewInstance()); + Script::Compile(String::New( + "var last = [];" + "for (var i = 0; i < 2048; i++) {" + " var result = obj.xxx;" + " result[0] = last;" + " last = result;" + "}"))->Run(); +} + + +static v8::Handle<Value> StackCheck(Local<String> name, + const AccessorInfo& info) { + i::StackFrameIterator iter; + for (int i = 0; !iter.done(); i++) { + i::StackFrame* frame = iter.frame(); + CHECK(i != 0 || (frame->type() == i::StackFrame::EXIT)); + CHECK(frame->code()->IsCode()); + i::Address pc = frame->pc(); + i::Code* code = frame->code(); + CHECK(code->contains(pc)); + iter.Advance(); + } + return v8::Undefined(); +} + + +THREADED_TEST(StackIteration) { + v8::HandleScope scope; + v8::Handle<v8::ObjectTemplate> obj = ObjectTemplate::New(); + i::StringStream::ClearMentionedObjectCache(); + obj->SetAccessor(v8_str("xxx"), StackCheck); + LocalContext env; + env->Global()->Set(v8_str("obj"), obj->NewInstance()); + Script::Compile(String::New( + "function foo() {" + " return obj.xxx;" + "}" + 
"for (var i = 0; i < 100; i++) {" + " foo();" + "}"))->Run(); +} + + +static v8::Handle<Value> AllocateHandles(Local<String> name, + const AccessorInfo& info) { + for (int i = 0; i < i::kHandleBlockSize + 1; i++) { + v8::Local<v8::Value>::New(name); + } + return v8::Integer::New(100); +} + + +THREADED_TEST(HandleScopeSegment) { + // Check that we can return values past popping of handle scope + // segments. + v8::HandleScope scope; + v8::Handle<v8::ObjectTemplate> obj = ObjectTemplate::New(); + obj->SetAccessor(v8_str("xxx"), AllocateHandles); + LocalContext env; + env->Global()->Set(v8_str("obj"), obj->NewInstance()); + v8::Handle<v8::Value> result = Script::Compile(String::New( + "var result;" + "for (var i = 0; i < 4; i++)" + " result = obj.xxx;" + "result;"))->Run(); + CHECK_EQ(100, result->Int32Value()); +} diff --git a/deps/v8/test/cctest/test-alloc.cc b/deps/v8/test/cctest/test-alloc.cc index 1235b13b2d..7921d2abe4 100644 --- a/deps/v8/test/cctest/test-alloc.cc +++ b/deps/v8/test/cctest/test-alloc.cc @@ -195,9 +195,9 @@ TEST(CodeRange) { Pseudorandom() % 5000 + 1; size_t allocated = 0; void* base = CodeRange::AllocateRawMemory(requested, &allocated); - blocks.Add(Block(base, allocated)); - current_allocated += allocated; - total_allocated += allocated; + blocks.Add(Block(base, static_cast<int>(allocated))); + current_allocated += static_cast<int>(allocated); + total_allocated += static_cast<int>(allocated); } else { // Free a block. 
int index = Pseudorandom() % blocks.length(); diff --git a/deps/v8/test/cctest/test-api.cc b/deps/v8/test/cctest/test-api.cc index 1d4b2c34c3..6791685e17 100644 --- a/deps/v8/test/cctest/test-api.cc +++ b/deps/v8/test/cctest/test-api.cc @@ -38,6 +38,8 @@ #include "utils.h" #include "cctest.h" +static const bool kLogThreading = false; + static bool IsNaN(double x) { #ifdef WIN32 return _isnan(x); @@ -58,131 +60,6 @@ using ::v8::Extension; namespace i = ::v8::internal; -static Local<Value> v8_num(double x) { - return v8::Number::New(x); -} - - -static Local<String> v8_str(const char* x) { - return String::New(x); -} - - -static Local<Script> v8_compile(const char* x) { - return Script::Compile(v8_str(x)); -} - - -// A LocalContext holds a reference to a v8::Context. -class LocalContext { - public: - LocalContext(v8::ExtensionConfiguration* extensions = 0, - v8::Handle<ObjectTemplate> global_template = - v8::Handle<ObjectTemplate>(), - v8::Handle<Value> global_object = v8::Handle<Value>()) - : context_(Context::New(extensions, global_template, global_object)) { - context_->Enter(); - } - - virtual ~LocalContext() { - context_->Exit(); - context_.Dispose(); - } - - Context* operator->() { return *context_; } - Context* operator*() { return *context_; } - Local<Context> local() { return Local<Context>::New(context_); } - bool IsReady() { return !context_.IsEmpty(); } - - private: - v8::Persistent<Context> context_; -}; - - -// Switches between all the Api tests using the threading support. -// In order to get a surprising but repeatable pattern of thread -// switching it has extra semaphores to control the order in which -// the tests alternate, not relying solely on the big V8 lock. -// -// A test is augmented with calls to ApiTestFuzzer::Fuzz() in its -// callbacks. This will have no effect when we are not running the -// thread fuzzing test. 
In the thread fuzzing test it will -// pseudorandomly select a successor thread and switch execution -// to that thread, suspending the current test. -class ApiTestFuzzer: public v8::internal::Thread { - public: - void CallTest(); - explicit ApiTestFuzzer(int num) - : test_number_(num), - gate_(v8::internal::OS::CreateSemaphore(0)), - active_(true) { - } - ~ApiTestFuzzer() { delete gate_; } - - // The ApiTestFuzzer is also a Thread, so it has a Run method. - virtual void Run(); - - enum PartOfTest { FIRST_PART, SECOND_PART }; - - static void Setup(PartOfTest part); - static void RunAllTests(); - static void TearDown(); - // This method switches threads if we are running the Threading test. - // Otherwise it does nothing. - static void Fuzz(); - private: - static bool fuzzing_; - static int tests_being_run_; - static int current_; - static int active_tests_; - static bool NextThread(); - int test_number_; - v8::internal::Semaphore* gate_; - bool active_; - void ContextSwitch(); - static int GetNextTestNumber(); - static v8::internal::Semaphore* all_tests_done_; -}; - - -#define THREADED_TEST(Name) \ - static void Test##Name(); \ - RegisterThreadedTest register_##Name(Test##Name); \ - /* */ TEST(Name) - - -class RegisterThreadedTest { - public: - explicit RegisterThreadedTest(CcTest::TestFunction* callback) - : fuzzer_(NULL), callback_(callback) { - prev_ = first_; - first_ = this; - count_++; - } - static int count() { return count_; } - static RegisterThreadedTest* nth(int i) { - CHECK(i < count()); - RegisterThreadedTest* current = first_; - while (i > 0) { - i--; - current = current->prev_; - } - return current; - } - CcTest::TestFunction* callback() { return callback_; } - ApiTestFuzzer* fuzzer_; - - private: - static RegisterThreadedTest* first_; - static int count_; - CcTest::TestFunction* callback_; - RegisterThreadedTest* prev_; -}; - - -RegisterThreadedTest *RegisterThreadedTest::first_ = NULL; -int RegisterThreadedTest::count_ = 0; - static int 
signature_callback_count; static v8::Handle<Value> IncrementingSignatureCallback( @@ -231,11 +108,6 @@ THREADED_TEST(Handles) { } -// Helper function that compiles and runs the source. -static Local<Value> CompileRun(const char* source) { - return Script::Compile(String::New(source))->Run(); -} - THREADED_TEST(ReceiverSignature) { v8::HandleScope scope; LocalContext env; @@ -382,9 +254,9 @@ THREADED_TEST(Script) { static uint16_t* AsciiToTwoByteString(const char* source) { - size_t array_length = strlen(source) + 1; + int array_length = i::StrLength(source) + 1; uint16_t* converted = i::NewArray<uint16_t>(array_length); - for (size_t i = 0; i < array_length; i++) converted[i] = source[i]; + for (int i = 0; i < array_length; i++) converted[i] = source[i]; return converted; } @@ -720,27 +592,6 @@ THREADED_TEST(FindInstanceInPrototypeChain) { } -static v8::Handle<Value> handle_property(Local<String> name, - const AccessorInfo&) { - ApiTestFuzzer::Fuzz(); - return v8_num(900); -} - - -THREADED_TEST(PropertyHandler) { - v8::HandleScope scope; - Local<v8::FunctionTemplate> fun_templ = v8::FunctionTemplate::New(); - fun_templ->InstanceTemplate()->SetAccessor(v8_str("foo"), handle_property); - LocalContext env; - Local<Function> fun = fun_templ->GetFunction(); - env->Global()->Set(v8_str("Fun"), fun); - Local<Script> getter = v8_compile("var obj = new Fun(); obj.foo;"); - CHECK_EQ(900, getter->Run()->Int32Value()); - Local<Script> setter = v8_compile("obj.foo = 901;"); - CHECK_EQ(901, setter->Run()->Int32Value()); -} - - THREADED_TEST(TinyInteger) { v8::HandleScope scope; LocalContext env; @@ -907,49 +758,6 @@ THREADED_TEST(GlobalPrototype) { } -static v8::Handle<Value> GetIntValue(Local<String> property, - const AccessorInfo& info) { - ApiTestFuzzer::Fuzz(); - int* value = - static_cast<int*>(v8::Handle<v8::External>::Cast(info.Data())->Value()); - return v8_num(*value); -} - -static void SetIntValue(Local<String> property, - Local<Value> value, - const AccessorInfo& 
info) { - int* field = - static_cast<int*>(v8::Handle<v8::External>::Cast(info.Data())->Value()); - *field = value->Int32Value(); -} - -int foo, bar, baz; - -THREADED_TEST(GlobalVariableAccess) { - foo = 0; - bar = -4; - baz = 10; - v8::HandleScope scope; - v8::Handle<v8::FunctionTemplate> templ = v8::FunctionTemplate::New(); - templ->InstanceTemplate()->SetAccessor(v8_str("foo"), - GetIntValue, - SetIntValue, - v8::External::New(&foo)); - templ->InstanceTemplate()->SetAccessor(v8_str("bar"), - GetIntValue, - SetIntValue, - v8::External::New(&bar)); - templ->InstanceTemplate()->SetAccessor(v8_str("baz"), - GetIntValue, - SetIntValue, - v8::External::New(&baz)); - LocalContext env(0, templ->InstanceTemplate()); - v8_compile("foo = (++bar) + baz")->Run(); - CHECK_EQ(bar, -3); - CHECK_EQ(foo, 7); -} - - THREADED_TEST(ObjectTemplate) { v8::HandleScope scope; Local<ObjectTemplate> templ1 = ObjectTemplate::New(); @@ -1365,50 +1173,6 @@ THREADED_TEST(CallbackExceptionRegression) { } -static v8::Handle<Value> ThrowingGetAccessor(Local<String> name, - const AccessorInfo& info) { - ApiTestFuzzer::Fuzz(); - return v8::ThrowException(v8_str("g")); -} - - -static void ThrowingSetAccessor(Local<String> name, - Local<Value> value, - const AccessorInfo& info) { - v8::ThrowException(value); -} - - -THREADED_TEST(Regress1054726) { - v8::HandleScope scope; - v8::Handle<v8::ObjectTemplate> obj = ObjectTemplate::New(); - obj->SetAccessor(v8_str("x"), - ThrowingGetAccessor, - ThrowingSetAccessor, - Local<Value>()); - - LocalContext env; - env->Global()->Set(v8_str("obj"), obj->NewInstance()); - - // Use the throwing property setter/getter in a loop to force - // the accessor ICs to be initialized. 
- v8::Handle<Value> result; - result = Script::Compile(v8_str( - "var result = '';" - "for (var i = 0; i < 5; i++) {" - " try { obj.x; } catch (e) { result += e; }" - "}; result"))->Run(); - CHECK_EQ(v8_str("ggggg"), result); - - result = Script::Compile(String::New( - "var result = '';" - "for (var i = 0; i < 5; i++) {" - " try { obj.x = i; } catch (e) { result += e; }" - "}; result"))->Run(); - CHECK_EQ(v8_str("01234"), result); -} - - THREADED_TEST(FunctionPrototype) { v8::HandleScope scope; Local<v8::FunctionTemplate> Foo = v8::FunctionTemplate::New(); @@ -1580,17 +1344,10 @@ THREADED_TEST(HiddenProperties) { } +static bool interceptor_for_hidden_properties_called; static v8::Handle<Value> InterceptorForHiddenProperties( Local<String> name, const AccessorInfo& info) { - // Make sure objects move. - bool saved_always_compact = i::FLAG_always_compact; - if (!i::FLAG_never_compact) { - i::FLAG_always_compact = true; - } - // The whole goal of this interceptor is to cause a GC during local property - // lookup. - i::Heap::CollectAllGarbage(false); - i::FLAG_always_compact = saved_always_compact; + interceptor_for_hidden_properties_called = true; return v8::Handle<Value>(); } @@ -1599,6 +1356,8 @@ THREADED_TEST(HiddenPropertiesWithInterceptors) { v8::HandleScope scope; LocalContext context; + interceptor_for_hidden_properties_called = false; + v8::Local<v8::String> key = v8_str("api-test::hidden-key"); // Associate an interceptor with an object and start setting hidden values. 
@@ -1609,6 +1368,7 @@ THREADED_TEST(HiddenPropertiesWithInterceptors) { Local<v8::Object> obj = function->NewInstance(); CHECK(obj->SetHiddenValue(key, v8::Integer::New(2302))); CHECK_EQ(2302, obj->GetHiddenValue(key)->Int32Value()); + CHECK(!interceptor_for_hidden_properties_called); } @@ -3157,6 +2917,58 @@ THREADED_TEST(WeakReference) { } +static bool in_scavenge = false; +static int last = -1; + +static void ForceScavenge(v8::Persistent<v8::Value> obj, void* data) { + CHECK_EQ(-1, last); + last = 0; + obj.Dispose(); + obj.Clear(); + in_scavenge = true; + i::Heap::PerformScavenge(); + in_scavenge = false; + *(reinterpret_cast<bool*>(data)) = true; +} + +static void CheckIsNotInvokedInScavenge(v8::Persistent<v8::Value> obj, + void* data) { + CHECK_EQ(0, last); + last = 1; + *(reinterpret_cast<bool*>(data)) = in_scavenge; + obj.Dispose(); + obj.Clear(); +} + +THREADED_TEST(NoWeakRefCallbacksInScavenge) { + // Test verifies that scavenge cannot invoke WeakReferenceCallbacks. + // Calling callbacks from scavenges is unsafe as objects held by those + // handlers might have become strongly reachable, but scavenge doesn't + // check that. 
+ v8::Persistent<Context> context = Context::New(); + Context::Scope context_scope(context); + + v8::Persistent<v8::Object> object_a; + v8::Persistent<v8::Object> object_b; + + { + v8::HandleScope handle_scope; + object_b = v8::Persistent<v8::Object>::New(v8::Object::New()); + object_a = v8::Persistent<v8::Object>::New(v8::Object::New()); + } + + bool object_a_disposed = false; + object_a.MakeWeak(&object_a_disposed, &ForceScavenge); + bool released_in_scavenge = false; + object_b.MakeWeak(&released_in_scavenge, &CheckIsNotInvokedInScavenge); + + while (!object_a_disposed) { + i::Heap::CollectAllGarbage(false); + } + CHECK(!released_in_scavenge); +} + + v8::Handle<Function> args_fun; @@ -3184,53 +2996,6 @@ THREADED_TEST(Arguments) { } -static int x_register = 0; -static v8::Handle<v8::Object> x_receiver; -static v8::Handle<v8::Object> x_holder; - - -static v8::Handle<Value> XGetter(Local<String> name, const AccessorInfo& info) { - ApiTestFuzzer::Fuzz(); - CHECK_EQ(x_receiver, info.This()); - CHECK_EQ(x_holder, info.Holder()); - return v8_num(x_register); -} - - -static void XSetter(Local<String> name, - Local<Value> value, - const AccessorInfo& info) { - CHECK_EQ(x_holder, info.This()); - CHECK_EQ(x_holder, info.Holder()); - x_register = value->Int32Value(); -} - - -THREADED_TEST(AccessorIC) { - v8::HandleScope scope; - v8::Handle<v8::ObjectTemplate> obj = ObjectTemplate::New(); - obj->SetAccessor(v8_str("x"), XGetter, XSetter); - LocalContext context; - x_holder = obj->NewInstance(); - context->Global()->Set(v8_str("holder"), x_holder); - x_receiver = v8::Object::New(); - context->Global()->Set(v8_str("obj"), x_receiver); - v8::Handle<v8::Array> array = v8::Handle<v8::Array>::Cast(CompileRun( - "obj.__proto__ = holder;" - "var result = [];" - "for (var i = 0; i < 10; i++) {" - " holder.x = i;" - " result.push(obj.x);" - "}" - "result")); - CHECK_EQ(10, array->Length()); - for (int i = 0; i < 10; i++) { - v8::Handle<Value> entry = array->Get(v8::Integer::New(i)); - 
CHECK_EQ(v8::Integer::New(i), entry); - } -} - - static v8::Handle<Value> NoBlockGetterX(Local<String> name, const AccessorInfo&) { return v8::Handle<Value>(); @@ -6094,13 +5859,17 @@ void ApiTestFuzzer::Fuzz() { // not start immediately. bool ApiTestFuzzer::NextThread() { int test_position = GetNextTestNumber(); - int test_number = RegisterThreadedTest::nth(current_)->fuzzer_->test_number_; + const char* test_name = RegisterThreadedTest::nth(current_)->name(); if (test_position == current_) { - printf("Stay with %d\n", test_number); + if (kLogThreading) + printf("Stay with %s\n", test_name); return false; } - printf("Switch from %d to %d\n", - current_ < 0 ? 0 : test_number, test_position < 0 ? 0 : test_number); + if (kLogThreading) { + printf("Switch from %s to %s\n", + test_name, + RegisterThreadedTest::nth(test_position)->name()); + } current_ = test_position; RegisterThreadedTest::nth(current_)->fuzzer_->gate_->Signal(); return true; @@ -6209,9 +5978,11 @@ TEST(Threading2) { void ApiTestFuzzer::CallTest() { - printf("Start test %d\n", test_number_); + if (kLogThreading) + printf("Start test %d\n", test_number_); CallTestNumber(test_number_); - printf("End test %d\n", test_number_); + if (kLogThreading) + printf("End test %d\n", test_number_); } @@ -6455,6 +6226,31 @@ THREADED_TEST(DoNotUseDeletedNodesInSecondLevelGc) { i::Heap::CollectAllGarbage(false); } +void DisposingCallback(v8::Persistent<v8::Value> handle, void*) { + handle.Dispose(); +} + +void HandleCreatingCallback(v8::Persistent<v8::Value> handle, void*) { + v8::HandleScope scope; + v8::Persistent<v8::Object>::New(v8::Object::New()); +} + + +THREADED_TEST(NoGlobalHandlesOrphaningDueToWeakCallback) { + LocalContext context; + + v8::Persistent<v8::Object> handle1, handle2, handle3; + { + v8::HandleScope scope; + handle3 = v8::Persistent<v8::Object>::New(v8::Object::New()); + handle2 = v8::Persistent<v8::Object>::New(v8::Object::New()); + handle1 = v8::Persistent<v8::Object>::New(v8::Object::New()); + } 
+ handle2.MakeWeak(NULL, DisposingCallback); + handle3.MakeWeak(NULL, HandleCreatingCallback); + i::Heap::CollectAllGarbage(false); +} + THREADED_TEST(CheckForCrossContextObjectLiterals) { v8::V8::Initialize(); @@ -6699,53 +6495,6 @@ THREADED_TEST(PropertyEnumeration) { } -static v8::Handle<Value> AccessorProhibitsOverwritingGetter( - Local<String> name, - const AccessorInfo& info) { - ApiTestFuzzer::Fuzz(); - return v8::True(); -} - - -THREADED_TEST(AccessorProhibitsOverwriting) { - v8::HandleScope scope; - LocalContext context; - Local<ObjectTemplate> templ = ObjectTemplate::New(); - templ->SetAccessor(v8_str("x"), - AccessorProhibitsOverwritingGetter, - 0, - v8::Handle<Value>(), - v8::PROHIBITS_OVERWRITING, - v8::ReadOnly); - Local<v8::Object> instance = templ->NewInstance(); - context->Global()->Set(v8_str("obj"), instance); - Local<Value> value = CompileRun( - "obj.__defineGetter__('x', function() { return false; });" - "obj.x"); - CHECK(value->BooleanValue()); - value = CompileRun( - "var setter_called = false;" - "obj.__defineSetter__('x', function() { setter_called = true; });" - "obj.x = 42;" - "setter_called"); - CHECK(!value->BooleanValue()); - value = CompileRun( - "obj2 = {};" - "obj2.__proto__ = obj;" - "obj2.__defineGetter__('x', function() { return false; });" - "obj2.x"); - CHECK(value->BooleanValue()); - value = CompileRun( - "var setter_called = false;" - "obj2 = {};" - "obj2.__proto__ = obj;" - "obj2.__defineSetter__('x', function() { setter_called = true; });" - "obj2.x = 42;" - "setter_called"); - CHECK(!value->BooleanValue()); -} - - static bool NamedSetAccessBlocker(Local<v8::Object> obj, Local<Value> name, v8::AccessType type, @@ -6921,7 +6670,8 @@ TEST(PreCompile) { // a workaround for now to make this test not fail. 
v8::V8::Initialize(); const char *script = "function foo(a) { return a+1; }"; - v8::ScriptData *sd = v8::ScriptData::PreCompile(script, strlen(script)); + v8::ScriptData *sd = + v8::ScriptData::PreCompile(script, i::StrLength(script)); CHECK_NE(sd->Length(), 0); CHECK_NE(sd->Data(), NULL); delete sd; @@ -7317,9 +7067,10 @@ THREADED_TEST(MorphCompositeStringTest) { v8::HandleScope scope; LocalContext env; AsciiVectorResource ascii_resource( - i::Vector<const char>(c_string, strlen(c_string))); + i::Vector<const char>(c_string, i::StrLength(c_string))); UC16VectorResource uc16_resource( - i::Vector<const uint16_t>(two_byte_string, strlen(c_string))); + i::Vector<const uint16_t>(two_byte_string, + i::StrLength(c_string))); Local<String> lhs(v8::Utils::ToLocal( i::Factory::NewExternalStringFromAscii(&ascii_resource))); @@ -7377,7 +7128,8 @@ TEST(CompileExternalTwoByteSource) { for (int i = 0; ascii_sources[i] != NULL; i++) { uint16_t* two_byte_string = AsciiToTwoByteString(ascii_sources[i]); UC16VectorResource uc16_resource( - i::Vector<const uint16_t>(two_byte_string, strlen(ascii_sources[i]))); + i::Vector<const uint16_t>(two_byte_string, + i::StrLength(ascii_sources[i]))); v8::Local<v8::String> source = v8::String::NewExternal(&uc16_resource); v8::Script::Compile(source); } @@ -7863,18 +7615,18 @@ THREADED_TEST(Regress16276) { THREADED_TEST(PixelArray) { v8::HandleScope scope; LocalContext context; - const int kElementCount = 40; + const int kElementCount = 260; uint8_t* pixel_data = reinterpret_cast<uint8_t*>(malloc(kElementCount)); i::Handle<i::PixelArray> pixels = i::Factory::NewPixelArray(kElementCount, pixel_data); i::Heap::CollectAllGarbage(false); // Force GC to trigger verification. for (int i = 0; i < kElementCount; i++) { - pixels->set(i, i); + pixels->set(i, i % 256); } i::Heap::CollectAllGarbage(false); // Force GC to trigger verification. 
for (int i = 0; i < kElementCount; i++) { - CHECK_EQ(i, pixels->get(i)); - CHECK_EQ(i, pixel_data[i]); + CHECK_EQ(i % 256, pixels->get(i)); + CHECK_EQ(i % 256, pixel_data[i]); } v8::Handle<v8::Object> obj = v8::Object::New(); @@ -8038,6 +7790,15 @@ THREADED_TEST(PixelArray) { result = CompileRun("pixels[1] = 23;"); CHECK_EQ(23, result->Int32Value()); + // Test for index greater than 255. Regression test for: + // http://code.google.com/p/chromium/issues/detail?id=26337. + result = CompileRun("pixels[256] = 255;"); + CHECK_EQ(255, result->Int32Value()); + result = CompileRun("var i = 0;" + "for (var j = 0; j < 8; j++) { i = pixels[256]; }" + "i"); + CHECK_EQ(255, result->Int32Value()); + free(pixel_data); } @@ -8489,11 +8250,11 @@ THREADED_TEST(GetHeapStatistics) { v8::HandleScope scope; LocalContext c1; v8::HeapStatistics heap_statistics; - CHECK_EQ(heap_statistics.total_heap_size(), 0); - CHECK_EQ(heap_statistics.used_heap_size(), 0); + CHECK_EQ(static_cast<int>(heap_statistics.total_heap_size()), 0); + CHECK_EQ(static_cast<int>(heap_statistics.used_heap_size()), 0); v8::V8::GetHeapStatistics(&heap_statistics); - CHECK_NE(heap_statistics.total_heap_size(), 0); - CHECK_NE(heap_statistics.used_heap_size(), 0); + CHECK_NE(static_cast<int>(heap_statistics.total_heap_size()), 0); + CHECK_NE(static_cast<int>(heap_statistics.used_heap_size()), 0); } @@ -8610,3 +8371,41 @@ THREADED_TEST(QuietSignalingNaNs) { } } } + + +static v8::Handle<Value> SpaghettiIncident(const v8::Arguments& args) { + v8::HandleScope scope; + v8::TryCatch tc; + v8::Handle<v8::String> str = args[0]->ToString(); + if (tc.HasCaught()) + return tc.ReThrow(); + return v8::Undefined(); +} + + +// Test that an exception can be propagated down through a spaghetti +// stack using ReThrow. 
+THREADED_TEST(SpaghettiStackReThrow) { + v8::HandleScope scope; + LocalContext context; + context->Global()->Set( + v8::String::New("s"), + v8::FunctionTemplate::New(SpaghettiIncident)->GetFunction()); + v8::TryCatch try_catch; + CompileRun( + "var i = 0;" + "var o = {" + " toString: function () {" + " if (i == 10) {" + " throw 'Hey!';" + " } else {" + " i++;" + " return s(o);" + " }" + " }" + "};" + "s(o);"); + CHECK(try_catch.HasCaught()); + v8::String::Utf8Value value(try_catch.Exception()); + CHECK_EQ(0, strcmp(*value, "Hey!")); +} diff --git a/deps/v8/test/cctest/test-assembler-ia32.cc b/deps/v8/test/cctest/test-assembler-ia32.cc index 9ad7c76f11..76eb6bb3ff 100644 --- a/deps/v8/test/cctest/test-assembler-ia32.cc +++ b/deps/v8/test/cctest/test-assembler-ia32.cc @@ -173,8 +173,8 @@ TEST(AssemblerIa323) { v8::internal::byte buffer[256]; Assembler assm(buffer, sizeof buffer); - CHECK(CpuFeatures::IsSupported(CpuFeatures::SSE2)); - { CpuFeatures::Scope fscope(CpuFeatures::SSE2); + CHECK(CpuFeatures::IsSupported(SSE2)); + { CpuFeatures::Scope fscope(SSE2); __ cvttss2si(eax, Operand(esp, 4)); __ ret(0); } @@ -207,8 +207,8 @@ TEST(AssemblerIa324) { v8::internal::byte buffer[256]; Assembler assm(buffer, sizeof buffer); - CHECK(CpuFeatures::IsSupported(CpuFeatures::SSE2)); - CpuFeatures::Scope fscope(CpuFeatures::SSE2); + CHECK(CpuFeatures::IsSupported(SSE2)); + CpuFeatures::Scope fscope(SSE2); __ cvttsd2si(eax, Operand(esp, 4)); __ ret(0); @@ -260,8 +260,8 @@ typedef double (*F5)(double x, double y); TEST(AssemblerIa326) { InitializeVM(); v8::HandleScope scope; - CHECK(CpuFeatures::IsSupported(CpuFeatures::SSE2)); - CpuFeatures::Scope fscope(CpuFeatures::SSE2); + CHECK(CpuFeatures::IsSupported(SSE2)); + CpuFeatures::Scope fscope(SSE2); v8::internal::byte buffer[256]; Assembler assm(buffer, sizeof buffer); @@ -305,8 +305,8 @@ typedef double (*F6)(int x); TEST(AssemblerIa328) { InitializeVM(); v8::HandleScope scope; - CHECK(CpuFeatures::IsSupported(CpuFeatures::SSE2)); 
- CpuFeatures::Scope fscope(CpuFeatures::SSE2); + CHECK(CpuFeatures::IsSupported(SSE2)); + CpuFeatures::Scope fscope(SSE2); v8::internal::byte buffer[256]; Assembler assm(buffer, sizeof buffer); __ mov(eax, Operand(esp, 4)); diff --git a/deps/v8/test/cctest/test-assembler-x64.cc b/deps/v8/test/cctest/test-assembler-x64.cc index 81aa973db4..f100b73485 100644 --- a/deps/v8/test/cctest/test-assembler-x64.cc +++ b/deps/v8/test/cctest/test-assembler-x64.cc @@ -86,7 +86,7 @@ TEST(AssemblerX64ReturnOperation) { &actual_size, true)); CHECK(buffer); - Assembler assm(buffer, actual_size); + Assembler assm(buffer, static_cast<int>(actual_size)); // Assemble a simple function that copies argument 2 and returns it. __ movq(rax, arg2); @@ -107,7 +107,7 @@ TEST(AssemblerX64StackOperations) { &actual_size, true)); CHECK(buffer); - Assembler assm(buffer, actual_size); + Assembler assm(buffer, static_cast<int>(actual_size)); // Assemble a simple function that copies argument 2 and returns it. // We compile without stack frame pointers, so the gdb debugger shows @@ -138,7 +138,7 @@ TEST(AssemblerX64ArithmeticOperations) { &actual_size, true)); CHECK(buffer); - Assembler assm(buffer, actual_size); + Assembler assm(buffer, static_cast<int>(actual_size)); // Assemble a simple function that adds arguments returning the sum. __ movq(rax, arg2); @@ -159,7 +159,7 @@ TEST(AssemblerX64ImulOperation) { &actual_size, true)); CHECK(buffer); - Assembler assm(buffer, actual_size); + Assembler assm(buffer, static_cast<int>(actual_size)); // Assemble a simple function that multiplies arguments returning the high // word. @@ -186,7 +186,7 @@ TEST(AssemblerX64MemoryOperands) { &actual_size, true)); CHECK(buffer); - Assembler assm(buffer, actual_size); + Assembler assm(buffer, static_cast<int>(actual_size)); // Assemble a simple function that copies argument 2 and returns it. 
__ push(rbp); @@ -219,7 +219,7 @@ TEST(AssemblerX64ControlFlow) { &actual_size, true)); CHECK(buffer); - Assembler assm(buffer, actual_size); + Assembler assm(buffer, static_cast<int>(actual_size)); // Assemble a simple function that copies argument 1 and returns it. __ push(rbp); @@ -247,7 +247,7 @@ TEST(AssemblerX64LoopImmediates) { &actual_size, true)); CHECK(buffer); - Assembler assm(buffer, actual_size); + Assembler assm(buffer, static_cast<int>(actual_size)); // Assemble two loops using rax as counter, and verify the ending counts. Label Fail; __ movq(rax, Immediate(-3)); diff --git a/deps/v8/test/cctest/test-debug.cc b/deps/v8/test/cctest/test-debug.cc index 4ffcee3dbf..d938174e7c 100644 --- a/deps/v8/test/cctest/test-debug.cc +++ b/deps/v8/test/cctest/test-debug.cc @@ -53,7 +53,7 @@ using ::v8::internal::StepIn; // From StepAction enum using ::v8::internal::StepNext; // From StepAction enum using ::v8::internal::StepOut; // From StepAction enum using ::v8::internal::Vector; - +using ::v8::internal::StrLength; // Size of temp buffer for formatting small strings. #define SMALL_STRING_BUFFER_SIZE 80 @@ -178,12 +178,6 @@ static v8::Local<v8::Function> CompileFunction(const char* source, } -// Helper function that compiles and runs the source. -static v8::Local<v8::Value> CompileRun(const char* source) { - return v8::Script::Compile(v8::String::New(source))->Run(); -} - - // Is there any debug info for the function? static bool HasDebugInfo(v8::Handle<v8::Function> fun) { Handle<v8::internal::JSFunction> f = v8::Utils::OpenHandle(*fun); @@ -806,14 +800,14 @@ static void DebugEventStepSequence(v8::DebugEvent event, if (event == v8::Break || event == v8::Exception) { // Check that the current function is the expected. 
CHECK(break_point_hit_count < - static_cast<int>(strlen(expected_step_sequence))); + StrLength(expected_step_sequence)); const int argc = 1; v8::Handle<v8::Value> argv[argc] = { exec_state }; v8::Handle<v8::Value> result = frame_function_name->Call(exec_state, argc, argv); CHECK(result->IsString()); v8::String::AsciiValue function_name(result->ToString()); - CHECK_EQ(1, strlen(*function_name)); + CHECK_EQ(1, StrLength(*function_name)); CHECK_EQ((*function_name)[0], expected_step_sequence[break_point_hit_count]); @@ -872,6 +866,26 @@ static void DebugEventBreak(v8::DebugEvent event, } +// Debug event handler which re-issues a debug break until a limit has been +// reached. +int max_break_point_hit_count = 0; +static void DebugEventBreakMax(v8::DebugEvent event, + v8::Handle<v8::Object> exec_state, + v8::Handle<v8::Object> event_data, + v8::Handle<v8::Value> data) { + // When hitting a debug event listener there must be a break set. + CHECK_NE(v8::internal::Debug::break_id(), 0); + + if (event == v8::Break && break_point_hit_count < max_break_point_hit_count) { + // Count the number of breaks. + break_point_hit_count++; + + // Set the break flag again to come back here as soon as possible. + v8::Debug::DebugBreak(); + } +} + + // --- M e s s a g e C a l l b a c k @@ -1917,7 +1931,7 @@ TEST(ScriptBreakPointLine) { // Chesk that a break point was hit when the script was run. CHECK_EQ(1, break_point_hit_count); - CHECK_EQ(0, strlen(last_function_hit)); + CHECK_EQ(0, StrLength(last_function_hit)); // Call f and check that the script break point. f->Call(env->Global(), 0, NULL); @@ -1953,7 +1967,7 @@ TEST(ScriptBreakPointLine) { break_point_hit_count = 0; v8::Script::Compile(script, &origin)->Run(); CHECK_EQ(2, break_point_hit_count); - CHECK_EQ(0, strlen(last_function_hit)); + CHECK_EQ(0, StrLength(last_function_hit)); // Set a break point in the code after the last function decleration. 
int sbp6 = SetScriptBreakPointByNameFromJS("test.html", 12, -1); @@ -1962,7 +1976,7 @@ TEST(ScriptBreakPointLine) { break_point_hit_count = 0; v8::Script::Compile(script, &origin)->Run(); CHECK_EQ(3, break_point_hit_count); - CHECK_EQ(0, strlen(last_function_hit)); + CHECK_EQ(0, StrLength(last_function_hit)); // Clear the last break points, and reload the script which should not hit any // break points. @@ -2478,21 +2492,24 @@ TEST(StepInOutSimple) { break_point_hit_count = 0; expected_step_sequence = "abcbaca"; a->Call(env->Global(), 0, NULL); - CHECK_EQ(strlen(expected_step_sequence), break_point_hit_count); + CHECK_EQ(StrLength(expected_step_sequence), + break_point_hit_count); // Step through invocation of a with step next. step_action = StepNext; break_point_hit_count = 0; expected_step_sequence = "aaa"; a->Call(env->Global(), 0, NULL); - CHECK_EQ(strlen(expected_step_sequence), break_point_hit_count); + CHECK_EQ(StrLength(expected_step_sequence), + break_point_hit_count); // Step through invocation of a with step out. step_action = StepOut; break_point_hit_count = 0; expected_step_sequence = "a"; a->Call(env->Global(), 0, NULL); - CHECK_EQ(strlen(expected_step_sequence), break_point_hit_count); + CHECK_EQ(StrLength(expected_step_sequence), + break_point_hit_count); // Get rid of the debug event listener. v8::Debug::SetDebugEventListener(NULL); @@ -2525,21 +2542,24 @@ TEST(StepInOutTree) { break_point_hit_count = 0; expected_step_sequence = "adacadabcbadacada"; a->Call(env->Global(), 0, NULL); - CHECK_EQ(strlen(expected_step_sequence), break_point_hit_count); + CHECK_EQ(StrLength(expected_step_sequence), + break_point_hit_count); // Step through invocation of a with step next. 
step_action = StepNext; break_point_hit_count = 0; expected_step_sequence = "aaaa"; a->Call(env->Global(), 0, NULL); - CHECK_EQ(strlen(expected_step_sequence), break_point_hit_count); + CHECK_EQ(StrLength(expected_step_sequence), + break_point_hit_count); // Step through invocation of a with step out. step_action = StepOut; break_point_hit_count = 0; expected_step_sequence = "a"; a->Call(env->Global(), 0, NULL); - CHECK_EQ(strlen(expected_step_sequence), break_point_hit_count); + CHECK_EQ(StrLength(expected_step_sequence), + break_point_hit_count); // Get rid of the debug event listener. v8::Debug::SetDebugEventListener(NULL); @@ -2571,7 +2591,8 @@ TEST(StepInOutBranch) { break_point_hit_count = 0; expected_step_sequence = "abaca"; a->Call(env->Global(), 0, NULL); - CHECK_EQ(strlen(expected_step_sequence), break_point_hit_count); + CHECK_EQ(StrLength(expected_step_sequence), + break_point_hit_count); // Get rid of the debug event listener. v8::Debug::SetDebugEventListener(NULL); @@ -2707,6 +2728,37 @@ TEST(DebugStepFunctionCall) { } +// Tests that breakpoint will be hit if it's set in script. +TEST(PauseInScript) { + v8::HandleScope scope; + DebugLocalContext env; + env.ExposeDebug(); + + // Register a debug event listener which counts. + v8::Debug::SetDebugEventListener(DebugEventCounter); + + // Create a script that returns a function. + const char* src = "(function (evt) {})"; + const char* script_name = "StepInHandlerTest"; + + // Set breakpoint in the script. + SetScriptBreakPointByNameFromJS(script_name, 0, -1); + break_point_hit_count = 0; + + v8::ScriptOrigin origin(v8::String::New(script_name), v8::Integer::New(0)); + v8::Handle<v8::Script> script = v8::Script::Compile(v8::String::New(src), + &origin); + v8::Local<v8::Value> r = script->Run(); + + CHECK(r->IsFunction()); + CHECK_EQ(1, break_point_hit_count); + + // Get rid of the debug event listener. + v8::Debug::SetDebugEventListener(NULL); + CheckDebuggerUnloaded(); +} + + // Test break on exceptions. 
For each exception break combination the number // of debug event exception callbacks and message callbacks are collected. The // number of debug event exception callbacks are used to check that the @@ -2938,7 +2990,8 @@ TEST(StepWithException) { break_point_hit_count = 0; expected_step_sequence = "aa"; a->Call(env->Global(), 0, NULL); - CHECK_EQ(strlen(expected_step_sequence), break_point_hit_count); + CHECK_EQ(StrLength(expected_step_sequence), + break_point_hit_count); // Step through invocation of b + c. v8::Local<v8::Function> b = CompileFunction(&env, src, "b"); @@ -2947,7 +3000,8 @@ TEST(StepWithException) { break_point_hit_count = 0; expected_step_sequence = "bcc"; b->Call(env->Global(), 0, NULL); - CHECK_EQ(strlen(expected_step_sequence), break_point_hit_count); + CHECK_EQ(StrLength(expected_step_sequence), + break_point_hit_count); // Step through invocation of d + e. v8::Local<v8::Function> d = CompileFunction(&env, src, "d"); @@ -2957,7 +3011,8 @@ TEST(StepWithException) { break_point_hit_count = 0; expected_step_sequence = "dded"; d->Call(env->Global(), 0, NULL); - CHECK_EQ(strlen(expected_step_sequence), break_point_hit_count); + CHECK_EQ(StrLength(expected_step_sequence), + break_point_hit_count); // Step through invocation of d + e now with break on caught exceptions. ChangeBreakOnException(true, true); @@ -2965,7 +3020,8 @@ TEST(StepWithException) { break_point_hit_count = 0; expected_step_sequence = "ddeed"; d->Call(env->Global(), 0, NULL); - CHECK_EQ(strlen(expected_step_sequence), break_point_hit_count); + CHECK_EQ(StrLength(expected_step_sequence), + break_point_hit_count); // Step through invocation of f + g + h. 
v8::Local<v8::Function> f = CompileFunction(&env, src, "f"); @@ -2975,7 +3031,8 @@ TEST(StepWithException) { break_point_hit_count = 0; expected_step_sequence = "ffghf"; f->Call(env->Global(), 0, NULL); - CHECK_EQ(strlen(expected_step_sequence), break_point_hit_count); + CHECK_EQ(StrLength(expected_step_sequence), + break_point_hit_count); // Step through invocation of f + g + h now with break on caught exceptions. ChangeBreakOnException(true, true); @@ -2983,7 +3040,8 @@ TEST(StepWithException) { break_point_hit_count = 0; expected_step_sequence = "ffghhf"; f->Call(env->Global(), 0, NULL); - CHECK_EQ(strlen(expected_step_sequence), break_point_hit_count); + CHECK_EQ(StrLength(expected_step_sequence), + break_point_hit_count); // Get rid of the debug event listener. v8::Debug::SetDebugEventListener(NULL); @@ -3425,6 +3483,75 @@ TEST(NativeGetterThrowingErrorPropertyMirror) { } +// Test that hidden properties object is not returned as an unnamed property +// among regular properties. +// See http://crbug.com/26491 +TEST(NoHiddenProperties) { + // Create a V8 environment with debug access. + v8::HandleScope scope; + DebugLocalContext env; + env.ExposeDebug(); + + // Create an object in the global scope. + const char* source = "var obj = {a: 1};"; + v8::Script::Compile(v8::String::New(source))->Run(); + v8::Local<v8::Object> obj = v8::Local<v8::Object>::Cast( + env->Global()->Get(v8::String::New("obj"))); + // Set a hidden property on the object. + obj->SetHiddenValue(v8::String::New("v8::test-debug::a"), + v8::Int32::New(11)); + + // Get mirror for the object with property getter. + CompileRun("var obj_mirror = debug.MakeMirror(obj);"); + CHECK(CompileRun( + "obj_mirror instanceof debug.ObjectMirror")->BooleanValue()); + CompileRun("var named_names = obj_mirror.propertyNames();"); + // There should be exactly one property. But there is also an unnamed + // property whose value is hidden properties dictionary. 
The latter + // property should not be in the list of reguar properties. + CHECK_EQ(1, CompileRun("named_names.length")->Int32Value()); + CHECK(CompileRun("named_names[0] == 'a'")->BooleanValue()); + CHECK(CompileRun( + "obj_mirror.property('a').value().value() == 1")->BooleanValue()); + + // Object created by t0 will become hidden prototype of object 'obj'. + v8::Handle<v8::FunctionTemplate> t0 = v8::FunctionTemplate::New(); + t0->InstanceTemplate()->Set(v8::String::New("b"), v8::Number::New(2)); + t0->SetHiddenPrototype(true); + v8::Handle<v8::FunctionTemplate> t1 = v8::FunctionTemplate::New(); + t1->InstanceTemplate()->Set(v8::String::New("c"), v8::Number::New(3)); + + // Create proto objects, add hidden properties to them and set them on + // the global object. + v8::Handle<v8::Object> protoObj = t0->GetFunction()->NewInstance(); + protoObj->SetHiddenValue(v8::String::New("v8::test-debug::b"), + v8::Int32::New(12)); + env->Global()->Set(v8::String::New("protoObj"), protoObj); + v8::Handle<v8::Object> grandProtoObj = t1->GetFunction()->NewInstance(); + grandProtoObj->SetHiddenValue(v8::String::New("v8::test-debug::c"), + v8::Int32::New(13)); + env->Global()->Set(v8::String::New("grandProtoObj"), grandProtoObj); + + // Setting prototypes: obj->protoObj->grandProtoObj + protoObj->Set(v8::String::New("__proto__"), grandProtoObj); + obj->Set(v8::String::New("__proto__"), protoObj); + + // Get mirror for the object with property getter. + CompileRun("var obj_mirror = debug.MakeMirror(obj);"); + CHECK(CompileRun( + "obj_mirror instanceof debug.ObjectMirror")->BooleanValue()); + CompileRun("var named_names = obj_mirror.propertyNames();"); + // There should be exactly two properties - one from the object itself and + // another from its hidden prototype. 
+ CHECK_EQ(2, CompileRun("named_names.length")->Int32Value()); + CHECK(CompileRun("named_names.sort(); named_names[0] == 'a' &&" + "named_names[1] == 'b'")->BooleanValue()); + CHECK(CompileRun( + "obj_mirror.property('a').value().value() == 1")->BooleanValue()); + CHECK(CompileRun( + "obj_mirror.property('b').value().value() == 2")->BooleanValue()); +} + // Multithreaded tests of JSON debugger protocol @@ -4564,6 +4691,71 @@ TEST(DebuggerHostDispatch) { } +/* Test DebugMessageDispatch */ +/* In this test, the V8 thread waits for a message from the debug thread. + * The DebugMessageDispatchHandler is executed from the debugger thread + * which signals the V8 thread to wake up. + */ + +class DebugMessageDispatchV8Thread : public v8::internal::Thread { + public: + void Run(); +}; + +class DebugMessageDispatchDebuggerThread : public v8::internal::Thread { + public: + void Run(); +}; + +Barriers* debug_message_dispatch_barriers; + + +static void DebugMessageHandler() { + debug_message_dispatch_barriers->semaphore_1->Signal(); +} + + +void DebugMessageDispatchV8Thread::Run() { + v8::HandleScope scope; + DebugLocalContext env; + + // Setup debug message dispatch handler. 
+ v8::Debug::SetDebugMessageDispatchHandler(DebugMessageHandler); + + CompileRun("var y = 1 + 2;\n"); + debug_message_dispatch_barriers->barrier_1.Wait(); + debug_message_dispatch_barriers->semaphore_1->Wait(); + debug_message_dispatch_barriers->barrier_2.Wait(); +} + + +void DebugMessageDispatchDebuggerThread::Run() { + debug_message_dispatch_barriers->barrier_1.Wait(); + SendContinueCommand(); + debug_message_dispatch_barriers->barrier_2.Wait(); +} + +DebugMessageDispatchDebuggerThread debug_message_dispatch_debugger_thread; +DebugMessageDispatchV8Thread debug_message_dispatch_v8_thread; + + +TEST(DebuggerDebugMessageDispatch) { + i::FLAG_debugger_auto_break = true; + + // Create a V8 environment + Barriers stack_allocated_debug_message_dispatch_barriers; + stack_allocated_debug_message_dispatch_barriers.Initialize(); + debug_message_dispatch_barriers = + &stack_allocated_debug_message_dispatch_barriers; + + debug_message_dispatch_v8_thread.Start(); + debug_message_dispatch_debugger_thread.Start(); + + debug_message_dispatch_v8_thread.Join(); + debug_message_dispatch_debugger_thread.Join(); +} + + TEST(DebuggerAgent) { // Make sure these ports is not used by other tests to allow tests to run in // parallel. @@ -4709,7 +4901,8 @@ TEST(DebuggerAgentProtocolOverflowHeader) { // Add empty body to request. const char* content_length_zero_header = "Content-Length:0\r\n"; - client->Send(content_length_zero_header, strlen(content_length_zero_header)); + client->Send(content_length_zero_header, + StrLength(content_length_zero_header)); client->Send("\r\n", 2); // Wait until data is received. @@ -5444,3 +5637,119 @@ TEST(GetMirror) { v8::Handle<v8::Value> result = run_test->Call(env->Global(), 1, &obj); CHECK(result->IsTrue()); } + + +// Test that the debug break flag works with function.apply. +TEST(DebugBreakFunctionApply) { + v8::HandleScope scope; + DebugLocalContext env; + + // Create a function for testing breaking in apply. 
+ v8::Local<v8::Function> foo = CompileFunction( + &env, + "function baz(x) { }" + "function bar(x) { baz(); }" + "function foo(){ bar.apply(this, [1]); }", + "foo"); + + // Register a debug event listener which steps and counts. + v8::Debug::SetDebugEventListener(DebugEventBreakMax); + + // Set the debug break flag before calling the code using function.apply. + v8::Debug::DebugBreak(); + + // Limit the number of debug breaks. This is a regression test for issue 493 + // where this test would enter an infinite loop. + break_point_hit_count = 0; + max_break_point_hit_count = 10000; // 10000 => infinite loop. + foo->Call(env->Global(), 0, NULL); + + // When keeping the debug break several break will happen. + CHECK_EQ(3, break_point_hit_count); + + v8::Debug::SetDebugEventListener(NULL); + CheckDebuggerUnloaded(); +} + + +v8::Handle<v8::Context> debugee_context; +v8::Handle<v8::Context> debugger_context; + + +// Property getter that checks that current and calling contexts +// are both the debugee contexts. +static v8::Handle<v8::Value> NamedGetterWithCallingContextCheck( + v8::Local<v8::String> name, + const v8::AccessorInfo& info) { + CHECK_EQ(0, strcmp(*v8::String::AsciiValue(name), "a")); + v8::Handle<v8::Context> current = v8::Context::GetCurrent(); + CHECK(current == debugee_context); + CHECK(current != debugger_context); + v8::Handle<v8::Context> calling = v8::Context::GetCalling(); + CHECK(calling == debugee_context); + CHECK(calling != debugger_context); + return v8::Int32::New(1); +} + + +// Debug event listener that checks if the first argument of a function is +// an object with property 'a' == 1. If the property has custom accessor +// this handler will eventually invoke it. 
+static void DebugEventGetAtgumentPropertyValue( + v8::DebugEvent event, + v8::Handle<v8::Object> exec_state, + v8::Handle<v8::Object> event_data, + v8::Handle<v8::Value> data) { + if (event == v8::Break) { + break_point_hit_count++; + CHECK(debugger_context == v8::Context::GetCurrent()); + v8::Handle<v8::Function> func(v8::Function::Cast(*CompileRun( + "(function(exec_state) {\n" + " return (exec_state.frame(0).argumentValue(0).property('a').\n" + " value().value() == 1);\n" + "})"))); + const int argc = 1; + v8::Handle<v8::Value> argv[argc] = { exec_state }; + v8::Handle<v8::Value> result = func->Call(exec_state, argc, argv); + CHECK(result->IsTrue()); + } +} + + +TEST(CallingContextIsNotDebugContext) { + // Create and enter a debugee context. + v8::HandleScope scope; + DebugLocalContext env; + env.ExposeDebug(); + + // Save handles to the debugger and debugee contexts to be used in + // NamedGetterWithCallingContextCheck. + debugee_context = v8::Local<v8::Context>(*env); + debugger_context = v8::Utils::ToLocal(Debug::debug_context()); + + // Create object with 'a' property accessor. + v8::Handle<v8::ObjectTemplate> named = v8::ObjectTemplate::New(); + named->SetAccessor(v8::String::New("a"), + NamedGetterWithCallingContextCheck); + env->Global()->Set(v8::String::New("obj"), + named->NewInstance()); + + // Register the debug event listener + v8::Debug::SetDebugEventListener(DebugEventGetAtgumentPropertyValue); + + // Create a function that invokes debugger. 
+ v8::Local<v8::Function> foo = CompileFunction( + &env, + "function bar(x) { debugger; }" + "function foo(){ bar(obj); }", + "foo"); + + break_point_hit_count = 0; + foo->Call(env->Global(), 0, NULL); + CHECK_EQ(1, break_point_hit_count); + + v8::Debug::SetDebugEventListener(NULL); + debugee_context = v8::Handle<v8::Context>(); + debugger_context = v8::Handle<v8::Context>(); + CheckDebuggerUnloaded(); +} diff --git a/deps/v8/test/cctest/test-disasm-ia32.cc b/deps/v8/test/cctest/test-disasm-ia32.cc index 74db23463a..b8b3364ac4 100644 --- a/deps/v8/test/cctest/test-disasm-ia32.cc +++ b/deps/v8/test/cctest/test-disasm-ia32.cc @@ -105,13 +105,13 @@ TEST(DisasmIa320) { __ xor_(edx, 3); __ nop(); { - CHECK(CpuFeatures::IsSupported(CpuFeatures::CPUID)); - CpuFeatures::Scope fscope(CpuFeatures::CPUID); + CHECK(CpuFeatures::IsSupported(CPUID)); + CpuFeatures::Scope fscope(CPUID); __ cpuid(); } { - CHECK(CpuFeatures::IsSupported(CpuFeatures::RDTSC)); - CpuFeatures::Scope fscope(CpuFeatures::RDTSC); + CHECK(CpuFeatures::IsSupported(RDTSC)); + CpuFeatures::Scope fscope(RDTSC); __ rdtsc(); } __ movsx_b(edx, Operand(ecx)); @@ -194,15 +194,16 @@ TEST(DisasmIa320) { __ rcl(edx, 7); __ sar(edx, 1); __ sar(edx, 6); - __ sar(edx); + __ sar_cl(edx); __ sbb(edx, Operand(ebx, ecx, times_4, 10000)); __ shld(edx, Operand(ebx, ecx, times_4, 10000)); __ shl(edx, 1); __ shl(edx, 6); - __ shl(edx); + __ shl_cl(edx); __ shrd(edx, Operand(ebx, ecx, times_4, 10000)); + __ shr(edx, 1); __ shr(edx, 7); - __ shr(edx); + __ shr_cl(edx); // Immediates @@ -353,8 +354,8 @@ TEST(DisasmIa320) { __ fwait(); __ nop(); { - CHECK(CpuFeatures::IsSupported(CpuFeatures::SSE2)); - CpuFeatures::Scope fscope(CpuFeatures::SSE2); + CHECK(CpuFeatures::IsSupported(SSE2)); + CpuFeatures::Scope fscope(SSE2); __ cvttss2si(edx, Operand(ebx, ecx, times_4, 10000)); __ cvtsi2sd(xmm1, Operand(ebx, ecx, times_4, 10000)); __ addsd(xmm1, xmm0); @@ -368,8 +369,8 @@ TEST(DisasmIa320) { // cmov. 
{ - CHECK(CpuFeatures::IsSupported(CpuFeatures::CMOV)); - CpuFeatures::Scope use_cmov(CpuFeatures::CMOV); + CHECK(CpuFeatures::IsSupported(CMOV)); + CpuFeatures::Scope use_cmov(CMOV); __ cmov(overflow, eax, Operand(eax, 0)); __ cmov(no_overflow, eax, Operand(eax, 1)); __ cmov(below, eax, Operand(eax, 2)); diff --git a/deps/v8/test/cctest/test-flags.cc b/deps/v8/test/cctest/test-flags.cc index 9019a89ef4..32f1264f7f 100644 --- a/deps/v8/test/cctest/test-flags.cc +++ b/deps/v8/test/cctest/test-flags.cc @@ -75,7 +75,7 @@ TEST(Flags2b) { " -notesting-bool-flag notaflag --testing_int_flag=77 " "-testing_float_flag=.25 " "--testing_string_flag no_way! "; - CHECK_EQ(0, FlagList::SetFlagsFromString(str, strlen(str))); + CHECK_EQ(0, FlagList::SetFlagsFromString(str, StrLength(str))); CHECK(!FLAG_testing_bool_flag); CHECK_EQ(77, FLAG_testing_int_flag); CHECK_EQ(.25, FLAG_testing_float_flag); @@ -107,7 +107,7 @@ TEST(Flags3b) { "--testing_bool_flag notaflag --testing_int_flag -666 " "--testing_float_flag -12E10 " "-testing-string-flag=foo-bar"; - CHECK_EQ(0, FlagList::SetFlagsFromString(str, strlen(str))); + CHECK_EQ(0, FlagList::SetFlagsFromString(str, StrLength(str))); CHECK(FLAG_testing_bool_flag); CHECK_EQ(-666, FLAG_testing_int_flag); CHECK_EQ(-12E10, FLAG_testing_float_flag); @@ -129,7 +129,7 @@ TEST(Flags4) { TEST(Flags4b) { SetFlagsToDefault(); const char* str = "--testing_bool_flag --foo"; - CHECK_EQ(2, FlagList::SetFlagsFromString(str, strlen(str))); + CHECK_EQ(2, FlagList::SetFlagsFromString(str, StrLength(str))); } @@ -147,7 +147,7 @@ TEST(Flags5) { TEST(Flags5b) { SetFlagsToDefault(); const char* str = " --testing_int_flag=\"foobar\""; - CHECK_EQ(1, FlagList::SetFlagsFromString(str, strlen(str))); + CHECK_EQ(1, FlagList::SetFlagsFromString(str, StrLength(str))); } @@ -166,7 +166,7 @@ TEST(Flags6) { TEST(Flags6b) { SetFlagsToDefault(); const char* str = " --testing-int-flag 0 --testing_float_flag "; - CHECK_EQ(3, FlagList::SetFlagsFromString(str, strlen(str))); + 
CHECK_EQ(3, FlagList::SetFlagsFromString(str, StrLength(str))); } @@ -191,7 +191,7 @@ TEST(FlagsJSArguments1) { TEST(FlagsJSArguments1b) { SetFlagsToDefault(); const char* str = "--testing-int-flag 42 -- testing-float-flag 7"; - CHECK_EQ(0, FlagList::SetFlagsFromString(str, strlen(str))); + CHECK_EQ(0, FlagList::SetFlagsFromString(str, StrLength(str))); CHECK_EQ(42, FLAG_testing_int_flag); CHECK_EQ(2.5, FLAG_testing_float_flag); CHECK_EQ(2, FLAG_js_arguments.argc()); @@ -203,7 +203,7 @@ TEST(FlagsJSArguments1b) { TEST(FlagsJSArguments2) { SetFlagsToDefault(); const char* str = "--testing-int-flag 42 --js-arguments testing-float-flag 7"; - CHECK_EQ(0, FlagList::SetFlagsFromString(str, strlen(str))); + CHECK_EQ(0, FlagList::SetFlagsFromString(str, StrLength(str))); CHECK_EQ(42, FLAG_testing_int_flag); CHECK_EQ(2.5, FLAG_testing_float_flag); CHECK_EQ(2, FLAG_js_arguments.argc()); @@ -215,7 +215,7 @@ TEST(FlagsJSArguments2) { TEST(FlagsJSArguments3) { SetFlagsToDefault(); const char* str = "--testing-int-flag 42 --js-arguments=testing-float-flag 7"; - CHECK_EQ(0, FlagList::SetFlagsFromString(str, strlen(str))); + CHECK_EQ(0, FlagList::SetFlagsFromString(str, StrLength(str))); CHECK_EQ(42, FLAG_testing_int_flag); CHECK_EQ(2.5, FLAG_testing_float_flag); CHECK_EQ(2, FLAG_js_arguments.argc()); @@ -227,7 +227,7 @@ TEST(FlagsJSArguments3) { TEST(FlagsJSArguments4) { SetFlagsToDefault(); const char* str = "--testing-int-flag 42 --"; - CHECK_EQ(0, FlagList::SetFlagsFromString(str, strlen(str))); + CHECK_EQ(0, FlagList::SetFlagsFromString(str, StrLength(str))); CHECK_EQ(42, FLAG_testing_int_flag); CHECK_EQ(0, FLAG_js_arguments.argc()); } diff --git a/deps/v8/test/cctest/test-heap-profiler.cc b/deps/v8/test/cctest/test-heap-profiler.cc index b199507d70..04e0037b03 100644 --- a/deps/v8/test/cctest/test-heap-profiler.cc +++ b/deps/v8/test/cctest/test-heap-profiler.cc @@ -384,8 +384,8 @@ TEST(RetainerProfile) { const char* retainers_of_a = printer.GetRetainers("A"); // The order of 
retainers is unspecified, so we check string length, and // verify each retainer separately. - CHECK_EQ(static_cast<int>(strlen("(global property);1,B;2,C;2")), - static_cast<int>(strlen(retainers_of_a))); + CHECK_EQ(i::StrLength("(global property);1,B;2,C;2"), + i::StrLength(retainers_of_a)); CHECK(strstr(retainers_of_a, "(global property);1") != NULL); CHECK(strstr(retainers_of_a, "B;2") != NULL); CHECK(strstr(retainers_of_a, "C;2") != NULL); diff --git a/deps/v8/test/cctest/test-heap.cc b/deps/v8/test/cctest/test-heap.cc index 9911ce42bc..fb9a48e790 100644 --- a/deps/v8/test/cctest/test-heap.cc +++ b/deps/v8/test/cctest/test-heap.cc @@ -262,7 +262,7 @@ TEST(GarbageCollection) { static void VerifyStringAllocation(const char* string) { String* s = String::cast(Heap::AllocateStringFromUtf8(CStrVector(string))); - CHECK_EQ(static_cast<int>(strlen(string)), s->length()); + CHECK_EQ(StrLength(string), s->length()); for (int index = 0; index < s->length(); index++) { CHECK_EQ(static_cast<uint16_t>(string[index]), s->Get(index)); } } @@ -285,7 +285,7 @@ TEST(LocalHandles) { v8::HandleScope scope; const char* name = "Kasper the spunky"; Handle<String> string = Factory::NewStringFromAscii(CStrVector(name)); - CHECK_EQ(static_cast<int>(strlen(name)), string->length()); + CHECK_EQ(StrLength(name), string->length()); } diff --git a/deps/v8/test/cctest/test-log-stack-tracer.cc b/deps/v8/test/cctest/test-log-stack-tracer.cc index 39f90647e3..68cbc26191 100644 --- a/deps/v8/test/cctest/test-log-stack-tracer.cc +++ b/deps/v8/test/cctest/test-log-stack-tracer.cc @@ -163,11 +163,6 @@ v8::Handle<v8::Value> TraceExtension::JSEntrySP(const v8::Arguments& args) { } -static void CompileRun(const char* source) { - Script::Compile(String::New(source))->Run(); -} - - v8::Handle<v8::Value> TraceExtension::JSEntrySPLevel2( const v8::Arguments& args) { v8::HandleScope scope; diff --git a/deps/v8/test/cctest/test-log-utils.cc b/deps/v8/test/cctest/test-log-utils.cc index 
a08a0a1101..c99d770d42 100644 --- a/deps/v8/test/cctest/test-log-utils.cc +++ b/deps/v8/test/cctest/test-log-utils.cc @@ -16,6 +16,7 @@ using v8::internal::LogRecordCompressor; using v8::internal::MutableCStrVector; using v8::internal::ScopedVector; using v8::internal::Vector; +using v8::internal::StrLength; // Fills 'ref_buffer' with test data: a sequence of two-digit // hex numbers: '0001020304...'. Then writes 'ref_buffer' contents to 'dynabuf'. @@ -118,7 +119,7 @@ TEST(DynaBufReadTruncation) { TEST(DynaBufSealing) { const char* seal = "Sealed"; - const int seal_size = strlen(seal); + const int seal_size = StrLength(seal); LogDynamicBuffer dynabuf(32, 128, seal, seal_size); EmbeddedVector<char, 100> ref_buf; WriteData(&dynabuf, &ref_buf); diff --git a/deps/v8/test/cctest/test-log.cc b/deps/v8/test/cctest/test-log.cc index b1cb63c632..57004d7c88 100644 --- a/deps/v8/test/cctest/test-log.cc +++ b/deps/v8/test/cctest/test-log.cc @@ -19,6 +19,7 @@ using v8::internal::Address; using v8::internal::EmbeddedVector; using v8::internal::Logger; +using v8::internal::StrLength; namespace i = v8::internal; @@ -55,7 +56,7 @@ TEST(GetMessages) { CHECK_EQ(0, Logger::GetLogLines(0, log_lines, 3)); // See Logger::StringEvent. const char* line_1 = "aaa,\"bbb\"\n"; - const int line_1_len = strlen(line_1); + const int line_1_len = StrLength(line_1); // Still smaller than log message length. CHECK_EQ(0, Logger::GetLogLines(0, log_lines, line_1_len - 1)); // The exact size. @@ -68,7 +69,7 @@ TEST(GetMessages) { CHECK_EQ(line_1, log_lines); memset(log_lines, 0, sizeof(log_lines)); const char* line_2 = "cccc,\"dddd\"\n"; - const int line_2_len = strlen(line_2); + const int line_2_len = StrLength(line_2); // Now start with line_2 beginning. CHECK_EQ(0, Logger::GetLogLines(line_1_len, log_lines, 0)); CHECK_EQ(0, Logger::GetLogLines(line_1_len, log_lines, 3)); @@ -82,7 +83,7 @@ TEST(GetMessages) { memset(log_lines, 0, sizeof(log_lines)); // Now get entire buffer contents. 
const char* all_lines = "aaa,\"bbb\"\ncccc,\"dddd\"\n"; - const int all_lines_len = strlen(all_lines); + const int all_lines_len = StrLength(all_lines); CHECK_EQ(all_lines_len, Logger::GetLogLines(0, log_lines, all_lines_len)); CHECK_EQ(all_lines, log_lines); memset(log_lines, 0, sizeof(log_lines)); @@ -104,7 +105,7 @@ TEST(BeyondWritePosition) { Logger::StringEvent("cccc", "dddd"); // See Logger::StringEvent. const char* all_lines = "aaa,\"bbb\"\ncccc,\"dddd\"\n"; - const int all_lines_len = strlen(all_lines); + const int all_lines_len = StrLength(all_lines); EmbeddedVector<char, 100> buffer; const int beyond_write_pos = all_lines_len; CHECK_EQ(0, Logger::GetLogLines(beyond_write_pos, buffer.start(), 1)); @@ -437,7 +438,7 @@ namespace { class SimpleExternalString : public v8::String::ExternalStringResource { public: explicit SimpleExternalString(const char* source) - : utf_source_(strlen(source)) { + : utf_source_(StrLength(source)) { for (int i = 0; i < utf_source_.length(); ++i) utf_source_[i] = source[i]; } @@ -592,7 +593,7 @@ class ParseLogResult { entities[i] = NULL; } const size_t map_length = bounds.Length(); - entities_map = i::NewArray<int>(map_length); + entities_map = i::NewArray<int>(static_cast<int>(map_length)); for (size_t i = 0; i < map_length; ++i) { entities_map[i] = -1; } @@ -768,7 +769,7 @@ static inline void PrintCodeEntityInfo(CodeEntityInfo entity) { const int max_len = 50; if (entity != NULL) { char* eol = strchr(entity, '\n'); - int len = eol - entity; + int len = static_cast<int>(eol - entity); len = len <= max_len ? 
len : max_len; printf("%-*.*s ", max_len, len, entity); } else { @@ -788,7 +789,7 @@ static void PrintCodeEntitiesInfo( static inline int StrChrLen(const char* s, char c) { - return strchr(s, c) - s; + return static_cast<int>(strchr(s, c) - s); } diff --git a/deps/v8/test/cctest/test-macro-assembler-x64.cc b/deps/v8/test/cctest/test-macro-assembler-x64.cc index f344ac864e..511b933a50 100755 --- a/deps/v8/test/cctest/test-macro-assembler-x64.cc +++ b/deps/v8/test/cctest/test-macro-assembler-x64.cc @@ -133,7 +133,7 @@ TEST(SmiMove) { true)); CHECK(buffer); HandleScope handles; - MacroAssembler assembler(buffer, actual_size); + MacroAssembler assembler(buffer, static_cast<int>(actual_size)); MacroAssembler* masm = &assembler; // Create a pointer for the __ macro. masm->set_allow_stub_calls(false); Label exit; @@ -218,7 +218,7 @@ TEST(SmiCompare) { true)); CHECK(buffer); HandleScope handles; - MacroAssembler assembler(buffer, actual_size); + MacroAssembler assembler(buffer, static_cast<int>(actual_size)); MacroAssembler* masm = &assembler; masm->set_allow_stub_calls(false); @@ -265,7 +265,7 @@ TEST(Integer32ToSmi) { true)); CHECK(buffer); HandleScope handles; - MacroAssembler assembler(buffer, actual_size); + MacroAssembler assembler(buffer, static_cast<int>(actual_size)); MacroAssembler* masm = &assembler; masm->set_allow_stub_calls(false); @@ -364,7 +364,7 @@ void TestI64PlusConstantToSmi(MacroAssembler* masm, int64_t result = x + y; ASSERT(Smi::IsValid(result)); __ movl(rax, Immediate(id)); - __ Move(r8, Smi::FromInt(result)); + __ Move(r8, Smi::FromInt(static_cast<int>(result))); __ movq(rcx, x, RelocInfo::NONE); __ movq(r11, rcx); __ Integer64PlusConstantToSmi(rdx, rcx, y); @@ -390,7 +390,7 @@ TEST(Integer64PlusConstantToSmi) { true)); CHECK(buffer); HandleScope handles; - MacroAssembler assembler(buffer, actual_size); + MacroAssembler assembler(buffer, static_cast<int>(actual_size)); MacroAssembler* masm = &assembler; masm->set_allow_stub_calls(false); @@ -431,7 
+431,7 @@ TEST(SmiCheck) { true)); CHECK(buffer); HandleScope handles; - MacroAssembler assembler(buffer, actual_size); + MacroAssembler assembler(buffer, static_cast<int>(actual_size)); MacroAssembler* masm = &assembler; masm->set_allow_stub_calls(false); @@ -676,7 +676,7 @@ TEST(SmiNeg) { true)); CHECK(buffer); HandleScope handles; - MacroAssembler assembler(buffer, actual_size); + MacroAssembler assembler(buffer, static_cast<int>(actual_size)); MacroAssembler* masm = &assembler; masm->set_allow_stub_calls(false); @@ -761,7 +761,7 @@ TEST(SmiAdd) { true)); CHECK(buffer); HandleScope handles; - MacroAssembler assembler(buffer, actual_size); + MacroAssembler assembler(buffer, static_cast<int>(actual_size)); MacroAssembler* masm = &assembler; masm->set_allow_stub_calls(false); @@ -948,7 +948,7 @@ TEST(SmiSub) { true)); CHECK(buffer); HandleScope handles; - MacroAssembler assembler(buffer, actual_size); + MacroAssembler assembler(buffer, static_cast<int>(actual_size)); MacroAssembler* masm = &assembler; masm->set_allow_stub_calls(false); @@ -1035,7 +1035,7 @@ TEST(SmiMul) { true)); CHECK(buffer); HandleScope handles; - MacroAssembler assembler(buffer, actual_size); + MacroAssembler assembler(buffer, static_cast<int>(actual_size)); MacroAssembler* masm = &assembler; masm->set_allow_stub_calls(false); @@ -1138,7 +1138,7 @@ TEST(SmiDiv) { true)); CHECK(buffer); HandleScope handles; - MacroAssembler assembler(buffer, actual_size); + MacroAssembler assembler(buffer, static_cast<int>(actual_size)); MacroAssembler* masm = &assembler; masm->set_allow_stub_calls(false); @@ -1245,7 +1245,7 @@ TEST(SmiMod) { true)); CHECK(buffer); HandleScope handles; - MacroAssembler assembler(buffer, actual_size); + MacroAssembler assembler(buffer, static_cast<int>(actual_size)); MacroAssembler* masm = &assembler; masm->set_allow_stub_calls(false); @@ -1338,7 +1338,7 @@ TEST(SmiIndex) { true)); CHECK(buffer); HandleScope handles; - MacroAssembler assembler(buffer, actual_size); + 
MacroAssembler assembler(buffer, static_cast<int>(actual_size)); MacroAssembler* masm = &assembler; masm->set_allow_stub_calls(false); @@ -1404,7 +1404,7 @@ TEST(SmiSelectNonSmi) { true)); CHECK(buffer); HandleScope handles; - MacroAssembler assembler(buffer, actual_size); + MacroAssembler assembler(buffer, static_cast<int>(actual_size)); MacroAssembler* masm = &assembler; masm->set_allow_stub_calls(false); // Avoid inline checks. @@ -1480,7 +1480,7 @@ TEST(SmiAnd) { true)); CHECK(buffer); HandleScope handles; - MacroAssembler assembler(buffer, actual_size); + MacroAssembler assembler(buffer, static_cast<int>(actual_size)); MacroAssembler* masm = &assembler; masm->set_allow_stub_calls(false); @@ -1558,7 +1558,7 @@ TEST(SmiOr) { true)); CHECK(buffer); HandleScope handles; - MacroAssembler assembler(buffer, actual_size); + MacroAssembler assembler(buffer, static_cast<int>(actual_size)); MacroAssembler* masm = &assembler; masm->set_allow_stub_calls(false); @@ -1638,7 +1638,7 @@ TEST(SmiXor) { true)); CHECK(buffer); HandleScope handles; - MacroAssembler assembler(buffer, actual_size); + MacroAssembler assembler(buffer, static_cast<int>(actual_size)); MacroAssembler* masm = &assembler; masm->set_allow_stub_calls(false); @@ -1702,7 +1702,7 @@ TEST(SmiNot) { true)); CHECK(buffer); HandleScope handles; - MacroAssembler assembler(buffer, actual_size); + MacroAssembler assembler(buffer, static_cast<int>(actual_size)); MacroAssembler* masm = &assembler; masm->set_allow_stub_calls(false); @@ -1843,7 +1843,7 @@ TEST(SmiShiftLeft) { true)); CHECK(buffer); HandleScope handles; - MacroAssembler assembler(buffer, actual_size); + MacroAssembler assembler(buffer, static_cast<int>(actual_size)); MacroAssembler* masm = &assembler; masm->set_allow_stub_calls(false); @@ -1880,7 +1880,7 @@ void TestSmiShiftLogicalRight(MacroAssembler* masm, int shift = shifts[i]; intptr_t result = static_cast<unsigned int>(x) >> shift; if (Smi::IsValid(result)) { - __ Move(r8, Smi::FromInt(result)); + __ 
Move(r8, Smi::FromInt(static_cast<int>(result))); __ Move(rcx, Smi::FromInt(x)); __ SmiShiftLogicalRightConstant(r9, rcx, shift, exit); @@ -1946,7 +1946,7 @@ TEST(SmiShiftLogicalRight) { true)); CHECK(buffer); HandleScope handles; - MacroAssembler assembler(buffer, actual_size); + MacroAssembler assembler(buffer, static_cast<int>(actual_size)); MacroAssembler* masm = &assembler; masm->set_allow_stub_calls(false); @@ -2012,7 +2012,7 @@ TEST(SmiShiftArithmeticRight) { true)); CHECK(buffer); HandleScope handles; - MacroAssembler assembler(buffer, actual_size); + MacroAssembler assembler(buffer, static_cast<int>(actual_size)); MacroAssembler* masm = &assembler; masm->set_allow_stub_calls(false); @@ -2073,7 +2073,7 @@ TEST(PositiveSmiTimesPowerOfTwoToInteger64) { true)); CHECK(buffer); HandleScope handles; - MacroAssembler assembler(buffer, actual_size); + MacroAssembler assembler(buffer, static_cast<int>(actual_size)); MacroAssembler* masm = &assembler; masm->set_allow_stub_calls(false); diff --git a/deps/v8/test/cctest/test-parsing.cc b/deps/v8/test/cctest/test-parsing.cc new file mode 100644 index 0000000000..d62b6a5d53 --- /dev/null +++ b/deps/v8/test/cctest/test-parsing.cc @@ -0,0 +1,129 @@ +// Copyright 2006-2009 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#include <stdlib.h> + +#include "v8.h" + +#include "token.h" +#include "scanner.h" +#include "utils.h" + +#include "cctest.h" + +namespace i = ::v8::internal; + +TEST(KeywordMatcher) { + struct KeywordToken { + const char* keyword; + i::Token::Value token; + }; + + static const KeywordToken keywords[] = { +#define KEYWORD(t, s, d) { s, i::Token::t }, +#define IGNORE(t, s, d) /* */ + TOKEN_LIST(IGNORE, KEYWORD, IGNORE) +#undef KEYWORD + { NULL, i::Token::IDENTIFIER } + }; + + static const char* future_keywords[] = { +#define FUTURE(t, s, d) s, + TOKEN_LIST(IGNORE, IGNORE, FUTURE) +#undef FUTURE +#undef IGNORE + NULL + }; + + KeywordToken key_token; + for (int i = 0; (key_token = keywords[i]).keyword != NULL; i++) { + i::KeywordMatcher matcher; + const char* keyword = key_token.keyword; + int length = i::StrLength(keyword); + for (int j = 0; j < length; j++) { + if (key_token.token == i::Token::INSTANCEOF && j == 2) { + // "in" is a prefix of "instanceof". It's the only keyword + // that is a prefix of another. 
+ CHECK_EQ(i::Token::IN, matcher.token()); + } else { + CHECK_EQ(i::Token::IDENTIFIER, matcher.token()); + } + matcher.AddChar(keyword[j]); + } + CHECK_EQ(key_token.token, matcher.token()); + // Adding more characters will make keyword matching fail. + matcher.AddChar('z'); + CHECK_EQ(i::Token::IDENTIFIER, matcher.token()); + // Adding a keyword later will not make it match again. + matcher.AddChar('i'); + matcher.AddChar('f'); + CHECK_EQ(i::Token::IDENTIFIER, matcher.token()); + } + + // Future keywords are not recognized. + const char* future_keyword; + for (int i = 0; (future_keyword = future_keywords[i]) != NULL; i++) { + i::KeywordMatcher matcher; + int length = i::StrLength(future_keyword); + for (int j = 0; j < length; j++) { + matcher.AddChar(future_keyword[j]); + } + CHECK_EQ(i::Token::IDENTIFIER, matcher.token()); + } + + // Zero isn't ignored at first. + i::KeywordMatcher bad_start; + bad_start.AddChar(0); + CHECK_EQ(i::Token::IDENTIFIER, bad_start.token()); + bad_start.AddChar('i'); + bad_start.AddChar('f'); + CHECK_EQ(i::Token::IDENTIFIER, bad_start.token()); + + // Zero isn't ignored at end. + i::KeywordMatcher bad_end; + bad_end.AddChar('i'); + bad_end.AddChar('f'); + CHECK_EQ(i::Token::IF, bad_end.token()); + bad_end.AddChar(0); + CHECK_EQ(i::Token::IDENTIFIER, bad_end.token()); + + // Case isn't ignored. + i::KeywordMatcher bad_case; + bad_case.AddChar('i'); + bad_case.AddChar('F'); + CHECK_EQ(i::Token::IDENTIFIER, bad_case.token()); + + // If we mark it as failure, continuing won't help. 
+ i::KeywordMatcher full_stop; + full_stop.AddChar('i'); + CHECK_EQ(i::Token::IDENTIFIER, full_stop.token()); + full_stop.Fail(); + CHECK_EQ(i::Token::IDENTIFIER, full_stop.token()); + full_stop.AddChar('f'); + CHECK_EQ(i::Token::IDENTIFIER, full_stop.token()); +} + diff --git a/deps/v8/test/cctest/test-regexp.cc b/deps/v8/test/cctest/test-regexp.cc index 81c2205204..6aa0730c0d 100644 --- a/deps/v8/test/cctest/test-regexp.cc +++ b/deps/v8/test/cctest/test-regexp.cc @@ -74,7 +74,7 @@ static SmartPointer<const char> Parse(const char* input) { static bool CheckSimple(const char* input) { V8::Initialize(NULL); v8::HandleScope scope; - unibrow::Utf8InputBuffer<> buffer(input, strlen(input)); + unibrow::Utf8InputBuffer<> buffer(input, StrLength(input)); ZoneScope zone_scope(DELETE_ON_EXIT); FlatStringReader reader(CStrVector(input)); RegExpCompileData result; @@ -92,7 +92,7 @@ struct MinMaxPair { static MinMaxPair CheckMinMaxMatch(const char* input) { V8::Initialize(NULL); v8::HandleScope scope; - unibrow::Utf8InputBuffer<> buffer(input, strlen(input)); + unibrow::Utf8InputBuffer<> buffer(input, StrLength(input)); ZoneScope zone_scope(DELETE_ON_EXIT); FlatStringReader reader(CStrVector(input)); RegExpCompileData result; @@ -1466,7 +1466,7 @@ static void TestRangeCaseIndependence(CharacterRange input, ZoneScope zone_scope(DELETE_ON_EXIT); int count = expected.length(); ZoneList<CharacterRange>* list = new ZoneList<CharacterRange>(count); - input.AddCaseEquivalents(list); + input.AddCaseEquivalents(list, false); CHECK_EQ(count, list->length()); for (int i = 0; i < list->length(); i++) { CHECK_EQ(expected[i].from(), list->at(i).from()); diff --git a/deps/v8/test/cctest/test-serialize.cc b/deps/v8/test/cctest/test-serialize.cc index 01e07157a1..9ed487450d 100644 --- a/deps/v8/test/cctest/test-serialize.cc +++ b/deps/v8/test/cctest/test-serialize.cc @@ -123,13 +123,17 @@ TEST(ExternalReferenceEncoder) { ExternalReference::the_hole_value_location(); 
CHECK_EQ(make_code(UNCLASSIFIED, 2), encoder.Encode(the_hole_value_location.address())); - ExternalReference stack_guard_limit_address = - ExternalReference::address_of_stack_guard_limit(); + ExternalReference stack_limit_address = + ExternalReference::address_of_stack_limit(); CHECK_EQ(make_code(UNCLASSIFIED, 4), - encoder.Encode(stack_guard_limit_address.address())); - CHECK_EQ(make_code(UNCLASSIFIED, 10), + encoder.Encode(stack_limit_address.address())); + ExternalReference real_stack_limit_address = + ExternalReference::address_of_real_stack_limit(); + CHECK_EQ(make_code(UNCLASSIFIED, 5), + encoder.Encode(real_stack_limit_address.address())); + CHECK_EQ(make_code(UNCLASSIFIED, 11), encoder.Encode(ExternalReference::debug_break().address())); - CHECK_EQ(make_code(UNCLASSIFIED, 6), + CHECK_EQ(make_code(UNCLASSIFIED, 7), encoder.Encode(ExternalReference::new_space_start().address())); CHECK_EQ(make_code(UNCLASSIFIED, 3), encoder.Encode(ExternalReference::roots_address().address())); @@ -158,102 +162,44 @@ TEST(ExternalReferenceDecoder) { decoder.Decode(make_code(UNCLASSIFIED, 1))); CHECK_EQ(ExternalReference::the_hole_value_location().address(), decoder.Decode(make_code(UNCLASSIFIED, 2))); - CHECK_EQ(ExternalReference::address_of_stack_guard_limit().address(), + CHECK_EQ(ExternalReference::address_of_stack_limit().address(), decoder.Decode(make_code(UNCLASSIFIED, 4))); + CHECK_EQ(ExternalReference::address_of_real_stack_limit().address(), + decoder.Decode(make_code(UNCLASSIFIED, 5))); CHECK_EQ(ExternalReference::debug_break().address(), - decoder.Decode(make_code(UNCLASSIFIED, 10))); + decoder.Decode(make_code(UNCLASSIFIED, 11))); CHECK_EQ(ExternalReference::new_space_start().address(), - decoder.Decode(make_code(UNCLASSIFIED, 6))); + decoder.Decode(make_code(UNCLASSIFIED, 7))); } static void Serialize() { -#ifdef DEBUG - FLAG_debug_serialization = true; -#endif - StatsTable::SetCounterFunction(counter_function); - - v8::HandleScope scope; - const int 
kExtensionCount = 1; - const char* extension_list[kExtensionCount] = { "v8/gc" }; - v8::ExtensionConfiguration extensions(kExtensionCount, extension_list); - Serializer::Enable(); - v8::Persistent<v8::Context> env = v8::Context::New(&extensions); - env->Enter(); - - Snapshot::WriteToFile(FLAG_testing_serialization_file); -} - - -static void Serialize2() { - Serializer::Enable(); // We have to create one context. One reason for this is so that the builtins // can be loaded from v8natives.js and their addresses can be processed. This // will clear the pending fixups array, which would otherwise contain GC roots // that would confuse the serialization/deserialization process. v8::Persistent<v8::Context> env = v8::Context::New(); env.Dispose(); - Snapshot::WriteToFile2(FLAG_testing_serialization_file); -} - - -// Test that the whole heap can be serialized when running from the -// internal snapshot. -// (Smoke test.) -TEST(SerializeInternal) { - Snapshot::Initialize(NULL); - Serialize(); -} - - -// Test that the whole heap can be serialized when running from a -// bootstrapped heap. -// (Smoke test.) -TEST(Serialize) { - if (Snapshot::IsEnabled()) return; - Serialize(); + Snapshot::WriteToFile(FLAG_testing_serialization_file); } // Test that the whole heap can be serialized. -TEST(Serialize2) { +TEST(Serialize) { + Serializer::Enable(); v8::V8::Initialize(); - Serialize2(); + Serialize(); } -// Test that the heap isn't destroyed after a serialization. 
-TEST(SerializeNondestructive) { - if (Snapshot::IsEnabled()) return; - StatsTable::SetCounterFunction(counter_function); - v8::HandleScope scope; - Serializer::Enable(); - v8::Persistent<v8::Context> env = v8::Context::New(); - v8::Context::Scope context_scope(env); - Serializer().Serialize(); - const char* c_source = "\"abcd\".charAt(2) == 'c'"; - v8::Local<v8::String> source = v8::String::New(c_source); - v8::Local<v8::Script> script = v8::Script::Compile(source); - v8::Local<v8::Value> value = script->Run(); - CHECK(value->BooleanValue()); -} - //---------------------------------------------------------------------------- // Tests that the heap can be deserialized. static void Deserialize() { -#ifdef DEBUG - FLAG_debug_serialization = true; -#endif CHECK(Snapshot::Initialize(FLAG_testing_serialization_file)); } -static void Deserialize2() { - CHECK(Snapshot::Initialize2(FLAG_testing_serialization_file)); -} - - static void SanityCheck() { v8::HandleScope scope; #ifdef DEBUG @@ -272,15 +218,6 @@ DEPENDENT_TEST(Deserialize, Serialize) { Deserialize(); - SanityCheck(); -} - - -DEPENDENT_TEST(Deserialize2, Serialize2) { - v8::HandleScope scope; - - Deserialize2(); - fflush(stdout); v8::Persistent<v8::Context> env = v8::Context::New(); @@ -290,23 +227,11 @@ DEPENDENT_TEST(Deserialize2, Serialize2) { } -DEPENDENT_TEST(DeserializeAndRunScript, Serialize) { +DEPENDENT_TEST(DeserializeAndRunScript2, Serialize) { v8::HandleScope scope; Deserialize(); - const char* c_source = "\"1234\".length"; - v8::Local<v8::String> source = v8::String::New(c_source); - v8::Local<v8::Script> script = v8::Script::Compile(source); - CHECK_EQ(4, script->Run()->Int32Value()); -} - - -DEPENDENT_TEST(DeserializeAndRunScript2, Serialize2) { - v8::HandleScope scope; - - Deserialize2(); - v8::Persistent<v8::Context> env = v8::Context::New(); env->Enter(); @@ -317,31 +242,6 @@ DEPENDENT_TEST(DeserializeAndRunScript2, Serialize2) { } -DEPENDENT_TEST(DeserializeNatives, Serialize) { - 
v8::HandleScope scope; - - Deserialize(); - - const char* c_source = "\"abcd\".charAt(2) == 'c'"; - v8::Local<v8::String> source = v8::String::New(c_source); - v8::Local<v8::Script> script = v8::Script::Compile(source); - v8::Local<v8::Value> value = script->Run(); - CHECK(value->BooleanValue()); -} - - -DEPENDENT_TEST(DeserializeExtensions, Serialize) { - v8::HandleScope scope; - - Deserialize(); - const char* c_source = "gc();"; - v8::Local<v8::String> source = v8::String::New(c_source); - v8::Local<v8::Script> script = v8::Script::Compile(source); - v8::Local<v8::Value> value = script->Run(); - CHECK(value->IsUndefined()); -} - - TEST(TestThatAlwaysSucceeds) { } diff --git a/deps/v8/test/cctest/test-strings.cc b/deps/v8/test/cctest/test-strings.cc index bb9a6f99af..0e9bf7a063 100644 --- a/deps/v8/test/cctest/test-strings.cc +++ b/deps/v8/test/cctest/test-strings.cc @@ -241,17 +241,6 @@ TEST(Traverse) { printf("6\n"); TraverseFirst(left_asymmetric, right_deep_asymmetric, 65536); printf("7\n"); - Handle<String> right_deep_slice = - Factory::NewStringSlice(left_deep_asymmetric, - left_deep_asymmetric->length() - 1050, - left_deep_asymmetric->length() - 50); - Handle<String> left_deep_slice = - Factory::NewStringSlice(right_deep_asymmetric, - right_deep_asymmetric->length() - 1050, - right_deep_asymmetric->length() - 50); - printf("8\n"); - Traverse(right_deep_slice, left_deep_slice); - printf("9\n"); FlattenString(left_asymmetric); printf("10\n"); Traverse(flat, left_asymmetric); @@ -269,60 +258,6 @@ TEST(Traverse) { } -static Handle<String> SliceOf(Handle<String> underlying) { - int start = gen() % underlying->length(); - int end = start + gen() % (underlying->length() - start); - return Factory::NewStringSlice(underlying, - start, - end); -} - - -static Handle<String> ConstructSliceTree( - Handle<String> building_blocks[NUMBER_OF_BUILDING_BLOCKS], - int from, - int to) { - CHECK(to > from); - if (to - from <= 1) - return SliceOf(building_blocks[from % 
NUMBER_OF_BUILDING_BLOCKS]); - if (to - from == 2) { - Handle<String> lhs = building_blocks[from % NUMBER_OF_BUILDING_BLOCKS]; - if (gen() % 2 == 0) - lhs = SliceOf(lhs); - Handle<String> rhs = building_blocks[(from+1) % NUMBER_OF_BUILDING_BLOCKS]; - if (gen() % 2 == 0) - rhs = SliceOf(rhs); - return Factory::NewConsString(lhs, rhs); - } - Handle<String> part1 = - ConstructBalancedHelper(building_blocks, from, from + ((to - from) / 2)); - Handle<String> part2 = - ConstructBalancedHelper(building_blocks, from + ((to - from) / 2), to); - Handle<String> branch = Factory::NewConsString(part1, part2); - if (gen() % 2 == 0) - return branch; - return(SliceOf(branch)); -} - - -TEST(Slice) { - printf("TestSlice\n"); - InitializeVM(); - v8::HandleScope scope; - Handle<String> building_blocks[NUMBER_OF_BUILDING_BLOCKS]; - ZoneScope zone(DELETE_ON_EXIT); - InitializeBuildingBlocks(building_blocks); - - seed = 42; - Handle<String> slice_tree = - ConstructSliceTree(building_blocks, 0, DEEP_DEPTH); - seed = 42; - Handle<String> flat_slice_tree = - ConstructSliceTree(building_blocks, 0, DEEP_DEPTH); - FlattenString(flat_slice_tree); - Traverse(flat_slice_tree, slice_tree); -} - static const int DEEP_ASCII_DEPTH = 100000; @@ -357,8 +292,10 @@ TEST(Utf8Conversion) { v8::HandleScope handle_scope; // A simple ascii string const char* ascii_string = "abcdef12345"; - int len = v8::String::New(ascii_string, strlen(ascii_string))->Utf8Length(); - CHECK_EQ(strlen(ascii_string), len); + int len = + v8::String::New(ascii_string, + StrLength(ascii_string))->Utf8Length(); + CHECK_EQ(StrLength(ascii_string), len); // A mixed ascii and non-ascii string // U+02E4 -> CB A4 // U+0064 -> 64 @@ -414,105 +351,3 @@ class TwoByteResource: public v8::String::ExternalStringResource { size_t length_; bool* destructed_; }; - - -// Regression test case for http://crbug.com/9746. 
The problem was -// that when we marked objects reachable only through weak pointers, -// we ended up keeping a sliced symbol alive, even though we already -// invoked the weak callback on the underlying external string thus -// deleting its resource. -TEST(Regress9746) { - InitializeVM(); - - // Setup lengths that guarantee we'll get slices instead of simple - // flat strings. - static const int kFullStringLength = String::kMinNonFlatLength * 2; - static const int kSliceStringLength = String::kMinNonFlatLength + 1; - - uint16_t* source = new uint16_t[kFullStringLength]; - for (int i = 0; i < kFullStringLength; i++) source[i] = '1'; - char* key = new char[kSliceStringLength]; - for (int i = 0; i < kSliceStringLength; i++) key[i] = '1'; - Vector<const char> key_vector(key, kSliceStringLength); - - // Allocate an external string resource that keeps track of when it - // is destructed. - bool resource_destructed = false; - TwoByteResource* resource = - new TwoByteResource(source, kFullStringLength, &resource_destructed); - - { - v8::HandleScope scope; - - // Allocate an external string resource and external string. We - // have to go through the API to get the weak handle and the - // automatic destruction going. - Handle<String> string = - v8::Utils::OpenHandle(*v8::String::NewExternal(resource)); - - // Create a slice of the external string. - Handle<String> slice = - Factory::NewStringSlice(string, 0, kSliceStringLength); - CHECK_EQ(kSliceStringLength, slice->length()); - CHECK(StringShape(*slice).IsSliced()); - - // Make sure the slice ends up in old space so we can morph it - // into a symbol. - while (Heap::InNewSpace(*slice)) { - Heap::PerformScavenge(); - } - - // Force the slice into the symbol table. 
- slice = Factory::SymbolFromString(slice); - CHECK(slice->IsSymbol()); - CHECK(StringShape(*slice).IsSliced()); - - Handle<String> buffer(Handle<SlicedString>::cast(slice)->buffer()); - CHECK(StringShape(*buffer).IsExternal()); - CHECK(buffer->IsTwoByteRepresentation()); - - // Finally, base a script on the slice of the external string and - // get its wrapper. This allocates yet another weak handle that - // indirectly refers to the external string. - Handle<Script> script = Factory::NewScript(slice); - Handle<JSObject> wrapper = GetScriptWrapper(script); - } - - // When we collect all garbage, we cannot get rid of the sliced - // symbol entry in the symbol table because it is used by the script - // kept alive by the weak wrapper. Make sure we don't destruct the - // external string. - Heap::CollectAllGarbage(false); - CHECK(!resource_destructed); - - { - v8::HandleScope scope; - - // Make sure the sliced symbol is still in the table. - Handle<String> symbol = Factory::LookupSymbol(key_vector); - CHECK(StringShape(*symbol).IsSliced()); - - // Make sure the buffer is still a two-byte external string. - Handle<String> buffer(Handle<SlicedString>::cast(symbol)->buffer()); - CHECK(StringShape(*buffer).IsExternal()); - CHECK(buffer->IsTwoByteRepresentation()); - } - - // Forcing another garbage collection should let us get rid of the - // slice from the symbol table. The external string remains in the - // heap until the next GC. - Heap::CollectAllGarbage(false); - CHECK(!resource_destructed); - v8::HandleScope scope; - Handle<String> key_string = Factory::NewStringFromAscii(key_vector); - String* out; - CHECK(!Heap::LookupSymbolIfExists(*key_string, &out)); - - // Forcing yet another garbage collection must allow us to finally - // get rid of the external string. 
- Heap::CollectAllGarbage(false); - CHECK(resource_destructed); - - delete[] source; - delete[] key; -} diff --git a/deps/v8/test/cctest/test-utils.cc b/deps/v8/test/cctest/test-utils.cc index ffcaf8abc0..1d65e686e4 100644 --- a/deps/v8/test/cctest/test-utils.cc +++ b/deps/v8/test/cctest/test-utils.cc @@ -166,7 +166,7 @@ TEST(SNPrintF) { // Make sure that strings that are truncated because of too small // buffers are zero-terminated anyway. const char* s = "the quick lazy .... oh forget it!"; - int length = strlen(s); + int length = StrLength(s); for (int i = 1; i < length * 2; i++) { static const char kMarker = static_cast<char>(42); Vector<char> buffer = Vector<char>::New(i + 1); @@ -177,9 +177,9 @@ TEST(SNPrintF) { CHECK_EQ(0, strncmp(buffer.start(), s, i - 1)); CHECK_EQ(kMarker, buffer[i]); if (i <= length) { - CHECK_EQ(i - 1, strlen(buffer.start())); + CHECK_EQ(i - 1, StrLength(buffer.start())); } else { - CHECK_EQ(length, strlen(buffer.start())); + CHECK_EQ(length, StrLength(buffer.start())); } buffer.Dispose(); } diff --git a/deps/v8/src/location.h b/deps/v8/test/mjsunit/compiler/function-call.js index 26b1a09e9c..b2e0702a75 100644 --- a/deps/v8/src/location.h +++ b/deps/v8/test/mjsunit/compiler/function-call.js @@ -25,38 +25,28 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -#ifndef V8_LOCATION_H_ -#define V8_LOCATION_H_ +// Test of function calls. -#include "utils.h" +function f(x) { return x; } -namespace v8 { -namespace internal { +var a; -class Location BASE_EMBEDDED { - public: - enum Type { - kUninitialized, - kEffect, - kValue - }; +// Call on global object. +a = f(8); +assertEquals(8, a); - static Location Uninitialized() { return Location(kUninitialized); } - static Location Effect() { return Location(kEffect); } - static Location Value() { return Location(kValue); } +// Call on a named property. 
+var b; +b = {x:f}; +a = b.x(9); +assertEquals(9, a); - bool is_effect() { return type_ == kEffect; } - bool is_value() { return type_ == kValue; } +// Call on a keyed property. +c = "x"; +a = b[c](10); +assertEquals(10, a); - Type type() { return type_; } - - private: - explicit Location(Type type) : type_(type) {} - - Type type_; -}; - - -} } // namespace v8::internal - -#endif // V8_LOCATION_H_ +// Call on a function expression +function g() { return f; } +a = g()(8); +assertEquals(8, a); diff --git a/deps/v8/test/mjsunit/compiler/globals.js b/deps/v8/test/mjsunit/compiler/globals.js index 066f9277b3..0abd5dd33c 100644 --- a/deps/v8/test/mjsunit/compiler/globals.js +++ b/deps/v8/test/mjsunit/compiler/globals.js @@ -53,3 +53,13 @@ assertEquals("2", eval('g')); // Test a second load. g = 3; assertEquals(3, eval('g')); + +// Test postfix count operation +var t; +t = g++; +assertEquals(3, t); +assertEquals(4, g); + +code = "g--; 1"; +assertEquals(1, eval(code)); +assertEquals(3, g); diff --git a/deps/v8/test/mjsunit/compiler/loops.js b/deps/v8/test/mjsunit/compiler/loops.js new file mode 100644 index 0000000000..4de45e7f88 --- /dev/null +++ b/deps/v8/test/mjsunit/compiler/loops.js @@ -0,0 +1,35 @@ +// Copyright 2009 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Test compilation of loops. + +var n = 1; +for (var i = 1; (6 - i); i++) { + // Factorial! + n = n * i; +} +assertEquals(120, n); diff --git a/deps/v8/test/mjsunit/cyrillic.js b/deps/v8/test/mjsunit/cyrillic.js new file mode 100644 index 0000000000..c5712e6f9d --- /dev/null +++ b/deps/v8/test/mjsunit/cyrillic.js @@ -0,0 +1,199 @@ +// Copyright 2009 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Test Unicode character ranges in regexps. + + +// Cyrillic. +var cyrillic = { + FIRST: "\u0410", // A + first: "\u0430", // a + LAST: "\u042f", // YA + last: "\u044f", // ya + MIDDLE: "\u0427", // CHE + middle: "\u0447", // che + // Actually no characters are between the cases in Cyrillic. + BetweenCases: false}; + +var SIGMA = "\u03a3"; +var sigma = "\u03c3"; +var alternative_sigma = "\u03c2"; + +// Greek. +var greek = { + FIRST: "\u0391", // ALPHA + first: "\u03b1", // alpha + LAST: "\u03a9", // OMEGA + last: "\u03c9", // omega + MIDDLE: SIGMA, // SIGMA + middle: sigma, // sigma + // Epsilon acute is between ALPHA-OMEGA and alpha-omega, ie it + // is between OMEGA and alpha. + BetweenCases: "\u03ad"}; + + +function Range(from, to, flags) { + return new RegExp("[" + from + "-" + to + "]", flags); +} + +// Test Cyrillic and Greek separately. +for (var lang = 0; lang < 2; lang++) { + var chars = (lang == 0) ? cyrillic : greek; + + for (var i = 0; i < 2; i++) { + var lc = (i == 0); // Lower case. + var first = lc ? chars.first : chars.FIRST; + var middle = lc ? chars.middle : chars.MIDDLE; + var last = lc ? chars.last : chars.LAST; + var first_other_case = lc ? 
chars.FIRST : chars.first; + var middle_other_case = lc ? chars.MIDDLE : chars.middle; + var last_other_case = lc ? chars.LAST : chars.last; + + assertTrue(Range(first, last).test(first), 1); + assertTrue(Range(first, last).test(middle), 2); + assertTrue(Range(first, last).test(last), 3); + + assertFalse(Range(first, last).test(first_other_case), 4); + assertFalse(Range(first, last).test(middle_other_case), 5); + assertFalse(Range(first, last).test(last_other_case), 6); + + assertTrue(Range(first, last, "i").test(first), 7); + assertTrue(Range(first, last, "i").test(middle), 8); + assertTrue(Range(first, last, "i").test(last), 9); + + assertTrue(Range(first, last, "i").test(first_other_case), 10); + assertTrue(Range(first, last, "i").test(middle_other_case), 11); + assertTrue(Range(first, last, "i").test(last_other_case), 12); + + if (chars.BetweenCases) { + assertFalse(Range(first, last).test(chars.BetweenCases), 13); + assertFalse(Range(first, last, "i").test(chars.BetweenCases), 14); + } + } + if (chars.BetweenCases) { + assertTrue(Range(chars.FIRST, chars.last).test(chars.BetweenCases), 15); + assertTrue(Range(chars.FIRST, chars.last, "i").test(chars.BetweenCases), 16); + } +} + +// Test range that covers both greek and cyrillic characters. +for (key in greek) { + assertTrue(Range(greek.FIRST, cyrillic.last).test(greek[key]), 17 + key); + if (cyrillic[key]) { + assertTrue(Range(greek.FIRST, cyrillic.last).test(cyrillic[key]), 18 + key); + } +} + +for (var i = 0; i < 2; i++) { + var ignore_case = (i == 0); + var flag = ignore_case ? 
"i" : ""; + assertTrue(Range(greek.first, cyrillic.LAST, flag).test(greek.first), 19); + assertTrue(Range(greek.first, cyrillic.LAST, flag).test(greek.middle), 20); + assertTrue(Range(greek.first, cyrillic.LAST, flag).test(greek.last), 21); + + assertTrue(Range(greek.first, cyrillic.LAST, flag).test(cyrillic.FIRST), 22); + assertTrue(Range(greek.first, cyrillic.LAST, flag).test(cyrillic.MIDDLE), 23); + assertTrue(Range(greek.first, cyrillic.LAST, flag).test(cyrillic.LAST), 24); + + // A range that covers the lower case greek letters and the upper case cyrillic + // letters. + assertEquals(ignore_case, Range(greek.first, cyrillic.LAST, flag).test(greek.FIRST), 25); + assertEquals(ignore_case, Range(greek.first, cyrillic.LAST, flag).test(greek.MIDDLE), 26); + assertEquals(ignore_case, Range(greek.first, cyrillic.LAST, flag).test(greek.LAST), 27); + + assertEquals(ignore_case, Range(greek.first, cyrillic.LAST, flag).test(cyrillic.first), 28); + assertEquals(ignore_case, Range(greek.first, cyrillic.LAST, flag).test(cyrillic.middle), 29); + assertEquals(ignore_case, Range(greek.first, cyrillic.LAST, flag).test(cyrillic.last), 30); +} + + +// Sigma is special because there are two lower case versions of the same upper +// case character. JS requires that case independece means that you should +// convert everything to upper case, so the two sigma variants are equal to each +// other in a case independt comparison. +for (var i = 0; i < 2; i++) { + var simple = (i != 0); + var name = simple ? "" : "[]"; + var regex = simple ? SIGMA : "[" + SIGMA + "]"; + + assertFalse(new RegExp(regex).test(sigma), 31 + name); + assertFalse(new RegExp(regex).test(alternative_sigma), 32 + name); + assertTrue(new RegExp(regex).test(SIGMA), 33 + name); + + assertTrue(new RegExp(regex, "i").test(sigma), 34 + name); + // JSC and Tracemonkey fail this one. 
+ assertTrue(new RegExp(regex, "i").test(alternative_sigma), 35 + name); + assertTrue(new RegExp(regex, "i").test(SIGMA), 36 + name); + + regex = simple ? sigma : "[" + sigma + "]"; + + assertTrue(new RegExp(regex).test(sigma), 41 + name); + assertFalse(new RegExp(regex).test(alternative_sigma), 42 + name); + assertFalse(new RegExp(regex).test(SIGMA), 43 + name); + + assertTrue(new RegExp(regex, "i").test(sigma), 44 + name); + // JSC and Tracemonkey fail this one. + assertTrue(new RegExp(regex, "i").test(alternative_sigma), 45 + name); + assertTrue(new RegExp(regex, "i").test(SIGMA), 46 + name); + + regex = simple ? alternative_sigma : "[" + alternative_sigma + "]"; + + assertFalse(new RegExp(regex).test(sigma), 51 + name); + assertTrue(new RegExp(regex).test(alternative_sigma), 52 + name); + assertFalse(new RegExp(regex).test(SIGMA), 53 + name); + + // JSC and Tracemonkey fail this one. + assertTrue(new RegExp(regex, "i").test(sigma), 54 + name); + assertTrue(new RegExp(regex, "i").test(alternative_sigma), 55 + name); + // JSC and Tracemonkey fail this one. + assertTrue(new RegExp(regex, "i").test(SIGMA), 56 + name); +} + + +for (var add_non_ascii_character_to_subject = 0; + add_non_ascii_character_to_subject < 2; + add_non_ascii_character_to_subject++) { + var suffix = add_non_ascii_character_to_subject ? "\ufffe" : ""; + // A range that covers both ASCII and non-ASCII. + for (var i = 0; i < 2; i++) { + var full = (i != 0); + var mixed = full ? "[a-\uffff]" : "[a-" + cyrillic.LAST + "]"; + var f = full ? "f" : "c"; + for (var j = 0; j < 2; j++) { + var ignore_case = (j == 0); + var flag = ignore_case ? 
"i" : ""; + var re = new RegExp(mixed, flag); + assertEquals(ignore_case || (full && add_non_ascii_character_to_subject), + re.test("A" + suffix), + 58 + flag + f); + assertTrue(re.test("a" + suffix), 59 + flag + f); + assertTrue(re.test("~" + suffix), 60 + flag + f); + assertTrue(re.test(cyrillic.MIDDLE), 61 + flag + f); + assertEquals(ignore_case || full, re.test(cyrillic.middle), 62 + flag + f); + } + } +} diff --git a/deps/v8/test/mjsunit/debug-stepnext-do-while.js b/deps/v8/test/mjsunit/debug-stepnext-do-while.js new file mode 100644 index 0000000000..17058a7b63 --- /dev/null +++ b/deps/v8/test/mjsunit/debug-stepnext-do-while.js @@ -0,0 +1,79 @@ +// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-debug-as debug
+// Get the Debug object exposed from the debug context global object.
+Debug = debug.Debug
+
+var exception = null;
+var break_break_point_hit_count = 0;
+
+function listener(event, exec_state, event_data, data) {
+ try {
+ if (event == Debug.DebugEvent.Break) {
+ if (break_break_point_hit_count == 0) {
+ assertEquals(' debugger;',
+ event_data.sourceLineText());
+ assertEquals('runDoWhile', event_data.func().name());
+ } else if (break_break_point_hit_count == 1) {
+ assertEquals(' } while(condition());',
+ event_data.sourceLineText());
+ assertEquals('runDoWhile', event_data.func().name());
+ }
+
+ break_break_point_hit_count++;
+ // Continue stepping until returned to bottom frame.
+ if (exec_state.frameCount() > 1) {
+ exec_state.prepareStep(Debug.StepAction.StepNext);
+ }
+
+ }
+ } catch(e) {
+ exception = e;
+ }
+};
+
+// Add the debug event listener.
+Debug.setListener(listener);
+
+function condition() {
+ return false;
+}
+
+function runDoWhile() {
+ do {
+ debugger;
+ } while(condition());
+};
+
+break_break_point_hit_count = 0;
+runDoWhile();
+assertNull(exception);
+assertEquals(4, break_break_point_hit_count);
+
+// Get rid of the debug event listener.
+Debug.setListener(null);
diff --git a/deps/v8/test/mjsunit/deep-recursion.js b/deps/v8/test/mjsunit/deep-recursion.js index a8093eb6be..588b5d6105 100644 --- a/deps/v8/test/mjsunit/deep-recursion.js +++ b/deps/v8/test/mjsunit/deep-recursion.js @@ -30,9 +30,7 @@ * cause stack overflows. */ -var depth = 110000; - -function newdeep(start) { +function newdeep(start, depth) { var d = start; for (var i = 0; i < depth; i++) { d = d + "f"; @@ -40,23 +38,27 @@ function newdeep(start) { return d; } -var deep = newdeep("foo"); +var default_depth = 110000; + +var deep = newdeep("foo", default_depth); assertEquals('f', deep[0]); -var cmp1 = newdeep("a"); -var cmp2 = newdeep("b"); +var cmp1 = newdeep("a", default_depth); +var cmp2 = newdeep("b", default_depth); assertEquals(-1, cmp1.localeCompare(cmp2), "ab"); -var cmp2empty = newdeep("c"); +var cmp2empty = newdeep("c", default_depth); assertTrue(cmp2empty.localeCompare("") > 0, "c"); -var cmp3empty = newdeep("d"); +var cmp3empty = newdeep("d", default_depth); assertTrue("".localeCompare(cmp3empty) < 0), "d"; -var slicer = newdeep("slice"); +var slicer_depth = 1100; + +var slicer = newdeep("slice", slicer_depth); -for (i = 0; i < depth + 4; i += 2) { +for (i = 0; i < slicer_depth + 4; i += 2) { slicer = slicer.slice(1, -1); } diff --git a/deps/v8/test/mjsunit/eval-typeof-non-existing.js b/deps/v8/test/mjsunit/eval-typeof-non-existing.js index 3513767db7..8cc6d0bcd3 100644 --- a/deps/v8/test/mjsunit/eval-typeof-non-existing.js +++ b/deps/v8/test/mjsunit/eval-typeof-non-existing.js @@ -25,8 +25,11 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// Typeof expression must resolve to undefined when it used on a +// Typeof expression must resolve to 'undefined' when it used on a // non-existing property. It is *not* allowed to throw a // ReferenceError. 
assertEquals('undefined', typeof xxx); assertEquals('undefined', eval('typeof xxx')); + +assertThrows('typeof(true ? xxx : yyy)', ReferenceError); +assertThrows('with ({}) { typeof(true ? xxx : yyy) }', ReferenceError); diff --git a/deps/v8/test/mjsunit/fuzz-natives.js b/deps/v8/test/mjsunit/fuzz-natives.js index cdf58a559a..f495c72787 100644 --- a/deps/v8/test/mjsunit/fuzz-natives.js +++ b/deps/v8/test/mjsunit/fuzz-natives.js @@ -129,7 +129,9 @@ var knownProblems = { "Log": true, "DeclareGlobals": true, - "CollectStackTrace": true + "CollectStackTrace": true, + "PromoteScheduledException": true, + "DeleteHandleScopeExtensions": true }; var currentlyUncallable = { diff --git a/deps/v8/test/mjsunit/mjsunit.js b/deps/v8/test/mjsunit/mjsunit.js index 1fb3f02afb..8ced0119fe 100644 --- a/deps/v8/test/mjsunit/mjsunit.js +++ b/deps/v8/test/mjsunit/mjsunit.js @@ -75,6 +75,9 @@ function deepEquals(a, b) { if (typeof a == "number" && typeof b == "number" && isNaN(a) && isNaN(b)) { return true; } + if (a.constructor === RegExp || b.constructor === RegExp) { + return (a.constructor === b.constructor) && (a.toString === b.toString); + } if ((typeof a) !== 'object' || (typeof b) !== 'object' || (a === null) || (b === null)) return false; diff --git a/deps/v8/test/mjsunit/mjsunit.status b/deps/v8/test/mjsunit/mjsunit.status index 15f62b0697..8eb59b7e2f 100644 --- a/deps/v8/test/mjsunit/mjsunit.status +++ b/deps/v8/test/mjsunit/mjsunit.status @@ -34,11 +34,18 @@ bugs: FAIL # too long to run in debug mode on ARM. fuzz-natives: PASS, SKIP if ($mode == release || $arch == arm) +# Issue 494: new snapshot code breaks mjsunit/apply on mac debug snapshot. +apply: PASS, FAIL if ($system == macos && $mode == debug) + big-object-literal: PASS, SKIP if ($arch == arm) # Issue 488: this test sometimes times out. array-constructor: PASS || TIMEOUT +# Very slow on ARM, contains no architecture dependent code. 
+unicode-case-overoptimization: PASS, TIMEOUT if ($arch == arm) + + [ $arch == arm ] # Slow tests which times out in debug mode. @@ -49,9 +56,9 @@ array-constructor: PASS, SKIP if $mode == debug # Flaky test that can hit compilation-time stack overflow in debug mode. unicode-test: PASS, (PASS || FAIL) if $mode == debug -# Bug number 130 http://code.google.com/p/v8/issues/detail?id=130 -# Fails on real ARM hardware but not on the simulator. -string-compare-alignment: PASS || FAIL - # Times out often in release mode on ARM. array-splice: PASS || TIMEOUT + +# Skip long running test in debug mode on ARM. +string-indexof-2: PASS, SKIP if $mode == debug + diff --git a/deps/v8/test/mjsunit/parse-int-float.js b/deps/v8/test/mjsunit/parse-int-float.js index ad2275e661..b9620ff6c7 100644 --- a/deps/v8/test/mjsunit/parse-int-float.js +++ b/deps/v8/test/mjsunit/parse-int-float.js @@ -36,9 +36,12 @@ assertEquals(-63, parseInt(' -077')); assertEquals(3, parseInt('11', 2)); assertEquals(4, parseInt('11', 3)); +assertEquals(4, parseInt('11', 3.8)); assertEquals(0x12, parseInt('0x12')); assertEquals(0x12, parseInt('0x12', 16)); +assertEquals(0x12, parseInt('0x12', 16.1)); +assertEquals(0x12, parseInt('0x12', NaN)); assertEquals(12, parseInt('12aaa')); diff --git a/deps/v8/test/mjsunit/regress/regress-124.js b/deps/v8/test/mjsunit/regress/regress-124.js index 81526b0edd..0b3aae5358 100644 --- a/deps/v8/test/mjsunit/regress/regress-124.js +++ b/deps/v8/test/mjsunit/regress/regress-124.js @@ -48,9 +48,9 @@ function F(f) { assertEquals("[object global]", eval("f()")); // Receiver should be the arguments object here. 
- assertEquals("[object Object]", eval("arguments[0]()")); + assertEquals("[object Arguments]", eval("arguments[0]()")); with (arguments) { - assertEquals("[object Object]", toString()); + assertEquals("[object Arguments]", toString()); } } diff --git a/deps/v8/test/mjsunit/regress/regress-2249423.js b/deps/v8/test/mjsunit/regress/regress-2249423.js new file mode 100644 index 0000000000..a590f33f68 --- /dev/null +++ b/deps/v8/test/mjsunit/regress/regress-2249423.js @@ -0,0 +1,40 @@ +// Copyright 2009 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// See http://code.google.com/p/chromium/issues/detail?id=27227 +// Regression test for stack corruption issue. + +function top() { + function g(a, b) {} + function t() { + for (var i=0; i<1; ++i) { + g(32768, g()); + } + } + t(); +} +top(); diff --git a/deps/v8/test/mjsunit/regress/regress-486.js b/deps/v8/test/mjsunit/regress/regress-486.js new file mode 100644 index 0000000000..c1e29a6362 --- /dev/null +++ b/deps/v8/test/mjsunit/regress/regress-486.js @@ -0,0 +1,30 @@ +// Copyright 2009 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +var st = "\u0422\u0435\u0441\u0442"; // Test in Cyrillic characters. +var cyrillicMatch = /^[\u0430-\u044fa-z]+$/i.test(st); // a-ja a-z. +assertTrue(cyrillicMatch); diff --git a/deps/v8/test/mjsunit/regress/regress-490.js b/deps/v8/test/mjsunit/regress/regress-490.js new file mode 100644 index 0000000000..8dd8959171 --- /dev/null +++ b/deps/v8/test/mjsunit/regress/regress-490.js @@ -0,0 +1,48 @@ +// Copyright 2009 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// See: http://code.google.com/p/v8/issues/detail?id=490 + +var kXXX = 11 +// Build a string longer than 2^11. See StringBuilderConcatHelper and +// Runtime_StringBuilderConcat in runtime.cc and +// ReplaceResultBuilder.prototype.addSpecialSlice in string.js. +var a = ''; +while (a.length < (2 << 11)) { a+= 'x'; } + +// Test specific for bug introduced in r3153. +a.replace(/^(.*)/, '$1$1$1'); + +// More generalized test. +for (var i = 0; i < 10; i++) { + var b = ''; + for (var j = 0; j < 10; j++) { + b += '$1'; + a.replace(/^(.*)/, b); + } + a += a; +} diff --git a/deps/v8/test/mjsunit/regress/regress-491.js b/deps/v8/test/mjsunit/regress/regress-491.js new file mode 100644 index 0000000000..2cf5e20ed6 --- /dev/null +++ b/deps/v8/test/mjsunit/regress/regress-491.js @@ -0,0 +1,47 @@ +// Copyright 2009 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// See: http://code.google.com/p/v8/issues/detail?id=491 +// This should not hit any asserts in debug mode on ARM. + +function function_with_n_strings(n) { + var source = '(function f(){'; + for (var i = 0; i < n; i++) { + if (i != 0) source += ';'; + source += '"x"'; + } + source += '})()'; + eval(source); +} + +var i; +for (i = 500; i < 600; i++) { + function_with_n_strings(i); +} +for (i = 1100; i < 1200; i++) { + function_with_n_strings(i); +} diff --git a/deps/v8/test/mjsunit/regress/regress-492.js b/deps/v8/test/mjsunit/regress/regress-492.js new file mode 100644 index 0000000000..a8b783b301 --- /dev/null +++ b/deps/v8/test/mjsunit/regress/regress-492.js @@ -0,0 +1,52 @@ +// Copyright 2009 the V8 project authors. All rights reserved. 
+// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// See: http://code.google.com/p/v8/issues/detail?id=492 +// This should not hit any asserts in debug mode on ARM. 
+ +function function_with_n_args(n) { + var source = '(function f('; + for (var arg = 0; arg < n; arg++) { + if (arg != 0) source += ','; + source += 'arg' + arg; + } + source += ') { })()'; + eval(source); +} + +var args; +for (args = 250; args < 270; args++) { + function_with_n_args(args); +} + +for (args = 500; args < 520; args++) { + function_with_n_args(args); +} + +for (args = 1019; args < 1041; args++) { + function_with_n_args(args); +} diff --git a/deps/v8/test/mjsunit/regress/regress-496.js b/deps/v8/test/mjsunit/regress/regress-496.js new file mode 100644 index 0000000000..33c1a677eb --- /dev/null +++ b/deps/v8/test/mjsunit/regress/regress-496.js @@ -0,0 +1,39 @@ +// Copyright 2009 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Regression test for http://code.google.com/p/v8/issues/detail?id=496. +// +// Tests that we do not treat the unaliased eval call in g as an +// aliased call to eval. + +function h() { + function f() { return eval; } + function g() { var x = 44; return eval("x"); } + assertEquals(44, g()); +} + +h(); diff --git a/deps/v8/test/mjsunit/regress/regress-502.js b/deps/v8/test/mjsunit/regress/regress-502.js new file mode 100644 index 0000000000..d3c9381da8 --- /dev/null +++ b/deps/v8/test/mjsunit/regress/regress-502.js @@ -0,0 +1,38 @@ +// Copyright 2009 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Regression test for http://code.google.com/p/v8/issues/detail?id=502. +// +// Test that we do not generate an inlined version of the constructor +// function C. + +var X = 'x'; +function C() { this[X] = 42; } +var a = new C(); +var b = new C(); +assertEquals(42, a.x); +assertEquals(42, b.x); diff --git a/deps/v8/test/mjsunit/regress/regress-503.js b/deps/v8/test/mjsunit/regress/regress-503.js new file mode 100644 index 0000000000..5b156b27cc --- /dev/null +++ b/deps/v8/test/mjsunit/regress/regress-503.js @@ -0,0 +1,63 @@ +// Copyright 2009 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. 
nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +assertTrue(undefined == undefined, 1); +assertFalse(undefined <= undefined, 2); +assertFalse(undefined >= undefined, 3); +assertFalse(undefined < undefined, 4); +assertFalse(undefined > undefined, 5); + +assertTrue(null == null, 6); +assertTrue(null <= null, 7); +assertTrue(null >= null, 8); +assertFalse(null < null, 9); +assertFalse(null > null, 10); + +assertTrue(void 0 == void 0, 11); +assertFalse(void 0 <= void 0, 12); +assertFalse(void 0 >= void 0, 13); +assertFalse(void 0 < void 0, 14); +assertFalse(void 0 > void 0, 15); + +var x = void 0; + +assertTrue(x == x, 16); +assertFalse(x <= x, 17); +assertFalse(x >= x, 18); +assertFalse(x < x, 19); +assertFalse(x > x, 20); + +var not_undefined = [null, 0, 1, 1/0, -1/0, "", true, false]; +for (var i = 0; i < not_undefined.length; i++) { + x = not_undefined[i]; + + assertTrue(x == x, "" + 21 + x); + assertTrue(x <= x, "" + 22 + x); + assertTrue(x >= x, "" + 23 + x); + assertFalse(x < x, "" + 24 + x); + assertFalse(x > x, "" + 25 + x); +} diff --git 
a/deps/v8/test/mjsunit/string-charcodeat.js b/deps/v8/test/mjsunit/string-charcodeat.js index f66dd3ef68..3927557752 100644 --- a/deps/v8/test/mjsunit/string-charcodeat.js +++ b/deps/v8/test/mjsunit/string-charcodeat.js @@ -30,7 +30,7 @@ */ function Cons() { - return "Te" + "st"; + return "Te" + "st testing 123"; } @@ -38,22 +38,22 @@ function Deep() { var a = "T"; a += "e"; a += "s"; - a += "t"; + a += "ting testing 123"; return a; } function Slice() { - return "testing Testing".substring(8, 12); + return "testing Testing testing 123456789012345".substring(8, 22); } function Flat() { - return "Test"; + return "Testing testing 123"; } function Cons16() { - return "Te" + "\u1234t"; + return "Te" + "\u1234t testing 123"; } @@ -61,18 +61,18 @@ function Deep16() { var a = "T"; a += "e"; a += "\u1234"; - a += "t"; + a += "ting testing 123"; return a; } function Slice16Beginning() { - return "Te\u1234t test".substring(0, 4); + return "Te\u1234t testing testing 123".substring(0, 14); } function Slice16Middle() { - return "test Te\u1234t test".substring(5, 9); + return "test Te\u1234t testing testing 123".substring(5, 19); } @@ -82,7 +82,7 @@ function Slice16End() { function Flat16() { - return "Te\u1234t"; + return "Te\u1234ting testing 123"; } @@ -108,32 +108,35 @@ function NotAString16() { function TestStringType(generator, sixteen) { var g = generator; - assertTrue(isNaN(g().charCodeAt(-1e19))); - assertTrue(isNaN(g().charCodeAt(-0x80000001))); - assertTrue(isNaN(g().charCodeAt(-0x80000000))); - assertTrue(isNaN(g().charCodeAt(-0x40000000))); - assertTrue(isNaN(g().charCodeAt(-1))); - assertTrue(isNaN(g().charCodeAt(4))); - assertTrue(isNaN(g().charCodeAt(5))); - assertTrue(isNaN(g().charCodeAt(0x3fffffff))); - assertTrue(isNaN(g().charCodeAt(0x7fffffff))); - assertTrue(isNaN(g().charCodeAt(0x80000000))); - assertTrue(isNaN(g().charCodeAt(1e9))); - assertEquals(84, g().charCodeAt(0)); - assertEquals(84, g().charCodeAt("test")); - assertEquals(84, g().charCodeAt("")); - 
assertEquals(84, g().charCodeAt(null)); - assertEquals(84, g().charCodeAt(undefined)); - assertEquals(84, g().charCodeAt()); - assertEquals(84, g().charCodeAt(void 0)); - assertEquals(84, g().charCodeAt(false)); - assertEquals(101, g().charCodeAt(true)); - assertEquals(101, g().charCodeAt(1)); - assertEquals(sixteen ? 0x1234 : 115, g().charCodeAt(2)); - assertEquals(116, g().charCodeAt(3)); - assertEquals(101, g().charCodeAt(1.1)); - assertEquals(sixteen ? 0x1234 : 115, g().charCodeAt(2.1718)); - assertEquals(116, g().charCodeAt(3.14159)); + var len = g().toString().length; + var t = sixteen ? "t" : "f" + t += generator.name; + assertTrue(isNaN(g().charCodeAt(-1e19)), 1 + t); + assertTrue(isNaN(g().charCodeAt(-0x80000001)), 2 + t); + assertTrue(isNaN(g().charCodeAt(-0x80000000)), 3 + t); + assertTrue(isNaN(g().charCodeAt(-0x40000000)), 4 + t); + assertTrue(isNaN(g().charCodeAt(-1)), 5 + t); + assertTrue(isNaN(g().charCodeAt(len)), 6 + t); + assertTrue(isNaN(g().charCodeAt(len + 1)), 7 + t); + assertTrue(isNaN(g().charCodeAt(0x3fffffff)), 8 + t); + assertTrue(isNaN(g().charCodeAt(0x7fffffff)), 9 + t); + assertTrue(isNaN(g().charCodeAt(0x80000000)), 10 + t); + assertTrue(isNaN(g().charCodeAt(1e9)), 11 + t); + assertEquals(84, g().charCodeAt(0), 12 + t); + assertEquals(84, g().charCodeAt("test"), 13 + t); + assertEquals(84, g().charCodeAt(""), 14 + t); + assertEquals(84, g().charCodeAt(null), 15 + t); + assertEquals(84, g().charCodeAt(undefined), 16 + t); + assertEquals(84, g().charCodeAt(), 17 + t); + assertEquals(84, g().charCodeAt(void 0), 18 + t); + assertEquals(84, g().charCodeAt(false), 19 + t); + assertEquals(101, g().charCodeAt(true), 20 + t); + assertEquals(101, g().charCodeAt(1), 21 + t); + assertEquals(sixteen ? 0x1234 : 115, g().charCodeAt(2), 22 + t); + assertEquals(116, g().charCodeAt(3), 23 + t); + assertEquals(101, g().charCodeAt(1.1), 24 + t); + assertEquals(sixteen ? 
0x1234 : 115, g().charCodeAt(2.1718), 25 + t); + assertEquals(116, g().charCodeAt(3.14159), 26 + t); } @@ -157,10 +160,10 @@ function StupidThing() { this.charCodeAt = String.prototype.charCodeAt; } -assertEquals(52, new StupidThing().charCodeAt(0)); -assertEquals(50, new StupidThing().charCodeAt(1)); -assertTrue(isNaN(new StupidThing().charCodeAt(2))); -assertTrue(isNaN(new StupidThing().charCodeAt(-1))); +assertEquals(52, new StupidThing().charCodeAt(0), 27); +assertEquals(50, new StupidThing().charCodeAt(1), 28); +assertTrue(isNaN(new StupidThing().charCodeAt(2)), 29); +assertTrue(isNaN(new StupidThing().charCodeAt(-1)), 30); // Medium (>255) and long (>65535) strings. @@ -178,12 +181,12 @@ long += long + long + long; // 4096. long += long + long + long; // 16384. long += long + long + long; // 65536. -assertTrue(isNaN(medium.charCodeAt(-1))); -assertEquals(49, medium.charCodeAt(0)); -assertEquals(56, medium.charCodeAt(255)); -assertTrue(isNaN(medium.charCodeAt(256))); +assertTrue(isNaN(medium.charCodeAt(-1)), 31); +assertEquals(49, medium.charCodeAt(0), 32); +assertEquals(56, medium.charCodeAt(255), 33); +assertTrue(isNaN(medium.charCodeAt(256)), 34); -assertTrue(isNaN(long.charCodeAt(-1))); -assertEquals(49, long.charCodeAt(0)); -assertEquals(56, long.charCodeAt(65535)); -assertTrue(isNaN(long.charCodeAt(65536))); +assertTrue(isNaN(long.charCodeAt(-1)), 35); +assertEquals(49, long.charCodeAt(0), 36); +assertEquals(56, long.charCodeAt(65535), 37); +assertTrue(isNaN(long.charCodeAt(65536)), 38); diff --git a/deps/v8/test/mjsunit/string-indexof.js b/deps/v8/test/mjsunit/string-indexof-1.js index 2018da72f3..c7dcdb8352 100644 --- a/deps/v8/test/mjsunit/string-indexof.js +++ b/deps/v8/test/mjsunit/string-indexof-1.js @@ -97,46 +97,3 @@ assertEquals(1534, long.indexOf("AJABACA", 511), "Long AJABACA, Second J"); pattern = "JABACABADABACABA"; assertEquals(511, long.indexOf(pattern), "Long JABACABA..., First J"); assertEquals(1535, long.indexOf(pattern, 512), "Long 
JABACABA..., Second J"); - - -var lipsum = "lorem ipsum per se esse fugiendum. itaque aiunt hanc quasi " - + "naturalem atque insitam in animis nostris inesse notionem, ut " - + "alterum esse appetendum, alterum aspernandum sentiamus. Alii autem," - + " quibus ego assentior, cum a philosophis compluribus permulta " - + "dicantur, cur nec voluptas in bonis sit numeranda nec in malis " - + "dolor, non existimant oportere nimium nos causae confidere, sed et" - + " argumentandum et accurate disserendum et rationibus conquisitis de" - + " voluptate et dolore disputandum putant.\n" - + "Sed ut perspiciatis, unde omnis iste natus error sit voluptatem " - + "accusantium doloremque laudantium, totam rem aperiam eaque ipsa," - + "quae ab illo inventore veritatis et quasi architecto beatae vitae " - + "dicta sunt, explicabo. nemo enim ipsam voluptatem, quia voluptas" - + "sit, aspernatur aut odit aut fugit, sed quia consequuntur magni" - + " dolores eos, qui ratione voluptatem sequi nesciunt, neque porro" - + " quisquam est, qui dolorem ipsum, quia dolor sit, amet, " - + "consectetur, adipisci velit, sed quia non numquam eius modi" - + " tempora incidunt, ut labore et dolore magnam aliquam quaerat " - + "voluptatem. ut enim ad minima veniam, quis nostrum exercitationem " - + "ullam corporis suscipit laboriosam, nisi ut aliquid ex ea commodi " - + "consequatur? quis autem vel eum iure reprehenderit, qui in ea " - + "voluptate velit esse, quam nihil molestiae consequatur, vel illum, " - + "qui dolorem eum fugiat, quo voluptas nulla pariatur?\n"; - -assertEquals(893, lipsum.indexOf("lorem ipsum, quia dolor sit, amet"), - "Lipsum"); -// test a lot of substrings of differing length and start-position. -for(var i = 0; i < lipsum.length; i += 3) { - for(var len = 1; i + len < lipsum.length; len += 7) { - var substring = lipsum.substring(i, i + len); - var index = -1; - do { - index = lipsum.indexOf(substring, index + 1); - assertTrue(index != -1, - "Lipsum substring " + i + ".." 
+ (i + len-1) + " not found"); - assertEquals(lipsum.substring(index, index + len), substring, - "Wrong lipsum substring found: " + i + ".." + (i + len - 1) + "/" + - index + ".." + (index + len - 1)); - } while (index >= 0 && index < i); - assertEquals(i, index, "Lipsum match at " + i + ".." + (i + len - 1)); - } -} diff --git a/deps/v8/test/mjsunit/string-indexof-2.js b/deps/v8/test/mjsunit/string-indexof-2.js new file mode 100644 index 0000000000..a7c3f600a1 --- /dev/null +++ b/deps/v8/test/mjsunit/string-indexof-2.js @@ -0,0 +1,68 @@ +// Copyright 2008 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +var lipsum = "lorem ipsum per se esse fugiendum. itaque aiunt hanc quasi " + + "naturalem atque insitam in animis nostris inesse notionem, ut " + + "alterum esse appetendum, alterum aspernandum sentiamus. Alii autem," + + " quibus ego assentior, cum a philosophis compluribus permulta " + + "dicantur, cur nec voluptas in bonis sit numeranda nec in malis " + + "dolor, non existimant oportere nimium nos causae confidere, sed et" + + " argumentandum et accurate disserendum et rationibus conquisitis de" + + " voluptate et dolore disputandum putant.\n" + + "Sed ut perspiciatis, unde omnis iste natus error sit voluptatem " + + "accusantium doloremque laudantium, totam rem aperiam eaque ipsa," + + "quae ab illo inventore veritatis et quasi architecto beatae vitae " + + "dicta sunt, explicabo. nemo enim ipsam voluptatem, quia voluptas" + + "sit, aspernatur aut odit aut fugit, sed quia consequuntur magni" + + " dolores eos, qui ratione voluptatem sequi nesciunt, neque porro" + + " quisquam est, qui dolorem ipsum, quia dolor sit, amet, " + + "consectetur, adipisci velit, sed quia non numquam eius modi" + + " tempora incidunt, ut labore et dolore magnam aliquam quaerat " + + "voluptatem. ut enim ad minima veniam, quis nostrum exercitationem " + + "ullam corporis suscipit laboriosam, nisi ut aliquid ex ea commodi " + + "consequatur? 
quis autem vel eum iure reprehenderit, qui in ea " + + "voluptate velit esse, quam nihil molestiae consequatur, vel illum, " + + "qui dolorem eum fugiat, quo voluptas nulla pariatur?\n"; + +assertEquals(893, lipsum.indexOf("lorem ipsum, quia dolor sit, amet"), + "Lipsum"); +// test a lot of substrings of differing length and start-position. +for(var i = 0; i < lipsum.length; i += 3) { + for(var len = 1; i + len < lipsum.length; len += 7) { + var substring = lipsum.substring(i, i + len); + var index = -1; + do { + index = lipsum.indexOf(substring, index + 1); + assertTrue(index != -1, + "Lipsum substring " + i + ".." + (i + len-1) + " not found"); + assertEquals(lipsum.substring(index, index + len), substring, + "Wrong lipsum substring found: " + i + ".." + (i + len - 1) + "/" + + index + ".." + (index + len - 1)); + } while (index >= 0 && index < i); + assertEquals(i, index, "Lipsum match at " + i + ".." + (i + len - 1)); + } +} diff --git a/deps/v8/test/mjsunit/unicode-case-overoptimization.js b/deps/v8/test/mjsunit/unicode-case-overoptimization.js new file mode 100644 index 0000000000..bfda48c77c --- /dev/null +++ b/deps/v8/test/mjsunit/unicode-case-overoptimization.js @@ -0,0 +1,35 @@ +// Copyright 2009 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Test all non-ASCII characters individually to ensure that our optimizations +// didn't break anything. +for (var i = 0x80; i <= 0xfffe; i++) { + var c = String.fromCharCode(i); + var c2 = String.fromCharCode(i + 1); + var re = new RegExp("[" + c + "-" + c2 + "]", "i"); + assertTrue(re.test(c), i); +} diff --git a/deps/v8/test/mozilla/testcfg.py b/deps/v8/test/mozilla/testcfg.py index 477b2b2fe2..d1c1767a9a 100644 --- a/deps/v8/test/mozilla/testcfg.py +++ b/deps/v8/test/mozilla/testcfg.py @@ -103,6 +103,7 @@ class MozillaTestConfiguration(test.TestConfiguration): for excluded in EXCLUDED: if excluded in dirs: dirs.remove(excluded) + dirs.sort() root_path = root[len(self.root):].split(os.path.sep) root_path = current_path + [x for x in root_path if x] framework = [] @@ -113,6 +114,7 @@ class MozillaTestConfiguration(test.TestConfiguration): if exists(script): framework.append(script) framework.reverse() + files.sort() for file in files: if (not file in FRAMEWORK) and file.endswith('.js'): full_path = root_path + [file[:-3]] diff --git a/deps/v8/test/sputnik/README b/deps/v8/test/sputnik/README new file mode 100644 index 0000000000..3d39a67e43 --- /dev/null +++ 
b/deps/v8/test/sputnik/README @@ -0,0 +1,6 @@ +To run the sputniktests you must check out the test suite from +googlecode.com. The test expectations are currently relative to +version 28. To get the tests run the following command within +v8/tests/sputnik/ + + svn co http://sputniktests.googlecode.com/svn/trunk/ -r28 sputniktests diff --git a/deps/v8/test/sputnik/sputnik.status b/deps/v8/test/sputnik/sputnik.status new file mode 100644 index 0000000000..16a44c51ad --- /dev/null +++ b/deps/v8/test/sputnik/sputnik.status @@ -0,0 +1,318 @@ +# Copyright 2009 the V8 project authors. All rights reserved. +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following +# disclaimer in the documentation and/or other materials provided +# with the distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +prefix sputnik +def FAIL_OK = FAIL, OKAY + +##################### DELIBERATE INCOMPATIBILITIES ##################### + +# 900066: Deleting elements in .arguments should disconnect the +# element from the actual arguments. Implementing this is nontrivial +# and we have no indication that anything on the web depends on this +# feature. +S13_A13_T1: FAIL_OK +S13_A13_T2: FAIL_OK +S13_A13_T3: FAIL_OK + +# This tests precision of trignometric functions. We're slightly off +# from the implementation in libc (~ 1e-17) but it's not clear if we +# or they are closer to the right answer, or if it even matters. +S15.8.2.16_A7: PASS || FAIL_OK +S15.8.2.18_A7: PASS || FAIL_OK +S15.8.2.13_A23: PASS || FAIL_OK + +# We allow calls to regexp exec() with no arguments to fail for +# compatibility reasons. +S15.10.6.2_A1_T16: FAIL_OK +S15.10.6.3_A1_T16: FAIL_OK + +# We allow regexps to be called as functions for compatibility reasons. +S15.10.7_A1_T1: FAIL_OK +S15.10.7_A1_T2: FAIL_OK + +# We allow construct calls to built-in functions, and we allow built-in +# functions to have prototypes. 
+S15.1.2.1_A4.6: FAIL_OK +S15.1.2.1_A4.7: FAIL_OK +S15.1.2.2_A9.6: FAIL_OK +S15.1.2.2_A9.7: FAIL_OK +S15.1.2.3_A7.6: FAIL_OK +S15.1.2.3_A7.7: FAIL_OK +S15.1.2.4_A2.6: FAIL_OK +S15.1.2.4_A2.7: FAIL_OK +S15.1.2.5_A2.6: FAIL_OK +S15.1.2.5_A2.7: FAIL_OK +S15.1.3.1_A5.6: FAIL_OK +S15.1.3.1_A5.7: FAIL_OK +S15.1.3.2_A5.6: FAIL_OK +S15.1.3.2_A5.7: FAIL_OK +S15.1.3.3_A5.6: FAIL_OK +S15.1.3.3_A5.7: FAIL_OK +S15.1.3.4_A5.6: FAIL_OK +S15.1.3.4_A5.7: FAIL_OK +S15.10.6.2_A6: FAIL_OK +S15.10.6.3_A6: FAIL_OK +S15.10.6.4_A6: FAIL_OK +S15.10.6.4_A7: FAIL_OK +S15.2.4.2_A6: FAIL_OK +S15.2.4.3_A6: FAIL_OK +S15.2.4.4_A6: FAIL_OK +S15.2.4.5_A6: FAIL_OK +S15.2.4.6_A6: FAIL_OK +S15.2.4.7_A6: FAIL_OK +S15.3.4.2_A6: FAIL_OK +S15.4.4.10_A5.6: FAIL_OK +S15.4.4.10_A5.7: FAIL_OK +S15.4.4.11_A7.6: FAIL_OK +S15.4.4.11_A7.7: FAIL_OK +S15.4.4.12_A5.6: FAIL_OK +S15.4.4.12_A5.7: FAIL_OK +S15.4.4.13_A5.6: FAIL_OK +S15.4.4.13_A5.7: FAIL_OK +S15.4.4.2_A4.6: FAIL_OK +S15.4.4.3_A4.6: FAIL_OK +S15.4.4.3_A4.6: FAIL_OK +S15.4.4.4_A4.6: FAIL_OK +S15.4.4.4_A4.7: FAIL_OK +S15.4.4.5_A6.6: FAIL_OK +S15.4.4.5_A6.7: FAIL_OK +S15.4.4.6_A5.6: FAIL_OK +S15.4.4.6_A5.7: FAIL_OK +S15.4.4.7_A6.6: FAIL_OK +S15.4.4.7_A6.7: FAIL_OK +S15.4.4.8_A5.6: FAIL_OK +S15.4.4.8_A5.7: FAIL_OK +S15.4.4.9_A5.6: FAIL_OK +S15.4.4.9_A5.7: FAIL_OK +S15.5.4.10_A6: FAIL_OK +S15.5.4.11_A6: FAIL_OK +S15.5.4.12_A6: FAIL_OK +S15.5.4.13_A6: FAIL_OK +S15.5.4.14_A6: FAIL_OK +S15.5.4.15_A6: FAIL_OK +S15.5.4.16_A6: FAIL_OK +S15.5.4.17_A6: FAIL_OK +S15.5.4.18_A6: FAIL_OK +S15.5.4.19_A6: FAIL_OK +S15.5.4.4_A6: FAIL_OK +S15.5.4.5_A6: FAIL_OK +S15.5.4.6_A6: FAIL_OK +S15.5.4.7_A6: FAIL_OK +S15.5.4.9_A6: FAIL_OK +S15.3.4.3_A12: FAIL_OK +S15.3.4.4_A12: FAIL_OK +S15.5.4.8_A6: FAIL_OK + +# We are silent in some regexp cases where the spec wants us to give +# errors, for compatibility. 
+S15.10.2.11_A1_T2: FAIL +S15.10.2.11_A1_T3: FAIL +S15.10.4.1_A5_T1: FAIL +S15.10.4.1_A5_T2: FAIL +S15.10.4.1_A5_T3: FAIL +S15.10.4.1_A5_T4: FAIL +S15.10.4.1_A5_T5: FAIL +S15.10.4.1_A5_T6: FAIL +S15.10.4.1_A5_T7: FAIL +S15.10.4.1_A5_T8: FAIL +S15.10.4.1_A5_T9: FAIL + +# We are more lenient in which string character escapes we allow than +# the spec (7.8.4 p. 19) wants us to be. This is for compatibility. +S7.8.4_A4.3_T2: FAIL_OK +S7.8.4_A4.3_T2: FAIL_OK +S7.8.4_A6.2_T2: FAIL_OK +S7.8.4_A6.1_T4: FAIL_OK +S7.8.4_A4.3_T4: FAIL_OK +S7.8.4_A7.2_T2: FAIL_OK +S7.8.4_A7.1_T4: FAIL_OK +S7.8.4_A6.4_T2: FAIL_OK +S7.8.4_A7.4_T2: FAIL_OK +S7.8.4_A7.2_T4: FAIL_OK +S7.8.4_A4.3_T6: FAIL_OK +S7.8.4_A7.2_T6: FAIL_OK +S7.8.4_A4.3_T1: FAIL_OK +S7.8.4_A6.2_T1: FAIL_OK +S7.8.4_A4.3_T3: FAIL_OK +S7.8.4_A7.2_T1: FAIL_OK +S7.8.4_A6.4_T1: FAIL_OK +S7.8.4_A7.2_T3: FAIL_OK +S7.8.4_A7.4_T1: FAIL_OK +S7.8.4_A4.3_T5: FAIL_OK +S7.8.4_A7.2_T5: FAIL_OK +S7.8.4_A4.3_T1: FAIL_OK +S7.8.4_A6.2_T1: FAIL_OK +S7.8.4_A4.3_T3: FAIL_OK +S7.8.4_A7.2_T1: FAIL_OK +S7.8.4_A6.4_T1: FAIL_OK +S7.8.4_A7.2_T3: FAIL_OK +S7.8.4_A7.4_T1: FAIL_OK +S7.8.4_A4.3_T5: FAIL_OK +S7.8.4_A7.2_T5: FAIL_OK + +# We allow some keywords to be used as identifiers +S7.5.3_A1.17: FAIL_OK +S7.5.3_A1.26: FAIL_OK +S7.5.3_A1.18: FAIL_OK +S7.5.3_A1.27: FAIL_OK +S7.5.3_A1.28: FAIL_OK +S7.5.3_A1.19: FAIL_OK +S7.5.3_A1.29: FAIL_OK +S7.5.3_A1.1: FAIL_OK +S7.5.3_A1.2: FAIL_OK +S7.5.3_A1.3: FAIL_OK +S7.5.3_A1.4: FAIL_OK +S7.5.3_A1.5: FAIL_OK +S7.5.3_A1.8: FAIL_OK +S7.5.3_A1.9: FAIL_OK +S7.5.3_A1.10: FAIL_OK +S7.5.3_A1.11: FAIL_OK +S7.5.3_A1.21: FAIL_OK +S7.5.3_A1.12: FAIL_OK +S7.5.3_A1.30: FAIL_OK +S7.5.3_A1.31: FAIL_OK +S7.5.3_A1.13: FAIL_OK +S7.5.3_A1.22: FAIL_OK +S7.5.3_A1.23: FAIL_OK +S7.5.3_A1.14: FAIL_OK +S7.5.3_A1.15: FAIL_OK +S7.5.3_A1.24: FAIL_OK +S7.5.3_A1.25: FAIL_OK +S7.5.3_A1.16: FAIL_OK + +# This checks for non-262 behavior +S12.6.4_A14_T1: PASS || FAIL_OK +S12.6.4_R1: PASS || FAIL_OK +S12.6.4_R2: PASS || FAIL_OK +S8.4_D2.1: PASS || 
FAIL_OK +S8.4_D2.2: PASS || FAIL_OK +S8.4_D2.3: PASS || FAIL_OK +S8.4_D2.4: PASS || FAIL_OK +S8.4_D2.5: PASS || FAIL_OK +S8.4_D2.6: PASS || FAIL_OK +S8.4_D2.7: PASS || FAIL_OK +S8.4_D1.1: PASS || FAIL_OK +S13.2_D1.2: PASS || FAIL_OK +S11.4.3_D1.2: PASS || FAIL_OK +S7.6_D1: PASS || FAIL_OK +S7.6_D2: PASS || FAIL_OK +S15.1.2.2_D1.2: PASS || FAIL_OK +S13_D1_T1: PASS || FAIL_OK +S14_D4_T3: PASS || FAIL_OK +S14_D7: PASS || FAIL_OK +S15.5.4.11_D1.1_T2: PASS || FAIL_OK +S15.5.4.11_D1.1_T4: PASS || FAIL_OK +S15.5.2_D2: PASS || FAIL_OK +S15.5.4.11_D1.1_T1: PASS || FAIL_OK +S15.5.4.11_D1.1_T3: PASS || FAIL_OK +S12.6.4_D1: PASS || FAIL_OK + +# We deliberately don't throw type errors when iterating through the +# undefined object +S9.9_A1: FAIL_OK +S9.9_A2: FAIL_OK + +# We allow function declarations within statements +S12.5_A9_T1: FAIL_OK +S12.5_A9_T2: FAIL_OK +# S12.6.2_A13_T3: FAIL_OK +# S12.5_A9_T3: FAIL_OK +# S12.6.1_A13_T3: FAIL_OK +S12.1_A1: FAIL_OK +S12.6.2_A13_T1: FAIL_OK +S12.6.2_A13_T2: FAIL_OK +S12.6.1_A13_T1: FAIL_OK +S12.6.1_A13_T2: FAIL_OK +S12.6.4_A13_T1: FAIL_OK +S12.6.4_A13_T2: FAIL_OK +#S12.6.4_A13_T3: FAIL_OK +S15.3.4.2_A1_T1: FAIL_OK + +# Linux and Mac defaults to extended 80 bit floating point format in the FPU. +# We follow the other major JS engines by keeping this default. +S8.5_A2.2: PASS, FAIL if $system == linux, FAIL if $system == macos +S8.5_A2.1: PASS, FAIL if $system == linux, FAIL if $system == macos + +##################### SKIPPED TESTS ##################### + +# These tests take a looong time to run in debug mode. +S15.1.3.2_A2.5_T1: PASS, SKIP if $mode == debug +S15.1.3.1_A2.5_T1: PASS, SKIP if $mode == debug + + +# These tests fail because we had to add bugs to be compatible with JSC. 
See +# http://code.google.com/p/chromium/issues/detail?id=1717 +S15.4.4_A1.1_T2: FAIL_OK +S15.5.4.1_A1_T2: FAIL_OK +S15.5.4_A1: FAIL_OK +S15.5.4_A3: FAIL_OK +S15.9.5.10_A1_T2: FAIL_OK +S15.9.5.11_A1_T2: FAIL_OK +S15.9.5.12_A1_T2: FAIL_OK +S15.9.5.13_A1_T2: FAIL_OK +S15.9.5.14_A1_T2: FAIL_OK +S15.9.5.15_A1_T2: FAIL_OK +S15.9.5.16_A1_T2: FAIL_OK +S15.9.5.17_A1_T2: FAIL_OK +S15.9.5.18_A1_T2: FAIL_OK +S15.9.5.19_A1_T2: FAIL_OK +S15.9.5.20_A1_T2: FAIL_OK +S15.9.5.21_A1_T2: FAIL_OK +S15.9.5.22_A1_T2: FAIL_OK +S15.9.5.23_A1_T2: FAIL_OK +S15.9.5.24_A1_T2: FAIL_OK +S15.9.5.25_A1_T2: FAIL_OK +S15.9.5.26_A1_T2: FAIL_OK +S15.9.5.27_A1_T2: FAIL_OK +S15.9.5.28_A1_T2: FAIL_OK +S15.9.5.29_A1_T2: FAIL_OK +S15.9.5.2_A1_T2: FAIL_OK +S15.9.5.30_A1_T2: FAIL_OK +S15.9.5.31_A1_T2: FAIL_OK +S15.9.5.32_A1_T2: FAIL_OK +S15.9.5.33_A1_T2: FAIL_OK +S15.9.5.34_A1_T2: FAIL_OK +S15.9.5.35_A1_T2: FAIL_OK +S15.9.5.36_A1_T2: FAIL_OK +S15.9.5.37_A1_T2: FAIL_OK +S15.9.5.38_A1_T2: FAIL_OK +S15.9.5.39_A1_T2: FAIL_OK +S15.9.5.3_A1_T2: FAIL_OK +S15.9.5.40_A1_T2: FAIL_OK +S15.9.5.41_A1_T2: FAIL_OK +S15.9.5.42_A1_T2: FAIL_OK +S15.9.5.4_A1_T2: FAIL_OK +S15.9.5.5_A1_T2: FAIL_OK +S15.9.5.6_A1_T2: FAIL_OK +S15.9.5.7_A1_T2: FAIL_OK +S15.9.5.8_A1_T2: FAIL_OK +S15.9.5.9_A1_T2: FAIL_OK + +# Regexps have type "function", not "object". +S11.4.3_A3.6: FAIL_OK +S15.10.7_A3_T2: FAIL_OK +S15.10.7_A3_T1: FAIL_OK diff --git a/deps/v8/test/sputnik/testcfg.py b/deps/v8/test/sputnik/testcfg.py new file mode 100644 index 0000000000..659238220b --- /dev/null +++ b/deps/v8/test/sputnik/testcfg.py @@ -0,0 +1,112 @@ +# Copyright 2009 the V8 project authors. All rights reserved. +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. 
+# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following +# disclaimer in the documentation and/or other materials provided +# with the distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ + +import os +from os.path import join, exists +import sys +import test +import time + + +class SputnikTestCase(test.TestCase): + + def __init__(self, case, path, context, mode): + super(SputnikTestCase, self).__init__(context, path) + self.case = case + self.mode = mode + self.tmpfile = None + self.source = None + + def IsNegative(self): + return '@negative' in self.GetSource() + + def IsFailureOutput(self, output): + if output.exit_code != 0: + return True + out = output.stdout + return "SputnikError" in out + + def BeforeRun(self): + self.tmpfile = sputnik.TempFile(suffix='.js', prefix='sputnik-', text=True) + self.tmpfile.Write(self.GetSource()) + self.tmpfile.Close() + + def AfterRun(self): + self.tmpfile.Dispose() + self.tmpfile = None + + def GetCommand(self): + result = [self.context.GetVm(self.mode)] + result.append(self.tmpfile.name) + return result + + def GetLabel(self): + return "%s sputnik %s" % (self.mode, self.GetName()) + + def GetName(self): + return self.path[-1] + + def GetSource(self): + if not self.source: + self.source = self.case.GetSource() + return self.source + +class SputnikTestConfiguration(test.TestConfiguration): + + def __init__(self, context, root): + super(SputnikTestConfiguration, self).__init__(context, root) + + def ListTests(self, current_path, path, mode): + # Import the sputnik test runner script as a module + testroot = join(self.root, 'sputniktests') + modroot = join(testroot, 'tools') + sys.path.append(modroot) + import sputnik + globals()['sputnik'] = sputnik + test_suite = sputnik.TestSuite(testroot) + test_suite.Validate() + tests = test_suite.EnumerateTests([]) + result = [] + for test in tests: + full_path = current_path + [test.GetPath()[-1]] + if self.Contains(path, full_path): + case = SputnikTestCase(test, full_path, self.context, mode) + result.append(case) + return result + + def GetBuildRequirements(self): + return ['sample', 'sample=shell'] + + def GetTestStatus(self, sections, defs): + status_file = 
join(self.root, 'sputnik.status') + if exists(status_file): + test.ReadConfigurationInto(status_file, sections, defs) + + +def GetConfiguration(context, root): + return SputnikTestConfiguration(context, root) diff --git a/deps/v8/tools/gyp/v8.gyp b/deps/v8/tools/gyp/v8.gyp index 5e2bb88e92..75464f206a 100644 --- a/deps/v8/tools/gyp/v8.gyp +++ b/deps/v8/tools/gyp/v8.gyp @@ -156,8 +156,8 @@ 'target_name': 'v8_snapshot', 'type': '<(library)', 'dependencies': [ - 'mksnapshot', - 'js2c', + 'mksnapshot#host', + 'js2c#host', 'v8_base', ], 'include_dirs+': [ @@ -183,8 +183,9 @@ { 'target_name': 'v8_nosnapshot', 'type': '<(library)', + 'toolsets': ['host', 'target'], 'dependencies': [ - 'js2c', + 'js2c#host', 'v8_base', ], 'include_dirs+': [ @@ -194,10 +195,21 @@ '<(SHARED_INTERMEDIATE_DIR)/libraries.cc', '../../src/snapshot-empty.cc', ], + 'conditions': [ + # The ARM assembler assumes the host is 32 bits, so force building + # 32-bit host tools. + # TODO(piman): This assumes that the host is ia32 or amd64. Fixing the + # code would be better + ['target_arch=="arm" and _toolset=="host"', { + 'cflags': ['-m32'], + 'ldflags': ['-m32'], + }] + ] }, { 'target_name': 'v8_base', 'type': '<(library)', + 'toolsets': ['host', 'target'], 'include_dirs+': [ '../../src', ], @@ -293,7 +305,6 @@ '../../src/jsregexp.h', '../../src/list-inl.h', '../../src/list.h', - '../../src/location.h', '../../src/log-inl.h', '../../src/log-utils.cc', '../../src/log-utils.h', @@ -394,6 +405,7 @@ '../../src/arm/codegen-arm.cc', '../../src/arm/codegen-arm.h', '../../src/arm/constants-arm.h', + '../../src/arm/constants-arm.cc', '../../src/arm/cpu-arm.cc', '../../src/arm/debug-arm.cc', '../../src/arm/disasm-arm.cc', @@ -412,6 +424,16 @@ '../../src/arm/virtual-frame-arm.cc', '../../src/arm/virtual-frame-arm.h', ], + 'conditions': [ + # The ARM assembler assumes the host is 32 bits, so force building + # 32-bit host tools. + # TODO(piman): This assumes that the host is ia32 or amd64. 
Fixing + # the code would be better + ['_toolset=="host"', { + 'cflags': ['-m32'], + 'ldflags': ['-m32'], + }] + ] }], ['target_arch=="ia32"', { 'include_dirs+': [ @@ -508,6 +530,7 @@ { 'target_name': 'js2c', 'type': 'none', + 'toolsets': ['host'], 'variables': { 'library_files': [ '../../src/runtime.js', @@ -550,6 +573,7 @@ { 'target_name': 'mksnapshot', 'type': 'executable', + 'toolsets': ['host'], 'dependencies': [ 'v8_nosnapshot', ], @@ -559,6 +583,16 @@ 'sources': [ '../../src/mksnapshot.cc', ], + 'conditions': [ + # The ARM assembler assumes the host is 32 bits, so force building + # 32-bit host tools. + # TODO(piman): This assumes that the host is ia32 or amd64. Fixing + # the code would be better + ['target_arch=="arm" and _toolset=="host"', { + 'cflags': ['-m32'], + 'ldflags': ['-m32'], + }] + ] }, { 'target_name': 'v8_shell', diff --git a/deps/v8/tools/js2c.py b/deps/v8/tools/js2c.py index 2b7dbdfbae..b889530d5a 100755 --- a/deps/v8/tools/js2c.py +++ b/deps/v8/tools/js2c.py @@ -301,7 +301,7 @@ def JS2C(source, target, env): else: ids.append((id, len(lines))) source_lines.append(SOURCE_DECLARATION % { 'id': id, 'data': data }) - source_lines_empty.append(SOURCE_DECLARATION % { 'id': id, 'data': 0 }) + source_lines_empty.append(SOURCE_DECLARATION % { 'id': id, 'data': data }) # Build delay support functions get_index_cases = [ ] diff --git a/deps/v8/tools/presubmit.py b/deps/v8/tools/presubmit.py index c4f78536bb..5a99c2adda 100755 --- a/deps/v8/tools/presubmit.py +++ b/deps/v8/tools/presubmit.py @@ -108,7 +108,7 @@ class SourceFileProcessor(object): return True def IgnoreDir(self, name): - return name.startswith('.') or name == 'data' + return name.startswith('.') or name == 'data' or name == 'sputniktests' def IgnoreFile(self, name): return name.startswith('.') diff --git a/deps/v8/tools/process-heap-prof.py b/deps/v8/tools/process-heap-prof.py index ff83952e0e..6a2c3978d2 100755 --- a/deps/v8/tools/process-heap-prof.py +++ 
b/deps/v8/tools/process-heap-prof.py @@ -40,9 +40,14 @@ # to get JS constructor profile -import csv, sys, time +import csv, sys, time, optparse -def process_logfile(filename, itemname): +def ProcessLogFile(filename, options): + if options.js_cons_profile: + itemname = 'heap-js-cons-item' + else: + itemname = 'heap-sample-item' + first_call_time = None sample_time = 0.0 sampling = False @@ -68,13 +73,48 @@ def process_logfile(filename, itemname): print('END_SAMPLE %.2f' % sample_time) sampling = False elif row[0] == itemname and sampling: - print('%s %d' % (row[1], int(row[3]))) + print(row[1]), + if options.count: + print('%d' % (int(row[2]))), + if options.size: + print('%d' % (int(row[3]))), + print finally: logfile.close() except: sys.exit('can\'t open %s' % filename) -if sys.argv[1] == '--js-cons-profile': - process_logfile(sys.argv[2], 'heap-js-cons-item') -else: - process_logfile(sys.argv[1], 'heap-sample-item') + +def BuildOptions(): + result = optparse.OptionParser() + result.add_option("--js_cons_profile", help="Constructor profile", + default=False, action="store_true") + result.add_option("--size", help="Report object size", + default=False, action="store_true") + result.add_option("--count", help="Report object count", + default=False, action="store_true") + return result + + +def ProcessOptions(options): + if not options.size and not options.count: + options.size = True + return True + + +def Main(): + parser = BuildOptions() + (options, args) = parser.parse_args() + if not ProcessOptions(options): + parser.print_help() + sys.exit(); + + if not args: + print "Missing logfile" + sys.exit(); + + ProcessLogFile(args[0], options) + + +if __name__ == '__main__': + sys.exit(Main()) diff --git a/deps/v8/tools/test.py b/deps/v8/tools/test.py index d206e33e5b..75b4f61f75 100755 --- a/deps/v8/tools/test.py +++ b/deps/v8/tools/test.py @@ -359,8 +359,19 @@ class TestCase(object): self.Cleanup() return TestOutput(self, full_command, output) + def BeforeRun(self): + 
pass + + def AfterRun(self): + pass + def Run(self): - return self.RunCommand(self.GetCommand()) + self.BeforeRun() + try: + result = self.RunCommand(self.GetCommand()) + finally: + self.AfterRun() + return result def Cleanup(self): return diff --git a/deps/v8/tools/visual_studio/v8_base.vcproj b/deps/v8/tools/visual_studio/v8_base.vcproj index fc7402aebb..6b47359754 100644 --- a/deps/v8/tools/visual_studio/v8_base.vcproj +++ b/deps/v8/tools/visual_studio/v8_base.vcproj @@ -557,10 +557,6 @@ > </File> <File - RelativePath="..\..\src\location.h" - > - </File> - <File RelativePath="..\..\src\log.cc" > </File> diff --git a/deps/v8/tools/visual_studio/v8_base_arm.vcproj b/deps/v8/tools/visual_studio/v8_base_arm.vcproj index fca4a96058..afb4f74b79 100644 --- a/deps/v8/tools/visual_studio/v8_base_arm.vcproj +++ b/deps/v8/tools/visual_studio/v8_base_arm.vcproj @@ -561,10 +561,6 @@ > </File> <File - RelativePath="..\..\src\location.h" - > - </File> - <File RelativePath="..\..\src\log.cc" > </File> |