180 files changed, 16938 insertions, 3555 deletions
diff --git a/deps/v8/ChangeLog b/deps/v8/ChangeLog index 0786ed9296..5b1240df8b 100644 --- a/deps/v8/ChangeLog +++ b/deps/v8/ChangeLog @@ -1,3 +1,20 @@ +2010-03-25: Version 2.1.9 + + Added API support for reattaching a global object to a context. + + Extended debugger API with access to the internal debugger context. + + Fixed Chromium crashes (issues http://crbug.com/39128 and + http://crbug.com/39160) + + +2010-03-24: Version 2.1.8 + + Added fine-grained garbage collection callbacks to the API. + + Performance improvements on all platforms. + + 2010-03-22: Version 2.1.7 Fixed issue 650. diff --git a/deps/v8/SConstruct b/deps/v8/SConstruct index dd26cb4147..bef64eb2d4 100644 --- a/deps/v8/SConstruct +++ b/deps/v8/SConstruct @@ -90,7 +90,7 @@ ANDROID_LINKFLAGS = ['-nostdlib', '-Wl,-z,nocopyreloc', '-Wl,-rpath-link=' + ANDROID_TOP + '/out/target/product/generic/obj/lib', ANDROID_TOP + '/out/target/product/generic/obj/lib/crtbegin_dynamic.o', - ANDROID_TOP + '/prebuilt/linux-x86/toolchain/arm-eabi-4.2.1/lib/gcc/arm-eabi/4.2.1/interwork/libgcc.a', + ANDROID_TOP + '/prebuilt/linux-x86/toolchain/arm-eabi-4.4.0/lib/gcc/arm-eabi/4.4.0/interwork/libgcc.a', ANDROID_TOP + '/out/target/product/generic/obj/lib/crtend_android.o']; LIBRARY_FLAGS = { @@ -275,6 +275,7 @@ V8_EXTRA_FLAGS = { 'gcc': { 'all': { 'WARNINGFLAGS': ['-Wall', + '-Werror', '-W', '-Wno-unused-parameter', '-Wnon-virtual-dtor'] @@ -370,7 +371,6 @@ DTOA_EXTRA_FLAGS = { CCTEST_EXTRA_FLAGS = { 'all': { 'CPPPATH': [join(root_dir, 'src')], - 'LIBS': ['$LIBRARY'] }, 'gcc': { 'all': { @@ -400,9 +400,10 @@ CCTEST_EXTRA_FLAGS = { '__ARM_ARCH_5E__', '__ARM_ARCH_5TE__'], 'CCFLAGS': ANDROID_FLAGS, 'CPPPATH': ANDROID_INCLUDES, - 'LIBPATH': [ANDROID_TOP + '/out/target/product/generic/obj/lib'], + 'LIBPATH': [ANDROID_TOP + '/out/target/product/generic/obj/lib', + ANDROID_TOP + '/prebuilt/linux-x86/toolchain/arm-eabi-4.4.0/lib/gcc/arm-eabi/4.4.0/interwork'], 'LINKFLAGS': ANDROID_LINKFLAGS, - 'LIBS': ['log', 'c', 'stdc++', 'm'], + 'LIBS': ['log', 'c', 'stdc++', 'm', 'gcc'], 'mode:release': { 'CPPDEFINES': ['SK_RELEASE', 'NDEBUG'] } @@ -430,7 +431,6 @@ CCTEST_EXTRA_FLAGS = { SAMPLE_FLAGS = { 'all': { 'CPPPATH': [join(abspath('.'), 'include')], - 'LIBS': ['$LIBRARY'], }, 'gcc': { 'all': { @@ -464,9 +464,10 @@ SAMPLE_FLAGS = { '__ARM_ARCH_5E__', '__ARM_ARCH_5TE__'], 'CCFLAGS': ANDROID_FLAGS, 'CPPPATH': ANDROID_INCLUDES, - 'LIBPATH': [ANDROID_TOP + '/out/target/product/generic/obj/lib'], + 'LIBPATH': [ANDROID_TOP + '/out/target/product/generic/obj/lib', + ANDROID_TOP + '/prebuilt/linux-x86/toolchain/arm-eabi-4.4.0/lib/gcc/arm-eabi/4.4.0/interwork'], 'LINKFLAGS': ANDROID_LINKFLAGS, - 'LIBS': ['log', 'c', 'stdc++', 'm'], + 'LIBS': ['log', 'c', 'stdc++', 'm', 'gcc'], 'mode:release': { 'CPPDEFINES': ['SK_RELEASE', 'NDEBUG'] } @@ -589,9 +590,10 @@ D8_FLAGS = { 'LIBS': ['pthread'], }, 'os:android': { - 'LIBPATH': [ANDROID_TOP + '/out/target/product/generic/obj/lib'], + 'LIBPATH': [ANDROID_TOP + '/out/target/product/generic/obj/lib', + ANDROID_TOP + '/prebuilt/linux-x86/toolchain/arm-eabi-4.4.0/lib/gcc/arm-eabi/4.4.0/interwork'], 'LINKFLAGS': ANDROID_LINKFLAGS, - 'LIBS': ['log', 'c', 'stdc++', 'm'], + 'LIBS': ['log', 'c', 'stdc++', 'm', 'gcc'], }, 'os:win32': { 'LIBS': ['winmm', 'ws2_32'], @@ -987,7 +989,6 @@ def BuildSpecific(env, mode, env_overrides): if context.options['soname'] == 'on': # When building shared object with SONAME version the library name. 
library_name += '-' + version - env['LIBRARY'] = library_name # Generate library SONAME if required by the build. if context.options['soname'] == 'on': @@ -1008,6 +1009,7 @@ def BuildSpecific(env, mode, env_overrides): # Link the object files into a library. env.Replace(**context.flags['v8']) + env.Prepend(LIBS=[library_name]) context.ApplyEnvOverrides(env) if context.options['library'] == 'static': @@ -1027,8 +1029,9 @@ def BuildSpecific(env, mode, env_overrides): context.d8_targets.append(shell) for sample in context.samples: - sample_env = Environment(LIBRARY=library_name) + sample_env = Environment() sample_env.Replace(**context.flags['sample']) + sample_env.Prepend(LIBS=[library_name]) context.ApplyEnvOverrides(sample_env) sample_object = sample_env.SConscript( join('samples', 'SConscript'), diff --git a/deps/v8/include/v8-debug.h b/deps/v8/include/v8-debug.h index 2e5fb3fde9..f7b4fa12e3 100644 --- a/deps/v8/include/v8-debug.h +++ b/deps/v8/include/v8-debug.h @@ -237,9 +237,10 @@ class EXPORT Debug { * With this call the debugger is entered and the function specified is called * with the execution state as the first argument. This makes it possible to * get access to information otherwise not available during normal JavaScript - * execution e.g. details on stack frames. The following example show a - * JavaScript function which when passed to v8::Debug::Call will return the - * current line of JavaScript execution. + * execution e.g. details on stack frames. Receiver of the function call will + * be the debugger context global object, however this is a subject to change. + * The following example show a JavaScript function which when passed to + * v8::Debug::Call will return the current line of JavaScript execution. * * \code * function frame_source_line(exec_state) { @@ -302,6 +303,14 @@ class EXPORT Debug { * of this method. */ static void ProcessDebugMessages(); + + /** + * Debugger is running in it's own context which is entered while debugger + * messages are being dispatched. This is an explicit getter for this + * debugger context. Note that the content of the debugger context is subject + * to change. + */ + static Local<Context> GetDebugContext(); }; diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h index bed86cabd0..7b42178782 100644 --- a/deps/v8/include/v8.h +++ b/deps/v8/include/v8.h @@ -2158,12 +2158,26 @@ typedef void (*FailedAccessCheckCallback)(Local<Object> target, // --- G a r b a g e C o l l e c t i o n C a l l b a c k s /** - * Applications can register a callback function which is called - * before and after a major garbage collection. Allocations are not - * allowed in the callback function, you therefore cannot manipulate + * Applications can register callback functions which will be called + * before and after a garbage collection. Allocations are not + * allowed in the callback functions, you therefore cannot manipulate * objects (set or delete properties for example) since it is possible * such operations will result in the allocation of objects. 
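[Editorial aside, not part of the patch.] The v8-debug.h hunk above only declares the new Debug::GetDebugContext() entry point. A minimal sketch of how an embedder might enter the returned context follows; the surrounding embedder setup is assumed, and the contents of the debugger context are subject to change, as the header comment itself notes.

#include <v8.h>
#include <v8-debug.h>

// Sketch: look at the debugger's own context from embedder code.
void InspectDebuggerContext() {
  v8::HandleScope handle_scope;
  v8::Local<v8::Context> debug_context = v8::Debug::GetDebugContext();
  v8::Context::Scope context_scope(debug_context);
  // Code run inside this scope sees the debugger context's global object,
  // which is also the receiver used for functions passed to v8::Debug::Call.
  v8::Local<v8::Object> debug_global = debug_context->Global();
  (void)debug_global;
}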
*/ +enum GCType { + kGCTypeScavenge = 1 << 0, + kGCTypeMarkSweepCompact = 1 << 1, + kGCTypeAll = kGCTypeScavenge | kGCTypeMarkSweepCompact +}; + +enum GCCallbackFlags { + kNoGCCallbackFlags = 0, + kGCCallbackFlagCompacted = 1 << 0 +}; + +typedef void (*GCPrologueCallback)(GCType type, GCCallbackFlags flags); +typedef void (*GCEpilogueCallback)(GCType type, GCCallbackFlags flags); + typedef void (*GCCallback)(); @@ -2299,7 +2313,27 @@ class V8EXPORT V8 { /** * Enables the host application to receive a notification before a - * major garbage colletion. Allocations are not allowed in the + * garbage collection. Allocations are not allowed in the + * callback function, you therefore cannot manipulate objects (set + * or delete properties for example) since it is possible such + * operations will result in the allocation of objects. It is possible + * to specify the GCType filter for your callback. But it is not possible to + * register the same callback function two times with different + * GCType filters. + */ + static void AddGCPrologueCallback( + GCPrologueCallback callback, GCType gc_type_filter = kGCTypeAll); + + /** + * This function removes callback which was installed by + * AddGCPrologueCallback function. + */ + static void RemoveGCPrologueCallback(GCPrologueCallback callback); + + /** + * The function is deprecated. Please use AddGCPrologueCallback instead. + * Enables the host application to receive a notification before a + * garbage collection. Allocations are not allowed in the * callback function, you therefore cannot manipulate objects (set * or delete properties for example) since it is possible such * operations will result in the allocation of objects. @@ -2308,6 +2342,26 @@ class V8EXPORT V8 { /** * Enables the host application to receive a notification after a + * garbage collection. Allocations are not allowed in the + * callback function, you therefore cannot manipulate objects (set + * or delete properties for example) since it is possible such + * operations will result in the allocation of objects. It is possible + * to specify the GCType filter for your callback. But it is not possible to + * register the same callback function two times with different + * GCType filters. + */ + static void AddGCEpilogueCallback( + GCEpilogueCallback callback, GCType gc_type_filter = kGCTypeAll); + + /** + * This function removes callback which was installed by + * AddGCEpilogueCallback function. + */ + static void RemoveGCEpilogueCallback(GCEpilogueCallback callback); + + /** + * The function is deprecated. Please use AddGCEpilogueCallback instead. + * Enables the host application to receive a notification after a * major garbage collection. Allocations are not allowed in the * callback function, you therefore cannot manipulate objects (set * or delete properties for example) since it is possible such @@ -2681,9 +2735,21 @@ class V8EXPORT Context { */ void DetachGlobal(); + /** + * Reattaches a global object to a context. This can be used to + * restore the connection between a global object and a context + * after DetachGlobal has been called. + * + * \param global_object The global object to reattach to the + * context. For this to work, the global object must be the global + * object that was associated with this context before a call to + * DetachGlobal. + */ + void ReattachGlobal(Handle<Object> global_object); + /** Creates a new context. 
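[Editorial aside, not part of the patch.] The fine-grained GC callbacks declared above accept an optional GCType filter. A minimal usage sketch, assuming an embedder that only wants to observe scavenges:

#include <v8.h>

// Called before every scavenge; allocation is not allowed in here.
static void OnScavengePrologue(v8::GCType type, v8::GCCallbackFlags flags) {
  // Record the upcoming collection using pre-allocated state only.
}

void InstallGCHooks() {
  // Filtered to scavenges; pass kGCTypeAll (the default) to also be
  // notified about mark-sweep-compact collections.
  v8::V8::AddGCPrologueCallback(OnScavengePrologue, v8::kGCTypeScavenge);
}

void RemoveGCHooks() {
  v8::V8::RemoveGCPrologueCallback(OnScavengePrologue);
}

Likewise, a sketch of the DetachGlobal/ReattachGlobal round trip documented above, assuming the embedder still holds the context's original global object:

// Illustrative only: reattach the original global object to its context.
void RecycleGlobal(v8::Persistent<v8::Context> context) {
  v8::HandleScope handle_scope;
  v8::Handle<v8::Object> global = context->Global();
  context->DetachGlobal();
  // ... the context is temporarily left without its global object ...
  context->ReattachGlobal(global);  // must be the same global object
}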
*/ static Persistent<Context> New( - ExtensionConfiguration* extensions = 0, + ExtensionConfiguration* extensions = NULL, Handle<ObjectTemplate> global_template = Handle<ObjectTemplate>(), Handle<Value> global_object = Handle<Value>()); diff --git a/deps/v8/src/SConscript b/deps/v8/src/SConscript index bf42fd4e61..e7f6efd143 100755 --- a/deps/v8/src/SConscript +++ b/deps/v8/src/SConscript @@ -113,6 +113,8 @@ SOURCES = { """), 'arch:arm': Split(""" fast-codegen.cc + jump-target-light.cc + virtual-frame-light.cc arm/builtins-arm.cc arm/codegen-arm.cc arm/constants-arm.cc @@ -156,6 +158,8 @@ SOURCES = { mips/virtual-frame-mips.cc """), 'arch:ia32': Split(""" + jump-target-heavy.cc + virtual-frame-heavy.cc ia32/assembler-ia32.cc ia32/builtins-ia32.cc ia32/codegen-ia32.cc @@ -175,6 +179,8 @@ SOURCES = { """), 'arch:x64': Split(""" fast-codegen.cc + jump-target-heavy.cc + virtual-frame-heavy.cc x64/assembler-x64.cc x64/builtins-x64.cc x64/codegen-x64.cc @@ -252,12 +258,12 @@ uri.js math.js messages.js apinatives.js -debug-delay.js -liveedit-delay.js -mirror-delay.js -date-delay.js -regexp-delay.js -json-delay.js +date.js +regexp.js +json.js +liveedit-debugger.js +mirror-debugger.js +debug-debugger.js '''.split() diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc index b2f0e03406..2100480e85 100644 --- a/deps/v8/src/api.cc +++ b/deps/v8/src/api.cc @@ -537,10 +537,17 @@ i::Object** v8::HandleScope::RawClose(i::Object** value) { LOG_API("CloseHandleScope"); // Read the result before popping the handle block. - i::Object* result = *value; + i::Object* result = NULL; + if (value != NULL) { + result = *value; + } is_closed_ = true; i::HandleScope::Leave(&previous_); + if (value == NULL) { + return NULL; + } + // Allocate a new handle on the previous handle block. 
i::Handle<i::Object> handle(result); return handle.location(); @@ -1136,7 +1143,7 @@ Local<Script> Script::New(v8::Handle<String> source, if (pre_data_impl != NULL && !pre_data_impl->SanityCheck()) { pre_data_impl = NULL; } - i::Handle<i::JSFunction> boilerplate = + i::Handle<i::SharedFunctionInfo> result = i::Compiler::Compile(str, name_obj, line_offset, @@ -1145,9 +1152,9 @@ Local<Script> Script::New(v8::Handle<String> source, pre_data_impl, Utils::OpenHandle(*script_data), i::NOT_NATIVES_CODE); - has_pending_exception = boilerplate.is_null(); + has_pending_exception = result.is_null(); EXCEPTION_BAILOUT_CHECK(Local<Script>()); - return Local<Script>(ToApi<Script>(boilerplate)); + return Local<Script>(ToApi<Script>(result)); } @@ -1168,10 +1175,12 @@ Local<Script> Script::Compile(v8::Handle<String> source, Local<Script> generic = New(source, origin, pre_data, script_data); if (generic.IsEmpty()) return generic; - i::Handle<i::JSFunction> boilerplate = Utils::OpenHandle(*generic); + i::Handle<i::Object> obj = Utils::OpenHandle(*generic); + i::Handle<i::SharedFunctionInfo> function = + i::Handle<i::SharedFunctionInfo>(i::SharedFunctionInfo::cast(*obj)); i::Handle<i::JSFunction> result = - i::Factory::NewFunctionFromBoilerplate(boilerplate, - i::Top::global_context()); + i::Factory::NewFunctionFromSharedFunctionInfo(function, + i::Top::global_context()); return Local<Script>(ToApi<Script>(result)); } @@ -1191,10 +1200,15 @@ Local<Value> Script::Run() { i::Object* raw_result = NULL; { HandleScope scope; - i::Handle<i::JSFunction> fun = Utils::OpenHandle(this); - if (fun->IsBoilerplate()) { - fun = i::Factory::NewFunctionFromBoilerplate(fun, - i::Top::global_context()); + i::Handle<i::Object> obj = Utils::OpenHandle(this); + i::Handle<i::JSFunction> fun; + if (obj->IsSharedFunctionInfo()) { + i::Handle<i::SharedFunctionInfo> + function_info(i::SharedFunctionInfo::cast(*obj)); + fun = i::Factory::NewFunctionFromSharedFunctionInfo( + function_info, i::Top::global_context()); + } else { + fun = i::Handle<i::JSFunction>(i::JSFunction::cast(*obj)); } EXCEPTION_PREAMBLE(); i::Handle<i::Object> receiver(i::Top::context()->global_proxy()); @@ -1208,14 +1222,28 @@ Local<Value> Script::Run() { } +static i::Handle<i::SharedFunctionInfo> OpenScript(Script* script) { + i::Handle<i::Object> obj = Utils::OpenHandle(script); + i::Handle<i::SharedFunctionInfo> result; + if (obj->IsSharedFunctionInfo()) { + result = + i::Handle<i::SharedFunctionInfo>(i::SharedFunctionInfo::cast(*obj)); + } else { + result = + i::Handle<i::SharedFunctionInfo>(i::JSFunction::cast(*obj)->shared()); + } + return result; +} + + Local<Value> Script::Id() { ON_BAILOUT("v8::Script::Id()", return Local<Value>()); LOG_API("Script::Id"); i::Object* raw_id = NULL; { HandleScope scope; - i::Handle<i::JSFunction> fun = Utils::OpenHandle(this); - i::Handle<i::Script> script(i::Script::cast(fun->shared()->script())); + i::Handle<i::SharedFunctionInfo> function_info = OpenScript(this); + i::Handle<i::Script> script(i::Script::cast(function_info->script())); i::Handle<i::Object> id(script->id()); raw_id = *id; } @@ -1229,9 +1257,9 @@ void Script::SetData(v8::Handle<String> data) { LOG_API("Script::SetData"); { HandleScope scope; - i::Handle<i::JSFunction> fun = Utils::OpenHandle(this); + i::Handle<i::SharedFunctionInfo> function_info = OpenScript(this); i::Handle<i::Object> raw_data = Utils::OpenHandle(*data); - i::Handle<i::Script> script(i::Script::cast(fun->shared()->script())); + i::Handle<i::Script> 
script(i::Script::cast(function_info->script())); script->set_data(*raw_data); } } @@ -3057,6 +3085,16 @@ void Context::DetachGlobal() { } +void Context::ReattachGlobal(Handle<Object> global_object) { + if (IsDeadCheck("v8::Context::ReattachGlobal()")) return; + ENTER_V8; + i::Object** ctx = reinterpret_cast<i::Object**>(this); + i::Handle<i::Context> context = + i::Handle<i::Context>::cast(i::Handle<i::Object>(ctx)); + i::Bootstrapper::ReattachGlobal(context, Utils::OpenHandle(*global_object)); +} + + Local<v8::Object> ObjectTemplate::NewInstance() { ON_BAILOUT("v8::ObjectTemplate::NewInstance()", return Local<v8::Object>()); LOG_API("ObjectTemplate::NewInstance"); @@ -3525,6 +3563,30 @@ void V8::SetGlobalGCEpilogueCallback(GCCallback callback) { } +void V8::AddGCPrologueCallback(GCPrologueCallback callback, GCType gc_type) { + if (IsDeadCheck("v8::V8::AddGCPrologueCallback()")) return; + i::Heap::AddGCPrologueCallback(callback, gc_type); +} + + +void V8::RemoveGCPrologueCallback(GCPrologueCallback callback) { + if (IsDeadCheck("v8::V8::RemoveGCPrologueCallback()")) return; + i::Heap::RemoveGCPrologueCallback(callback); +} + + +void V8::AddGCEpilogueCallback(GCEpilogueCallback callback, GCType gc_type) { + if (IsDeadCheck("v8::V8::AddGCEpilogueCallback()")) return; + i::Heap::AddGCEpilogueCallback(callback, gc_type); +} + + +void V8::RemoveGCEpilogueCallback(GCEpilogueCallback callback) { + if (IsDeadCheck("v8::V8::RemoveGCEpilogueCallback()")) return; + i::Heap::RemoveGCEpilogueCallback(callback); +} + + void V8::PauseProfiler() { #ifdef ENABLE_LOGGING_AND_PROFILING PauseProfilerEx(PROFILER_MODULE_CPU); @@ -3928,6 +3990,11 @@ void Debug::ProcessDebugMessages() { i::Execution::ProcessDebugMesssages(true); } +Local<Context> Debug::GetDebugContext() { + i::EnterDebugger debugger; + return Utils::ToLocal(i::Debug::debug_context()); +} + #endif // ENABLE_DEBUGGER_SUPPORT namespace internal { diff --git a/deps/v8/src/api.h b/deps/v8/src/api.h index a28e1f0774..7b88112c04 100644 --- a/deps/v8/src/api.h +++ b/deps/v8/src/api.h @@ -221,7 +221,7 @@ class Utils { OpenHandle(const v8::Array* data); static inline v8::internal::Handle<v8::internal::String> OpenHandle(const String* data); - static inline v8::internal::Handle<v8::internal::JSFunction> + static inline v8::internal::Handle<v8::internal::Object> OpenHandle(const Script* data); static inline v8::internal::Handle<v8::internal::JSFunction> OpenHandle(const Function* data); @@ -247,7 +247,11 @@ static inline T* ToApi(v8::internal::Handle<v8::internal::Object> obj) { template <class T> v8::internal::Handle<T> v8::internal::Handle<T>::EscapeFrom( v8::HandleScope* scope) { - return Utils::OpenHandle(*scope->Close(Utils::ToLocal(*this))); + v8::internal::Handle<T> handle; + if (!is_null()) { + handle = *this; + } + return Utils::OpenHandle(*scope->Close(Utils::ToLocal(handle))); } @@ -255,7 +259,7 @@ v8::internal::Handle<T> v8::internal::Handle<T>::EscapeFrom( #define MAKE_TO_LOCAL(Name, From, To) \ Local<v8::To> Utils::Name(v8::internal::Handle<v8::internal::From> obj) { \ - ASSERT(!obj->IsTheHole()); \ + ASSERT(obj.is_null() || !obj->IsTheHole()); \ return Local<To>(reinterpret_cast<To*>(obj.location())); \ } @@ -296,7 +300,7 @@ MAKE_OPEN_HANDLE(Data, Object) MAKE_OPEN_HANDLE(Object, JSObject) MAKE_OPEN_HANDLE(Array, JSArray) MAKE_OPEN_HANDLE(String, String) -MAKE_OPEN_HANDLE(Script, JSFunction) +MAKE_OPEN_HANDLE(Script, Object) MAKE_OPEN_HANDLE(Function, JSFunction) MAKE_OPEN_HANDLE(Message, JSObject) MAKE_OPEN_HANDLE(Context, Context) diff 
--git a/deps/v8/src/apinatives.js b/deps/v8/src/apinatives.js index 6451e62603..ca2bbf5c53 100644 --- a/deps/v8/src/apinatives.js +++ b/deps/v8/src/apinatives.js @@ -31,7 +31,7 @@ function CreateDate(time) { - var date = new ORIGINAL_DATE(); + var date = new $Date(); date.setTime(time); return date; } diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc index d4cd818c17..23d5e00fd1 100644 --- a/deps/v8/src/arm/assembler-arm.cc +++ b/deps/v8/src/arm/assembler-arm.cc @@ -47,21 +47,41 @@ unsigned CpuFeatures::supported_ = 0; unsigned CpuFeatures::enabled_ = 0; unsigned CpuFeatures::found_by_runtime_probing_ = 0; + +#ifdef __arm__ +static uint64_t CpuFeaturesImpliedByCompiler() { + uint64_t answer = 0; +#ifdef CAN_USE_ARMV7_INSTRUCTIONS + answer |= 1u << ARMv7; +#endif // def CAN_USE_ARMV7_INSTRUCTIONS + // If the compiler is allowed to use VFP then we can use VFP too in our code + // generation even when generating snapshots. This won't work for cross + // compilation. +#if defined(__VFP_FP__) && !defined(__SOFTFP__) + answer |= 1u << VFP3; +#endif // defined(__VFP_FP__) && !defined(__SOFTFP__) +#ifdef CAN_USE_VFP_INSTRUCTIONS + answer |= 1u << VFP3; +#endif // def CAN_USE_VFP_INSTRUCTIONS + return answer; +} +#endif // def __arm__ + + void CpuFeatures::Probe() { - // If the compiler is allowed to use vfp then we can use vfp too in our - // code generation. -#if !defined(__arm__) +#ifndef __arm__ // For the simulator=arm build, use VFP when FLAG_enable_vfp3 is enabled. if (FLAG_enable_vfp3) { - supported_ |= 1u << VFP3; + supported_ |= 1u << VFP3; } // For the simulator=arm build, use ARMv7 when FLAG_enable_armv7 is enabled if (FLAG_enable_armv7) { - supported_ |= 1u << ARMv7; + supported_ |= 1u << ARMv7; } -#else +#else // def __arm__ if (Serializer::enabled()) { supported_ |= OS::CpuFeaturesImpliedByPlatform(); + supported_ |= CpuFeaturesImpliedByCompiler(); return; // No features if we might serialize. } @@ -532,7 +552,7 @@ static bool MustUseIp(RelocInfo::Mode rmode) { if (!Serializer::enabled()) { Serializer::TooLateToEnableNow(); } -#endif +#endif // def DEBUG return Serializer::enabled(); } else if (rmode == RelocInfo::NONE) { return false; @@ -1137,14 +1157,16 @@ void Assembler::swpb(Register dst, // Exception-generating instructions and debugging support. void Assembler::stop(const char* msg) { -#if !defined(__arm__) +#ifndef __arm__ // The simulator handles these special instructions and stops execution. emit(15 << 28 | ((intptr_t) msg)); -#else - // Just issue a simple break instruction for now. Alternatively we could use - // the swi(0x9f0001) instruction on Linux. +#else // def __arm__ +#ifdef CAN_USE_ARMV5_INSTRUCTIONS bkpt(0); -#endif +#else // ndef CAN_USE_ARMV5_INSTRUCTIONS + swi(0x9f0001); +#endif // ndef CAN_USE_ARMV5_INSTRUCTIONS +#endif // def __arm__ } @@ -1319,11 +1341,28 @@ void Assembler::vldr(const DwVfpRegister dst, // Vdst(15-12) | 1011(11-8) | offset ASSERT(CpuFeatures::IsEnabled(VFP3)); ASSERT(offset % 4 == 0); + ASSERT((offset / 4) < 256); emit(cond | 0xD9*B20 | base.code()*B16 | dst.code()*B12 | 0xB*B8 | ((offset / 4) & 255)); } +void Assembler::vldr(const SwVfpRegister dst, + const Register base, + int offset, + const Condition cond) { + // Sdst = MEM(Rbase + offset). + // Instruction details available in ARM DDI 0406A, A8-628. 
+ // cond(31-28) | 1101(27-24)| 1001(23-20) | Rbase(19-16) | + // Vdst(15-12) | 1010(11-8) | offset + ASSERT(CpuFeatures::IsEnabled(VFP3)); + ASSERT(offset % 4 == 0); + ASSERT((offset / 4) < 256); + emit(cond | 0xD9*B20 | base.code()*B16 | dst.code()*B12 | + 0xA*B8 | ((offset / 4) & 255)); +} + + void Assembler::vstr(const DwVfpRegister src, const Register base, int offset, @@ -1334,6 +1373,7 @@ void Assembler::vstr(const DwVfpRegister src, // Vsrc(15-12) | 1011(11-8) | (offset/4) ASSERT(CpuFeatures::IsEnabled(VFP3)); ASSERT(offset % 4 == 0); + ASSERT((offset / 4) < 256); emit(cond | 0xD8*B20 | base.code()*B16 | src.code()*B12 | 0xB*B8 | ((offset / 4) & 255)); } @@ -1397,31 +1437,172 @@ void Assembler::vmov(const Register dst, } -void Assembler::vcvt(const DwVfpRegister dst, - const SwVfpRegister src, - const Condition cond) { - // Dd = Sm (integer in Sm converted to IEEE 64-bit doubles in Dd). - // Instruction details available in ARM DDI 0406A, A8-576. - // cond(31-28) | 11101(27-23)| D=?(22) | 11(21-20) | 1(19) | opc2=000(18-16) | - // Vd(15-12) | 101(11-9) | sz(8)=1 | op(7)=1 | 1(6) | M=?(5) | 0(4) | Vm(3-0) +// Type of data to read from or write to VFP register. +// Used as specifier in generic vcvt instruction. +enum VFPType { S32, U32, F32, F64 }; + + +static bool IsSignedVFPType(VFPType type) { + switch (type) { + case S32: + return true; + case U32: + return false; + default: + UNREACHABLE(); + return false; + } +} + + +static bool IsIntegerVFPType(VFPType type) { + switch (type) { + case S32: + case U32: + return true; + case F32: + case F64: + return false; + default: + UNREACHABLE(); + return false; + } +} + + +static bool IsDoubleVFPType(VFPType type) { + switch (type) { + case F32: + return false; + case F64: + return true; + default: + UNREACHABLE(); + return false; + } +} + + +// Depending on split_last_bit split binary representation of reg_code into Vm:M +// or M:Vm form (where M is single bit). +static void SplitRegCode(bool split_last_bit, + int reg_code, + int* vm, + int* m) { + if (split_last_bit) { + *m = reg_code & 0x1; + *vm = reg_code >> 1; + } else { + *m = (reg_code & 0x10) >> 4; + *vm = reg_code & 0x0F; + } +} + + +// Encode vcvt.src_type.dst_type instruction. +static Instr EncodeVCVT(const VFPType dst_type, + const int dst_code, + const VFPType src_type, + const int src_code, + const Condition cond) { + if (IsIntegerVFPType(dst_type) || IsIntegerVFPType(src_type)) { + // Conversion between IEEE floating point and 32-bit integer. + // Instruction details available in ARM DDI 0406B, A8.6.295. + // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 1(19) | opc2(18-16) | + // Vd(15-12) | 101(11-9) | sz(8) | op(7) | 1(6) | M(5) | 0(4) | Vm(3-0) + ASSERT(!IsIntegerVFPType(dst_type) || !IsIntegerVFPType(src_type)); + + int sz, opc2, D, Vd, M, Vm, op; + + if (IsIntegerVFPType(dst_type)) { + opc2 = IsSignedVFPType(dst_type) ? 0x5 : 0x4; + sz = IsDoubleVFPType(src_type) ? 0x1 : 0x0; + op = 1; // round towards zero + SplitRegCode(!IsDoubleVFPType(src_type), src_code, &Vm, &M); + SplitRegCode(true, dst_code, &Vd, &D); + } else { + ASSERT(IsIntegerVFPType(src_type)); + + opc2 = 0x0; + sz = IsDoubleVFPType(dst_type) ? 0x1 : 0x0; + op = IsSignedVFPType(src_type) ? 0x1 : 0x0; + SplitRegCode(true, src_code, &Vm, &M); + SplitRegCode(!IsDoubleVFPType(dst_type), dst_code, &Vd, &D); + } + + return (cond | 0xE*B24 | B23 | D*B22 | 0x3*B20 | B19 | opc2*B16 | + Vd*B12 | 0x5*B9 | sz*B8 | op*B7 | B6 | M*B5 | Vm); + } else { + // Conversion between IEEE double and single precision. 
+ // Instruction details available in ARM DDI 0406B, A8.6.298. + // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0111(19-16) | + // Vd(15-12) | 101(11-9) | sz(8) | 1(7) | 1(6) | M(5) | 0(4) | Vm(3-0) + int sz, D, Vd, M, Vm; + + ASSERT(IsDoubleVFPType(dst_type) != IsDoubleVFPType(src_type)); + sz = IsDoubleVFPType(src_type) ? 0x1 : 0x0; + SplitRegCode(IsDoubleVFPType(src_type), dst_code, &Vd, &D); + SplitRegCode(!IsDoubleVFPType(src_type), src_code, &Vm, &M); + + return (cond | 0xE*B24 | B23 | D*B22 | 0x3*B20 | 0x7*B16 | + Vd*B12 | 0x5*B9 | sz*B8 | B7 | B6 | M*B5 | Vm); + } +} + + +void Assembler::vcvt_f64_s32(const DwVfpRegister dst, + const SwVfpRegister src, + const Condition cond) { ASSERT(CpuFeatures::IsEnabled(VFP3)); - emit(cond | 0xE*B24 | B23 | 0x3*B20 | B19 | - dst.code()*B12 | 0x5*B9 | B8 | B7 | B6 | - (0x1 & src.code())*B5 | (src.code() >> 1)); + emit(EncodeVCVT(F64, dst.code(), S32, src.code(), cond)); } -void Assembler::vcvt(const SwVfpRegister dst, - const DwVfpRegister src, - const Condition cond) { - // Sd = Dm (IEEE 64-bit doubles in Dm converted to 32 bit integer in Sd). - // Instruction details available in ARM DDI 0406A, A8-576. - // cond(31-28) | 11101(27-23)| D=?(22) | 11(21-20) | 1(19) | opc2=101(18-16)| - // Vd(15-12) | 101(11-9) | sz(8)=1 | op(7)=? | 1(6) | M=?(5) | 0(4) | Vm(3-0) +void Assembler::vcvt_f32_s32(const SwVfpRegister dst, + const SwVfpRegister src, + const Condition cond) { + ASSERT(CpuFeatures::IsEnabled(VFP3)); + emit(EncodeVCVT(F32, dst.code(), S32, src.code(), cond)); +} + + +void Assembler::vcvt_f64_u32(const DwVfpRegister dst, + const SwVfpRegister src, + const Condition cond) { + ASSERT(CpuFeatures::IsEnabled(VFP3)); + emit(EncodeVCVT(F64, dst.code(), U32, src.code(), cond)); +} + + +void Assembler::vcvt_s32_f64(const SwVfpRegister dst, + const DwVfpRegister src, + const Condition cond) { + ASSERT(CpuFeatures::IsEnabled(VFP3)); + emit(EncodeVCVT(S32, dst.code(), F64, src.code(), cond)); +} + + +void Assembler::vcvt_u32_f64(const SwVfpRegister dst, + const DwVfpRegister src, + const Condition cond) { + ASSERT(CpuFeatures::IsEnabled(VFP3)); + emit(EncodeVCVT(U32, dst.code(), F64, src.code(), cond)); +} + + +void Assembler::vcvt_f64_f32(const DwVfpRegister dst, + const SwVfpRegister src, + const Condition cond) { + ASSERT(CpuFeatures::IsEnabled(VFP3)); + emit(EncodeVCVT(F64, dst.code(), F32, src.code(), cond)); +} + + +void Assembler::vcvt_f32_f64(const SwVfpRegister dst, + const DwVfpRegister src, + const Condition cond) { ASSERT(CpuFeatures::IsEnabled(VFP3)); - emit(cond | 0xE*B24 | B23 |(0x1 & dst.code())*B22 | - 0x3*B20 | B19 | 0x5*B16 | (dst.code() >> 1)*B12 | - 0x5*B9 | B8 | B7 | B6 | src.code()); + emit(EncodeVCVT(F32, dst.code(), F64, src.code(), cond)); } diff --git a/deps/v8/src/arm/assembler-arm.h b/deps/v8/src/arm/assembler-arm.h index 539a6b8990..98be7b59cc 100644 --- a/deps/v8/src/arm/assembler-arm.h +++ b/deps/v8/src/arm/assembler-arm.h @@ -826,6 +826,12 @@ class Assembler : public Malloced { const Register base, int offset, // Offset must be a multiple of 4. const Condition cond = al); + + void vldr(const SwVfpRegister dst, + const Register base, + int offset, // Offset must be a multiple of 4. + const Condition cond = al); + void vstr(const DwVfpRegister src, const Register base, int offset, // Offset must be a multiple of 4. 
@@ -844,12 +850,27 @@ class Assembler : public Malloced { void vmov(const Register dst, const SwVfpRegister src, const Condition cond = al); - void vcvt(const DwVfpRegister dst, - const SwVfpRegister src, - const Condition cond = al); - void vcvt(const SwVfpRegister dst, - const DwVfpRegister src, - const Condition cond = al); + void vcvt_f64_s32(const DwVfpRegister dst, + const SwVfpRegister src, + const Condition cond = al); + void vcvt_f32_s32(const SwVfpRegister dst, + const SwVfpRegister src, + const Condition cond = al); + void vcvt_f64_u32(const DwVfpRegister dst, + const SwVfpRegister src, + const Condition cond = al); + void vcvt_s32_f64(const SwVfpRegister dst, + const DwVfpRegister src, + const Condition cond = al); + void vcvt_u32_f64(const SwVfpRegister dst, + const DwVfpRegister src, + const Condition cond = al); + void vcvt_f64_f32(const DwVfpRegister dst, + const SwVfpRegister src, + const Condition cond = al); + void vcvt_f32_f64(const SwVfpRegister dst, + const DwVfpRegister src, + const Condition cond = al); void vadd(const DwVfpRegister dst, const DwVfpRegister src1, diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc index d3e98a3109..5e0067716b 100644 --- a/deps/v8/src/arm/codegen-arm.cc +++ b/deps/v8/src/arm/codegen-arm.cc @@ -2305,14 +2305,13 @@ void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) { } -void CodeGenerator::InstantiateBoilerplate(Handle<JSFunction> boilerplate) { +void CodeGenerator::InstantiateFunction( + Handle<SharedFunctionInfo> function_info) { VirtualFrame::SpilledScope spilled_scope; - ASSERT(boilerplate->IsBoilerplate()); - - __ mov(r0, Operand(boilerplate)); + __ mov(r0, Operand(function_info)); // Use the fast case closure allocation code that allocates in new // space for nested functions that don't need literals cloning. - if (scope()->is_function_scope() && boilerplate->NumberOfLiterals() == 0) { + if (scope()->is_function_scope() && function_info->num_literals() == 0) { FastNewClosureStub stub; frame_->EmitPush(r0); frame_->CallStub(&stub, 1); @@ -2334,27 +2333,27 @@ void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) { VirtualFrame::SpilledScope spilled_scope; Comment cmnt(masm_, "[ FunctionLiteral"); - // Build the function boilerplate and instantiate it. - Handle<JSFunction> boilerplate = - Compiler::BuildBoilerplate(node, script(), this); + // Build the function info and instantiate it. + Handle<SharedFunctionInfo> function_info = + Compiler::BuildFunctionInfo(node, script(), this); // Check for stack-overflow exception. if (HasStackOverflow()) { ASSERT(frame_->height() == original_height); return; } - InstantiateBoilerplate(boilerplate); + InstantiateFunction(function_info); ASSERT(frame_->height() == original_height + 1); } -void CodeGenerator::VisitFunctionBoilerplateLiteral( - FunctionBoilerplateLiteral* node) { +void CodeGenerator::VisitSharedFunctionInfoLiteral( + SharedFunctionInfoLiteral* node) { #ifdef DEBUG int original_height = frame_->height(); #endif VirtualFrame::SpilledScope spilled_scope; - Comment cmnt(masm_, "[ FunctionBoilerplateLiteral"); - InstantiateBoilerplate(node->boilerplate()); + Comment cmnt(masm_, "[ SharedFunctionInfoLiteral"); + InstantiateFunction(node->shared_function_info()); ASSERT(frame_->height() == original_height + 1); } @@ -4527,11 +4526,11 @@ void Reference::SetValue(InitState init_state) { void FastNewClosureStub::Generate(MacroAssembler* masm) { - // Clone the boilerplate in new space. Set the context to the - // current context in cp. 
+ // Create a new closure from the given function info in new + // space. Set the context to the current context in cp. Label gc; - // Pop the boilerplate function from the stack. + // Pop the function info from the stack. __ pop(r3); // Attempt to allocate new JSFunction in new space. @@ -4549,20 +4548,18 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) { __ ldr(r2, MemOperand(r2, Context::SlotOffset(Context::FUNCTION_MAP_INDEX))); __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset)); - // Clone the rest of the boilerplate fields. We don't have to update - // the write barrier because the allocated object is in new space. - for (int offset = kPointerSize; - offset < JSFunction::kSize; - offset += kPointerSize) { - if (offset == JSFunction::kContextOffset) { - __ str(cp, FieldMemOperand(r0, offset)); - } else { - __ ldr(r1, FieldMemOperand(r3, offset)); - __ str(r1, FieldMemOperand(r0, offset)); - } - } - - // Return result. The argument boilerplate has been popped already. + // Initialize the rest of the function. We don't have to update the + // write barrier because the allocated object is in new space. + __ LoadRoot(r1, Heap::kEmptyFixedArrayRootIndex); + __ LoadRoot(r2, Heap::kTheHoleValueRootIndex); + __ str(r1, FieldMemOperand(r0, JSObject::kPropertiesOffset)); + __ str(r1, FieldMemOperand(r0, JSObject::kElementsOffset)); + __ str(r2, FieldMemOperand(r0, JSFunction::kPrototypeOrInitialMapOffset)); + __ str(r3, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset)); + __ str(cp, FieldMemOperand(r0, JSFunction::kContextOffset)); + __ str(r1, FieldMemOperand(r0, JSFunction::kLiteralsOffset)); + + // Return result. The argument function info has been popped already. __ Ret(); // Create a new closure through the slower runtime call. @@ -4685,42 +4682,6 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) { } -// Count leading zeros in a 32 bit word. On ARM5 and later it uses the clz -// instruction. On pre-ARM5 hardware this routine gives the wrong answer for 0 -// (31 instead of 32). -static void CountLeadingZeros( - MacroAssembler* masm, - Register source, - Register scratch, - Register zeros) { -#ifdef CAN_USE_ARMV5_INSTRUCTIONS - __ clz(zeros, source); // This instruction is only supported after ARM5. -#else - __ mov(zeros, Operand(0)); - __ mov(scratch, source); - // Top 16. - __ tst(scratch, Operand(0xffff0000)); - __ add(zeros, zeros, Operand(16), LeaveCC, eq); - __ mov(scratch, Operand(scratch, LSL, 16), LeaveCC, eq); - // Top 8. - __ tst(scratch, Operand(0xff000000)); - __ add(zeros, zeros, Operand(8), LeaveCC, eq); - __ mov(scratch, Operand(scratch, LSL, 8), LeaveCC, eq); - // Top 4. - __ tst(scratch, Operand(0xf0000000)); - __ add(zeros, zeros, Operand(4), LeaveCC, eq); - __ mov(scratch, Operand(scratch, LSL, 4), LeaveCC, eq); - // Top 2. - __ tst(scratch, Operand(0xc0000000)); - __ add(zeros, zeros, Operand(2), LeaveCC, eq); - __ mov(scratch, Operand(scratch, LSL, 2), LeaveCC, eq); - // Top bit. - __ tst(scratch, Operand(0x80000000u)); - __ add(zeros, zeros, Operand(1), LeaveCC, eq); -#endif -} - - // Takes a Smi and converts to an IEEE 64 bit floating point value in two // registers. The format is 1 sign bit, 11 exponent bits (biased 1023) and // 52 fraction bits (20 in the first word, 32 in the second). Zeros is a @@ -4784,25 +4745,27 @@ void ConvertToDoubleStub::Generate(MacroAssembler* masm) { __ and_(exponent, source_, Operand(HeapNumber::kSignMask), SetCC); // Subtract from 0 if source was negative. 
__ rsb(source_, source_, Operand(0), LeaveCC, ne); + + // We have -1, 0 or 1, which we treat specially. Register source_ contains + // absolute value: it is either equal to 1 (special case of -1 and 1), + // greater than 1 (not a special case) or less than 1 (special case of 0). __ cmp(source_, Operand(1)); __ b(gt, ¬_special); - // We have -1, 0 or 1, which we treat specially. - __ cmp(source_, Operand(0)); // For 1 or -1 we need to or in the 0 exponent (biased to 1023). static const uint32_t exponent_word_for_1 = HeapNumber::kExponentBias << HeapNumber::kExponentShift; - __ orr(exponent, exponent, Operand(exponent_word_for_1), LeaveCC, ne); + __ orr(exponent, exponent, Operand(exponent_word_for_1), LeaveCC, eq); // 1, 0 and -1 all have 0 for the second word. __ mov(mantissa, Operand(0)); __ Ret(); __ bind(¬_special); - // Count leading zeros. Uses result2 for a scratch register on pre-ARM5. + // Count leading zeros. Uses mantissa for a scratch register on pre-ARM5. // Gets the wrong answer for 0, but we already checked for that case above. - CountLeadingZeros(masm, source_, mantissa, zeros_); + __ CountLeadingZeros(source_, mantissa, zeros_); // Compute exponent and or it into the exponent register. - // We use result2 as a scratch register here. + // We use mantissa as a scratch register here. __ rsb(mantissa, zeros_, Operand(31 + HeapNumber::kExponentBias)); __ orr(exponent, exponent, @@ -4821,45 +4784,6 @@ void ConvertToDoubleStub::Generate(MacroAssembler* masm) { } -// This stub can convert a signed int32 to a heap number (double). It does -// not work for int32s that are in Smi range! No GC occurs during this stub -// so you don't have to set up the frame. -class WriteInt32ToHeapNumberStub : public CodeStub { - public: - WriteInt32ToHeapNumberStub(Register the_int, - Register the_heap_number, - Register scratch) - : the_int_(the_int), - the_heap_number_(the_heap_number), - scratch_(scratch) { } - - private: - Register the_int_; - Register the_heap_number_; - Register scratch_; - - // Minor key encoding in 16 bits. - class ModeBits: public BitField<OverwriteMode, 0, 2> {}; - class OpBits: public BitField<Token::Value, 2, 14> {}; - - Major MajorKey() { return WriteInt32ToHeapNumber; } - int MinorKey() { - // Encode the parameters in a unique 16 bit value. - return the_int_.code() + - (the_heap_number_.code() << 4) + - (scratch_.code() << 8); - } - - void Generate(MacroAssembler* masm); - - const char* GetName() { return "WriteInt32ToHeapNumberStub"; } - -#ifdef DEBUG - void Print() { PrintF("WriteInt32ToHeapNumberStub\n"); } -#endif -}; - - // See comment for class. void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) { Label max_negative_int; @@ -5042,7 +4966,7 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm, CpuFeatures::Scope scope(VFP3); __ mov(r7, Operand(r1, ASR, kSmiTagSize)); __ vmov(s15, r7); - __ vcvt(d7, s15); + __ vcvt_f64_s32(d7, s15); // Load the double from rhs, tagged HeapNumber r0, to d6. __ sub(r7, r0, Operand(kHeapObjectTag)); __ vldr(d6, r7, HeapNumber::kValueOffset); @@ -5085,7 +5009,7 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm, __ vldr(d7, r7, HeapNumber::kValueOffset); __ mov(r7, Operand(r0, ASR, kSmiTagSize)); __ vmov(s13, r7); - __ vcvt(d6, s13); + __ vcvt_f64_s32(d6, s13); } else { __ push(lr); // Load lhs to a double in r2, r3. @@ -5494,29 +5418,6 @@ void CompareStub::Generate(MacroAssembler* masm) { } -// Allocates a heap number or jumps to the label if the young space is full and -// a scavenge is needed. 
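[Editorial aside, not V8 source.] ConvertToDoubleStub above relies on the IEEE 754 double layout: 1 sign bit, 11 exponent bits biased by 1023, and 52 fraction bits split 20/32 across the two words. The small standalone program below checks the special case for 1 and -1, where only the biased-zero exponent word gets ORed into the result:

#include <cstdint>
#include <cstring>
#include <cstdio>

int main() {
  double one = 1.0;
  uint64_t bits = 0;
  std::memcpy(&bits, &one, sizeof(bits));
  uint32_t exponent_word = static_cast<uint32_t>(bits >> 32);  // sign, exponent, top 20 mantissa bits
  uint32_t mantissa_word = static_cast<uint32_t>(bits);        // low 32 mantissa bits
  // The exponent field starts 20 bits into the high word; the bias is 1023.
  uint32_t exponent_word_for_1 = 1023u << 20;  // 0x3FF00000
  std::printf("high=0x%08x low=0x%08x expected=0x%08x\n",
              exponent_word, mantissa_word, exponent_word_for_1);
  return (exponent_word == exponent_word_for_1 && mantissa_word == 0) ? 0 : 1;
}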
-static void AllocateHeapNumber( - MacroAssembler* masm, - Label* need_gc, // Jump here if young space is full. - Register result, // The tagged address of the new heap number. - Register scratch1, // A scratch register. - Register scratch2) { // Another scratch register. - // Allocate an object in the heap for the heap number and tag it as a heap - // object. - __ AllocateInNewSpace(HeapNumber::kSize / kPointerSize, - result, - scratch1, - scratch2, - need_gc, - TAG_OBJECT); - - // Get heap number map and store it in the allocated object. - __ LoadRoot(scratch1, Heap::kHeapNumberMapRootIndex); - __ str(scratch1, FieldMemOperand(result, HeapObject::kMapOffset)); -} - - // We fall into this code if the operands were Smis, but the result was // not (eg. overflow). We branch into this code (to the not_smi label) if // the operands were not both Smi. The operands are in r0 and r1. In order @@ -5533,7 +5434,7 @@ static void HandleBinaryOpSlowCases(MacroAssembler* masm, // Smi-smi case (overflow). // Since both are Smis there is no heap number to overwrite, so allocate. // The new heap number is in r5. r6 and r7 are scratch. - AllocateHeapNumber(masm, &slow, r5, r6, r7); + __ AllocateHeapNumber(r5, r6, r7, &slow); // If we have floating point hardware, inline ADD, SUB, MUL, and DIV, // using registers d7 and d6 for the double values. @@ -5543,10 +5444,10 @@ static void HandleBinaryOpSlowCases(MacroAssembler* masm, CpuFeatures::Scope scope(VFP3); __ mov(r7, Operand(r0, ASR, kSmiTagSize)); __ vmov(s15, r7); - __ vcvt(d7, s15); + __ vcvt_f64_s32(d7, s15); __ mov(r7, Operand(r1, ASR, kSmiTagSize)); __ vmov(s13, r7); - __ vcvt(d6, s13); + __ vcvt_f64_s32(d6, s13); } else { // Write Smi from r0 to r3 and r2 in double format. r6 is scratch. __ mov(r7, Operand(r0)); @@ -5628,7 +5529,7 @@ static void HandleBinaryOpSlowCases(MacroAssembler* masm, if (mode == NO_OVERWRITE) { // In the case where there is no chance of an overwritable float we may as // well do the allocation immediately while r0 and r1 are untouched. - AllocateHeapNumber(masm, &slow, r5, r6, r7); + __ AllocateHeapNumber(r5, r6, r7, &slow); } // Move r0 to a double in r2-r3. @@ -5653,7 +5554,7 @@ static void HandleBinaryOpSlowCases(MacroAssembler* masm, __ bind(&r0_is_smi); if (mode == OVERWRITE_RIGHT) { // We can't overwrite a Smi so get address of new heap number into r5. - AllocateHeapNumber(masm, &slow, r5, r6, r7); + __ AllocateHeapNumber(r5, r6, r7, &slow); } if (use_fp_registers) { @@ -5661,7 +5562,7 @@ static void HandleBinaryOpSlowCases(MacroAssembler* masm, // Convert smi in r0 to double in d7. __ mov(r7, Operand(r0, ASR, kSmiTagSize)); __ vmov(s15, r7); - __ vcvt(d7, s15); + __ vcvt_f64_s32(d7, s15); } else { // Write Smi from r0 to r3 and r2 in double format. __ mov(r7, Operand(r0)); @@ -5695,7 +5596,7 @@ static void HandleBinaryOpSlowCases(MacroAssembler* masm, __ bind(&r1_is_smi); if (mode == OVERWRITE_LEFT) { // We can't overwrite a Smi so get address of new heap number into r5. - AllocateHeapNumber(masm, &slow, r5, r6, r7); + __ AllocateHeapNumber(r5, r6, r7, &slow); } if (use_fp_registers) { @@ -5703,7 +5604,7 @@ static void HandleBinaryOpSlowCases(MacroAssembler* masm, // Convert smi in r1 to double in d6. __ mov(r7, Operand(r1, ASR, kSmiTagSize)); __ vmov(s13, r7); - __ vcvt(d6, s13); + __ vcvt_f64_s32(d6, s13); } else { // Write Smi from r1 to r1 and r0 in double format. __ mov(r7, Operand(r1)); @@ -5830,7 +5731,7 @@ static void GetInt32(MacroAssembler* masm, // conversion using round to zero. 
__ ldr(scratch2, FieldMemOperand(source, HeapNumber::kMantissaOffset)); __ vmov(d7, scratch2, scratch); - __ vcvt(s15, d7); + __ vcvt_s32_f64(s15, d7); __ vmov(dest, s15); } else { // Get the top bits of the mantissa. @@ -5942,7 +5843,7 @@ void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm) { } case NO_OVERWRITE: { // Get a new heap number in r5. r6 and r7 are scratch. - AllocateHeapNumber(masm, &slow, r5, r6, r7); + __ AllocateHeapNumber(r5, r6, r7, &slow); } default: break; } @@ -5962,7 +5863,7 @@ void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm) { if (mode_ != NO_OVERWRITE) { __ bind(&have_to_allocate); // Get a new heap number in r5. r6 and r7 are scratch. - AllocateHeapNumber(masm, &slow, r5, r6, r7); + __ AllocateHeapNumber(r5, r6, r7, &slow); __ jmp(&got_a_heap_number); } @@ -6380,7 +6281,7 @@ void GenericUnaryOpStub::Generate(MacroAssembler* masm) { __ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign. __ str(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset)); } else { - AllocateHeapNumber(masm, &slow, r1, r2, r3); + __ AllocateHeapNumber(r1, r2, r3, &slow); __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset)); __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset)); __ str(r3, FieldMemOperand(r1, HeapNumber::kMantissaOffset)); @@ -6410,7 +6311,7 @@ void GenericUnaryOpStub::Generate(MacroAssembler* masm) { // Allocate a fresh heap number, but don't overwrite r0 until // we're sure we can do it without going through the slow case // that needs the value in r0. - AllocateHeapNumber(masm, &slow, r2, r3, r4); + __ AllocateHeapNumber(r2, r3, r4, &slow); __ mov(r0, Operand(r2)); } @@ -7117,53 +7018,59 @@ void CallFunctionStub::Generate(MacroAssembler* masm) { } +// Unfortunately you have to run without snapshots to see most of these +// names in the profile since most compare stubs end up in the snapshot. 
const char* CompareStub::GetName() { + if (name_ != NULL) return name_; + const int kMaxNameLength = 100; + name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength); + if (name_ == NULL) return "OOM"; + + const char* cc_name; switch (cc_) { - case lt: return "CompareStub_LT"; - case gt: return "CompareStub_GT"; - case le: return "CompareStub_LE"; - case ge: return "CompareStub_GE"; - case ne: { - if (strict_) { - if (never_nan_nan_) { - return "CompareStub_NE_STRICT_NO_NAN"; - } else { - return "CompareStub_NE_STRICT"; - } - } else { - if (never_nan_nan_) { - return "CompareStub_NE_NO_NAN"; - } else { - return "CompareStub_NE"; - } - } - } - case eq: { - if (strict_) { - if (never_nan_nan_) { - return "CompareStub_EQ_STRICT_NO_NAN"; - } else { - return "CompareStub_EQ_STRICT"; - } - } else { - if (never_nan_nan_) { - return "CompareStub_EQ_NO_NAN"; - } else { - return "CompareStub_EQ"; - } - } - } - default: return "CompareStub"; + case lt: cc_name = "LT"; break; + case gt: cc_name = "GT"; break; + case le: cc_name = "LE"; break; + case ge: cc_name = "GE"; break; + case eq: cc_name = "EQ"; break; + case ne: cc_name = "NE"; break; + default: cc_name = "UnknownCondition"; break; } + + const char* strict_name = ""; + if (strict_ && (cc_ == eq || cc_ == ne)) { + strict_name = "_STRICT"; + } + + const char* never_nan_nan_name = ""; + if (never_nan_nan_ && (cc_ == eq || cc_ == ne)) { + never_nan_nan_name = "_NO_NAN"; + } + + const char* include_number_compare_name = ""; + if (!include_number_compare_) { + include_number_compare_name = "_NO_NUMBER"; + } + + OS::SNPrintF(Vector<char>(name_, kMaxNameLength), + "CompareStub_%s%s%s%s", + cc_name, + strict_name, + never_nan_nan_name, + include_number_compare_name); + return name_; } int CompareStub::MinorKey() { - // Encode the three parameters in a unique 16 bit value. - ASSERT((static_cast<unsigned>(cc_) >> 26) < (1 << 16)); - int nnn_value = (never_nan_nan_ ? 2 : 0); - if (cc_ != eq) nnn_value = 0; // Avoid duplicate stubs. - return (static_cast<unsigned>(cc_) >> 26) | nnn_value | (strict_ ? 1 : 0); + // Encode the three parameters in a unique 16 bit value. To avoid duplicate + // stubs the never NaN NaN condition is only taken into account if the + // condition is equals. + ASSERT((static_cast<unsigned>(cc_) >> 28) < (1 << 13)); + return ConditionField::encode(static_cast<unsigned>(cc_) >> 28) + | StrictField::encode(strict_) + | NeverNanNanField::encode(cc_ == eq ? never_nan_nan_ : false) + | IncludeNumberCompareField::encode(include_number_compare_); } diff --git a/deps/v8/src/arm/codegen-arm.h b/deps/v8/src/arm/codegen-arm.h index 68f293a853..4bea3415a5 100644 --- a/deps/v8/src/arm/codegen-arm.h +++ b/deps/v8/src/arm/codegen-arm.h @@ -348,8 +348,8 @@ class CodeGenerator: public AstVisitor { // name/value pairs. void DeclareGlobals(Handle<FixedArray> pairs); - // Instantiate the function boilerplate. - void InstantiateBoilerplate(Handle<JSFunction> boilerplate); + // Instantiate the function based on the shared function info. + void InstantiateFunction(Handle<SharedFunctionInfo> function_info); // Support for type checks. void GenerateIsSmi(ZoneList<Expression*>* args); @@ -660,6 +660,46 @@ class StringCompareStub: public CodeStub { }; +// This stub can convert a signed int32 to a heap number (double). It does +// not work for int32s that are in Smi range! No GC occurs during this stub +// so you don't have to set up the frame. 
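[Editorial aside, not V8 source.] Both rewritten MinorKey() methods, CompareStub above and WriteInt32ToHeapNumberStub below, pack their parameters with V8's BitField template. The standalone sketch below shows that packing idiom with a stand-in BitField type and hypothetical field names mirroring the WriteInt32ToHeapNumberStub layout:

#include <cstdint>
#include <cassert>

// Minimal stand-in for v8::internal::BitField: a field of `size` bits
// starting at bit `shift` inside a 32-bit key.
template <typename T, int shift, int size>
struct BitFieldSketch {
  static const uint32_t kMask = ((1u << size) - 1u) << shift;
  static uint32_t encode(T value) {
    return (static_cast<uint32_t>(value) << shift) & kMask;
  }
  static T decode(uint32_t key) {
    return static_cast<T>((key & kMask) >> shift);
  }
};

// Hypothetical mirrors of the three 4-bit register-code fields.
typedef BitFieldSketch<int, 0, 4> IntRegisterBitsSketch;
typedef BitFieldSketch<int, 4, 4> HeapNumberRegisterBitsSketch;
typedef BitFieldSketch<int, 8, 4> ScratchRegisterBitsSketch;

int main() {
  uint32_t key = IntRegisterBitsSketch::encode(1)
               | HeapNumberRegisterBitsSketch::encode(5)
               | ScratchRegisterBitsSketch::encode(9);
  assert(IntRegisterBitsSketch::decode(key) == 1);
  assert(HeapNumberRegisterBitsSketch::decode(key) == 5);
  assert(ScratchRegisterBitsSketch::decode(key) == 9);
  return 0;
}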
+class WriteInt32ToHeapNumberStub : public CodeStub { + public: + WriteInt32ToHeapNumberStub(Register the_int, + Register the_heap_number, + Register scratch) + : the_int_(the_int), + the_heap_number_(the_heap_number), + scratch_(scratch) { } + + private: + Register the_int_; + Register the_heap_number_; + Register scratch_; + + // Minor key encoding in 16 bits. + class IntRegisterBits: public BitField<int, 0, 4> {}; + class HeapNumberRegisterBits: public BitField<int, 4, 4> {}; + class ScratchRegisterBits: public BitField<int, 8, 4> {}; + + Major MajorKey() { return WriteInt32ToHeapNumber; } + int MinorKey() { + // Encode the parameters in a unique 16 bit value. + return IntRegisterBits::encode(the_int_.code()) + | HeapNumberRegisterBits::encode(the_heap_number_.code()) + | ScratchRegisterBits::encode(scratch_.code()); + } + + void Generate(MacroAssembler* masm); + + const char* GetName() { return "WriteInt32ToHeapNumberStub"; } + +#ifdef DEBUG + void Print() { PrintF("WriteInt32ToHeapNumberStub\n"); } +#endif +}; + + class NumberToStringStub: public CodeStub { public: NumberToStringStub() { } diff --git a/deps/v8/src/arm/constants-arm.cc b/deps/v8/src/arm/constants-arm.cc index 89ff7c0857..2e371207e0 100644 --- a/deps/v8/src/arm/constants-arm.cc +++ b/deps/v8/src/arm/constants-arm.cc @@ -81,9 +81,27 @@ const char* VFPRegisters::names_[kNumVFPRegisters] = { }; -const char* VFPRegisters::Name(int reg) { +const char* VFPRegisters::Name(int reg, bool is_double) { ASSERT((0 <= reg) && (reg < kNumVFPRegisters)); - return names_[reg]; + return names_[reg + is_double ? kNumVFPSingleRegisters : 0]; +} + + +int VFPRegisters::Number(const char* name, bool* is_double) { + for (int i = 0; i < kNumVFPRegisters; i++) { + if (strcmp(names_[i], name) == 0) { + if (i < kNumVFPSingleRegisters) { + *is_double = false; + return i; + } else { + *is_double = true; + return i - kNumVFPSingleRegisters; + } + } + } + + // No register with the requested name found. + return kNoRegister; } @@ -104,7 +122,7 @@ int Registers::Number(const char* name) { i++; } - // No register with the reguested name found. + // No register with the requested name found. return kNoRegister; } diff --git a/deps/v8/src/arm/constants-arm.h b/deps/v8/src/arm/constants-arm.h index 2b883f3b34..36d2fb67b6 100644 --- a/deps/v8/src/arm/constants-arm.h +++ b/deps/v8/src/arm/constants-arm.h @@ -84,7 +84,10 @@ namespace arm { static const int kNumRegisters = 16; // VFP support. -static const int kNumVFPRegisters = 48; +static const int kNumVFPSingleRegisters = 32; +static const int kNumVFPDoubleRegisters = 16; +static const int kNumVFPRegisters = + kNumVFPSingleRegisters + kNumVFPDoubleRegisters; // PC is register 15. static const int kPCRegister = 15; @@ -254,6 +257,14 @@ class Instr { inline int RtField() const { return Bits(15, 12); } inline int PField() const { return Bit(24); } inline int UField() const { return Bit(23); } + inline int Opc1Field() const { return (Bit(23) << 2) | Bits(21, 20); } + inline int Opc2Field() const { return Bits(19, 16); } + inline int Opc3Field() const { return Bits(7, 6); } + inline int SzField() const { return Bit(8); } + inline int VLField() const { return Bit(20); } + inline int VCField() const { return Bit(8); } + inline int VAField() const { return Bits(23, 21); } + inline int VBField() const { return Bits(6, 5); } // Fields used in Data processing instructions inline Opcode OpcodeField() const { @@ -344,7 +355,12 @@ class Registers { class VFPRegisters { public: // Return the name of the register. 
- static const char* Name(int reg); + static const char* Name(int reg, bool is_double); + + // Lookup the register number for the name provided. + // Set flag pointed by is_double to true if register + // is double-precision. + static int Number(const char* name, bool* is_double); private: static const char* names_[kNumVFPRegisters]; diff --git a/deps/v8/src/arm/cpu-arm.cc b/deps/v8/src/arm/cpu-arm.cc index 4e39cdaff3..55f31d46f2 100644 --- a/deps/v8/src/arm/cpu-arm.cc +++ b/deps/v8/src/arm/cpu-arm.cc @@ -122,7 +122,7 @@ void CPU::FlushICache(void* start, size_t size) { void CPU::DebugBreak() { -#if !defined (__arm__) +#if !defined (__arm__) || !defined(CAN_USE_ARMV5_INSTRUCTIONS) UNIMPLEMENTED(); // when building ARM emulator target #else asm volatile("bkpt 0"); diff --git a/deps/v8/src/arm/disasm-arm.cc b/deps/v8/src/arm/disasm-arm.cc index 127c160863..8e1776d98c 100644 --- a/deps/v8/src/arm/disasm-arm.cc +++ b/deps/v8/src/arm/disasm-arm.cc @@ -129,6 +129,10 @@ class Decoder { void DecodeTypeVFP(Instr* instr); void DecodeType6CoprocessorIns(Instr* instr); + void DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(Instr* instr); + void DecodeVCMP(Instr* instr); + void DecodeVCVTBetweenDoubleAndSingle(Instr* instr); + void DecodeVCVTBetweenFloatingPointAndInteger(Instr* instr); const disasm::NameConverter& converter_; v8::internal::Vector<char> out_buffer_; @@ -181,12 +185,12 @@ void Decoder::PrintRegister(int reg) { // Print the VFP S register name according to the active name converter. void Decoder::PrintSRegister(int reg) { - Print(assembler::arm::VFPRegisters::Name(reg)); + Print(assembler::arm::VFPRegisters::Name(reg, false)); } // Print the VFP D register name according to the active name converter. void Decoder::PrintDRegister(int reg) { - Print(assembler::arm::VFPRegisters::Name(reg + 32)); + Print(assembler::arm::VFPRegisters::Name(reg, true)); } @@ -930,87 +934,151 @@ void Decoder::DecodeUnconditional(Instr* instr) { // VMRS void Decoder::DecodeTypeVFP(Instr* instr) { ASSERT((instr->TypeField() == 7) && (instr->Bit(24) == 0x0) ); - - if (instr->Bit(23) == 1) { - if ((instr->Bits(21, 19) == 0x7) && - (instr->Bits(18, 16) == 0x5) && - (instr->Bits(11, 9) == 0x5) && - (instr->Bit(8) == 1) && - (instr->Bit(6) == 1) && - (instr->Bit(4) == 0)) { - Format(instr, "vcvt.s32.f64'cond 'Sd, 'Dm"); - } else if ((instr->Bits(21, 19) == 0x7) && - (instr->Bits(18, 16) == 0x0) && - (instr->Bits(11, 9) == 0x5) && - (instr->Bit(8) == 1) && - (instr->Bit(7) == 1) && - (instr->Bit(6) == 1) && - (instr->Bit(4) == 0)) { - Format(instr, "vcvt.f64.s32'cond 'Dd, 'Sm"); - } else if ((instr->Bit(21) == 0x0) && - (instr->Bit(20) == 0x0) && - (instr->Bits(11, 9) == 0x5) && - (instr->Bit(8) == 1) && - (instr->Bit(6) == 0) && - (instr->Bit(4) == 0)) { + ASSERT(instr->Bits(11, 9) == 0x5); + + if (instr->Bit(4) == 0) { + if (instr->Opc1Field() == 0x7) { + // Other data processing instructions + if ((instr->Opc2Field() == 0x7) && (instr->Opc3Field() == 0x3)) { + DecodeVCVTBetweenDoubleAndSingle(instr); + } else if ((instr->Opc2Field() == 0x8) && (instr->Opc3Field() & 0x1)) { + DecodeVCVTBetweenFloatingPointAndInteger(instr); + } else if (((instr->Opc2Field() >> 1) == 0x6) && + (instr->Opc3Field() & 0x1)) { + DecodeVCVTBetweenFloatingPointAndInteger(instr); + } else if (((instr->Opc2Field() == 0x4) || (instr->Opc2Field() == 0x5)) && + (instr->Opc3Field() & 0x1)) { + DecodeVCMP(instr); + } else { + Unknown(instr); // Not used by V8. 
+ } + } else if (instr->Opc1Field() == 0x3) { + if (instr->SzField() == 0x1) { + if (instr->Opc3Field() & 0x1) { + Format(instr, "vsub.f64'cond 'Dd, 'Dn, 'Dm"); + } else { + Format(instr, "vadd.f64'cond 'Dd, 'Dn, 'Dm"); + } + } else { + Unknown(instr); // Not used by V8. + } + } else if ((instr->Opc1Field() == 0x2) && !(instr->Opc3Field() & 0x1)) { + if (instr->SzField() == 0x1) { + Format(instr, "vmul.f64'cond 'Dd, 'Dn, 'Dm"); + } else { + Unknown(instr); // Not used by V8. + } + } else if ((instr->Opc1Field() == 0x4) && !(instr->Opc3Field() & 0x1)) { + if (instr->SzField() == 0x1) { Format(instr, "vdiv.f64'cond 'Dd, 'Dn, 'Dm"); - } else if ((instr->Bits(21, 20) == 0x3) && - (instr->Bits(19, 16) == 0x4) && - (instr->Bits(11, 9) == 0x5) && - (instr->Bit(8) == 0x1) && - (instr->Bit(6) == 0x1) && - (instr->Bit(4) == 0x0)) { - Format(instr, "vcmp.f64'cond 'Dd, 'Dm"); - } else if ((instr->Bits(23, 20) == 0xF) && - (instr->Bits(19, 16) == 0x1) && - (instr->Bits(11, 8) == 0xA) && - (instr->Bits(7, 5) == 0x0) && - (instr->Bit(4) == 0x1) && - (instr->Bits(3, 0) == 0x0)) { - if (instr->Bits(15, 12) == 0xF) - Format(instr, "vmrs'cond APSR, FPSCR"); - else - Unknown(instr); // Not used by V8. + } else { + Unknown(instr); // Not used by V8. + } } else { Unknown(instr); // Not used by V8. } - } else if (instr->Bit(21) == 1) { - if ((instr->Bit(20) == 0x1) && - (instr->Bits(11, 9) == 0x5) && - (instr->Bit(8) == 0x1) && - (instr->Bit(6) == 0) && - (instr->Bit(4) == 0)) { - Format(instr, "vadd.f64'cond 'Dd, 'Dn, 'Dm"); - } else if ((instr->Bit(20) == 0x1) && - (instr->Bits(11, 9) == 0x5) && - (instr->Bit(8) == 0x1) && - (instr->Bit(6) == 1) && - (instr->Bit(4) == 0)) { - Format(instr, "vsub.f64'cond 'Dd, 'Dn, 'Dm"); - } else if ((instr->Bit(20) == 0x0) && - (instr->Bits(11, 9) == 0x5) && - (instr->Bit(8) == 0x1) && - (instr->Bit(6) == 0) && - (instr->Bit(4) == 0)) { - Format(instr, "vmul.f64'cond 'Dd, 'Dn, 'Dm"); + } else { + if ((instr->VCField() == 0x0) && + (instr->VAField() == 0x0)) { + DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(instr); + } else if ((instr->VLField() == 0x1) && + (instr->VCField() == 0x0) && + (instr->VAField() == 0x7) && + (instr->Bits(19, 16) == 0x1)) { + if (instr->Bits(15, 12) == 0xF) + Format(instr, "vmrs'cond APSR, FPSCR"); + else + Unknown(instr); // Not used by V8. } else { Unknown(instr); // Not used by V8. } + } +} + + +void Decoder::DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(Instr* instr) { + ASSERT((instr->Bit(4) == 1) && (instr->VCField() == 0x0) && + (instr->VAField() == 0x0)); + + bool to_arm_register = (instr->VLField() == 0x1); + + if (to_arm_register) { + Format(instr, "vmov'cond 'rt, 'Sn"); + } else { + Format(instr, "vmov'cond 'Sn, 'rt"); + } +} + + +void Decoder::DecodeVCMP(Instr* instr) { + ASSERT((instr->Bit(4) == 0) && (instr->Opc1Field() == 0x7)); + ASSERT(((instr->Opc2Field() == 0x4) || (instr->Opc2Field() == 0x5)) && + (instr->Opc3Field() & 0x1)); + + // Comparison. + bool dp_operation = (instr->SzField() == 1); + bool raise_exception_for_qnan = (instr->Bit(7) == 0x1); + + if (dp_operation && !raise_exception_for_qnan) { + Format(instr, "vcmp.f64'cond 'Dd, 'Dm"); + } else { + Unknown(instr); // Not used by V8. 
+ } +} + + +void Decoder::DecodeVCVTBetweenDoubleAndSingle(Instr* instr) { + ASSERT((instr->Bit(4) == 0) && (instr->Opc1Field() == 0x7)); + ASSERT((instr->Opc2Field() == 0x7) && (instr->Opc3Field() == 0x3)); + + bool double_to_single = (instr->SzField() == 1); + + if (double_to_single) { + Format(instr, "vcvt.f32.f64'cond 'Sd, 'Dm"); + } else { + Format(instr, "vcvt.f64.f32'cond 'Dd, 'Sm"); + } +} + + +void Decoder::DecodeVCVTBetweenFloatingPointAndInteger(Instr* instr) { + ASSERT((instr->Bit(4) == 0) && (instr->Opc1Field() == 0x7)); + ASSERT(((instr->Opc2Field() == 0x8) && (instr->Opc3Field() & 0x1)) || + (((instr->Opc2Field() >> 1) == 0x6) && (instr->Opc3Field() & 0x1))); + + bool to_integer = (instr->Bit(18) == 1); + bool dp_operation = (instr->SzField() == 1); + if (to_integer) { + bool unsigned_integer = (instr->Bit(16) == 0); + + if (dp_operation) { + if (unsigned_integer) { + Format(instr, "vcvt.u32.f64'cond 'Sd, 'Dm"); + } else { + Format(instr, "vcvt.s32.f64'cond 'Sd, 'Dm"); + } + } else { + if (unsigned_integer) { + Format(instr, "vcvt.u32.f32'cond 'Sd, 'Sm"); + } else { + Format(instr, "vcvt.s32.f32'cond 'Sd, 'Sm"); + } + } } else { - if ((instr->Bit(20) == 0x0) && - (instr->Bits(11, 8) == 0xA) && - (instr->Bits(6, 5) == 0x0) && - (instr->Bit(4) == 1) && - (instr->Bits(3, 0) == 0x0)) { - Format(instr, "vmov'cond 'Sn, 'rt"); - } else if ((instr->Bit(20) == 0x1) && - (instr->Bits(11, 8) == 0xA) && - (instr->Bits(6, 5) == 0x0) && - (instr->Bit(4) == 1) && - (instr->Bits(3, 0) == 0x0)) { - Format(instr, "vmov'cond 'rt, 'Sn"); + bool unsigned_integer = (instr->Bit(7) == 0); + + if (dp_operation) { + if (unsigned_integer) { + Format(instr, "vcvt.f64.u32'cond 'Dd, 'Sm"); + } else { + Format(instr, "vcvt.f64.s32'cond 'Dd, 'Sm"); + } } else { - Unknown(instr); // Not used by V8. + if (unsigned_integer) { + Format(instr, "vcvt.f32.u32'cond 'Sd, 'Sm"); + } else { + Format(instr, "vcvt.f32.s32'cond 'Sd, 'Sm"); + } } } } @@ -1024,9 +1092,27 @@ void Decoder::DecodeTypeVFP(Instr* instr) { void Decoder::DecodeType6CoprocessorIns(Instr* instr) { ASSERT((instr->TypeField() == 6)); - if (instr->CoprocessorField() != 0xB) { - Unknown(instr); // Not used by V8. - } else { + if (instr->CoprocessorField() == 0xA) { + switch (instr->OpcodeField()) { + case 0x8: + if (instr->HasL()) { + Format(instr, "vldr'cond 'Sd, ['rn - 4*'off8]"); + } else { + Format(instr, "vstr'cond 'Sd, ['rn - 4*'off8]"); + } + break; + case 0xC: + if (instr->HasL()) { + Format(instr, "vldr'cond 'Sd, ['rn + 4*'off8]"); + } else { + Format(instr, "vstr'cond 'Sd, ['rn + 4*'off8]"); + } + break; + default: + Unknown(instr); // Not used by V8. + break; + } + } else if (instr->CoprocessorField() == 0xB) { switch (instr->OpcodeField()) { case 0x2: // Load and store double to two GP registers @@ -1056,6 +1142,8 @@ void Decoder::DecodeType6CoprocessorIns(Instr* instr) { Unknown(instr); // Not used by V8. break; } + } else { + UNIMPLEMENTED(); // Not used by V8. } } diff --git a/deps/v8/src/arm/full-codegen-arm.cc b/deps/v8/src/arm/full-codegen-arm.cc index a70cf44f80..cc9e70b851 100644 --- a/deps/v8/src/arm/full-codegen-arm.cc +++ b/deps/v8/src/arm/full-codegen-arm.cc @@ -667,14 +667,12 @@ void FullCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) { Comment cmnt(masm_, "[ FunctionLiteral"); // Build the function boilerplate and instantiate it. 
- Handle<JSFunction> boilerplate = - Compiler::BuildBoilerplate(expr, script(), this); + Handle<SharedFunctionInfo> function_info = + Compiler::BuildFunctionInfo(expr, script(), this); if (HasStackOverflow()) return; - ASSERT(boilerplate->IsBoilerplate()); - // Create a new closure. - __ mov(r0, Operand(boilerplate)); + __ mov(r0, Operand(function_info)); __ stm(db_w, sp, cp.bit() | r0.bit()); __ CallRuntime(Runtime::kNewClosure, 2); Apply(context_, r0); diff --git a/deps/v8/src/arm/ic-arm.cc b/deps/v8/src/arm/ic-arm.cc index e68a77a0fc..2259aea3aa 100644 --- a/deps/v8/src/arm/ic-arm.cc +++ b/deps/v8/src/arm/ic-arm.cc @@ -42,7 +42,6 @@ namespace internal { #define __ ACCESS_MASM(masm) - // Helper function used from LoadIC/CallIC GenerateNormal. static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss, @@ -145,25 +144,6 @@ static void GenerateDictionaryLoad(MacroAssembler* masm, } -// Helper function used to check that a value is either not an object -// or is loaded if it is an object. -static void GenerateCheckNonObjectOrLoaded(MacroAssembler* masm, - Label* miss, - Register value, - Register scratch) { - Label done; - // Check if the value is a Smi. - __ tst(value, Operand(kSmiTagMask)); - __ b(eq, &done); - // Check if the object has been loaded. - __ ldr(scratch, FieldMemOperand(value, JSObject::kMapOffset)); - __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitField2Offset)); - __ tst(scratch, Operand(1 << Map::kNeedsLoading)); - __ b(ne, miss); - __ bind(&done); -} - - void LoadIC::GenerateArrayLength(MacroAssembler* masm) { // ----------- S t a t e ------------- // -- r2 : name @@ -292,12 +272,6 @@ static void GenerateNormalHelper(MacroAssembler* masm, __ CompareObjectType(r1, r0, r0, JS_FUNCTION_TYPE); __ b(ne, miss); - // Check that the function has been loaded. - __ ldr(r0, FieldMemOperand(r1, JSObject::kMapOffset)); - __ ldrb(r0, FieldMemOperand(r0, Map::kBitField2Offset)); - __ tst(r0, Operand(1 << Map::kNeedsLoading)); - __ b(ne, miss); - // Patch the receiver with the global proxy if necessary. if (is_global_object) { __ ldr(r0, MemOperand(sp, argc * kPointerSize)); @@ -469,7 +443,6 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) { __ bind(&probe); GenerateDictionaryLoad(masm, &miss, r1, r0); - GenerateCheckNonObjectOrLoaded(masm, &miss, r0, r1); __ Ret(); // Global object access: Check access rights. @@ -557,7 +530,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { // -- sp[0] : key // -- sp[4] : receiver // ----------------------------------- - Label slow, fast; + Label slow, fast, check_pixel_array; // Get the key and receiver object from the stack. __ ldm(ia, sp, r0.bit() | r1.bit()); @@ -595,6 +568,19 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { __ cmp(r0, Operand(r3)); __ b(lo, &fast); + // Check whether the elements is a pixel array. + __ bind(&check_pixel_array); + __ LoadRoot(ip, Heap::kPixelArrayMapRootIndex); + __ cmp(r3, ip); + __ b(ne, &slow); + __ ldr(ip, FieldMemOperand(r1, PixelArray::kLengthOffset)); + __ cmp(r0, ip); + __ b(hs, &slow); + __ ldr(ip, FieldMemOperand(r1, PixelArray::kExternalPointerOffset)); + __ ldrb(r0, MemOperand(ip, r0)); + __ mov(r0, Operand(r0, LSL, kSmiTagSize)); // Tag result as smi. + __ Ret(); + // Slow case: Push extra copies of the arguments (2). 
__ bind(&slow); __ IncrementCounter(&Counters::keyed_load_generic_slow, 1, r0, r1); @@ -625,10 +611,283 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) { } +// Convert unsigned integer with specified number of leading zeroes in binary +// representation to IEEE 754 double. +// Integer to convert is passed in register hiword. +// Resulting double is returned in registers hiword:loword. +// This functions does not work correctly for 0. +static void GenerateUInt2Double(MacroAssembler* masm, + Register hiword, + Register loword, + Register scratch, + int leading_zeroes) { + const int meaningful_bits = kBitsPerInt - leading_zeroes - 1; + const int biased_exponent = HeapNumber::kExponentBias + meaningful_bits; + + const int mantissa_shift_for_hi_word = + meaningful_bits - HeapNumber::kMantissaBitsInTopWord; + + const int mantissa_shift_for_lo_word = + kBitsPerInt - mantissa_shift_for_hi_word; + + __ mov(scratch, Operand(biased_exponent << HeapNumber::kExponentShift)); + if (mantissa_shift_for_hi_word > 0) { + __ mov(loword, Operand(hiword, LSL, mantissa_shift_for_lo_word)); + __ orr(hiword, scratch, Operand(hiword, LSR, mantissa_shift_for_hi_word)); + } else { + __ mov(loword, Operand(0)); + __ orr(hiword, scratch, Operand(hiword, LSL, mantissa_shift_for_hi_word)); + } + + // If least significant bit of biased exponent was not 1 it was corrupted + // by most significant bit of mantissa so we should fix that. + if (!(biased_exponent & 1)) { + __ bic(hiword, hiword, Operand(1 << HeapNumber::kExponentShift)); + } +} + + void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm, ExternalArrayType array_type) { - // TODO(476): port specialized code. - GenerateGeneric(masm); + // ---------- S t a t e -------------- + // -- lr : return address + // -- sp[0] : key + // -- sp[4] : receiver + // ----------------------------------- + Label slow, failed_allocation; + + // Get the key and receiver object from the stack. + __ ldm(ia, sp, r0.bit() | r1.bit()); + + // r0: key + // r1: receiver object + + // Check that the object isn't a smi + __ BranchOnSmi(r1, &slow); + + // Check that the key is a smi. + __ BranchOnNotSmi(r0, &slow); + + // Check that the object is a JS object. Load map into r2. + __ CompareObjectType(r1, r2, r3, FIRST_JS_OBJECT_TYPE); + __ b(lt, &slow); + + // Check that the receiver does not require access checks. We need + // to check this explicitly since this generic stub does not perform + // map checks. + __ ldrb(r3, FieldMemOperand(r2, Map::kBitFieldOffset)); + __ tst(r3, Operand(1 << Map::kIsAccessCheckNeeded)); + __ b(ne, &slow); + + // Check that the elements array is the appropriate type of + // ExternalArray. + // r0: index (as a smi) + // r1: JSObject + __ ldr(r1, FieldMemOperand(r1, JSObject::kElementsOffset)); + __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset)); + __ LoadRoot(ip, Heap::RootIndexForExternalArrayType(array_type)); + __ cmp(r2, ip); + __ b(ne, &slow); + + // Check that the index is in range. + __ ldr(ip, FieldMemOperand(r1, ExternalArray::kLengthOffset)); + __ cmp(r1, Operand(r0, ASR, kSmiTagSize)); + // Unsigned comparison catches both negative and too-large values. + __ b(lo, &slow); + + // r0: index (smi) + // r1: elements array + __ ldr(r1, FieldMemOperand(r1, ExternalArray::kExternalPointerOffset)); + // r1: base pointer of external storage + + // We are not untagging smi key and instead work with it + // as if it was premultiplied by 2. 
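// Illustrative standalone sketch (editor's note, not from this patch):
// because a smi is the integer shifted left by one (tag bit 0), a smi-tagged
// key already equals index * 2. The element-size switch that follows exploits
// this: byte-sized elements shift the tagged key right by one, 16-bit
// elements use it as the byte offset directly, and 32-bit elements shift it
// left by one more.
#include <cstdint>
#include <cassert>

static inline int32_t SmiTag(int32_t value) { return value * 2; }

static inline int32_t ByteOffset(int32_t tagged_key, int log2_element_size) {
  // tagged_key == index * 2, so index * element_size is tagged_key shifted
  // by (log2_element_size - 1).
  int shift = log2_element_size - 1;
  return shift >= 0 ? (tagged_key << shift) : (tagged_key >> 1);
}

int main() {
  int32_t key = SmiTag(5);
  assert(ByteOffset(key, 0) == 5);    // uint8 element: byte offset 5
  assert(ByteOffset(key, 1) == 10);   // uint16 element: byte offset 10
  assert(ByteOffset(key, 2) == 20);   // int32 element: byte offset 20
  return 0;
}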
+ ASSERT((kSmiTag == 0) && (kSmiTagSize == 1)); + + switch (array_type) { + case kExternalByteArray: + __ ldrsb(r0, MemOperand(r1, r0, LSR, 1)); + break; + case kExternalUnsignedByteArray: + __ ldrb(r0, MemOperand(r1, r0, LSR, 1)); + break; + case kExternalShortArray: + __ ldrsh(r0, MemOperand(r1, r0, LSL, 0)); + break; + case kExternalUnsignedShortArray: + __ ldrh(r0, MemOperand(r1, r0, LSL, 0)); + break; + case kExternalIntArray: + case kExternalUnsignedIntArray: + __ ldr(r0, MemOperand(r1, r0, LSL, 1)); + break; + case kExternalFloatArray: + if (CpuFeatures::IsSupported(VFP3)) { + CpuFeatures::Scope scope(VFP3); + __ add(r0, r1, Operand(r0, LSL, 1)); + __ vldr(s0, r0, 0); + } else { + __ ldr(r0, MemOperand(r1, r0, LSL, 1)); + } + break; + default: + UNREACHABLE(); + break; + } + + // For integer array types: + // r0: value + // For floating-point array type + // s0: value (if VFP3 is supported) + // r0: value (if VFP3 is not supported) + + if (array_type == kExternalIntArray) { + // For the Int and UnsignedInt array types, we need to see whether + // the value can be represented in a Smi. If not, we need to convert + // it to a HeapNumber. + Label box_int; + __ cmp(r0, Operand(0xC0000000)); + __ b(mi, &box_int); + __ mov(r0, Operand(r0, LSL, kSmiTagSize)); + __ Ret(); + + __ bind(&box_int); + + __ mov(r1, r0); + // Allocate a HeapNumber for the int and perform int-to-double + // conversion. + __ AllocateHeapNumber(r0, r3, r4, &slow); + + if (CpuFeatures::IsSupported(VFP3)) { + CpuFeatures::Scope scope(VFP3); + __ vmov(s0, r1); + __ vcvt_f64_s32(d0, s0); + __ sub(r1, r0, Operand(kHeapObjectTag)); + __ vstr(d0, r1, HeapNumber::kValueOffset); + __ Ret(); + } else { + WriteInt32ToHeapNumberStub stub(r1, r0, r3); + __ TailCallStub(&stub); + } + } else if (array_type == kExternalUnsignedIntArray) { + // The test is different for unsigned int values. Since we need + // the value to be in the range of a positive smi, we can't + // handle either of the top two bits being set in the value. + if (CpuFeatures::IsSupported(VFP3)) { + CpuFeatures::Scope scope(VFP3); + Label box_int, done; + __ tst(r0, Operand(0xC0000000)); + __ b(ne, &box_int); + + __ mov(r0, Operand(r0, LSL, kSmiTagSize)); + __ Ret(); + + __ bind(&box_int); + __ vmov(s0, r0); + __ AllocateHeapNumber(r0, r1, r2, &slow); + + __ vcvt_f64_u32(d0, s0); + __ sub(r1, r0, Operand(kHeapObjectTag)); + __ vstr(d0, r1, HeapNumber::kValueOffset); + __ Ret(); + } else { + // Check whether unsigned integer fits into smi. + Label box_int_0, box_int_1, done; + __ tst(r0, Operand(0x80000000)); + __ b(ne, &box_int_0); + __ tst(r0, Operand(0x40000000)); + __ b(ne, &box_int_1); + + // Tag integer as smi and return it. + __ mov(r0, Operand(r0, LSL, kSmiTagSize)); + __ Ret(); + + __ bind(&box_int_0); + // Integer does not have leading zeros. + GenerateUInt2Double(masm, r0, r1, r2, 0); + __ b(&done); + + __ bind(&box_int_1); + // Integer has one leading zero. + GenerateUInt2Double(masm, r0, r1, r2, 1); + + __ bind(&done); + // Integer was converted to double in registers r0:r1. + // Wrap it into a HeapNumber. + __ AllocateHeapNumber(r2, r3, r5, &slow); + + __ str(r0, FieldMemOperand(r2, HeapNumber::kExponentOffset)); + __ str(r1, FieldMemOperand(r2, HeapNumber::kMantissaOffset)); + + __ mov(r0, r2); + + __ Ret(); + } + } else if (array_type == kExternalFloatArray) { + // For the floating-point array type, we need to always allocate a + // HeapNumber. 
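// Illustrative standalone sketch (editor's note, not from this patch):
// GenerateUInt2Double above builds the IEEE 754 double for a non-zero uint32
// directly in two 32-bit registers (hiword holds sign, exponent and the top
// mantissa bits; loword holds the low mantissa bits). The check below shows
// the invariant it relies on: for a non-zero value with L leading zeros, the
// biased exponent of the result is 1023 + (31 - L), and the top set bit
// becomes the implicit mantissa bit.
#include <cstdint>
#include <cstring>
#include <cassert>

static void UInt32ToDoubleWords(uint32_t value, uint32_t* hi, uint32_t* lo) {
  double d = static_cast<double>(value);   // exact for any uint32
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));
  *hi = static_cast<uint32_t>(bits >> 32);
  *lo = static_cast<uint32_t>(bits);
}

int main() {
  uint32_t value = 0x12345678u;            // 3 leading zeros
  uint32_t hi, lo;
  UInt32ToDoubleWords(value, &hi, &lo);
  int biased_exponent = (hi >> 20) & 0x7FF;
  assert(biased_exponent == 1023 + (31 - 3));   // 1023 + (31 - leading zeros)
  return 0;
}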
+ if (CpuFeatures::IsSupported(VFP3)) { + CpuFeatures::Scope scope(VFP3); + __ AllocateHeapNumber(r0, r1, r2, &slow); + __ vcvt_f64_f32(d0, s0); + __ sub(r1, r0, Operand(kHeapObjectTag)); + __ vstr(d0, r1, HeapNumber::kValueOffset); + __ Ret(); + } else { + __ AllocateHeapNumber(r3, r1, r2, &slow); + // VFP is not available, do manual single to double conversion. + + // r0: floating point value (binary32) + + // Extract mantissa to r1. + __ and_(r1, r0, Operand(kBinary32MantissaMask)); + + // Extract exponent to r2. + __ mov(r2, Operand(r0, LSR, kBinary32MantissaBits)); + __ and_(r2, r2, Operand(kBinary32ExponentMask >> kBinary32MantissaBits)); + + Label exponent_rebiased; + __ teq(r2, Operand(0x00)); + __ b(eq, &exponent_rebiased); + + __ teq(r2, Operand(0xff)); + __ mov(r2, Operand(0x7ff), LeaveCC, eq); + __ b(eq, &exponent_rebiased); + + // Rebias exponent. + __ add(r2, + r2, + Operand(-kBinary32ExponentBias + HeapNumber::kExponentBias)); + + __ bind(&exponent_rebiased); + __ and_(r0, r0, Operand(kBinary32SignMask)); + __ orr(r0, r0, Operand(r2, LSL, HeapNumber::kMantissaBitsInTopWord)); + + // Shift mantissa. + static const int kMantissaShiftForHiWord = + kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord; + + static const int kMantissaShiftForLoWord = + kBitsPerInt - kMantissaShiftForHiWord; + + __ orr(r0, r0, Operand(r1, LSR, kMantissaShiftForHiWord)); + __ mov(r1, Operand(r1, LSL, kMantissaShiftForLoWord)); + + __ str(r0, FieldMemOperand(r3, HeapNumber::kExponentOffset)); + __ str(r1, FieldMemOperand(r3, HeapNumber::kMantissaOffset)); + __ mov(r0, r3); + __ Ret(); + } + + } else { + __ mov(r0, Operand(r0, LSL, kSmiTagSize)); + __ Ret(); + } + + // Slow case: Load name and receiver from stack and jump to runtime. + __ bind(&slow); + __ IncrementCounter(&Counters::keyed_load_external_array_slow, 1, r0, r1); + GenerateRuntimeGetProperty(masm); } @@ -709,7 +968,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) { // -- sp[0] : key // -- sp[1] : receiver // ----------------------------------- - Label slow, fast, array, extra, exit; + Label slow, fast, array, extra, exit, check_pixel_array; // Get the key and the object from the stack. __ ldm(ia, sp, r1.bit() | r3.bit()); // r1 = key, r3 = receiver @@ -742,7 +1001,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) { __ ldr(r2, FieldMemOperand(r3, HeapObject::kMapOffset)); __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex); __ cmp(r2, ip); - __ b(ne, &slow); + __ b(ne, &check_pixel_array); // Untag the key (for checking against untagged length in the fixed array). __ mov(r1, Operand(r1, ASR, kSmiTagSize)); // Compute address to store into and check array bounds. @@ -757,6 +1016,37 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) { __ bind(&slow); GenerateRuntimeSetProperty(masm); + // Check whether the elements is a pixel array. + // r0: value + // r1: index (as a smi), zero-extended. + // r3: elements array + __ bind(&check_pixel_array); + __ LoadRoot(ip, Heap::kPixelArrayMapRootIndex); + __ cmp(r2, ip); + __ b(ne, &slow); + // Check that the value is a smi. If a conversion is needed call into the + // runtime to convert and clamp. + __ BranchOnNotSmi(r0, &slow); + __ mov(r1, Operand(r1, ASR, kSmiTagSize)); // Untag the key. + __ ldr(ip, FieldMemOperand(r3, PixelArray::kLengthOffset)); + __ cmp(r1, Operand(ip)); + __ b(hs, &slow); + __ mov(r4, r0); // Save the value. + __ mov(r0, Operand(r0, ASR, kSmiTagSize)); // Untag the value. + { // Clamp the value to [0..255]. 
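// Illustrative standalone sketch (editor's note, not from this patch): the
// clamp performed by the stub sequence that follows when storing into a
// pixel array. Values already in [0, 255] pass through unchanged; anything
// negative becomes 0 and anything larger becomes 255, mirroring the
// tst/conditional-mov pair below.
#include <cstdint>

static inline uint8_t ClampToUint8(int32_t value) {
  if ((value & ~0xFF) == 0) return static_cast<uint8_t>(value);  // already 0..255
  return value < 0 ? 0 : 255;
}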
+ Label done; + __ tst(r0, Operand(0xFFFFFF00)); + __ b(eq, &done); + __ mov(r0, Operand(0), LeaveCC, mi); // 0 if negative. + __ mov(r0, Operand(255), LeaveCC, pl); // 255 if positive. + __ bind(&done); + } + __ ldr(r2, FieldMemOperand(r3, PixelArray::kExternalPointerOffset)); + __ strb(r0, MemOperand(r2, r1)); + __ mov(r0, Operand(r4)); // Return the original value. + __ Ret(); + + // Extra capacity case: Check if there is extra capacity to // perform the store and update the length. Used for adding one // element to the array by writing to array[array.length]. @@ -819,10 +1109,376 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) { } +// Convert int passed in register ival to IEE 754 single precision +// floating point value and store it into register fval. +// If VFP3 is available use it for conversion. +static void ConvertIntToFloat(MacroAssembler* masm, + Register ival, + Register fval, + Register scratch1, + Register scratch2) { + if (CpuFeatures::IsSupported(VFP3)) { + CpuFeatures::Scope scope(VFP3); + __ vmov(s0, ival); + __ vcvt_f32_s32(s0, s0); + __ vmov(fval, s0); + } else { + Label not_special, done; + // Move sign bit from source to destination. This works because the sign + // bit in the exponent word of the double has the same position and polarity + // as the 2's complement sign bit in a Smi. + ASSERT(kBinary32SignMask == 0x80000000u); + + __ and_(fval, ival, Operand(kBinary32SignMask), SetCC); + // Negate value if it is negative. + __ rsb(ival, ival, Operand(0), LeaveCC, ne); + + // We have -1, 0 or 1, which we treat specially. Register ival contains + // absolute value: it is either equal to 1 (special case of -1 and 1), + // greater than 1 (not a special case) or less than 1 (special case of 0). + __ cmp(ival, Operand(1)); + __ b(gt, ¬_special); + + // For 1 or -1 we need to or in the 0 exponent (biased). + static const uint32_t exponent_word_for_1 = + kBinary32ExponentBias << kBinary32ExponentShift; + + __ orr(fval, fval, Operand(exponent_word_for_1), LeaveCC, eq); + __ b(&done); + + __ bind(¬_special); + // Count leading zeros. + // Gets the wrong answer for 0, but we already checked for that case above. + Register zeros = scratch2; + __ CountLeadingZeros(ival, scratch1, zeros); + + // Compute exponent and or it into the exponent register. + __ rsb(scratch1, + zeros, + Operand((kBitsPerInt - 1) + kBinary32ExponentBias)); + + __ orr(fval, + fval, + Operand(scratch1, LSL, kBinary32ExponentShift)); + + // Shift up the source chopping the top bit off. + __ add(zeros, zeros, Operand(1)); + // This wouldn't work for 1 and -1 as the shift would be 32 which means 0. + __ mov(ival, Operand(ival, LSL, zeros)); + // And the top (top 20 bits). + __ orr(fval, + fval, + Operand(ival, LSR, kBitsPerInt - kBinary32MantissaBits)); + + __ bind(&done); + } +} + + +static bool IsElementTypeSigned(ExternalArrayType array_type) { + switch (array_type) { + case kExternalByteArray: + case kExternalShortArray: + case kExternalIntArray: + return true; + + case kExternalUnsignedByteArray: + case kExternalUnsignedShortArray: + case kExternalUnsignedIntArray: + return false; + + default: + UNREACHABLE(); + return false; + } +} + + void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm, ExternalArrayType array_type) { - // TODO(476): port specialized code. 
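// Illustrative standalone sketch (editor's note, not from this patch): what
// the non-VFP path of ConvertIntToFloat above computes -- the raw bits of the
// binary32 value for a 32-bit signed integer, built from the sign, a
// leading-zero count and a shifted mantissa. Like the stub, it truncates
// integers with more than 24 significant bits rather than rounding to
// nearest.
#include <cstdint>

static uint32_t Int32ToBinary32Bits(int32_t ival) {
  uint32_t sign = static_cast<uint32_t>(ival) & 0x80000000u;
  uint32_t mag = sign ? static_cast<uint32_t>(-static_cast<int64_t>(ival))
                      : static_cast<uint32_t>(ival);
  if (mag == 0) return sign;                        // +/-0.0f
  int zeros = 0;                                    // leading zeros of mag
  for (uint32_t probe = 0x80000000u; (mag & probe) == 0; probe >>= 1) zeros++;
  uint32_t exponent = (31 - zeros) + 127;           // biased binary32 exponent
  uint32_t mantissa =
      static_cast<uint32_t>((static_cast<uint64_t>(mag) << (zeros + 1)) >> 9) &
      0x007FFFFFu;
  return sign | (exponent << 23) | mantissa;
}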
- GenerateGeneric(masm); + // ---------- S t a t e -------------- + // -- r0 : value + // -- lr : return address + // -- sp[0] : key + // -- sp[1] : receiver + // ----------------------------------- + Label slow, check_heap_number; + + // Get the key and the object from the stack. + __ ldm(ia, sp, r1.bit() | r2.bit()); // r1 = key, r2 = receiver + + // Check that the object isn't a smi. + __ BranchOnSmi(r2, &slow); + + // Check that the object is a JS object. Load map into r3 + __ CompareObjectType(r2, r3, r4, FIRST_JS_OBJECT_TYPE); + __ b(le, &slow); + + // Check that the receiver does not require access checks. We need + // to do this because this generic stub does not perform map checks. + __ ldrb(ip, FieldMemOperand(r3, Map::kBitFieldOffset)); + __ tst(ip, Operand(1 << Map::kIsAccessCheckNeeded)); + __ b(ne, &slow); + + // Check that the key is a smi. + __ BranchOnNotSmi(r1, &slow); + + // Check that the elements array is the appropriate type of + // ExternalArray. + // r0: value + // r1: index (smi) + // r2: object + __ ldr(r2, FieldMemOperand(r2, JSObject::kElementsOffset)); + __ ldr(r3, FieldMemOperand(r2, HeapObject::kMapOffset)); + __ LoadRoot(ip, Heap::RootIndexForExternalArrayType(array_type)); + __ cmp(r3, ip); + __ b(ne, &slow); + + // Check that the index is in range. + __ mov(r1, Operand(r1, ASR, kSmiTagSize)); // Untag the index. + __ ldr(ip, FieldMemOperand(r2, ExternalArray::kLengthOffset)); + __ cmp(r1, ip); + // Unsigned comparison catches both negative and too-large values. + __ b(hs, &slow); + + // Handle both smis and HeapNumbers in the fast path. Go to the + // runtime for all other kinds of values. + // r0: value + // r1: index (integer) + // r2: array + __ BranchOnNotSmi(r0, &check_heap_number); + __ mov(r3, Operand(r0, ASR, kSmiTagSize)); // Untag the value. + __ ldr(r2, FieldMemOperand(r2, ExternalArray::kExternalPointerOffset)); + + // r1: index (integer) + // r2: base pointer of external storage + // r3: value (integer) + switch (array_type) { + case kExternalByteArray: + case kExternalUnsignedByteArray: + __ strb(r3, MemOperand(r2, r1, LSL, 0)); + break; + case kExternalShortArray: + case kExternalUnsignedShortArray: + __ strh(r3, MemOperand(r2, r1, LSL, 1)); + break; + case kExternalIntArray: + case kExternalUnsignedIntArray: + __ str(r3, MemOperand(r2, r1, LSL, 2)); + break; + case kExternalFloatArray: + // Need to perform int-to-float conversion. + ConvertIntToFloat(masm, r3, r4, r5, r6); + __ str(r4, MemOperand(r2, r1, LSL, 2)); + break; + default: + UNREACHABLE(); + break; + } + + // r0: value + __ Ret(); + + + // r0: value + // r1: index (integer) + // r2: external array object + __ bind(&check_heap_number); + __ CompareObjectType(r0, r3, r4, HEAP_NUMBER_TYPE); + __ b(ne, &slow); + + __ ldr(r2, FieldMemOperand(r2, ExternalArray::kExternalPointerOffset)); + + // The WebGL specification leaves the behavior of storing NaN and + // +/-Infinity into integer arrays basically undefined. For more + // reproducible behavior, convert these to zero. + if (CpuFeatures::IsSupported(VFP3)) { + CpuFeatures::Scope scope(VFP3); + + // vldr requires offset to be a multiple of 4 so we can not + // include -kHeapObjectTag into it. + __ sub(r3, r0, Operand(kHeapObjectTag)); + __ vldr(d0, r3, HeapNumber::kValueOffset); + + if (array_type == kExternalFloatArray) { + __ vcvt_f32_f64(s0, d0); + __ vmov(r3, s0); + __ str(r3, MemOperand(r2, r1, LSL, 2)); + } else { + Label done; + + // Need to perform float-to-int conversion. + // Test for NaN. 
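// Illustrative standalone sketch (editor's note, not from this patch): the
// conversion the VFP sequence that follows performs when a HeapNumber is
// stored into an integer external array. Per the WebGL note above, NaN and
// +/-Infinity are mapped to 0; finite values are truncated toward zero, and
// the saturation shown here reflects vcvt's behaviour for out-of-range
// inputs (an assumption stated for completeness, not visible in the stub).
#include <cmath>
#include <cstdint>

static int32_t DoubleToInt32ForExternalArray(double value) {
  if (!std::isfinite(value)) return 0;             // NaN, +/-Inf -> 0
  if (value >= 2147483647.0) return INT32_MAX;     // saturate like vcvt
  if (value <= -2147483648.0) return INT32_MIN;
  return static_cast<int32_t>(value);              // truncate toward zero
}

static uint32_t DoubleToUint32ForExternalArray(double value) {
  if (!std::isfinite(value)) return 0;
  if (value >= 4294967295.0) return UINT32_MAX;
  if (value <= 0.0) return 0;
  return static_cast<uint32_t>(value);
}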
+ __ vcmp(d0, d0); + // Move vector status bits to normal status bits. + __ vmrs(v8::internal::pc); + __ mov(r3, Operand(0), LeaveCC, vs); // NaN converts to 0 + __ b(vs, &done); + + // Test whether exponent equal to 0x7FF (infinity or NaN) + __ vmov(r4, r3, d0); + __ mov(r5, Operand(0x7FF00000)); + __ and_(r3, r3, Operand(r5)); + __ teq(r3, Operand(r5)); + __ mov(r3, Operand(0), LeaveCC, eq); + + // Not infinity or NaN simply convert to int + if (IsElementTypeSigned(array_type)) { + __ vcvt_s32_f64(s0, d0, ne); + } else { + __ vcvt_u32_f64(s0, d0, ne); + } + + __ vmov(r3, s0, ne); + + __ bind(&done); + switch (array_type) { + case kExternalByteArray: + case kExternalUnsignedByteArray: + __ strb(r3, MemOperand(r2, r1, LSL, 0)); + break; + case kExternalShortArray: + case kExternalUnsignedShortArray: + __ strh(r3, MemOperand(r2, r1, LSL, 1)); + break; + case kExternalIntArray: + case kExternalUnsignedIntArray: + __ str(r3, MemOperand(r2, r1, LSL, 2)); + break; + default: + UNREACHABLE(); + break; + } + } + + // r0: original value + __ Ret(); + } else { + // VFP3 is not available do manual conversions + __ ldr(r3, FieldMemOperand(r0, HeapNumber::kExponentOffset)); + __ ldr(r4, FieldMemOperand(r0, HeapNumber::kMantissaOffset)); + + if (array_type == kExternalFloatArray) { + Label done, nan_or_infinity_or_zero; + static const int kMantissaInHiWordShift = + kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord; + + static const int kMantissaInLoWordShift = + kBitsPerInt - kMantissaInHiWordShift; + + // Test for all special exponent values: zeros, subnormal numbers, NaNs + // and infinities. All these should be converted to 0. + __ mov(r5, Operand(HeapNumber::kExponentMask)); + __ and_(r6, r3, Operand(r5), SetCC); + __ b(eq, &nan_or_infinity_or_zero); + + __ teq(r6, Operand(r5)); + __ mov(r6, Operand(kBinary32ExponentMask), LeaveCC, eq); + __ b(eq, &nan_or_infinity_or_zero); + + // Rebias exponent. + __ mov(r6, Operand(r6, LSR, HeapNumber::kExponentShift)); + __ add(r6, + r6, + Operand(kBinary32ExponentBias - HeapNumber::kExponentBias)); + + __ cmp(r6, Operand(kBinary32MaxExponent)); + __ and_(r3, r3, Operand(HeapNumber::kSignMask), LeaveCC, gt); + __ orr(r3, r3, Operand(kBinary32ExponentMask), LeaveCC, gt); + __ b(gt, &done); + + __ cmp(r6, Operand(kBinary32MinExponent)); + __ and_(r3, r3, Operand(HeapNumber::kSignMask), LeaveCC, lt); + __ b(lt, &done); + + __ and_(r7, r3, Operand(HeapNumber::kSignMask)); + __ and_(r3, r3, Operand(HeapNumber::kMantissaMask)); + __ orr(r7, r7, Operand(r3, LSL, kMantissaInHiWordShift)); + __ orr(r7, r7, Operand(r4, LSR, kMantissaInLoWordShift)); + __ orr(r3, r7, Operand(r6, LSL, kBinary32ExponentShift)); + + __ bind(&done); + __ str(r3, MemOperand(r2, r1, LSL, 2)); + __ Ret(); + + __ bind(&nan_or_infinity_or_zero); + __ and_(r7, r3, Operand(HeapNumber::kSignMask)); + __ and_(r3, r3, Operand(HeapNumber::kMantissaMask)); + __ orr(r6, r6, r7); + __ orr(r6, r6, Operand(r3, LSL, kMantissaInHiWordShift)); + __ orr(r3, r6, Operand(r4, LSR, kMantissaInLoWordShift)); + __ b(&done); + } else { + bool is_signed_type = IsElementTypeSigned(array_type); + int meaningfull_bits = is_signed_type ? (kBitsPerInt - 1) : kBitsPerInt; + int32_t min_value = is_signed_type ? 0x80000000 : 0x00000000; + + Label done, sign; + + // Test for all special exponent values: zeros, subnormal numbers, NaNs + // and infinities. All these should be converted to 0. 
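// Illustrative standalone sketch (editor's note, not from this patch): the
// field layout the manual (non-VFP) double-to-float store path above relies
// on. A binary64 value splits into sign, an 11-bit exponent biased by 1023
// and a 52-bit mantissa; the binary32 result keeps the sign, rebiases the
// exponent to 127 and keeps the top 23 mantissa bits, with out-of-range
// exponents collapsing to infinity or zero. The stub does this with shifts
// and conditional instructions; here the fields are just extracted and
// compared against the compiler's own narrowing.
#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
  double d = 1.0e10;                                // exactly representable as float
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));
  uint32_t sign = static_cast<uint32_t>(bits >> 63);
  int exp64 = static_cast<int>((bits >> 52) & 0x7FF) - 1023;       // unbiased
  uint32_t mant32 = static_cast<uint32_t>((bits >> (52 - 23)) & 0x7FFFFF);

  float f = static_cast<float>(d);
  uint32_t fbits;
  std::memcpy(&fbits, &f, sizeof(fbits));
  // For in-range values the float exponent field equals exp64 + 127 and the
  // float mantissa equals the top 23 double mantissa bits, up to rounding.
  std::printf("sign=%u exp=%d mant32=%06x float bits=%08x (exp field %u)\n",
              static_cast<unsigned>(sign), exp64,
              static_cast<unsigned>(mant32), static_cast<unsigned>(fbits),
              static_cast<unsigned>((fbits >> 23) & 0xFF));
  return 0;
}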
+ __ mov(r5, Operand(HeapNumber::kExponentMask)); + __ and_(r6, r3, Operand(r5), SetCC); + __ mov(r3, Operand(0), LeaveCC, eq); + __ b(eq, &done); + + __ teq(r6, Operand(r5)); + __ mov(r3, Operand(0), LeaveCC, eq); + __ b(eq, &done); + + // Unbias exponent. + __ mov(r6, Operand(r6, LSR, HeapNumber::kExponentShift)); + __ sub(r6, r6, Operand(HeapNumber::kExponentBias), SetCC); + // If exponent is negative than result is 0. + __ mov(r3, Operand(0), LeaveCC, mi); + __ b(mi, &done); + + // If exponent is too big than result is minimal value + __ cmp(r6, Operand(meaningfull_bits - 1)); + __ mov(r3, Operand(min_value), LeaveCC, ge); + __ b(ge, &done); + + __ and_(r5, r3, Operand(HeapNumber::kSignMask), SetCC); + __ and_(r3, r3, Operand(HeapNumber::kMantissaMask)); + __ orr(r3, r3, Operand(1u << HeapNumber::kMantissaBitsInTopWord)); + + __ rsb(r6, r6, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC); + __ mov(r3, Operand(r3, LSR, r6), LeaveCC, pl); + __ b(pl, &sign); + + __ rsb(r6, r6, Operand(0)); + __ mov(r3, Operand(r3, LSL, r6)); + __ rsb(r6, r6, Operand(meaningfull_bits)); + __ orr(r3, r3, Operand(r4, LSR, r6)); + + __ bind(&sign); + __ teq(r5, Operand(0)); + __ rsb(r3, r3, Operand(0), LeaveCC, ne); + + __ bind(&done); + switch (array_type) { + case kExternalByteArray: + case kExternalUnsignedByteArray: + __ strb(r3, MemOperand(r2, r1, LSL, 0)); + break; + case kExternalShortArray: + case kExternalUnsignedShortArray: + __ strh(r3, MemOperand(r2, r1, LSL, 1)); + break; + case kExternalIntArray: + case kExternalUnsignedIntArray: + __ str(r3, MemOperand(r2, r1, LSL, 2)); + break; + default: + UNREACHABLE(); + break; + } + } + } + + // Slow case: call runtime. + __ bind(&slow); + GenerateRuntimeSetProperty(masm); } diff --git a/deps/v8/src/arm/jump-target-arm.cc b/deps/v8/src/arm/jump-target-arm.cc index a84060d570..a13de0e2a6 100644 --- a/deps/v8/src/arm/jump-target-arm.cc +++ b/deps/v8/src/arm/jump-target-arm.cc @@ -173,14 +173,7 @@ void BreakTarget::Jump() { void BreakTarget::Jump(Result* arg) { - // On ARM we do not currently emit merge code for jumps, so we need to do - // it explicitly here. The only merging necessary is to drop extra - // statement state from the stack. - ASSERT(cgen()->has_valid_frame()); - int count = cgen()->frame()->height() - expected_height_; - cgen()->frame()->Drop(count); - cgen()->frame()->Push(arg); - DoJump(); + UNIMPLEMENTED(); } @@ -209,27 +202,7 @@ void BreakTarget::Bind() { void BreakTarget::Bind(Result* arg) { -#ifdef DEBUG - // All the forward-reaching frames should have been adjusted at the - // jumps to this target. - for (int i = 0; i < reaching_frames_.length(); i++) { - ASSERT(reaching_frames_[i] == NULL || - reaching_frames_[i]->height() == expected_height_ + 1); - } -#endif - // Drop leftover statement state from the frame before merging, even - // on the fall through. This is so we can bind the return target - // with state on the frame. - if (cgen()->has_valid_frame()) { - int count = cgen()->frame()->height() - expected_height_; - // On ARM we do not currently emit merge code at binding sites, so we need - // to do it explicitly here. The only merging necessary is to drop extra - // statement state from the stack. 
- cgen()->frame()->ForgetElements(count); - cgen()->frame()->Push(arg); - } - DoBind(); - *arg = cgen()->frame()->Pop(); + UNIMPLEMENTED(); } diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc index 691c08c4b6..ac1c14fd96 100644 --- a/deps/v8/src/arm/macro-assembler-arm.cc +++ b/deps/v8/src/arm/macro-assembler-arm.cc @@ -1192,7 +1192,7 @@ void MacroAssembler::IntegerToDoubleConversionWithVFP3(Register inReg, // ARMv7 VFP3 instructions to implement integer to double conversion. mov(r7, Operand(inReg, ASR, kSmiTagSize)); vmov(s15, r7); - vcvt(d7, s15); + vcvt_f64_s32(d7, s15); vmov(outLowReg, outHighReg, d7); } @@ -1455,6 +1455,58 @@ void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first, } +// Allocates a heap number or jumps to the need_gc label if the young space +// is full and a scavenge is needed. +void MacroAssembler::AllocateHeapNumber(Register result, + Register scratch1, + Register scratch2, + Label* gc_required) { + // Allocate an object in the heap for the heap number and tag it as a heap + // object. + AllocateInNewSpace(HeapNumber::kSize / kPointerSize, + result, + scratch1, + scratch2, + gc_required, + TAG_OBJECT); + + // Get heap number map and store it in the allocated object. + LoadRoot(scratch1, Heap::kHeapNumberMapRootIndex); + str(scratch1, FieldMemOperand(result, HeapObject::kMapOffset)); +} + + +void MacroAssembler::CountLeadingZeros(Register source, + Register scratch, + Register zeros) { +#ifdef CAN_USE_ARMV5_INSTRUCTIONS + clz(zeros, source); // This instruction is only supported after ARM5. +#else + mov(zeros, Operand(0)); + mov(scratch, source); + // Top 16. + tst(scratch, Operand(0xffff0000)); + add(zeros, zeros, Operand(16), LeaveCC, eq); + mov(scratch, Operand(scratch, LSL, 16), LeaveCC, eq); + // Top 8. + tst(scratch, Operand(0xff000000)); + add(zeros, zeros, Operand(8), LeaveCC, eq); + mov(scratch, Operand(scratch, LSL, 8), LeaveCC, eq); + // Top 4. + tst(scratch, Operand(0xf0000000)); + add(zeros, zeros, Operand(4), LeaveCC, eq); + mov(scratch, Operand(scratch, LSL, 4), LeaveCC, eq); + // Top 2. + tst(scratch, Operand(0xc0000000)); + add(zeros, zeros, Operand(2), LeaveCC, eq); + mov(scratch, Operand(scratch, LSL, 2), LeaveCC, eq); + // Top bit. + tst(scratch, Operand(0x80000000u)); + add(zeros, zeros, Operand(1), LeaveCC, eq); +#endif +} + + void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii( Register first, Register second, diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h index 8c70d95873..1097bd9d14 100644 --- a/deps/v8/src/arm/macro-assembler-arm.h +++ b/deps/v8/src/arm/macro-assembler-arm.h @@ -239,6 +239,12 @@ class MacroAssembler: public Assembler { Register scratch2, Label* gc_required); + // Allocates a heap number or jumps to the need_gc label if the young space + // is full and a scavenge is needed. + void AllocateHeapNumber(Register result, + Register scratch1, + Register scratch2, + Label* gc_required); // --------------------------------------------------------------------------- // Support functions. @@ -319,6 +325,12 @@ class MacroAssembler: public Assembler { Register outHighReg, Register outLowReg); + // Count leading zeros in a 32 bit word. On ARM5 and later it uses the clz + // instruction. On pre-ARM5 hardware this routine gives the wrong answer + // for 0 (31 instead of 32). 
+ void CountLeadingZeros(Register source, + Register scratch, + Register zeros); // --------------------------------------------------------------------------- // Runtime calls diff --git a/deps/v8/src/arm/simulator-arm.cc b/deps/v8/src/arm/simulator-arm.cc index cee5aea0d3..49b4a5b4ee 100644 --- a/deps/v8/src/arm/simulator-arm.cc +++ b/deps/v8/src/arm/simulator-arm.cc @@ -72,6 +72,8 @@ class Debugger { int32_t GetRegisterValue(int regnum); bool GetValue(const char* desc, int32_t* value); + bool GetVFPSingleValue(const char* desc, float* value); + bool GetVFPDoubleValue(const char* desc, double* value); // Set or delete a breakpoint. Returns true if successful. bool SetBreakpoint(Instr* breakpc); @@ -154,6 +156,28 @@ bool Debugger::GetValue(const char* desc, int32_t* value) { } +bool Debugger::GetVFPSingleValue(const char* desc, float* value) { + bool is_double; + int regnum = VFPRegisters::Number(desc, &is_double); + if (regnum != kNoRegister && !is_double) { + *value = sim_->get_float_from_s_register(regnum); + return true; + } + return false; +} + + +bool Debugger::GetVFPDoubleValue(const char* desc, double* value) { + bool is_double; + int regnum = VFPRegisters::Number(desc, &is_double); + if (regnum != kNoRegister && is_double) { + *value = sim_->get_double_from_d_register(regnum); + return true; + } + return false; +} + + bool Debugger::SetBreakpoint(Instr* breakpc) { // Check if a breakpoint can be set. If not return without any side-effects. if (sim_->break_pc_ != NULL) { @@ -249,6 +273,8 @@ void Debugger::Debug() { } else if ((strcmp(cmd, "p") == 0) || (strcmp(cmd, "print") == 0)) { if (args == 2) { int32_t value; + float svalue; + double dvalue; if (strcmp(arg1, "all") == 0) { for (int i = 0; i < kNumRegisters; i++) { value = GetRegisterValue(i); @@ -257,6 +283,10 @@ void Debugger::Debug() { } else { if (GetValue(arg1, &value)) { PrintF("%s: 0x%08x %d \n", arg1, value, value); + } else if (GetVFPSingleValue(arg1, &svalue)) { + PrintF("%s: %f \n", arg1, svalue); + } else if (GetVFPDoubleValue(arg1, &dvalue)) { + PrintF("%s: %lf \n", arg1, dvalue); } else { PrintF("%s unrecognized\n", arg1); } @@ -1919,6 +1949,13 @@ void Simulator::DecodeUnconditional(Instr* instr) { } +// Depending on value of last_bit flag glue register code from vm and m values +// (where m is expected to be a single bit). +static int GlueRegCode(bool last_bit, int vm, int m) { + return last_bit ? ((vm << 1) | m) : ((m << 4) | vm); +} + + // void Simulator::DecodeTypeVFP(Instr* instr) // The Following ARMv7 VFPv instructions are currently supported. 
// vmov :Sn = Rt @@ -1933,114 +1970,212 @@ void Simulator::DecodeUnconditional(Instr* instr) { // VMRS void Simulator::DecodeTypeVFP(Instr* instr) { ASSERT((instr->TypeField() == 7) && (instr->Bit(24) == 0x0) ); + ASSERT(instr->Bits(11, 9) == 0x5); - int rt = instr->RtField(); int vm = instr->VmField(); - int vn = instr->VnField(); int vd = instr->VdField(); + int vn = instr->VnField(); + + if (instr->Bit(4) == 0) { + if (instr->Opc1Field() == 0x7) { + // Other data processing instructions + if ((instr->Opc2Field() == 0x7) && (instr->Opc3Field() == 0x3)) { + DecodeVCVTBetweenDoubleAndSingle(instr); + } else if ((instr->Opc2Field() == 0x8) && (instr->Opc3Field() & 0x1)) { + DecodeVCVTBetweenFloatingPointAndInteger(instr); + } else if (((instr->Opc2Field() >> 1) == 0x6) && + (instr->Opc3Field() & 0x1)) { + DecodeVCVTBetweenFloatingPointAndInteger(instr); + } else if (((instr->Opc2Field() == 0x4) || (instr->Opc2Field() == 0x5)) && + (instr->Opc3Field() & 0x1)) { + DecodeVCMP(instr); + } else { + UNREACHABLE(); // Not used by V8. + } + } else if (instr->Opc1Field() == 0x3) { + if (instr->SzField() != 0x1) { + UNREACHABLE(); // Not used by V8. + } + + if (instr->Opc3Field() & 0x1) { + // vsub + double dn_value = get_double_from_d_register(vn); + double dm_value = get_double_from_d_register(vm); + double dd_value = dn_value - dm_value; + set_d_register_from_double(vd, dd_value); + } else { + // vadd + double dn_value = get_double_from_d_register(vn); + double dm_value = get_double_from_d_register(vm); + double dd_value = dn_value + dm_value; + set_d_register_from_double(vd, dd_value); + } + } else if ((instr->Opc1Field() == 0x2) && !(instr->Opc3Field() & 0x1)) { + // vmul + if (instr->SzField() != 0x1) { + UNREACHABLE(); // Not used by V8. + } - if (instr->Bit(23) == 1) { - if ((instr->Bits(21, 19) == 0x7) && - (instr->Bits(18, 16) == 0x5) && - (instr->Bits(11, 9) == 0x5) && - (instr->Bit(8) == 1) && - (instr->Bit(6) == 1) && - (instr->Bit(4) == 0)) { - double dm_val = get_double_from_d_register(vm); - int32_t int_value = static_cast<int32_t>(dm_val); - set_s_register_from_sinteger(((vd<<1) | instr->DField()), int_value); - } else if ((instr->Bits(21, 19) == 0x7) && - (instr->Bits(18, 16) == 0x0) && - (instr->Bits(11, 9) == 0x5) && - (instr->Bit(8) == 1) && - (instr->Bit(7) == 1) && - (instr->Bit(6) == 1) && - (instr->Bit(4) == 0)) { - int32_t int_value = get_sinteger_from_s_register(((vm<<1) | - instr->MField())); - double dbl_value = static_cast<double>(int_value); - set_d_register_from_double(vd, dbl_value); - } else if ((instr->Bit(21) == 0x0) && - (instr->Bit(20) == 0x0) && - (instr->Bits(11, 9) == 0x5) && - (instr->Bit(8) == 1) && - (instr->Bit(6) == 0) && - (instr->Bit(4) == 0)) { double dn_value = get_double_from_d_register(vn); double dm_value = get_double_from_d_register(vm); - double dd_value = dn_value / dm_value; + double dd_value = dn_value * dm_value; set_d_register_from_double(vd, dd_value); - } else if ((instr->Bits(21, 20) == 0x3) && - (instr->Bits(19, 16) == 0x4) && - (instr->Bits(11, 9) == 0x5) && - (instr->Bit(8) == 0x1) && - (instr->Bit(6) == 0x1) && - (instr->Bit(4) == 0x0)) { - double dd_value = get_double_from_d_register(vd); + } else if ((instr->Opc1Field() == 0x4) && !(instr->Opc3Field() & 0x1)) { + // vdiv + if (instr->SzField() != 0x1) { + UNREACHABLE(); // Not used by V8. 
+ } + + double dn_value = get_double_from_d_register(vn); double dm_value = get_double_from_d_register(vm); - Compute_FPSCR_Flags(dd_value, dm_value); - } else if ((instr->Bits(23, 20) == 0xF) && - (instr->Bits(19, 16) == 0x1) && - (instr->Bits(11, 8) == 0xA) && - (instr->Bits(7, 5) == 0x0) && - (instr->Bit(4) == 0x1) && - (instr->Bits(3, 0) == 0x0)) { - if (instr->Bits(15, 12) == 0xF) + double dd_value = dn_value / dm_value; + set_d_register_from_double(vd, dd_value); + } else { + UNIMPLEMENTED(); // Not used by V8. + } + } else { + if ((instr->VCField() == 0x0) && + (instr->VAField() == 0x0)) { + DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(instr); + } else if ((instr->VLField() == 0x1) && + (instr->VCField() == 0x0) && + (instr->VAField() == 0x7) && + (instr->Bits(19, 16) == 0x1)) { + // vmrs + if (instr->RtField() == 0xF) Copy_FPSCR_to_APSR(); else UNIMPLEMENTED(); // Not used by V8. } else { UNIMPLEMENTED(); // Not used by V8. } - } else if (instr->Bit(21) == 1) { - if ((instr->Bit(20) == 0x1) && - (instr->Bits(11, 9) == 0x5) && - (instr->Bit(8) == 0x1) && - (instr->Bit(6) == 0) && - (instr->Bit(4) == 0)) { - double dn_value = get_double_from_d_register(vn); - double dm_value = get_double_from_d_register(vm); - double dd_value = dn_value + dm_value; - set_d_register_from_double(vd, dd_value); - } else if ((instr->Bit(20) == 0x1) && - (instr->Bits(11, 9) == 0x5) && - (instr->Bit(8) == 0x1) && - (instr->Bit(6) == 1) && - (instr->Bit(4) == 0)) { - double dn_value = get_double_from_d_register(vn); - double dm_value = get_double_from_d_register(vm); - double dd_value = dn_value - dm_value; - set_d_register_from_double(vd, dd_value); - } else if ((instr->Bit(20) == 0x0) && - (instr->Bits(11, 9) == 0x5) && - (instr->Bit(8) == 0x1) && - (instr->Bit(6) == 0) && - (instr->Bit(4) == 0)) { - double dn_value = get_double_from_d_register(vn); - double dm_value = get_double_from_d_register(vm); - double dd_value = dn_value * dm_value; - set_d_register_from_double(vd, dd_value); - } else { + } +} + + +void Simulator::DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(Instr* instr) { + ASSERT((instr->Bit(4) == 1) && (instr->VCField() == 0x0) && + (instr->VAField() == 0x0)); + + int t = instr->RtField(); + int n = GlueRegCode(true, instr->VnField(), instr->NField()); + bool to_arm_register = (instr->VLField() == 0x1); + + if (to_arm_register) { + int32_t int_value = get_sinteger_from_s_register(n); + set_register(t, int_value); + } else { + int32_t rs_val = get_register(t); + set_s_register_from_sinteger(n, rs_val); + } +} + + +void Simulator::DecodeVCMP(Instr* instr) { + ASSERT((instr->Bit(4) == 0) && (instr->Opc1Field() == 0x7)); + ASSERT(((instr->Opc2Field() == 0x4) || (instr->Opc2Field() == 0x5)) && + (instr->Opc3Field() & 0x1)); + + // Comparison. + bool dp_operation = (instr->SzField() == 1); + + if (instr->Bit(7) != 0) { + // Raising exceptions for quiet NaNs are not supported. + UNIMPLEMENTED(); // Not used by V8. + } + + int d = GlueRegCode(!dp_operation, instr->VdField(), instr->DField()); + int m = GlueRegCode(!dp_operation, instr->VmField(), instr->MField()); + + if (dp_operation) { + double dd_value = get_double_from_d_register(d); + double dm_value = get_double_from_d_register(m); + + Compute_FPSCR_Flags(dd_value, dm_value); + } else { + UNIMPLEMENTED(); // Not used by V8. 
+ } +} + + +void Simulator::DecodeVCVTBetweenDoubleAndSingle(Instr* instr) { + ASSERT((instr->Bit(4) == 0) && (instr->Opc1Field() == 0x7)); + ASSERT((instr->Opc2Field() == 0x7) && (instr->Opc3Field() == 0x3)); + + bool double_to_single = (instr->SzField() == 1); + int dst = GlueRegCode(double_to_single, instr->VdField(), instr->DField()); + int src = GlueRegCode(!double_to_single, instr->VmField(), instr->MField()); + + if (double_to_single) { + double val = get_double_from_d_register(src); + set_s_register_from_float(dst, static_cast<float>(val)); + } else { + float val = get_float_from_s_register(src); + set_d_register_from_double(dst, static_cast<double>(val)); + } +} + + +void Simulator::DecodeVCVTBetweenFloatingPointAndInteger(Instr* instr) { + ASSERT((instr->Bit(4) == 0) && (instr->Opc1Field() == 0x7)); + ASSERT(((instr->Opc2Field() == 0x8) && (instr->Opc3Field() & 0x1)) || + (((instr->Opc2Field() >> 1) == 0x6) && (instr->Opc3Field() & 0x1))); + + // Conversion between floating-point and integer. + int vd = instr->VdField(); + int d = instr->DField(); + int vm = instr->VmField(); + int m = instr->MField(); + + bool to_integer = (instr->Bit(18) == 1); + bool dp_operation = (instr->SzField() == 1); + if (to_integer) { + bool unsigned_integer = (instr->Bit(16) == 0); + if (instr->Bit(7) != 1) { + // Only rounding towards zero supported. UNIMPLEMENTED(); // Not used by V8. } + + int dst = GlueRegCode(true, vd, d); + int src = GlueRegCode(!dp_operation, vm, m); + + if (dp_operation) { + double val = get_double_from_d_register(src); + + int sint = unsigned_integer ? static_cast<uint32_t>(val) : + static_cast<int32_t>(val); + + set_s_register_from_sinteger(dst, sint); + } else { + float val = get_float_from_s_register(src); + + int sint = unsigned_integer ? static_cast<uint32_t>(val) : + static_cast<int32_t>(val); + + set_s_register_from_sinteger(dst, sint); + } } else { - if ((instr->Bit(20) == 0x0) && - (instr->Bits(11, 8) == 0xA) && - (instr->Bits(6, 5) == 0x0) && - (instr->Bit(4) == 1) && - (instr->Bits(3, 0) == 0x0)) { - int32_t rs_val = get_register(rt); - set_s_register_from_sinteger(((vn<<1) | instr->NField()), rs_val); - } else if ((instr->Bit(20) == 0x1) && - (instr->Bits(11, 8) == 0xA) && - (instr->Bits(6, 5) == 0x0) && - (instr->Bit(4) == 1) && - (instr->Bits(3, 0) == 0x0)) { - int32_t int_value = get_sinteger_from_s_register(((vn<<1) | - instr->NField())); - set_register(rt, int_value); + bool unsigned_integer = (instr->Bit(7) == 0); + + int dst = GlueRegCode(!dp_operation, vd, d); + int src = GlueRegCode(true, vm, m); + + int val = get_sinteger_from_s_register(src); + + if (dp_operation) { + if (unsigned_integer) { + set_d_register_from_double(dst, + static_cast<double>((uint32_t)val)); + } else { + set_d_register_from_double(dst, static_cast<double>(val)); + } } else { - UNIMPLEMENTED(); // Not used by V8. + if (unsigned_integer) { + set_s_register_from_float(dst, + static_cast<float>((uint32_t)val)); + } else { + set_s_register_from_float(dst, static_cast<float>(val)); + } } } } @@ -2055,9 +2190,32 @@ void Simulator::DecodeTypeVFP(Instr* instr) { void Simulator::DecodeType6CoprocessorIns(Instr* instr) { ASSERT((instr->TypeField() == 6)); - if (instr->CoprocessorField() != 0xB) { - UNIMPLEMENTED(); // Not used by V8. - } else { + if (instr->CoprocessorField() == 0xA) { + switch (instr->OpcodeField()) { + case 0x8: + case 0xC: { // Load and store float to memory. 
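// Illustrative standalone sketch (editor's note, not from this patch): the
// effective address the simulator computes for single-precision vldr/vstr in
// the case below. The 8-bit immediate is a word offset, scaled by 4 and
// added to or subtracted from the base register depending on the U bit.
#include <cstdint>

static int32_t VfpTransferAddress(int32_t base, int imm8, bool u_bit) {
  int offset = u_bit ? imm8 : -imm8;   // U bit selects add vs. subtract
  return base + 4 * offset;
}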
+ int rn = instr->RnField(); + int vd = instr->VdField(); + int offset = instr->Immed8Field(); + if (!instr->HasU()) { + offset = -offset; + } + + int32_t address = get_register(rn) + 4 * offset; + if (instr->HasL()) { + // Load double from memory: vldr. + set_s_register_from_sinteger(vd, ReadW(address, instr)); + } else { + // Store double to memory: vstr. + WriteW(address, get_sinteger_from_s_register(vd), instr); + } + break; + } + default: + UNIMPLEMENTED(); // Not used by V8. + break; + } + } else if (instr->CoprocessorField() == 0xB) { switch (instr->OpcodeField()) { case 0x2: // Load and store double to two GP registers @@ -2106,6 +2264,8 @@ void Simulator::DecodeType6CoprocessorIns(Instr* instr) { UNIMPLEMENTED(); // Not used by V8. break; } + } else { + UNIMPLEMENTED(); // Not used by V8. } } diff --git a/deps/v8/src/arm/simulator-arm.h b/deps/v8/src/arm/simulator-arm.h index 19737301a6..4ee9070ffd 100644 --- a/deps/v8/src/arm/simulator-arm.h +++ b/deps/v8/src/arm/simulator-arm.h @@ -231,6 +231,11 @@ class Simulator { void DecodeTypeVFP(Instr* instr); void DecodeType6CoprocessorIns(Instr* instr); + void DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(Instr* instr); + void DecodeVCMP(Instr* instr); + void DecodeVCVTBetweenDoubleAndSingle(Instr* instr); + void DecodeVCVTBetweenFloatingPointAndInteger(Instr* instr); + // Executes one instruction. void InstructionDecode(Instr* instr); diff --git a/deps/v8/src/arm/virtual-frame-arm.cc b/deps/v8/src/arm/virtual-frame-arm.cc index ab6e5f8136..1e3a865658 100644 --- a/deps/v8/src/arm/virtual-frame-arm.cc +++ b/deps/v8/src/arm/virtual-frame-arm.cc @@ -47,16 +47,6 @@ void VirtualFrame::SyncElementByPushing(int index) { } -void VirtualFrame::SyncRange(int begin, int end) { - // All elements are in memory on ARM (ie, synced). -#ifdef DEBUG - for (int i = begin; i <= end; i++) { - ASSERT(elements_[i].is_synced()); - } -#endif -} - - void VirtualFrame::MergeTo(VirtualFrame* expected) { // ARM frames are currently always in memory. ASSERT(Equals(expected)); @@ -270,12 +260,7 @@ void VirtualFrame::Drop(int count) { } // Discard elements from the virtual frame and free any registers. - for (int i = 0; i < count; i++) { - FrameElement dropped = elements_.RemoveLast(); - if (dropped.is_register()) { - Unuse(dropped.reg()); - } - } + element_count_ -= count; } @@ -288,14 +273,14 @@ Result VirtualFrame::Pop() { void VirtualFrame::EmitPop(Register reg) { ASSERT(stack_pointer_ == element_count() - 1); stack_pointer_--; - elements_.RemoveLast(); + element_count_--; __ pop(reg); } void VirtualFrame::EmitPush(Register reg) { ASSERT(stack_pointer_ == element_count() - 1); - elements_.Add(FrameElement::MemoryElement(NumberInfo::Unknown())); + element_count_++; stack_pointer_++; __ push(reg); } diff --git a/deps/v8/src/arm/virtual-frame-arm.h b/deps/v8/src/arm/virtual-frame-arm.h index 9ac7a05480..6ba1eecc01 100644 --- a/deps/v8/src/arm/virtual-frame-arm.h +++ b/deps/v8/src/arm/virtual-frame-arm.h @@ -67,12 +67,8 @@ class VirtualFrame : public ZoneObject { CodeGenerator* cgen() { return CodeGeneratorScope::Current(); } MacroAssembler* masm() { return cgen()->masm(); } - // Create a duplicate of an existing valid frame element. - FrameElement CopyElementAt(int index, - NumberInfo info = NumberInfo::Unknown()); - // The number of elements on the virtual frame. - int element_count() { return elements_.length(); } + int element_count() { return element_count_; } // The height of the virtual expression stack. 
int height() { @@ -115,7 +111,7 @@ class VirtualFrame : public ZoneObject { stack_pointer_ -= count; // On ARM, all elements are in memory, so there is no extra bookkeeping // (registers, copies, etc.) beyond dropping the elements. - elements_.Rewind(stack_pointer_ + 1); + element_count_ -= count; } // Forget count elements from the top of the frame and adjust the stack @@ -124,7 +120,7 @@ class VirtualFrame : public ZoneObject { void ForgetElements(int count); // Spill all values from the frame to memory. - void SpillAll(); + inline void SpillAll(); // Spill all occurrences of a specific register from the frame. void Spill(Register reg) { @@ -179,7 +175,7 @@ class VirtualFrame : public ZoneObject { // dropping all non-locals elements in the virtual frame. This // avoids generating unnecessary merge code when jumping to the // shared return site. Emits code for spills. - void PrepareForReturn(); + inline void PrepareForReturn(); // Number of local variables after when we use a loop for allocating. static const int kLocalVarBound = 5; @@ -205,10 +201,6 @@ class VirtualFrame : public ZoneObject { SetElementAt(index, &temp); } - void PushElementAt(int index) { - PushFrameSlotAt(element_count() - index - 1); - } - // A frame-allocated local as an assembly operand. MemOperand LocalAt(int index) { ASSERT(0 <= index); @@ -216,11 +208,6 @@ class VirtualFrame : public ZoneObject { return MemOperand(fp, kLocal0Offset - index * kPointerSize); } - // Push a copy of the value of a local frame slot on top of the frame. - void PushLocalAt(int index) { - PushFrameSlotAt(local0_index() + index); - } - // Push the value of a local frame slot on top of the frame and invalidate // the local slot. The slot should be written to before trying to read // from it again. @@ -228,21 +215,12 @@ class VirtualFrame : public ZoneObject { TakeFrameSlotAt(local0_index() + index); } - // Store the top value on the virtual frame into a local frame slot. The - // value is left in place on top of the frame. - void StoreToLocalAt(int index) { - StoreToFrameSlotAt(local0_index() + index); - } - // Push the address of the receiver slot on the frame. void PushReceiverSlotAddress(); // The function frame slot. MemOperand Function() { return MemOperand(fp, kFunctionOffset); } - // Push the function on top of the frame. - void PushFunction() { PushFrameSlotAt(function_index()); } - // The context frame slot. MemOperand Context() { return MemOperand(fp, kContextOffset); } @@ -261,11 +239,6 @@ class VirtualFrame : public ZoneObject { return MemOperand(fp, (1 + parameter_count() - index) * kPointerSize); } - // Push a copy of the value of a parameter frame slot on top of the frame. - void PushParameterAt(int index) { - PushFrameSlotAt(param0_index() + index); - } - // Push the value of a paramter frame slot on top of the frame and // invalidate the parameter slot. The slot should be written to before // trying to read from it again. @@ -323,9 +296,6 @@ class VirtualFrame : public ZoneObject { // Drop one element. void Drop() { Drop(1); } - // Duplicate the top element of the frame. - void Dup() { PushFrameSlotAt(element_count() - 1); } - // Pop an element from the top of the expression stack. Returns a // Result, which may be a constant or a register. Result Pop(); @@ -344,28 +314,16 @@ class VirtualFrame : public ZoneObject { void EmitPushMultiple(int count, int src_regs); // Push an element on the virtual frame. 
- inline void Push(Register reg, NumberInfo info = NumberInfo::Unknown()); inline void Push(Handle<Object> value); inline void Push(Smi* value); - // Pushing a result invalidates it (its contents become owned by the frame). - void Push(Result* result) { - if (result->is_register()) { - Push(result->reg()); - } else { - ASSERT(result->is_constant()); - Push(result->handle()); - } - result->Unuse(); - } - // Nip removes zero or more elements from immediately below the top // of the frame, leaving the previous top-of-frame value on top of // the frame. Nip(k) is equivalent to x = Pop(), Drop(k), Push(x). inline void Nip(int num_dropped); - inline void SetTypeForLocalAt(int index, NumberInfo info); - inline void SetTypeForParamAt(int index, NumberInfo info); + inline void SetTypeForLocalAt(int index, TypeInfo info); + inline void SetTypeForParamAt(int index, TypeInfo info); private: static const int kLocal0Offset = JavaScriptFrameConstants::kLocal0Offset; @@ -375,7 +333,8 @@ class VirtualFrame : public ZoneObject { static const int kHandlerSize = StackHandlerConstants::kSize / kPointerSize; static const int kPreallocatedElements = 5 + 8; // 8 expression stack slots. - ZoneList<FrameElement> elements_; + // The number of elements on the stack frame. + int element_count_; // The index of the element that is at the processor's stack pointer // (the sp register). @@ -449,19 +408,12 @@ class VirtualFrame : public ZoneObject { // Keep the element type as register or constant, and clear the dirty bit. void SyncElementAt(int index); - // Sync the range of elements in [begin, end] with memory. - void SyncRange(int begin, int end); - // Sync a single unsynced element that lies beneath or at the stack pointer. void SyncElementBelowStackPointer(int index); // Sync a single unsynced element that lies just above the stack pointer. void SyncElementByPushing(int index); - // Push a copy of a frame slot (typically a local or parameter) on top of - // the frame. - inline void PushFrameSlotAt(int index); - // Push a the value of a frame slot (typically a local or parameter) on // top of the frame and invalidate the slot. void TakeFrameSlotAt(int index); @@ -505,9 +457,8 @@ class VirtualFrame : public ZoneObject { inline bool Equals(VirtualFrame* other); - // Classes that need raw access to the elements_ array. 
- friend class DeferredCode; friend class JumpTarget; + friend class DeferredCode; }; diff --git a/deps/v8/src/assembler.cc b/deps/v8/src/assembler.cc index aaf10efe8b..bb010c829f 100644 --- a/deps/v8/src/assembler.cc +++ b/deps/v8/src/assembler.cc @@ -664,6 +664,16 @@ ExternalReference ExternalReference::scheduled_exception_address() { } +ExternalReference ExternalReference::compile_array_pop_call() { + return ExternalReference(FUNCTION_ADDR(CompileArrayPopCall)); +} + + +ExternalReference ExternalReference::compile_array_push_call() { + return ExternalReference(FUNCTION_ADDR(CompileArrayPushCall)); +} + + #ifdef V8_NATIVE_REGEXP ExternalReference ExternalReference::re_check_stack_guard_state() { diff --git a/deps/v8/src/assembler.h b/deps/v8/src/assembler.h index cde7d69247..b4834e53f8 100644 --- a/deps/v8/src/assembler.h +++ b/deps/v8/src/assembler.h @@ -443,6 +443,9 @@ class ExternalReference BASE_EMBEDDED { static ExternalReference scheduled_exception_address(); + static ExternalReference compile_array_pop_call(); + static ExternalReference compile_array_push_call(); + Address address() const {return reinterpret_cast<Address>(address_);} #ifdef ENABLE_DEBUGGER_SUPPORT diff --git a/deps/v8/src/ast.cc b/deps/v8/src/ast.cc index 9fc4af0dba..9204a840c8 100644 --- a/deps/v8/src/ast.cc +++ b/deps/v8/src/ast.cc @@ -1,4 +1,4 @@ -// Copyright 2006-2008 the V8 project authors. All rights reserved. +// Copyright 2010 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -28,6 +28,7 @@ #include "v8.h" #include "ast.h" +#include "data-flow.h" #include "parser.h" #include "scopes.h" #include "string-stream.h" @@ -79,7 +80,8 @@ VariableProxy::VariableProxy(Handle<String> name, is_this_(is_this), inside_with_(inside_with), is_trivial_(false), - reaching_definitions_(NULL) { + reaching_definitions_(NULL), + is_primitive_(false) { // names must be canonicalized for fast equality checks ASSERT(name->IsSymbol()); } @@ -87,7 +89,8 @@ VariableProxy::VariableProxy(Handle<String> name, VariableProxy::VariableProxy(bool is_this) : is_this_(is_this), - reaching_definitions_(NULL) { + reaching_definitions_(NULL), + is_primitive_(false) { } @@ -169,6 +172,72 @@ void TargetCollector::AddTarget(BreakTarget* target) { } +bool Expression::GuaranteedSmiResult() { + BinaryOperation* node = AsBinaryOperation(); + if (node == NULL) return false; + Token::Value op = node->op(); + switch (op) { + case Token::COMMA: + case Token::OR: + case Token::AND: + case Token::ADD: + case Token::SUB: + case Token::MUL: + case Token::DIV: + case Token::MOD: + case Token::BIT_XOR: + case Token::SHL: + return false; + break; + case Token::BIT_OR: + case Token::BIT_AND: { + Literal* left = node->left()->AsLiteral(); + Literal* right = node->right()->AsLiteral(); + if (left != NULL && left->handle()->IsSmi()) { + int value = Smi::cast(*left->handle())->value(); + if (op == Token::BIT_OR && ((value & 0xc0000000) == 0xc0000000)) { + // Result of bitwise or is always a negative Smi. + return true; + } + if (op == Token::BIT_AND && ((value & 0xc0000000) == 0)) { + // Result of bitwise and is always a positive Smi. + return true; + } + } + if (right != NULL && right->handle()->IsSmi()) { + int value = Smi::cast(*right->handle())->value(); + if (op == Token::BIT_OR && ((value & 0xc0000000) == 0xc0000000)) { + // Result of bitwise or is always a negative Smi. 
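// Illustrative standalone sketch (editor's note, not from this patch): why
// the literal-mask cases in GuaranteedSmiResult can promise a smi. On 32-bit
// platforms a smi holds a signed value that fits in 31 bits, i.e. the range
// [-2^30, 2^30 - 1]. If a bitwise OR operand has both of its top two bits
// set, so does the result, which therefore lies in [-2^30, -1]; if an AND
// operand has both top bits clear, the result lies in [0, 2^30 - 1]. Shifts
// by two or more (and arithmetic shifts by one) shrink values into the same
// range. The exhaustive-ish loop below just spot-checks those claims.
#include <cstdint>
#include <cassert>

static bool FitsInSmi(int32_t v) { return v >= -(1 << 30) && v < (1 << 30); }

int main() {
  const int32_t or_mask = static_cast<int32_t>(0xC0000001u);   // top two bits set
  for (int64_t x = INT32_MIN; x <= INT32_MAX; x += 0x10001) {
    int32_t v = static_cast<int32_t>(x);
    assert(FitsInSmi(v | or_mask));
    assert(FitsInSmi(v & 0x3FFFFFFF));                          // top two bits clear
    assert(FitsInSmi(static_cast<int32_t>(static_cast<uint32_t>(v) >> 2)));  // SHR >= 2
    assert(FitsInSmi(v >> 1));                                  // SAR by 1
  }
  return 0;
}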
+ return true; + } + if (op == Token::BIT_AND && ((value & 0xc0000000) == 0)) { + // Result of bitwise and is always a positive Smi. + return true; + } + } + return false; + break; + } + case Token::SAR: + case Token::SHR: { + Literal* right = node->right()->AsLiteral(); + if (right != NULL && right->handle()->IsSmi()) { + int value = Smi::cast(*right->handle())->value(); + if ((value & 0x1F) > 1 || + (op == Token::SAR && (value & 0x1F) == 1)) { + return true; + } + } + return false; + break; + } + default: + UNREACHABLE(); + break; + } + return false; +} + // ---------------------------------------------------------------------------- // Implementation of AstVisitor @@ -507,7 +576,7 @@ RegExpAlternative::RegExpAlternative(ZoneList<RegExpTree*>* nodes) // The following expression types are never primitive because they express // Object values. bool FunctionLiteral::IsPrimitive() { return false; } -bool FunctionBoilerplateLiteral::IsPrimitive() { return false; } +bool SharedFunctionInfoLiteral::IsPrimitive() { return false; } bool RegExpLiteral::IsPrimitive() { return false; } bool ObjectLiteral::IsPrimitive() { return false; } bool ArrayLiteral::IsPrimitive() { return false; } @@ -518,12 +587,18 @@ bool ThisFunction::IsPrimitive() { return false; } // The following expression types are not always primitive because we do not // have enough information to conclude that they are. -bool VariableProxy::IsPrimitive() { return false; } bool Property::IsPrimitive() { return false; } bool Call::IsPrimitive() { return false; } bool CallRuntime::IsPrimitive() { return false; } +// A variable use is not primitive unless the primitive-type analysis +// determines otherwise. +bool VariableProxy::IsPrimitive() { + ASSERT(!is_primitive_ || (var() != NULL && var()->IsStackAllocated())); + return is_primitive_; +} + // The value of a conditional is the value of one of the alternatives. It's // always primitive if both alternatives are always primitive. bool Conditional::IsPrimitive() { @@ -592,4 +667,580 @@ bool BinaryOperation::IsPrimitive() { bool CompareOperation::IsPrimitive() { return true; } +// Overridden IsCritical member functions. IsCritical is true for AST nodes +// whose evaluation is absolutely required (they are never dead) because +// they are externally visible. + +// References to global variables or lookup slots are critical because they +// may have getters. All others, including parameters rewritten to explicit +// property references, are not critical. +bool VariableProxy::IsCritical() { + Variable* var = AsVariable(); + return var != NULL && + (var->slot() == NULL || var->slot()->type() == Slot::LOOKUP); +} + + +// Literals are never critical. +bool Literal::IsCritical() { return false; } + + +// Property assignments and throwing of reference errors are always +// critical. Assignments to escaping variables are also critical. In +// addition the operation of compound assignments is critical if either of +// its operands is non-primitive (the arithmetic operations all use one of +// ToPrimitive, ToNumber, ToInt32, or ToUint32 on each of their operands). +// In this case, we mark the entire AST node as critical because there is +// no binary operation node to mark. +bool Assignment::IsCritical() { + Variable* var = AssignedVariable(); + return var == NULL || + !var->IsStackAllocated() || + (is_compound() && (!target()->IsPrimitive() || !value()->IsPrimitive())); +} + + +// Property references are always critical, because they may have getters. 
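// A minimal stand-alone check (not from the V8 sources) of the shift-count
// argument that ends GuaranteedSmiResult() above: an arithmetic right shift
// (SAR) by at least 1, or a logical right shift (SHR) by at least 2, always
// leaves a value that fits in a signed 31-bit Smi, while SHR by exactly 1 can
// still produce 2^31 - 1 and must be rejected. The 31-bit bounds and the usual
// arithmetic behaviour of >> on signed values are assumptions of this sketch.
#include <cassert>
#include <stdint.h>

static const int32_t kSmiMin = -(1 << 30);
static const int32_t kSmiMax = (1 << 30) - 1;

static bool FitsSmi(int64_t value) {
  return value >= kSmiMin && value <= kSmiMax;
}

int main() {
  const uint32_t extremes[] = { 0u, 1u, 0x7FFFFFFFu, 0x80000000u, 0xFFFFFFFFu };
  for (int i = 0; i < 5; i++) {
    uint32_t bits = extremes[i];
    int32_t as_signed = static_cast<int32_t>(bits);
    assert(FitsSmi(as_signed >> 1));  // SAR by 1: magnitude is at most halved
    assert(FitsSmi(bits >> 2));       // SHR by 2: at most 30 significant bits
  }
  assert(!FitsSmi(0xFFFFFFFFu >> 1));  // SHR by 1 can exceed the Smi range
  return 0;
}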
+bool Property::IsCritical() { return true; } + + +// Calls are always critical. +bool Call::IsCritical() { return true; } + + +// +,- use ToNumber on the value of their operand. +bool UnaryOperation::IsCritical() { + ASSERT(op() == Token::ADD || op() == Token::SUB); + return !expression()->IsPrimitive(); +} + + +// Count operations targeting properties and reference errors are always +// critical. Count operations on escaping variables are critical. Count +// operations targeting non-primitives are also critical because they use +// ToNumber. +bool CountOperation::IsCritical() { + Variable* var = AssignedVariable(); + return var == NULL || + !var->IsStackAllocated() || + !expression()->IsPrimitive(); +} + + +// Arithmetic operations all use one of ToPrimitive, ToNumber, ToInt32, or +// ToUint32 on each of their operands. +bool BinaryOperation::IsCritical() { + ASSERT(op() != Token::COMMA); + ASSERT(op() != Token::OR); + ASSERT(op() != Token::AND); + return !left()->IsPrimitive() || !right()->IsPrimitive(); +} + + +// <, >, <=, and >= all use ToPrimitive on both their operands. +bool CompareOperation::IsCritical() { + ASSERT(op() != Token::EQ); + ASSERT(op() != Token::NE); + ASSERT(op() != Token::EQ_STRICT); + ASSERT(op() != Token::NE_STRICT); + ASSERT(op() != Token::INSTANCEOF); + ASSERT(op() != Token::IN); + return !left()->IsPrimitive() || !right()->IsPrimitive(); +} + + +static inline void MarkIfNotLive(Expression* expr, List<AstNode*>* stack) { + if (!expr->is_live()) { + expr->mark_as_live(); + stack->Add(expr); + } +} + + +// Overloaded functions for marking children of live code as live. +void VariableProxy::ProcessNonLiveChildren( + List<AstNode*>* stack, + ZoneList<Expression*>* body_definitions, + int variable_count) { + // A reference to a stack-allocated variable depends on all the + // definitions reaching it. + BitVector* defs = reaching_definitions(); + if (defs != NULL) { + ASSERT(var()->IsStackAllocated()); + // The first variable_count definitions are the initial parameter and + // local declarations. + for (int i = variable_count; i < defs->length(); i++) { + if (defs->Contains(i)) { + MarkIfNotLive(body_definitions->at(i - variable_count), stack); + } + } + } +} + + +void Literal::ProcessNonLiveChildren(List<AstNode*>* stack, + ZoneList<Expression*>* body_definitions, + int variable_count) { + // Leaf node, no children. +} + + +void Assignment::ProcessNonLiveChildren( + List<AstNode*>* stack, + ZoneList<Expression*>* body_definitions, + int variable_count) { + Property* prop = target()->AsProperty(); + VariableProxy* proxy = target()->AsVariableProxy(); + + if (prop != NULL) { + if (!prop->key()->IsPropertyName()) MarkIfNotLive(prop->key(), stack); + MarkIfNotLive(prop->obj(), stack); + } else if (proxy == NULL) { + // Must be a reference error. + ASSERT(!target()->IsValidLeftHandSide()); + MarkIfNotLive(target(), stack); + } else if (is_compound()) { + // A variable assignment so lhs is an operand to the operation. 
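// A minimal stand-alone sketch (not from the V8 sources) of the marking loop
// that the MarkIfNotLive() helper above feeds: critical nodes seed a worklist,
// every node popped from it marks its not-yet-live children, and only the
// expressions reachable from externally visible work end up live. The Node
// type and its child edges are illustrative assumptions.
#include <cassert>
#include <vector>

struct Node {
  bool critical;
  bool live;
  std::vector<Node*> children;
  explicit Node(bool is_critical) : critical(is_critical), live(false) {}
};

static void MarkIfNotLive(Node* node, std::vector<Node*>* stack) {
  if (!node->live) {
    node->live = true;
    stack->push_back(node);
  }
}

static void MarkLiveCode(const std::vector<Node*>& nodes) {
  std::vector<Node*> stack;
  for (size_t i = 0; i < nodes.size(); i++) {
    if (nodes[i]->critical) MarkIfNotLive(nodes[i], &stack);
  }
  while (!stack.empty()) {
    Node* node = stack.back();
    stack.pop_back();
    for (size_t i = 0; i < node->children.size(); i++) {
      MarkIfNotLive(node->children[i], &stack);
    }
  }
}

int main() {
  Node call(true), argument(false), unused(false);
  call.children.push_back(&argument);
  std::vector<Node*> nodes;
  nodes.push_back(&call);
  nodes.push_back(&argument);
  nodes.push_back(&unused);
  MarkLiveCode(nodes);
  assert(call.live && argument.live && !unused.live);  // dead code stays unmarked
  return 0;
}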
+ MarkIfNotLive(target(), stack); + } + MarkIfNotLive(value(), stack); +} + + +void Property::ProcessNonLiveChildren(List<AstNode*>* stack, + ZoneList<Expression*>* body_definitions, + int variable_count) { + if (!key()->IsPropertyName()) MarkIfNotLive(key(), stack); + MarkIfNotLive(obj(), stack); +} + + +void Call::ProcessNonLiveChildren(List<AstNode*>* stack, + ZoneList<Expression*>* body_definitions, + int variable_count) { + ZoneList<Expression*>* args = arguments(); + for (int i = args->length() - 1; i >= 0; i--) { + MarkIfNotLive(args->at(i), stack); + } + MarkIfNotLive(expression(), stack); +} + + +void UnaryOperation::ProcessNonLiveChildren( + List<AstNode*>* stack, + ZoneList<Expression*>* body_definitions, + int variable_count) { + MarkIfNotLive(expression(), stack); +} + + +void CountOperation::ProcessNonLiveChildren( + List<AstNode*>* stack, + ZoneList<Expression*>* body_definitions, + int variable_count) { + MarkIfNotLive(expression(), stack); +} + + +void BinaryOperation::ProcessNonLiveChildren( + List<AstNode*>* stack, + ZoneList<Expression*>* body_definitions, + int variable_count) { + MarkIfNotLive(right(), stack); + MarkIfNotLive(left(), stack); +} + + +void CompareOperation::ProcessNonLiveChildren( + List<AstNode*>* stack, + ZoneList<Expression*>* body_definitions, + int variable_count) { + MarkIfNotLive(right(), stack); + MarkIfNotLive(left(), stack); +} + + +// Implementation of a copy visitor. The visitor create a deep copy +// of ast nodes. Nodes that do not require a deep copy are copied +// with the default copy constructor. + +AstNode::AstNode(AstNode* other) : num_(kNoNumber) { + // AST node number should be unique. Assert that we only copy AstNodes + // before node numbers are assigned. + ASSERT(other->num_ == kNoNumber); +} + + +Statement::Statement(Statement* other) + : AstNode(other), statement_pos_(other->statement_pos_) {} + + +Expression::Expression(Expression* other) + : AstNode(other), + bitfields_(other->bitfields_), + type_(other->type_) {} + + +BreakableStatement::BreakableStatement(BreakableStatement* other) + : Statement(other), labels_(other->labels_), type_(other->type_) {} + + +Block::Block(Block* other, ZoneList<Statement*>* statements) + : BreakableStatement(other), + statements_(statements->length()), + is_initializer_block_(other->is_initializer_block_) { + statements_.AddAll(*statements); +} + + +ExpressionStatement::ExpressionStatement(ExpressionStatement* other, + Expression* expression) + : Statement(other), expression_(expression) {} + + +IfStatement::IfStatement(IfStatement* other, + Expression* condition, + Statement* then_statement, + Statement* else_statement) + : Statement(other), + condition_(condition), + then_statement_(then_statement), + else_statement_(else_statement) {} + + +EmptyStatement::EmptyStatement(EmptyStatement* other) : Statement(other) {} + + +IterationStatement::IterationStatement(IterationStatement* other, + Statement* body) + : BreakableStatement(other), body_(body) {} + + +ForStatement::ForStatement(ForStatement* other, + Statement* init, + Expression* cond, + Statement* next, + Statement* body) + : IterationStatement(other, body), + init_(init), + cond_(cond), + next_(next), + may_have_function_literal_(other->may_have_function_literal_), + loop_variable_(other->loop_variable_), + peel_this_loop_(other->peel_this_loop_) {} + + +Assignment::Assignment(Assignment* other, + Expression* target, + Expression* value) + : Expression(other), + op_(other->op_), + target_(target), + value_(value), + pos_(other->pos_), + 
block_start_(other->block_start_), + block_end_(other->block_end_) {} + + +Property::Property(Property* other, Expression* obj, Expression* key) + : Expression(other), + obj_(obj), + key_(key), + pos_(other->pos_), + type_(other->type_) {} + + +Call::Call(Call* other, + Expression* expression, + ZoneList<Expression*>* arguments) + : Expression(other), + expression_(expression), + arguments_(arguments), + pos_(other->pos_) {} + + +UnaryOperation::UnaryOperation(UnaryOperation* other, Expression* expression) + : Expression(other), op_(other->op_), expression_(expression) {} + + +BinaryOperation::BinaryOperation(BinaryOperation* other, + Expression* left, + Expression* right) + : Expression(other), + op_(other->op_), + left_(left), + right_(right) {} + + +CountOperation::CountOperation(CountOperation* other, Expression* expression) + : Expression(other), + is_prefix_(other->is_prefix_), + op_(other->op_), + expression_(expression) {} + + +CompareOperation::CompareOperation(CompareOperation* other, + Expression* left, + Expression* right) + : Expression(other), + op_(other->op_), + left_(left), + right_(right) {} + + +Expression* CopyAstVisitor::DeepCopyExpr(Expression* expr) { + expr_ = NULL; + if (expr != NULL) Visit(expr); + return expr_; +} + + +Statement* CopyAstVisitor::DeepCopyStmt(Statement* stmt) { + stmt_ = NULL; + if (stmt != NULL) Visit(stmt); + return stmt_; +} + + +ZoneList<Expression*>* CopyAstVisitor::DeepCopyExprList( + ZoneList<Expression*>* expressions) { + ZoneList<Expression*>* copy = + new ZoneList<Expression*>(expressions->length()); + for (int i = 0; i < expressions->length(); i++) { + copy->Add(DeepCopyExpr(expressions->at(i))); + } + return copy; +} + + +ZoneList<Statement*>* CopyAstVisitor::DeepCopyStmtList( + ZoneList<Statement*>* statements) { + ZoneList<Statement*>* copy = new ZoneList<Statement*>(statements->length()); + for (int i = 0; i < statements->length(); i++) { + copy->Add(DeepCopyStmt(statements->at(i))); + } + return copy; +} + + +void CopyAstVisitor::VisitBlock(Block* stmt) { + stmt_ = new Block(stmt, + DeepCopyStmtList(stmt->statements())); +} + + +void CopyAstVisitor::VisitExpressionStatement( + ExpressionStatement* stmt) { + stmt_ = new ExpressionStatement(stmt, DeepCopyExpr(stmt->expression())); +} + + +void CopyAstVisitor::VisitEmptyStatement(EmptyStatement* stmt) { + stmt_ = new EmptyStatement(stmt); +} + + +void CopyAstVisitor::VisitIfStatement(IfStatement* stmt) { + stmt_ = new IfStatement(stmt, + DeepCopyExpr(stmt->condition()), + DeepCopyStmt(stmt->then_statement()), + DeepCopyStmt(stmt->else_statement())); +} + + +void CopyAstVisitor::VisitContinueStatement(ContinueStatement* stmt) { + SetStackOverflow(); +} + + +void CopyAstVisitor::VisitBreakStatement(BreakStatement* stmt) { + SetStackOverflow(); +} + + +void CopyAstVisitor::VisitReturnStatement(ReturnStatement* stmt) { + SetStackOverflow(); +} + + +void CopyAstVisitor::VisitWithEnterStatement( + WithEnterStatement* stmt) { + SetStackOverflow(); +} + + +void CopyAstVisitor::VisitWithExitStatement(WithExitStatement* stmt) { + SetStackOverflow(); +} + + +void CopyAstVisitor::VisitSwitchStatement(SwitchStatement* stmt) { + SetStackOverflow(); +} + + +void CopyAstVisitor::VisitDoWhileStatement(DoWhileStatement* stmt) { + SetStackOverflow(); +} + + +void CopyAstVisitor::VisitWhileStatement(WhileStatement* stmt) { + SetStackOverflow(); +} + + +void CopyAstVisitor::VisitForStatement(ForStatement* stmt) { + stmt_ = new ForStatement(stmt, + DeepCopyStmt(stmt->init()), + DeepCopyExpr(stmt->cond()), 
+ DeepCopyStmt(stmt->next()), + DeepCopyStmt(stmt->body())); +} + + +void CopyAstVisitor::VisitForInStatement(ForInStatement* stmt) { + SetStackOverflow(); +} + + +void CopyAstVisitor::VisitTryCatchStatement(TryCatchStatement* stmt) { + SetStackOverflow(); +} + + +void CopyAstVisitor::VisitTryFinallyStatement( + TryFinallyStatement* stmt) { + SetStackOverflow(); +} + + +void CopyAstVisitor::VisitDebuggerStatement( + DebuggerStatement* stmt) { + SetStackOverflow(); +} + + +void CopyAstVisitor::VisitFunctionLiteral(FunctionLiteral* expr) { + SetStackOverflow(); +} + + +void CopyAstVisitor::VisitSharedFunctionInfoLiteral( + SharedFunctionInfoLiteral* expr) { + SetStackOverflow(); +} + + +void CopyAstVisitor::VisitConditional(Conditional* expr) { + SetStackOverflow(); +} + + +void CopyAstVisitor::VisitSlot(Slot* expr) { + UNREACHABLE(); +} + + +void CopyAstVisitor::VisitVariableProxy(VariableProxy* expr) { + expr_ = new VariableProxy(*expr); +} + + +void CopyAstVisitor::VisitLiteral(Literal* expr) { + expr_ = new Literal(*expr); +} + + +void CopyAstVisitor::VisitRegExpLiteral(RegExpLiteral* expr) { + SetStackOverflow(); +} + + +void CopyAstVisitor::VisitObjectLiteral(ObjectLiteral* expr) { + SetStackOverflow(); +} + + +void CopyAstVisitor::VisitArrayLiteral(ArrayLiteral* expr) { + SetStackOverflow(); +} + + +void CopyAstVisitor::VisitCatchExtensionObject( + CatchExtensionObject* expr) { + SetStackOverflow(); +} + + +void CopyAstVisitor::VisitAssignment(Assignment* expr) { + expr_ = new Assignment(expr, + DeepCopyExpr(expr->target()), + DeepCopyExpr(expr->value())); +} + + +void CopyAstVisitor::VisitThrow(Throw* expr) { + SetStackOverflow(); +} + + +void CopyAstVisitor::VisitProperty(Property* expr) { + expr_ = new Property(expr, + DeepCopyExpr(expr->obj()), + DeepCopyExpr(expr->key())); +} + + +void CopyAstVisitor::VisitCall(Call* expr) { + expr_ = new Call(expr, + DeepCopyExpr(expr->expression()), + DeepCopyExprList(expr->arguments())); +} + + +void CopyAstVisitor::VisitCallNew(CallNew* expr) { + SetStackOverflow(); +} + + +void CopyAstVisitor::VisitCallRuntime(CallRuntime* expr) { + SetStackOverflow(); +} + + +void CopyAstVisitor::VisitUnaryOperation(UnaryOperation* expr) { + expr_ = new UnaryOperation(expr, DeepCopyExpr(expr->expression())); +} + + +void CopyAstVisitor::VisitCountOperation(CountOperation* expr) { + expr_ = new CountOperation(expr, + DeepCopyExpr(expr->expression())); +} + + +void CopyAstVisitor::VisitBinaryOperation(BinaryOperation* expr) { + expr_ = new BinaryOperation(expr, + DeepCopyExpr(expr->left()), + DeepCopyExpr(expr->right())); +} + + +void CopyAstVisitor::VisitCompareOperation(CompareOperation* expr) { + expr_ = new CompareOperation(expr, + DeepCopyExpr(expr->left()), + DeepCopyExpr(expr->right())); +} + + +void CopyAstVisitor::VisitThisFunction(ThisFunction* expr) { + SetStackOverflow(); +} + + +void CopyAstVisitor::VisitDeclaration(Declaration* decl) { + UNREACHABLE(); +} + + } } // namespace v8::internal diff --git a/deps/v8/src/ast.h b/deps/v8/src/ast.h index 8248f62a8f..fa85eee6fd 100644 --- a/deps/v8/src/ast.h +++ b/deps/v8/src/ast.h @@ -73,7 +73,7 @@ namespace internal { #define EXPRESSION_NODE_LIST(V) \ V(FunctionLiteral) \ - V(FunctionBoilerplateLiteral) \ + V(SharedFunctionInfoLiteral) \ V(Conditional) \ V(Slot) \ V(VariableProxy) \ @@ -121,11 +121,15 @@ class AstNode: public ZoneObject { static const int kNoNumber = -1; AstNode() : num_(kNoNumber) {} + + explicit AstNode(AstNode* other); + virtual ~AstNode() { } virtual void Accept(AstVisitor* v) = 0; 
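// A minimal stand-alone sketch (not from the V8 sources) of the copy-visitor
// pattern CopyAstVisitor uses above: the result of visiting a node is a fresh
// node, leaves are duplicated with their copy constructor, composite nodes are
// rebuilt from deep copies of their children, and node kinds the copier does
// not handle set a bailout flag instead of producing a copy. The Expr/Literal/
// Binary/Unsupported types here are illustrative assumptions.
#include <cassert>
#include <cstddef>

struct Copier;

struct Expr {
  virtual ~Expr() {}
  virtual Expr* Accept(Copier* copier) = 0;
};

struct Copier {
  bool bailed_out;
  Copier() : bailed_out(false) {}
  Expr* DeepCopy(Expr* expr) { return expr == NULL ? NULL : expr->Accept(this); }
};

struct Literal : Expr {
  int value;
  explicit Literal(int v) : value(v) {}
  virtual Expr* Accept(Copier*) { return new Literal(*this); }  // plain copy suffices
};

struct Binary : Expr {
  Expr* left;
  Expr* right;
  Binary(Expr* l, Expr* r) : left(l), right(r) {}
  virtual Expr* Accept(Copier* copier) {  // rebuild from deep copies of the children
    return new Binary(copier->DeepCopy(left), copier->DeepCopy(right));
  }
};

struct Unsupported : Expr {
  virtual Expr* Accept(Copier* copier) {  // the analogue of SetStackOverflow()
    copier->bailed_out = true;
    return NULL;
  }
};

int main() {
  Binary original(new Literal(1), new Literal(2));
  Copier copier;
  Binary* copy = static_cast<Binary*>(copier.DeepCopy(&original));
  assert(copy != &original && copy->left != original.left);
  assert(static_cast<Literal*>(copy->left)->value == 1);

  Copier second;
  Unsupported odd;
  assert(second.DeepCopy(&odd) == NULL && second.bailed_out);
  return 0;
}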
// Type testing & conversion. virtual Statement* AsStatement() { return NULL; } + virtual Block* AsBlock() { return NULL; } virtual ExpressionStatement* AsExpressionStatement() { return NULL; } virtual EmptyStatement* AsEmptyStatement() { return NULL; } virtual Expression* AsExpression() { return NULL; } @@ -137,6 +141,7 @@ class AstNode: public ZoneObject { virtual TargetCollector* AsTargetCollector() { return NULL; } virtual BreakableStatement* AsBreakableStatement() { return NULL; } virtual IterationStatement* AsIterationStatement() { return NULL; } + virtual ForStatement* AsForStatement() { return NULL; } virtual UnaryOperation* AsUnaryOperation() { return NULL; } virtual CountOperation* AsCountOperation() { return NULL; } virtual BinaryOperation* AsBinaryOperation() { return NULL; } @@ -147,6 +152,13 @@ class AstNode: public ZoneObject { virtual ArrayLiteral* AsArrayLiteral() { return NULL; } virtual CompareOperation* AsCompareOperation() { return NULL; } + // True if the AST node is critical (its execution is needed or externally + // visible in some way). + virtual bool IsCritical() { + UNREACHABLE(); + return true; + } + int num() { return num_; } void set_num(int n) { num_ = n; } @@ -160,6 +172,8 @@ class Statement: public AstNode { public: Statement() : statement_pos_(RelocInfo::kNoPosition) {} + explicit Statement(Statement* other); + virtual Statement* AsStatement() { return this; } virtual ReturnStatement* AsReturnStatement() { return NULL; } @@ -198,11 +212,13 @@ class Expression: public AstNode { Expression() : bitfields_(0) {} + explicit Expression(Expression* other); + virtual Expression* AsExpression() { return this; } virtual bool IsValidLeftHandSide() { return false; } - virtual Variable* AssignedVar() { return NULL; } + virtual Variable* AssignedVariable() { return NULL; } // Symbols that cannot be parsed as array indices are considered property // names. We do not treat symbols that can be array indexes as property @@ -230,6 +246,19 @@ class Expression: public AstNode { // Static type information for this expression. StaticType* type() { return &type_; } + // True if the expression is a loop condition. + bool is_loop_condition() const { + return LoopConditionField::decode(bitfields_); + } + void set_is_loop_condition(bool flag) { + bitfields_ = (bitfields_ & ~LoopConditionField::mask()) | + LoopConditionField::encode(flag); + } + + // The value of the expression is guaranteed to be a smi, because the + // top operation is a bit operation with a mask, or a shift. + bool GuaranteedSmiResult(); + // AST analysis results // True if the expression rooted at this node can be compiled by the @@ -265,6 +294,18 @@ class Expression: public AstNode { bitfields_ |= NumBitOpsField::encode(num_bit_ops); } + // Functions used for dead-code elimination. Predicate is true if the + // expression is not dead code. + int is_live() const { return LiveField::decode(bitfields_); } + void mark_as_live() { bitfields_ |= LiveField::encode(true); } + + // Mark non-live children as live and push them on a stack for further + // processing. 
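// A minimal stand-alone sketch (not from the V8 sources) of the BitField
// pattern behind Expression::bitfields_ above: each field class knows its
// shift and width, so several small flags (is_loop_condition, is_live, the
// bit-op count, ...) share a single word and are read and written with
// mask/encode/decode. The field positions used here are illustrative, not the
// ones in the real class.
#include <cassert>
#include <stdint.h>

template <class T, int shift, int size>
struct BitField {
  static uint32_t mask() { return ((1u << size) - 1) << shift; }
  static uint32_t encode(T value) {
    return (static_cast<uint32_t>(value) << shift) & mask();
  }
  static T decode(uint32_t bits) { return static_cast<T>((bits & mask()) >> shift); }
};

typedef BitField<bool, 0, 1> LoopConditionField;
typedef BitField<bool, 1, 1> LiveField;
typedef BitField<int, 2, 5> NumBitOpsField;

int main() {
  uint32_t bitfields = 0;
  bitfields |= LiveField::encode(true);
  // Clearing the field before re-encoding mirrors set_is_loop_condition().
  bitfields = (bitfields & ~LoopConditionField::mask()) |
              LoopConditionField::encode(true);
  bitfields |= NumBitOpsField::encode(3);
  assert(LiveField::decode(bitfields));
  assert(LoopConditionField::decode(bitfields));
  assert(NumBitOpsField::decode(bitfields) == 3);
  return 0;
}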
+ virtual void ProcessNonLiveChildren( + List<AstNode*>* stack, + ZoneList<Expression*>* body_definitions, + int variable_count) { + } private: static const int kMaxNumBitOps = (1 << 5) - 1; @@ -277,6 +318,8 @@ class Expression: public AstNode { class NoNegativeZeroField : public BitField<bool, 1, 1> {}; class ToInt32Field : public BitField<bool, 2, 1> {}; class NumBitOpsField : public BitField<int, 3, 5> {}; + class LoopConditionField: public BitField<bool, 8, 1> {}; + class LiveField: public BitField<bool, 9, 1> {}; }; @@ -327,6 +370,8 @@ class BreakableStatement: public Statement { ASSERT(labels == NULL || labels->length() > 0); } + explicit BreakableStatement(BreakableStatement* other); + private: ZoneStringList* labels_; Type type_; @@ -341,8 +386,14 @@ class Block: public BreakableStatement { statements_(capacity), is_initializer_block_(is_initializer_block) { } + // Construct a clone initialized from the original block and + // a deep copy of all statements of the original block. + Block(Block* other, ZoneList<Statement*>* statements); + virtual void Accept(AstVisitor* v); + virtual Block* AsBlock() { return this; } + virtual Assignment* StatementAsSimpleAssignment() { if (statements_.length() != 1) return NULL; return statements_[0]->StatementAsSimpleAssignment(); @@ -394,6 +445,7 @@ class IterationStatement: public BreakableStatement { virtual IterationStatement* AsIterationStatement() { return this; } Statement* body() const { return body_; } + void set_body(Statement* stmt) { body_ = stmt; } // Code generation BreakTarget* continue_target() { return &continue_target_; } @@ -402,6 +454,10 @@ class IterationStatement: public BreakableStatement { explicit IterationStatement(ZoneStringList* labels) : BreakableStatement(labels, TARGET_FOR_ANONYMOUS), body_(NULL) { } + // Construct a clone initialized from original and + // a deep copy of the original body. + IterationStatement(IterationStatement* other, Statement* body); + void Initialize(Statement* body) { body_ = body; } @@ -475,7 +531,18 @@ class ForStatement: public IterationStatement { cond_(NULL), next_(NULL), may_have_function_literal_(true), - loop_variable_(NULL) {} + loop_variable_(NULL), + peel_this_loop_(false) {} + + // Construct a for-statement initialized from another for-statement + // and deep copies of all parts of the original statement. + ForStatement(ForStatement* other, + Statement* init, + Expression* cond, + Statement* next, + Statement* body); + + virtual ForStatement* AsForStatement() { return this; } void Initialize(Statement* init, Expression* cond, @@ -490,8 +557,11 @@ class ForStatement: public IterationStatement { virtual void Accept(AstVisitor* v); Statement* init() const { return init_; } + void set_init(Statement* stmt) { init_ = stmt; } Expression* cond() const { return cond_; } + void set_cond(Expression* expr) { cond_ = expr; } Statement* next() const { return next_; } + void set_next(Statement* stmt) { next_ = stmt; } bool may_have_function_literal() const { return may_have_function_literal_; } @@ -500,6 +570,9 @@ class ForStatement: public IterationStatement { Variable* loop_variable() { return loop_variable_; } void set_loop_variable(Variable* var) { loop_variable_ = var; } + bool peel_this_loop() { return peel_this_loop_; } + void set_peel_this_loop(bool b) { peel_this_loop_ = b; } + private: Statement* init_; Expression* cond_; @@ -507,6 +580,7 @@ class ForStatement: public IterationStatement { // True if there is a function literal subexpression in the condition. 
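// A minimal stand-alone sketch (not from the V8 sources) of the loop peeling
// that the peel_this_loop_ flag above requests: the first iteration is split
// off and run separately, and only the remaining iterations run in the loop
// proper. Peeling needs a second copy of the condition, body and
// next-expression, which is why the ForStatement clone constructor takes deep
// copies of all of them.
#include <cassert>

// The "original" loop: sum of 0 .. n-1.
static int SumOriginal(int n) {
  int sum = 0;
  for (int i = 0; i < n; i++) sum += i;
  return sum;
}

// The same loop with its first iteration peeled off.
static int SumPeeled(int n) {
  int sum = 0;
  int i = 0;
  if (i < n) {  // peeled copy of the condition...
    sum += i;   // ...of the body...
    i++;        // ...and of the next-expression
    for (; i < n; i++) sum += i;
  }
  return sum;
}

int main() {
  for (int n = 0; n < 10; n++) assert(SumOriginal(n) == SumPeeled(n));
  return 0;
}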
bool may_have_function_literal_; Variable* loop_variable_; + bool peel_this_loop_; friend class AstOptimizer; }; @@ -539,6 +613,10 @@ class ExpressionStatement: public Statement { explicit ExpressionStatement(Expression* expression) : expression_(expression) { } + // Construct an expression statement initialized from another + // expression statement and a deep copy of the original expression. + ExpressionStatement(ExpressionStatement* other, Expression* expression); + virtual void Accept(AstVisitor* v); // Type testing & conversion. @@ -681,6 +759,13 @@ class IfStatement: public Statement { then_statement_(then_statement), else_statement_(else_statement) { } + // Construct an if-statement initialized from another if-statement + // and deep copies of all parts of the original. + IfStatement(IfStatement* other, + Expression* condition, + Statement* then_statement, + Statement* else_statement); + virtual void Accept(AstVisitor* v); bool HasThenStatement() const { return !then_statement()->IsEmpty(); } @@ -688,7 +773,9 @@ class IfStatement: public Statement { Expression* condition() const { return condition_; } Statement* then_statement() const { return then_statement_; } + void set_then_statement(Statement* stmt) { then_statement_ = stmt; } Statement* else_statement() const { return else_statement_; } + void set_else_statement(Statement* stmt) { else_statement_ = stmt; } private: Expression* condition_; @@ -783,6 +870,10 @@ class DebuggerStatement: public Statement { class EmptyStatement: public Statement { public: + EmptyStatement() {} + + explicit EmptyStatement(EmptyStatement* other); + virtual void Accept(AstVisitor* v); // Type testing & conversion. @@ -815,6 +906,11 @@ class Literal: public Expression { virtual bool IsLeaf() { return true; } virtual bool IsTrivial() { return true; } virtual bool IsPrimitive(); + virtual bool IsCritical(); + virtual void ProcessNonLiveChildren( + List<AstNode*>* stack, + ZoneList<Expression*>* body_definitions, + int variable_count); // Identity testers. 
bool IsNull() const { return handle_.is_identical_to(Factory::null_value()); } @@ -1021,6 +1117,13 @@ class VariableProxy: public Expression { virtual bool IsTrivial() { return is_trivial_; } virtual bool IsPrimitive(); + virtual bool IsCritical(); + virtual void ProcessNonLiveChildren( + List<AstNode*>* stack, + ZoneList<Expression*>* body_definitions, + int variable_count); + + void SetIsPrimitive(bool value) { is_primitive_ = value; } bool IsVariable(Handle<String> n) { return !is_this() && name().is_identical_to(n); @@ -1051,6 +1154,7 @@ class VariableProxy: public Expression { bool inside_with_; bool is_trivial_; BitVector* reaching_definitions_; + bool is_primitive_; VariableProxy(Handle<String> name, bool is_this, bool inside_with); explicit VariableProxy(bool is_this); @@ -1145,6 +1249,8 @@ class Property: public Expression { Property(Expression* obj, Expression* key, int pos, Type type = NORMAL) : obj_(obj), key_(key), pos_(pos), type_(type) { } + Property(Property* other, Expression* obj, Expression* key); + virtual void Accept(AstVisitor* v); // Type testing & conversion @@ -1153,6 +1259,11 @@ class Property: public Expression { virtual bool IsValidLeftHandSide() { return true; } virtual bool IsPrimitive(); + virtual bool IsCritical(); + virtual void ProcessNonLiveChildren( + List<AstNode*>* stack, + ZoneList<Expression*>* body_definitions, + int variable_count); Expression* obj() const { return obj_; } Expression* key() const { return key_; } @@ -1179,12 +1290,19 @@ class Call: public Expression { Call(Expression* expression, ZoneList<Expression*>* arguments, int pos) : expression_(expression), arguments_(arguments), pos_(pos) { } + Call(Call* other, Expression* expression, ZoneList<Expression*>* arguments); + virtual void Accept(AstVisitor* v); // Type testing and conversion. virtual Call* AsCall() { return this; } virtual bool IsPrimitive(); + virtual bool IsCritical(); + virtual void ProcessNonLiveChildren( + List<AstNode*>* stack, + ZoneList<Expression*>* body_definitions, + int variable_count); Expression* expression() const { return expression_; } ZoneList<Expression*>* arguments() const { return arguments_; } @@ -1255,12 +1373,19 @@ class UnaryOperation: public Expression { ASSERT(Token::IsUnaryOp(op)); } + UnaryOperation(UnaryOperation* other, Expression* expression); + virtual void Accept(AstVisitor* v); // Type testing & conversion virtual UnaryOperation* AsUnaryOperation() { return this; } virtual bool IsPrimitive(); + virtual bool IsCritical(); + virtual void ProcessNonLiveChildren( + List<AstNode*>* stack, + ZoneList<Expression*>* body_definitions, + int variable_count); Token::Value op() const { return op_; } Expression* expression() const { return expression_; } @@ -1278,12 +1403,19 @@ class BinaryOperation: public Expression { ASSERT(Token::IsBinaryOp(op)); } + BinaryOperation(BinaryOperation* other, Expression* left, Expression* right); + virtual void Accept(AstVisitor* v); // Type testing & conversion virtual BinaryOperation* AsBinaryOperation() { return this; } virtual bool IsPrimitive(); + virtual bool IsCritical(); + virtual void ProcessNonLiveChildren( + List<AstNode*>* stack, + ZoneList<Expression*>* body_definitions, + int variable_count); // True iff the result can be safely overwritten (to avoid allocation). // False for operations that can return one of their operands. 
@@ -1329,15 +1461,22 @@ class CountOperation: public Expression { ASSERT(Token::IsCountOp(op)); } + CountOperation(CountOperation* other, Expression* expression); + virtual void Accept(AstVisitor* v); virtual CountOperation* AsCountOperation() { return this; } - virtual Variable* AssignedVar() { + virtual Variable* AssignedVariable() { return expression()->AsVariableProxy()->AsVariable(); } virtual bool IsPrimitive(); + virtual bool IsCritical(); + virtual void ProcessNonLiveChildren( + List<AstNode*>* stack, + ZoneList<Expression*>* body_definitions, + int variable_count); bool is_prefix() const { return is_prefix_; } bool is_postfix() const { return !is_prefix_; } @@ -1359,22 +1498,27 @@ class CountOperation: public Expression { class CompareOperation: public Expression { public: CompareOperation(Token::Value op, Expression* left, Expression* right) - : op_(op), left_(left), right_(right), is_for_loop_condition_(false) { + : op_(op), left_(left), right_(right) { ASSERT(Token::IsCompareOp(op)); } + CompareOperation(CompareOperation* other, + Expression* left, + Expression* right); + virtual void Accept(AstVisitor* v); virtual bool IsPrimitive(); + virtual bool IsCritical(); + virtual void ProcessNonLiveChildren( + List<AstNode*>* stack, + ZoneList<Expression*>* body_definitions, + int variable_count); Token::Value op() const { return op_; } Expression* left() const { return left_; } Expression* right() const { return right_; } - // Accessors for flag whether this compare operation is hanging of a for loop. - bool is_for_loop_condition() const { return is_for_loop_condition_; } - void set_is_for_loop_condition() { is_for_loop_condition_ = true; } - // Type testing & conversion virtual CompareOperation* AsCompareOperation() { return this; } @@ -1382,7 +1526,6 @@ class CompareOperation: public Expression { Token::Value op_; Expression* left_; Expression* right_; - bool is_for_loop_condition_; }; @@ -1418,14 +1561,21 @@ class Assignment: public Expression { ASSERT(Token::IsAssignmentOp(op)); } + Assignment(Assignment* other, Expression* target, Expression* value); + virtual void Accept(AstVisitor* v); virtual Assignment* AsAssignment() { return this; } virtual bool IsPrimitive(); + virtual bool IsCritical(); + virtual void ProcessNonLiveChildren( + List<AstNode*>* stack, + ZoneList<Expression*>* body_definitions, + int variable_count); Assignment* AsSimpleAssignment() { return !is_compound() ? 
this : NULL; } - virtual Variable* AssignedVar() { + virtual Variable* AssignedVariable() { return target()->AsVariableProxy()->AsVariable(); } @@ -1574,14 +1724,15 @@ class FunctionLiteral: public Expression { }; -class FunctionBoilerplateLiteral: public Expression { +class SharedFunctionInfoLiteral: public Expression { public: - explicit FunctionBoilerplateLiteral(Handle<JSFunction> boilerplate) - : boilerplate_(boilerplate) { - ASSERT(boilerplate->IsBoilerplate()); - } + explicit SharedFunctionInfoLiteral( + Handle<SharedFunctionInfo> shared_function_info) + : shared_function_info_(shared_function_info) { } - Handle<JSFunction> boilerplate() const { return boilerplate_; } + Handle<SharedFunctionInfo> shared_function_info() const { + return shared_function_info_; + } virtual bool IsLeaf() { return true; } @@ -1590,7 +1741,7 @@ class FunctionBoilerplateLiteral: public Expression { virtual bool IsPrimitive(); private: - Handle<JSFunction> boilerplate_; + Handle<SharedFunctionInfo> shared_function_info_; }; @@ -1993,6 +2144,28 @@ class AstVisitor BASE_EMBEDDED { }; +class CopyAstVisitor : public AstVisitor { + public: + Expression* DeepCopyExpr(Expression* expr); + + Statement* DeepCopyStmt(Statement* stmt); + + private: + ZoneList<Expression*>* DeepCopyExprList(ZoneList<Expression*>* expressions); + + ZoneList<Statement*>* DeepCopyStmtList(ZoneList<Statement*>* statements); + + // AST node visit functions. +#define DECLARE_VISIT(type) virtual void Visit##type(type* node); + AST_NODE_LIST(DECLARE_VISIT) +#undef DECLARE_VISIT + + // Holds the result of copying an expression. + Expression* expr_; + // Holds the result of copying a statement. + Statement* stmt_; +}; + } } // namespace v8::internal #endif // V8_AST_H_ diff --git a/deps/v8/src/bootstrapper.cc b/deps/v8/src/bootstrapper.cc index 56b185a2ce..8a9fa4bf69 100644 --- a/deps/v8/src/bootstrapper.cc +++ b/deps/v8/src/bootstrapper.cc @@ -59,11 +59,12 @@ class SourceCodeCache BASE_EMBEDDED { } - bool Lookup(Vector<const char> name, Handle<JSFunction>* handle) { + bool Lookup(Vector<const char> name, Handle<SharedFunctionInfo>* handle) { for (int i = 0; i < cache_->length(); i+=2) { SeqAsciiString* str = SeqAsciiString::cast(cache_->get(i)); if (str->IsEqualTo(name)) { - *handle = Handle<JSFunction>(JSFunction::cast(cache_->get(i + 1))); + *handle = Handle<SharedFunctionInfo>( + SharedFunctionInfo::cast(cache_->get(i + 1))); return true; } } @@ -71,8 +72,7 @@ class SourceCodeCache BASE_EMBEDDED { } - void Add(Vector<const char> name, Handle<JSFunction> fun) { - ASSERT(fun->IsBoilerplate()); + void Add(Vector<const char> name, Handle<SharedFunctionInfo> shared) { HandleScope scope; int length = cache_->length(); Handle<FixedArray> new_array = @@ -81,8 +81,8 @@ class SourceCodeCache BASE_EMBEDDED { cache_ = *new_array; Handle<String> str = Factory::NewStringFromAscii(name, TENURED); cache_->set(length, *str); - cache_->set(length + 1, *fun); - Script::cast(fun->shared()->script())->set_type(Smi::FromInt(type_)); + cache_->set(length + 1, *shared); + Script::cast(shared->script())->set_type(Smi::FromInt(type_)); } private: @@ -91,7 +91,6 @@ class SourceCodeCache BASE_EMBEDDED { DISALLOW_COPY_AND_ASSIGN(SourceCodeCache); }; -static SourceCodeCache natives_cache(Script::TYPE_NATIVE); static SourceCodeCache extensions_cache(Script::TYPE_EXTENSION); // This is for delete, not delete[]. 
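// A minimal stand-alone sketch (not from the V8 sources) of the flat
// name -> compiled-script cache that SourceCodeCache above implements: entries
// are (name, value) pairs kept in a single array and looked up linearly, which
// is cheap because only a handful of natives and extensions ever pass through
// it. CompiledScript is an illustrative stand-in for SharedFunctionInfo.
#include <cassert>
#include <string>
#include <utility>
#include <vector>

struct CompiledScript {
  std::string source;
};

class SourceCache {
 public:
  bool Lookup(const std::string& name, CompiledScript* out) const {
    for (size_t i = 0; i < entries_.size(); i++) {
      if (entries_[i].first == name) {
        *out = entries_[i].second;
        return true;
      }
    }
    return false;
  }
  void Add(const std::string& name, const CompiledScript& script) {
    entries_.push_back(std::make_pair(name, script));
  }

 private:
  std::vector<std::pair<std::string, CompiledScript> > entries_;
};

int main() {
  SourceCache cache;
  CompiledScript script;
  script.source = "function f() {}";
  CompiledScript found;
  assert(!cache.Lookup("math", &found));  // first request: compile, then...
  cache.Add("math", script);              // ...remember the compiled form
  assert(cache.Lookup("math", &found) && found.source == script.source);
  return 0;
}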
static List<char*>* delete_these_non_arrays_on_tear_down = NULL; @@ -134,20 +133,7 @@ Handle<String> Bootstrapper::NativesSourceLookup(int index) { } -bool Bootstrapper::NativesCacheLookup(Vector<const char> name, - Handle<JSFunction>* handle) { - return natives_cache.Lookup(name, handle); -} - - -void Bootstrapper::NativesCacheAdd(Vector<const char> name, - Handle<JSFunction> fun) { - natives_cache.Add(name, fun); -} - - void Bootstrapper::Initialize(bool create_heap_objects) { - natives_cache.Initialize(create_heap_objects); extensions_cache.Initialize(create_heap_objects); } @@ -187,8 +173,7 @@ void Bootstrapper::TearDown() { delete_these_arrays_on_tear_down = NULL; } - natives_cache.Initialize(false); // Yes, symmetrical - extensions_cache.Initialize(false); + extensions_cache.Initialize(false); // Yes, symmetrical } @@ -197,17 +182,11 @@ class Genesis BASE_EMBEDDED { Genesis(Handle<Object> global_object, v8::Handle<v8::ObjectTemplate> global_template, v8::ExtensionConfiguration* extensions); - ~Genesis(); + ~Genesis() { } Handle<Context> result() { return result_; } Genesis* previous() { return previous_; } - static Genesis* current() { return current_; } - - // Support for thread preemption. - static int ArchiveSpacePerThread(); - static char* ArchiveState(char* to); - static char* RestoreState(char* from); private: Handle<Context> global_context_; @@ -216,18 +195,46 @@ class Genesis BASE_EMBEDDED { // triggered during environment creation there may be weak handle // processing callbacks which may create new environments. Genesis* previous_; - static Genesis* current_; Handle<Context> global_context() { return global_context_; } - void CreateRoots(v8::Handle<v8::ObjectTemplate> global_template, - Handle<Object> global_object); + // Creates some basic objects. Used for creating a context from scratch. + void CreateRoots(); + // Creates the empty function. Used for creating a context from scratch. + Handle<JSFunction> CreateEmptyFunction(); + // Creates the global objects using the global and the template passed in + // through the API. We call this regardless of whether we are building a + // context from scratch or using a deserialized one from the partial snapshot + // but in the latter case we don't use the objects it produces directly, as + // we have to used the deserialized ones that are linked together with the + // rest of the context snapshot. + Handle<JSGlobalProxy> CreateNewGlobals( + v8::Handle<v8::ObjectTemplate> global_template, + Handle<Object> global_object, + Handle<GlobalObject>* global_proxy_out); + // Hooks the given global proxy into the context. If the context was created + // by deserialization then this will unhook the global proxy that was + // deserialized, leaving the GC to pick it up. + void HookUpGlobalProxy(Handle<GlobalObject> inner_global, + Handle<JSGlobalProxy> global_proxy); + // Similarly, we want to use the inner global that has been created by the + // templates passed through the API. The inner global from the snapshot is + // detached from the other objects in the snapshot. + void HookUpInnerGlobal(Handle<GlobalObject> inner_global); + // New context initialization. Used for creating a context from scratch. + void InitializeGlobal(Handle<GlobalObject> inner_global, + Handle<JSFunction> empty_function); + // Installs the contents of the native .js files on the global objects. + // Used for creating a context from scratch. 
void InstallNativeFunctions(); bool InstallNatives(); - bool InstallExtensions(v8::ExtensionConfiguration* extensions); - bool InstallExtension(const char* name); - bool InstallExtension(v8::RegisteredExtension* current); - bool InstallSpecialObjects(); + // Used both for deserialized and from-scratch contexts to add the extensions + // provided. + static bool InstallExtensions(Handle<Context> global_context, + v8::ExtensionConfiguration* extensions); + static bool InstallExtension(const char* name); + static bool InstallExtension(v8::RegisteredExtension* current); + static void InstallSpecialObjects(Handle<Context> global_context); bool InstallJSBuiltins(Handle<JSBuiltinsObject> builtins); bool ConfigureApiObject(Handle<JSObject> object, Handle<ObjectTemplateInfo> object_template); @@ -251,33 +258,36 @@ class Genesis BASE_EMBEDDED { Handle<String> source, SourceCodeCache* cache, v8::Extension* extension, + Handle<Context> top_context, bool use_runtime_context); Handle<Context> result_; + Handle<JSFunction> empty_function_; + BootstrapperActive active_; + friend class Bootstrapper; }; -Genesis* Genesis::current_ = NULL; - void Bootstrapper::Iterate(ObjectVisitor* v) { - natives_cache.Iterate(v); - v->Synchronize("NativesCache"); extensions_cache.Iterate(v); v->Synchronize("Extensions"); } -bool Bootstrapper::IsActive() { - return Genesis::current() != NULL; -} - - Handle<Context> Bootstrapper::CreateEnvironment( Handle<Object> global_object, v8::Handle<v8::ObjectTemplate> global_template, v8::ExtensionConfiguration* extensions) { + HandleScope scope; + Handle<Context> env; Genesis genesis(global_object, global_template, extensions); - return genesis.result(); + env = genesis.result(); + if (!env.is_null()) { + if (InstallExtensions(env, extensions)) { + return env; + } + } + return Handle<Context>(); } @@ -299,9 +309,14 @@ void Bootstrapper::DetachGlobal(Handle<Context> env) { } -Genesis::~Genesis() { - ASSERT(current_ == this); - current_ = previous_; +void Bootstrapper::ReattachGlobal(Handle<Context> env, + Handle<Object> global_object) { + ASSERT(global_object->IsJSGlobalProxy()); + Handle<JSGlobalProxy> global = Handle<JSGlobalProxy>::cast(global_object); + env->global()->set_global_receiver(*global); + env->set_global_proxy(*global); + SetObjectPrototype(global, Handle<JSObject>(env->global())); + global->set_context(*env); } @@ -384,22 +399,7 @@ Handle<DescriptorArray> Genesis::ComputeFunctionInstanceDescriptor( } -void Genesis::CreateRoots(v8::Handle<v8::ObjectTemplate> global_template, - Handle<Object> global_object) { - HandleScope scope; - // Allocate the global context FixedArray first and then patch the - // closure and extension object later (we need the empty function - // and the global object, but in order to create those, we need the - // global context). - global_context_ = - Handle<Context>::cast( - GlobalHandles::Create(*Factory::NewGlobalContext())); - Top::set_context(*global_context()); - - // Allocate the message listeners object. - v8::NeanderArray listeners; - global_context()->set_message_listeners(*listeners.value()); - +Handle<JSFunction> Genesis::CreateEmptyFunction() { // Allocate the map for function instances. 
Handle<Map> fm = Factory::NewMap(JS_FUNCTION_TYPE, JSFunction::kSize); global_context()->set_function_instance_map(*fm); @@ -443,137 +443,195 @@ void Genesis::CreateRoots(v8::Handle<v8::ObjectTemplate> global_template, Handle<JSFunction> empty_function = Factory::NewFunction(symbol, Factory::null_value()); - { // --- E m p t y --- - Handle<Code> code = - Handle<Code>(Builtins::builtin(Builtins::EmptyFunction)); - empty_function->set_code(*code); - Handle<String> source = Factory::NewStringFromAscii(CStrVector("() {}")); - Handle<Script> script = Factory::NewScript(source); - script->set_type(Smi::FromInt(Script::TYPE_NATIVE)); - empty_function->shared()->set_script(*script); - empty_function->shared()->set_start_position(0); - empty_function->shared()->set_end_position(source->length()); - empty_function->shared()->DontAdaptArguments(); - global_context()->function_map()->set_prototype(*empty_function); - global_context()->function_instance_map()->set_prototype(*empty_function); - - // Allocate the function map first and then patch the prototype later - Handle<Map> empty_fm = Factory::CopyMapDropDescriptors(fm); - empty_fm->set_instance_descriptors(*function_map_descriptors); - empty_fm->set_prototype(global_context()->object_function()->prototype()); - empty_function->set_map(*empty_fm); - } + // --- E m p t y --- + Handle<Code> code = + Handle<Code>(Builtins::builtin(Builtins::EmptyFunction)); + empty_function->set_code(*code); + Handle<String> source = Factory::NewStringFromAscii(CStrVector("() {}")); + Handle<Script> script = Factory::NewScript(source); + script->set_type(Smi::FromInt(Script::TYPE_NATIVE)); + empty_function->shared()->set_script(*script); + empty_function->shared()->set_start_position(0); + empty_function->shared()->set_end_position(source->length()); + empty_function->shared()->DontAdaptArguments(); + global_context()->function_map()->set_prototype(*empty_function); + global_context()->function_instance_map()->set_prototype(*empty_function); - { // --- G l o b a l --- - // Step 1: create a fresh inner JSGlobalObject - Handle<GlobalObject> object; - { - Handle<JSFunction> js_global_function; - Handle<ObjectTemplateInfo> js_global_template; - if (!global_template.IsEmpty()) { - // Get prototype template of the global_template - Handle<ObjectTemplateInfo> data = - v8::Utils::OpenHandle(*global_template); - Handle<FunctionTemplateInfo> global_constructor = - Handle<FunctionTemplateInfo>( - FunctionTemplateInfo::cast(data->constructor())); - Handle<Object> proto_template(global_constructor->prototype_template()); - if (!proto_template->IsUndefined()) { - js_global_template = - Handle<ObjectTemplateInfo>::cast(proto_template); - } - } + // Allocate the function map first and then patch the prototype later + Handle<Map> empty_fm = Factory::CopyMapDropDescriptors(fm); + empty_fm->set_instance_descriptors(*function_map_descriptors); + empty_fm->set_prototype(global_context()->object_function()->prototype()); + empty_function->set_map(*empty_fm); + return empty_function; +} - if (js_global_template.is_null()) { - Handle<String> name = Handle<String>(Heap::empty_symbol()); - Handle<Code> code = Handle<Code>(Builtins::builtin(Builtins::Illegal)); - js_global_function = - Factory::NewFunction(name, JS_GLOBAL_OBJECT_TYPE, - JSGlobalObject::kSize, code, true); - // Change the constructor property of the prototype of the - // hidden global function to refer to the Object function. 
- Handle<JSObject> prototype = - Handle<JSObject>( - JSObject::cast(js_global_function->instance_prototype())); - SetProperty(prototype, Factory::constructor_symbol(), - Top::object_function(), NONE); - } else { - Handle<FunctionTemplateInfo> js_global_constructor( - FunctionTemplateInfo::cast(js_global_template->constructor())); - js_global_function = - Factory::CreateApiFunction(js_global_constructor, - Factory::InnerGlobalObject); - } - js_global_function->initial_map()->set_is_hidden_prototype(); - object = Factory::NewGlobalObject(js_global_function); - } +void Genesis::CreateRoots() { + // Allocate the global context FixedArray first and then patch the + // closure and extension object later (we need the empty function + // and the global object, but in order to create those, we need the + // global context). + global_context_ = + Handle<Context>::cast( + GlobalHandles::Create(*Factory::NewGlobalContext())); + Top::set_context(*global_context()); - // Set the global context for the global object. - object->set_global_context(*global_context()); - - // Step 2: create or re-initialize the global proxy object. - Handle<JSGlobalProxy> global_proxy; - { - Handle<JSFunction> global_proxy_function; - if (global_template.IsEmpty()) { - Handle<String> name = Handle<String>(Heap::empty_symbol()); - Handle<Code> code = Handle<Code>(Builtins::builtin(Builtins::Illegal)); - global_proxy_function = - Factory::NewFunction(name, JS_GLOBAL_PROXY_TYPE, - JSGlobalProxy::kSize, code, true); - } else { - Handle<ObjectTemplateInfo> data = - v8::Utils::OpenHandle(*global_template); - Handle<FunctionTemplateInfo> global_constructor( - FunctionTemplateInfo::cast(data->constructor())); - global_proxy_function = - Factory::CreateApiFunction(global_constructor, - Factory::OuterGlobalObject); - } + // Allocate the message listeners object. + { + v8::NeanderArray listeners; + global_context()->set_message_listeners(*listeners.value()); + } +} - Handle<String> global_name = Factory::LookupAsciiSymbol("global"); - global_proxy_function->shared()->set_instance_class_name(*global_name); - global_proxy_function->initial_map()->set_is_access_check_needed(true); - - // Set global_proxy.__proto__ to js_global after ConfigureGlobalObjects - - if (global_object.location() != NULL) { - ASSERT(global_object->IsJSGlobalProxy()); - global_proxy = - ReinitializeJSGlobalProxy( - global_proxy_function, - Handle<JSGlobalProxy>::cast(global_object)); - } else { - global_proxy = Handle<JSGlobalProxy>::cast( - Factory::NewJSObject(global_proxy_function, TENURED)); - } - // Security setup: Set the security token of the global object to - // its the inner global. This makes the security check between two - // different contexts fail by default even in case of global - // object reinitialization. - object->set_global_receiver(*global_proxy); - global_proxy->set_context(*global_context()); +Handle<JSGlobalProxy> Genesis::CreateNewGlobals( + v8::Handle<v8::ObjectTemplate> global_template, + Handle<Object> global_object, + Handle<GlobalObject>* inner_global_out) { + // The argument global_template aka data is an ObjectTemplateInfo. + // It has a constructor pointer that points at global_constructor which is a + // FunctionTemplateInfo. + // The global_constructor is used to create or reinitialize the global_proxy. + // The global_constructor also has a prototype_template pointer that points at + // js_global_template which is an ObjectTemplateInfo. 
+ // That in turn has a constructor pointer that points at + // js_global_constructor which is a FunctionTemplateInfo. + // js_global_constructor is used to make js_global_function + // js_global_function is used to make the new inner_global. + // + // --- G l o b a l --- + // Step 1: Create a fresh inner JSGlobalObject. + Handle<JSFunction> js_global_function; + Handle<ObjectTemplateInfo> js_global_template; + if (!global_template.IsEmpty()) { + // Get prototype template of the global_template. + Handle<ObjectTemplateInfo> data = + v8::Utils::OpenHandle(*global_template); + Handle<FunctionTemplateInfo> global_constructor = + Handle<FunctionTemplateInfo>( + FunctionTemplateInfo::cast(data->constructor())); + Handle<Object> proto_template(global_constructor->prototype_template()); + if (!proto_template->IsUndefined()) { + js_global_template = + Handle<ObjectTemplateInfo>::cast(proto_template); } + } - { // --- G l o b a l C o n t e x t --- - // use the empty function as closure (no scope info) - global_context()->set_closure(*empty_function); - global_context()->set_fcontext(*global_context()); - global_context()->set_previous(NULL); - - // set extension and global object - global_context()->set_extension(*object); - global_context()->set_global(*object); - global_context()->set_global_proxy(*global_proxy); - // use inner global object as security token by default - global_context()->set_security_token(*object); - } + if (js_global_template.is_null()) { + Handle<String> name = Handle<String>(Heap::empty_symbol()); + Handle<Code> code = Handle<Code>(Builtins::builtin(Builtins::Illegal)); + js_global_function = + Factory::NewFunction(name, JS_GLOBAL_OBJECT_TYPE, + JSGlobalObject::kSize, code, true); + // Change the constructor property of the prototype of the + // hidden global function to refer to the Object function. + Handle<JSObject> prototype = + Handle<JSObject>( + JSObject::cast(js_global_function->instance_prototype())); + SetProperty(prototype, Factory::constructor_symbol(), + Top::object_function(), NONE); + } else { + Handle<FunctionTemplateInfo> js_global_constructor( + FunctionTemplateInfo::cast(js_global_template->constructor())); + js_global_function = + Factory::CreateApiFunction(js_global_constructor, + Factory::InnerGlobalObject); + } + + js_global_function->initial_map()->set_is_hidden_prototype(); + Handle<GlobalObject> inner_global = + Factory::NewGlobalObject(js_global_function); + if (inner_global_out != NULL) { + *inner_global_out = inner_global; + } + + // Step 2: create or re-initialize the global proxy object. + Handle<JSFunction> global_proxy_function; + if (global_template.IsEmpty()) { + Handle<String> name = Handle<String>(Heap::empty_symbol()); + Handle<Code> code = Handle<Code>(Builtins::builtin(Builtins::Illegal)); + global_proxy_function = + Factory::NewFunction(name, JS_GLOBAL_PROXY_TYPE, + JSGlobalProxy::kSize, code, true); + } else { + Handle<ObjectTemplateInfo> data = + v8::Utils::OpenHandle(*global_template); + Handle<FunctionTemplateInfo> global_constructor( + FunctionTemplateInfo::cast(data->constructor())); + global_proxy_function = + Factory::CreateApiFunction(global_constructor, + Factory::OuterGlobalObject); + } + + Handle<String> global_name = Factory::LookupAsciiSymbol("global"); + global_proxy_function->shared()->set_instance_class_name(*global_name); + global_proxy_function->initial_map()->set_is_access_check_needed(true); + + // Set global_proxy.__proto__ to js_global after ConfigureGlobalObjects + // Return the global proxy. 
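// A minimal embedder-side sketch (assuming the v8 2.1-era public API; it is
// not part of this patch) of the template chain CreateNewGlobals() walks
// above: properties put on the constructor's PrototypeTemplate() become
// visible through the hidden inner global's prototype chain, while the
// InstanceTemplate() is what gets passed to Context::New() and shapes the
// global proxy. The "answer" property is purely illustrative.
#include <v8.h>

int main() {
  v8::HandleScope handle_scope;
  v8::Handle<v8::FunctionTemplate> global_constructor = v8::FunctionTemplate::New();
  global_constructor->PrototypeTemplate()->Set("answer", v8::Integer::New(42));
  v8::Handle<v8::ObjectTemplate> global_template =
      global_constructor->InstanceTemplate();

  v8::Persistent<v8::Context> context = v8::Context::New(NULL, global_template);
  v8::Context::Scope context_scope(context);
  // "answer" resolves through the inner global created from the templates.
  v8::Handle<v8::Value> result =
      v8::Script::Compile(v8::String::New("answer"))->Run();
  bool found = !result.IsEmpty() && result->Int32Value() == 42;
  context.Dispose();
  return found ? 0 : 1;
}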
- Handle<JSObject> global = Handle<JSObject>(global_context()->global()); - SetProperty(global, object_name, Top::object_function(), DONT_ENUM); + if (global_object.location() != NULL) { + ASSERT(global_object->IsJSGlobalProxy()); + return ReinitializeJSGlobalProxy( + global_proxy_function, + Handle<JSGlobalProxy>::cast(global_object)); + } else { + return Handle<JSGlobalProxy>::cast( + Factory::NewJSObject(global_proxy_function, TENURED)); } +} + + +void Genesis::HookUpGlobalProxy(Handle<GlobalObject> inner_global, + Handle<JSGlobalProxy> global_proxy) { + // Set the global context for the global object. + inner_global->set_global_context(*global_context()); + inner_global->set_global_receiver(*global_proxy); + global_proxy->set_context(*global_context()); + global_context()->set_global_proxy(*global_proxy); +} + + +void Genesis::HookUpInnerGlobal(Handle<GlobalObject> inner_global) { + Handle<GlobalObject> inner_global_from_snapshot( + GlobalObject::cast(global_context_->extension())); + Handle<JSBuiltinsObject> builtins_global(global_context_->builtins()); + global_context_->set_extension(*inner_global); + global_context_->set_global(*inner_global); + global_context_->set_security_token(*inner_global); + static const PropertyAttributes attributes = + static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE); + ForceSetProperty(builtins_global, + Factory::LookupAsciiSymbol("global"), + inner_global, + attributes); + // Setup the reference from the global object to the builtins object. + JSGlobalObject::cast(*inner_global)->set_builtins(*builtins_global); + TransferNamedProperties(inner_global_from_snapshot, inner_global); + TransferIndexedProperties(inner_global_from_snapshot, inner_global); +} + + +// This is only called if we are not using snapshots. The equivalent +// work in the snapshot case is done in HookUpInnerGlobal. +void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global, + Handle<JSFunction> empty_function) { + // --- G l o b a l C o n t e x t --- + // Use the empty function as closure (no scope info). + global_context()->set_closure(*empty_function); + global_context()->set_fcontext(*global_context()); + global_context()->set_previous(NULL); + // Set extension and global object. + global_context()->set_extension(*inner_global); + global_context()->set_global(*inner_global); + // Security setup: Set the security token of the global object to + // its the inner global. This makes the security check between two + // different contexts fail by default even in case of global + // object reinitialization. 
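// A minimal embedder-side sketch (assuming the v8 2.1-era public API; it is
// not part of this patch) of the security-token rule described above: each
// context's inner global is its token by default, so cross-context access
// checks fail until the embedder deliberately gives both contexts the same
// token, which is what the debugger-exposure code further below does for the
// debug context.
#include <v8.h>

int main() {
  v8::HandleScope handle_scope;
  v8::Persistent<v8::Context> first = v8::Context::New();
  v8::Persistent<v8::Context> second = v8::Context::New();

  // With the default per-context tokens, objects from `first` are off limits
  // to code running in `second`. Sharing one token opts both contexts into
  // each other's access checks.
  v8::Handle<v8::String> token = v8::String::New("shared-token");
  first->SetSecurityToken(token);
  second->SetSecurityToken(token);

  first.Dispose();
  second.Dispose();
  return 0;
}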
+ global_context()->set_security_token(*inner_global); + + Handle<String> object_name = Handle<String>(Heap::Object_symbol()); + SetProperty(inner_global, object_name, Top::object_function(), DONT_ENUM); Handle<JSObject> global = Handle<JSObject>(global_context()->global()); @@ -791,8 +849,12 @@ bool Genesis::CompileNative(Vector<const char> name, Handle<String> source) { #ifdef ENABLE_DEBUGGER_SUPPORT Debugger::set_compiling_natives(true); #endif - bool result = - CompileScriptCached(name, source, &natives_cache, NULL, true); + bool result = CompileScriptCached(name, + source, + NULL, + NULL, + Handle<Context>(Top::context()), + true); ASSERT(Top::has_pending_exception() != result); if (!result) Top::clear_pending_exception(); #ifdef ENABLE_DEBUGGER_SUPPORT @@ -806,46 +868,46 @@ bool Genesis::CompileScriptCached(Vector<const char> name, Handle<String> source, SourceCodeCache* cache, v8::Extension* extension, + Handle<Context> top_context, bool use_runtime_context) { HandleScope scope; - Handle<JSFunction> boilerplate; + Handle<SharedFunctionInfo> function_info; // If we can't find the function in the cache, we compile a new // function and insert it into the cache. - if (!cache->Lookup(name, &boilerplate)) { + if (cache == NULL || !cache->Lookup(name, &function_info)) { ASSERT(source->IsAsciiRepresentation()); Handle<String> script_name = Factory::NewStringFromUtf8(name); - boilerplate = - Compiler::Compile( - source, - script_name, - 0, - 0, - extension, - NULL, - Handle<String>::null(), - use_runtime_context ? NATIVES_CODE : NOT_NATIVES_CODE); - if (boilerplate.is_null()) return false; - cache->Add(name, boilerplate); + function_info = Compiler::Compile( + source, + script_name, + 0, + 0, + extension, + NULL, + Handle<String>::null(), + use_runtime_context ? NATIVES_CODE : NOT_NATIVES_CODE); + if (function_info.is_null()) return false; + if (cache != NULL) cache->Add(name, function_info); } // Setup the function context. Conceptually, we should clone the // function before overwriting the context but since we're in a // single-threaded environment it is not strictly necessary. - ASSERT(Top::context()->IsGlobalContext()); + ASSERT(top_context->IsGlobalContext()); Handle<Context> context = Handle<Context>(use_runtime_context - ? Top::context()->runtime_context() - : Top::context()); + ? Handle<Context>(top_context->runtime_context()) + : top_context); Handle<JSFunction> fun = - Factory::NewFunctionFromBoilerplate(boilerplate, context); + Factory::NewFunctionFromSharedFunctionInfo(function_info, context); // Call function using either the runtime object or the global // object as the receiver. Provide no parameters. Handle<Object> receiver = Handle<Object>(use_runtime_context - ? Top::context()->builtins() - : Top::context()->global()); + ? top_context->builtins() + : top_context->global()); bool has_pending_exception; Handle<Object> result = Execution::Call(fun, receiver, 0, NULL, &has_pending_exception); @@ -1047,7 +1109,7 @@ bool Genesis::InstallNatives() { // Allocate the empty script. Handle<Script> script = Factory::NewScript(Factory::empty_string()); script->set_type(Smi::FromInt(Script::TYPE_NATIVE)); - global_context()->set_empty_script(*script); + Heap::public_set_empty_script(*script); } { // Builtin function for OpaqueReference -- a JSValue-based object, @@ -1063,48 +1125,23 @@ bool Genesis::InstallNatives() { global_context()->set_opaque_reference_function(*opaque_reference_fun); } - if (FLAG_natives_file == NULL) { - // Without natives file, install default natives. 
- for (int i = Natives::GetDelayCount(); - i < Natives::GetBuiltinsCount(); - i++) { - if (!CompileBuiltin(i)) return false; - // TODO(ager): We really only need to install the JS builtin - // functions on the builtins object after compiling and running - // runtime.js. - if (!InstallJSBuiltins(builtins)) return false; - } - - // Setup natives with lazy loading. - SetupLazy(Handle<JSFunction>(global_context()->date_function()), - Natives::GetIndex("date"), - Top::global_context(), - Handle<Context>(Top::context()->runtime_context())); - SetupLazy(Handle<JSFunction>(global_context()->regexp_function()), - Natives::GetIndex("regexp"), - Top::global_context(), - Handle<Context>(Top::context()->runtime_context())); - SetupLazy(Handle<JSObject>(global_context()->json_object()), - Natives::GetIndex("json"), - Top::global_context(), - Handle<Context>(Top::context()->runtime_context())); - - } else if (strlen(FLAG_natives_file) != 0) { - // Otherwise install natives from natives file if file exists and - // compiles. - bool exists; - Vector<const char> source = ReadFile(FLAG_natives_file, &exists); - Handle<String> source_string = Factory::NewStringFromAscii(source); - if (source.is_empty()) return false; - bool result = CompileNative(CStrVector(FLAG_natives_file), source_string); - if (!result) return false; - - } else { - // Empty natives file name - do not install any natives. + if (FLAG_disable_native_files) { PrintF("Warning: Running without installed natives!\n"); return true; } + // Install natives. + for (int i = Natives::GetDebuggerCount(); + i < Natives::GetBuiltinsCount(); + i++) { + Vector<const char> name = Natives::GetScriptName(i); + if (!CompileBuiltin(i)) return false; + // TODO(ager): We really only need to install the JS builtin + // functions on the builtins object after compiling and running + // runtime.js. + if (!InstallJSBuiltins(builtins)) return false; + } + InstallNativeFunctions(); // Install Function.prototype.call and apply. @@ -1143,14 +1180,29 @@ bool Genesis::InstallNatives() { #ifdef DEBUG builtins->Verify(); #endif + + return true; +} + + +int BootstrapperActive::nesting_ = 0; + + +bool Bootstrapper::InstallExtensions(Handle<Context> global_context, + v8::ExtensionConfiguration* extensions) { + BootstrapperActive active; + SaveContext saved_context; + Top::set_context(*global_context); + if (!Genesis::InstallExtensions(global_context, extensions)) return false; + Genesis::InstallSpecialObjects(global_context); return true; } -bool Genesis::InstallSpecialObjects() { +void Genesis::InstallSpecialObjects(Handle<Context> global_context) { HandleScope scope; Handle<JSGlobalObject> js_global( - JSGlobalObject::cast(global_context()->global())); + JSGlobalObject::cast(global_context->global())); // Expose the natives in global if a name for it is specified. if (FLAG_expose_natives_as != NULL && strlen(FLAG_expose_natives_as) != 0) { Handle<String> natives_string = @@ -1173,13 +1225,12 @@ bool Genesis::InstallSpecialObjects() { if (FLAG_expose_debug_as != NULL && strlen(FLAG_expose_debug_as) != 0) { // If loading fails we just bail out without installing the // debugger but without tanking the whole context. - if (!Debug::Load()) - return true; + if (!Debug::Load()) return; // Set the security token for the debugger context to the same as // the shell global context to allow calling between these (otherwise // exposing debug global object doesn't make much sense). 
Debug::debug_context()->set_security_token( - global_context()->security_token()); + global_context->security_token()); Handle<String> debug_string = Factory::LookupAsciiSymbol(FLAG_expose_debug_as); @@ -1187,19 +1238,18 @@ bool Genesis::InstallSpecialObjects() { Handle<Object>(Debug::debug_context()->global_proxy()), DONT_ENUM); } #endif - - return true; } -bool Genesis::InstallExtensions(v8::ExtensionConfiguration* extensions) { +bool Genesis::InstallExtensions(Handle<Context> global_context, + v8::ExtensionConfiguration* extensions) { // Clear coloring of extension list v8::RegisteredExtension* current = v8::RegisteredExtension::first_extension(); while (current != NULL) { current->set_state(v8::UNVISITED); current = current->next(); } - // Install auto extensions + // Install auto extensions. current = v8::RegisteredExtension::first_extension(); while (current != NULL) { if (current->extension()->auto_enable()) @@ -1263,7 +1313,9 @@ bool Genesis::InstallExtension(v8::RegisteredExtension* current) { Handle<String> source_code = Factory::NewStringFromAscii(source); bool result = CompileScriptCached(CStrVector(extension->name()), source_code, - &extensions_cache, extension, + &extensions_cache, + extension, + Handle<Context>(Top::context()), false); ASSERT(Top::has_pending_exception() != result); if (!result) { @@ -1294,7 +1346,7 @@ bool Genesis::ConfigureGlobalObjects( v8::Handle<v8::ObjectTemplate> global_proxy_template) { Handle<JSObject> global_proxy( JSObject::cast(global_context()->global_proxy())); - Handle<JSObject> js_global(JSObject::cast(global_context()->global())); + Handle<JSObject> inner_global(JSObject::cast(global_context()->global())); if (!global_proxy_template.IsEmpty()) { // Configure the global proxy object. @@ -1308,11 +1360,11 @@ bool Genesis::ConfigureGlobalObjects( if (!proxy_constructor->prototype_template()->IsUndefined()) { Handle<ObjectTemplateInfo> inner_data( ObjectTemplateInfo::cast(proxy_constructor->prototype_template())); - if (!ConfigureApiObject(js_global, inner_data)) return false; + if (!ConfigureApiObject(inner_global, inner_data)) return false; } } - SetObjectPrototype(global_proxy, js_global); + SetObjectPrototype(global_proxy, inner_global); return true; } @@ -1366,15 +1418,13 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from, // If the property is already there we skip it if (result.IsProperty()) continue; HandleScope inner; - Handle<DescriptorArray> inst_descs = - Handle<DescriptorArray>(to->map()->instance_descriptors()); + ASSERT(!to->HasFastProperties()); + // Add to dictionary. Handle<String> key = Handle<String>(descs->GetKey(i)); - Handle<Object> entry = Handle<Object>(descs->GetCallbacksObject(i)); - inst_descs = Factory::CopyAppendProxyDescriptor(inst_descs, - key, - entry, - details.attributes()); - to->map()->set_instance_descriptors(*inst_descs); + Handle<Object> callbacks(descs->GetCallbacksObject(i)); + PropertyDetails d = + PropertyDetails(details.attributes(), CALLBACKS, details.index()); + SetNormalizedProperty(to, key, callbacks, d); break; } case MAP_TRANSITION: @@ -1459,32 +1509,51 @@ void Genesis::MakeFunctionInstancePrototypeWritable() { Genesis::Genesis(Handle<Object> global_object, v8::Handle<v8::ObjectTemplate> global_template, v8::ExtensionConfiguration* extensions) { - // Link this genesis object into the stacked genesis chain. This - // must be done before any early exits because the destructor - // will always do unlinking. 
- previous_ = current_; - current_ = this; result_ = Handle<Context>::null(); - // If V8 isn't running and cannot be initialized, just return. if (!V8::IsRunning() && !V8::Initialize(NULL)) return; // Before creating the roots we must save the context and restore it // on all function exits. HandleScope scope; - SaveContext context; - - CreateRoots(global_template, global_object); - - if (!InstallNatives()) return; - - MakeFunctionInstancePrototypeWritable(); - - if (!ConfigureGlobalObjects(global_template)) return; - - if (!InstallExtensions(extensions)) return; - - if (!InstallSpecialObjects()) return; + SaveContext saved_context; + + Handle<Context> new_context = Snapshot::NewContextFromSnapshot(); + if (!new_context.is_null()) { + global_context_ = + Handle<Context>::cast(GlobalHandles::Create(*new_context)); + Top::set_context(*global_context_); + i::Counters::contexts_created_by_snapshot.Increment(); + result_ = global_context_; + JSFunction* empty_function = + JSFunction::cast(result_->function_map()->prototype()); + empty_function_ = Handle<JSFunction>(empty_function); + Handle<GlobalObject> inner_global; + Handle<JSGlobalProxy> global_proxy = + CreateNewGlobals(global_template, + global_object, + &inner_global); + + HookUpGlobalProxy(inner_global, global_proxy); + HookUpInnerGlobal(inner_global); + + if (!ConfigureGlobalObjects(global_template)) return; + } else { + // We get here if there was no context snapshot. + CreateRoots(); + Handle<JSFunction> empty_function = CreateEmptyFunction(); + Handle<GlobalObject> inner_global; + Handle<JSGlobalProxy> global_proxy = + CreateNewGlobals(global_template, global_object, &inner_global); + HookUpGlobalProxy(inner_global, global_proxy); + InitializeGlobal(inner_global, empty_function); + if (!InstallNatives()) return; + + MakeFunctionInstancePrototypeWritable(); + + if (!ConfigureGlobalObjects(global_template)) return; + i::Counters::contexts_created_from_scratch.Increment(); + } result_ = global_context_; } @@ -1494,46 +1563,46 @@ Genesis::Genesis(Handle<Object> global_object, // Reserve space for statics needing saving and restoring. int Bootstrapper::ArchiveSpacePerThread() { - return Genesis::ArchiveSpacePerThread(); + return BootstrapperActive::ArchiveSpacePerThread(); } // Archive statics that are thread local. char* Bootstrapper::ArchiveState(char* to) { - return Genesis::ArchiveState(to); + return BootstrapperActive::ArchiveState(to); } // Restore statics that are thread local. char* Bootstrapper::RestoreState(char* from) { - return Genesis::RestoreState(from); + return BootstrapperActive::RestoreState(from); } // Called when the top-level V8 mutex is destroyed. void Bootstrapper::FreeThreadResources() { - ASSERT(Genesis::current() == NULL); + ASSERT(!BootstrapperActive::IsActive()); } // Reserve space for statics needing saving and restoring. -int Genesis::ArchiveSpacePerThread() { - return sizeof(current_); +int BootstrapperActive::ArchiveSpacePerThread() { + return sizeof(nesting_); } // Archive statics that are thread local. -char* Genesis::ArchiveState(char* to) { - *reinterpret_cast<Genesis**>(to) = current_; - current_ = NULL; - return to + sizeof(current_); +char* BootstrapperActive::ArchiveState(char* to) { + *reinterpret_cast<int*>(to) = nesting_; + nesting_ = 0; + return to + sizeof(nesting_); } // Restore statics that are thread local. 
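The rewritten Genesis constructor above now has two paths: deserialize a context from the snapshot and only hook up the globals, or fall back to building everything from scratch, with separate counters recording which path was taken. A minimal sketch of that structure under assumed names (TryFromSnapshot, BuildFromScratch are placeholders, not V8 APIs):

#include <memory>

struct Environment {};  // stand-in for a global context

static int contexts_created_by_snapshot = 0;
static int contexts_created_from_scratch = 0;

// Illustrative stand-ins for the two construction strategies.
std::unique_ptr<Environment> TryFromSnapshot() { return nullptr; }  // no snapshot here
std::unique_ptr<Environment> BuildFromScratch() { return std::make_unique<Environment>(); }

std::unique_ptr<Environment> CreateEnvironment() {
  if (auto env = TryFromSnapshot()) {
    // Fast path: the heavy lifting was done at snapshot time; only the
    // embedder-supplied global object still has to be hooked up.
    ++contexts_created_by_snapshot;
    return env;
  }
  // Slow path: create roots, the empty function, globals and natives.
  ++contexts_created_from_scratch;
  return BuildFromScratch();
}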
-char* Genesis::RestoreState(char* from) { - current_ = *reinterpret_cast<Genesis**>(from); - return from + sizeof(current_); +char* BootstrapperActive::RestoreState(char* from) { + nesting_ = *reinterpret_cast<int*>(from); + return from + sizeof(nesting_); } } } // namespace v8::internal diff --git a/deps/v8/src/bootstrapper.h b/deps/v8/src/bootstrapper.h index cc775b284d..66b8ff478e 100644 --- a/deps/v8/src/bootstrapper.h +++ b/deps/v8/src/bootstrapper.h @@ -32,6 +32,24 @@ namespace v8 { namespace internal { + +class BootstrapperActive BASE_EMBEDDED { + public: + BootstrapperActive() { nesting_++; } + ~BootstrapperActive() { nesting_--; } + + // Support for thread preemption. + static int ArchiveSpacePerThread(); + static char* ArchiveState(char* to); + static char* RestoreState(char* from); + + private: + static bool IsActive() { return nesting_ != 0; } + static int nesting_; + friend class Bootstrapper; +}; + + // The Boostrapper is the public interface for creating a JavaScript global // context. class Bootstrapper : public AllStatic { @@ -50,17 +68,17 @@ class Bootstrapper : public AllStatic { // Detach the environment from its outer global object. static void DetachGlobal(Handle<Context> env); + // Reattach an outer global object to an environment. + static void ReattachGlobal(Handle<Context> env, Handle<Object> global_object); + // Traverses the pointers for memory management. static void Iterate(ObjectVisitor* v); - // Accessors for the native scripts cache. Used in lazy loading. + // Accessor for the native scripts source code. static Handle<String> NativesSourceLookup(int index); - static bool NativesCacheLookup(Vector<const char> name, - Handle<JSFunction>* handle); - static void NativesCacheAdd(Vector<const char> name, Handle<JSFunction> fun); // Tells whether bootstrapping is active. - static bool IsActive(); + static bool IsActive() { return BootstrapperActive::IsActive(); } // Encoding/decoding support for fixup flags. class FixupFlagsUseCodeObject: public BitField<bool, 0, 1> {}; @@ -75,6 +93,10 @@ class Bootstrapper : public AllStatic { // This will allocate a char array that is deleted when V8 is shut down. // It should only be used for strictly finite allocations. static char* AllocateAutoDeletedArray(int bytes); + + // Used for new context creation. + static bool InstallExtensions(Handle<Context> global_context, + v8::ExtensionConfiguration* extensions); }; diff --git a/deps/v8/src/builtins.cc b/deps/v8/src/builtins.cc index 91cb151520..122fbba2c4 100644 --- a/deps/v8/src/builtins.cc +++ b/deps/v8/src/builtins.cc @@ -443,6 +443,38 @@ BUILTIN(ArrayPop) { } +static FixedArray* LeftTrimFixedArray(FixedArray* elms) { + // For now this trick is only applied to fixed arrays in new space. + // In large object space the object's start must coincide with chunk + // and thus the trick is just not applicable. + // In old space we do not use this trick to avoid dealing with + // remembered sets. + ASSERT(Heap::new_space()->Contains(elms)); + + Object** former_map = + HeapObject::RawField(elms, FixedArray::kMapOffset); + Object** former_length = + HeapObject::RawField(elms, FixedArray::kLengthOffset); + Object** former_first = + HeapObject::RawField(elms, FixedArray::kHeaderSize); + // Check that we don't forget to copy all the bits. 
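The BootstrapperActive class introduced above replaces the stacked Genesis chain with a static nesting counter managed by an RAII guard, and thread preemption now archives just that int. A compact sketch of the same idea, written as self-contained C++ rather than against V8's thread support:

#include <cstring>

// Illustrative stand-in for BootstrapperActive.
class ScopedActive {
 public:
  ScopedActive() { ++nesting_; }
  ~ScopedActive() { --nesting_; }

  static bool IsActive() { return nesting_ != 0; }

  // Thread preemption support: save the counter for the outgoing thread
  // and restore it for the incoming one.
  static int ArchiveSpacePerThread() { return sizeof(nesting_); }
  static char* ArchiveState(char* to) {
    std::memcpy(to, &nesting_, sizeof(nesting_));
    nesting_ = 0;
    return to + sizeof(nesting_);
  }
  static char* RestoreState(char* from) {
    std::memcpy(&nesting_, from, sizeof(nesting_));
    return from + sizeof(nesting_);
  }

 private:
  static int nesting_;
};

int ScopedActive::nesting_ = 0;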
+ STATIC_ASSERT(FixedArray::kMapOffset + 2 * kPointerSize + == FixedArray::kHeaderSize); + + int len = elms->length(); + + *former_first = reinterpret_cast<Object*>(len - 1); + *former_length = Heap::fixed_array_map(); + // Technically in new space this write might be omitted (except for + // debug mode which iterates through the heap), but to play safer + // we still do it. + *former_map = Heap::raw_unchecked_one_pointer_filler_map(); + + ASSERT(elms->address() + kPointerSize == (elms + kPointerSize)->address()); + return elms + kPointerSize; +} + + BUILTIN(ArrayShift) { Object* receiver = *args.receiver(); FixedArray* elms = NULL; @@ -462,10 +494,14 @@ BUILTIN(ArrayShift) { first = Heap::undefined_value(); } - // Shift the elements. - AssertNoAllocation no_gc; - MoveElements(&no_gc, elms, 0, elms, 1, len - 1); - elms->set(len - 1, Heap::the_hole_value()); + if (Heap::new_space()->Contains(elms)) { + array->set_elements(LeftTrimFixedArray(elms)); + } else { + // Shift the elements. + AssertNoAllocation no_gc; + MoveElements(&no_gc, elms, 0, elms, 1, len - 1); + elms->set(len - 1, Heap::the_hole_value()); + } // Set the length. array->set_length(Smi::FromInt(len - 1)); diff --git a/deps/v8/src/circular-queue-inl.h b/deps/v8/src/circular-queue-inl.h index ffe8fb003e..962b069fb0 100644 --- a/deps/v8/src/circular-queue-inl.h +++ b/deps/v8/src/circular-queue-inl.h @@ -82,11 +82,10 @@ Record* CircularQueue<Record>::Next(Record* curr) { void* SamplingCircularQueue::Enqueue() { - Cell* enqueue_pos = reinterpret_cast<Cell*>( - Thread::GetThreadLocal(producer_key_)); - WrapPositionIfNeeded(&enqueue_pos); - Thread::SetThreadLocal(producer_key_, enqueue_pos + record_size_); - return enqueue_pos; + WrapPositionIfNeeded(&producer_pos_->enqueue_pos); + void* result = producer_pos_->enqueue_pos; + producer_pos_->enqueue_pos += record_size_; + return result; } diff --git a/deps/v8/src/circular-queue.cc b/deps/v8/src/circular-queue.cc index 5f7a33eb3a..a7c25323e8 100644 --- a/deps/v8/src/circular-queue.cc +++ b/deps/v8/src/circular-queue.cc @@ -52,52 +52,44 @@ SamplingCircularQueue::SamplingCircularQueue(int record_size_in_bytes, buffer_[i] = kClear; } buffer_[buffer_size_] = kEnd; + + // Layout producer and consumer position pointers each on their own + // cache lines to avoid cache lines thrashing due to simultaneous + // updates of positions by different processor cores. 
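LeftTrimFixedArray above makes Array.prototype.shift O(1) for arrays in new space: instead of moving len-1 elements down, it writes a new header one word further along, turns the old header word into a one-word filler the heap iterator can skip, and returns a pointer to the shifted array. A simplified standalone model of the trick on a single-word-header buffer (the layout and kFillerWord are illustrative, not V8's object layout):

#include <cassert>
#include <cstdint>
#include <iostream>

// Toy heap model: an array is one length word followed by its elements.
constexpr intptr_t kFillerWord = -1;  // stands in for the one-word filler map

// Drops the first element in O(1): the slot that held element 0 becomes the
// new length word, the old length word becomes a filler, and the array now
// starts one word further along.
intptr_t* LeftTrim(intptr_t* array) {
  intptr_t len = array[0];
  assert(len > 0);
  array[1] = len - 1;      // new length, written where element 0 used to be
  array[0] = kFillerWord;  // old header becomes a skippable filler
  return array + 1;        // the trimmed array
}

int main() {
  intptr_t heap[] = {4, 10, 20, 30, 40};  // length 4, elements 10..40
  intptr_t* arr = heap;
  intptr_t first = arr[1];                // element 0 == 10, the "shift" result
  arr = LeftTrim(arr);
  std::cout << first << " len=" << arr[0] << " first=" << arr[1] << "\n";  // 10 len=3 first=20
}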
+ const int positions_size = + RoundUp(1, kProcessorCacheLineSize) + + RoundUp(sizeof(ProducerPosition), kProcessorCacheLineSize) + + RoundUp(sizeof(ConsumerPosition), kProcessorCacheLineSize); + positions_ = NewArray<byte>(positions_size); + + producer_pos_ = reinterpret_cast<ProducerPosition*>( + RoundUp(positions_, kProcessorCacheLineSize)); + producer_pos_->enqueue_pos = buffer_; + + consumer_pos_ = reinterpret_cast<ConsumerPosition*>( + reinterpret_cast<byte*>(producer_pos_) + kProcessorCacheLineSize); + ASSERT(reinterpret_cast<byte*>(consumer_pos_ + 1) <= + positions_ + positions_size); + consumer_pos_->dequeue_chunk_pos = buffer_; + consumer_pos_->dequeue_chunk_poll_pos = buffer_ + producer_consumer_distance_; + consumer_pos_->dequeue_pos = NULL; } SamplingCircularQueue::~SamplingCircularQueue() { + DeleteArray(positions_); DeleteArray(buffer_); } -void SamplingCircularQueue::SetUpProducer() { - producer_key_ = Thread::CreateThreadLocalKey(); - Thread::SetThreadLocal(producer_key_, buffer_); -} - - -void SamplingCircularQueue::TearDownProducer() { - Thread::DeleteThreadLocalKey(producer_key_); -} - - -void SamplingCircularQueue::SetUpConsumer() { - consumer_key_ = Thread::CreateThreadLocalKey(); - ConsumerPosition* cp = new ConsumerPosition; - cp->dequeue_chunk_pos = buffer_; - cp->dequeue_chunk_poll_pos = buffer_ + producer_consumer_distance_; - cp->dequeue_pos = NULL; - Thread::SetThreadLocal(consumer_key_, cp); -} - - -void SamplingCircularQueue::TearDownConsumer() { - delete reinterpret_cast<ConsumerPosition*>( - Thread::GetThreadLocal(consumer_key_)); - Thread::DeleteThreadLocalKey(consumer_key_); -} - - void* SamplingCircularQueue::StartDequeue() { - ConsumerPosition* cp = reinterpret_cast<ConsumerPosition*>( - Thread::GetThreadLocal(consumer_key_)); - if (cp->dequeue_pos != NULL) { - return cp->dequeue_pos; + if (consumer_pos_->dequeue_pos != NULL) { + return consumer_pos_->dequeue_pos; } else { - if (*cp->dequeue_chunk_poll_pos != kClear) { - cp->dequeue_pos = cp->dequeue_chunk_pos; - cp->dequeue_end_pos = cp->dequeue_pos + chunk_size_; - return cp->dequeue_pos; + if (*consumer_pos_->dequeue_chunk_poll_pos != kClear) { + consumer_pos_->dequeue_pos = consumer_pos_->dequeue_chunk_pos; + consumer_pos_->dequeue_end_pos = consumer_pos_->dequeue_pos + chunk_size_; + return consumer_pos_->dequeue_pos; } else { return NULL; } @@ -106,25 +98,21 @@ void* SamplingCircularQueue::StartDequeue() { void SamplingCircularQueue::FinishDequeue() { - ConsumerPosition* cp = reinterpret_cast<ConsumerPosition*>( - Thread::GetThreadLocal(consumer_key_)); - cp->dequeue_pos += record_size_; - if (cp->dequeue_pos < cp->dequeue_end_pos) return; + consumer_pos_->dequeue_pos += record_size_; + if (consumer_pos_->dequeue_pos < consumer_pos_->dequeue_end_pos) return; // Move to next chunk. 
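The SamplingCircularQueue constructor above stops keeping the enqueue/dequeue cursors in thread-local storage and instead lays each position struct out on its own cache line, so the sampler thread and the processing thread never write to the same line. A minimal sketch of that layout using alignas; the 64-byte line size is an assumption here, where V8 uses kProcessorCacheLineSize, and the real code relies on each cursor having a single writer rather than on atomics:

#include <cstddef>

constexpr std::size_t kCacheLineSize = 64;  // assumed typical line size

// Producer- and consumer-owned cursors, each padded out to a full cache
// line so updates from one thread never dirty the other thread's line.
struct alignas(kCacheLineSize) ProducerPosition {
  std::size_t enqueue_pos = 0;  // written only by the sampling thread
};
struct alignas(kCacheLineSize) ConsumerPosition {
  std::size_t dequeue_pos = 0;  // written only by the processing thread
};

struct QueuePositions {
  ProducerPosition producer;
  ConsumerPosition consumer;    // starts on the next cache line
};

static_assert(sizeof(ProducerPosition) == kCacheLineSize, "padded to a full line");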
- cp->dequeue_pos = NULL; - *cp->dequeue_chunk_pos = kClear; - cp->dequeue_chunk_pos += chunk_size_; - WrapPositionIfNeeded(&cp->dequeue_chunk_pos); - cp->dequeue_chunk_poll_pos += chunk_size_; - WrapPositionIfNeeded(&cp->dequeue_chunk_poll_pos); + consumer_pos_->dequeue_pos = NULL; + *consumer_pos_->dequeue_chunk_pos = kClear; + consumer_pos_->dequeue_chunk_pos += chunk_size_; + WrapPositionIfNeeded(&consumer_pos_->dequeue_chunk_pos); + consumer_pos_->dequeue_chunk_poll_pos += chunk_size_; + WrapPositionIfNeeded(&consumer_pos_->dequeue_chunk_poll_pos); } void SamplingCircularQueue::FlushResidualRecords() { - ConsumerPosition* cp = reinterpret_cast<ConsumerPosition*>( - Thread::GetThreadLocal(consumer_key_)); // Eliminate producer / consumer distance. - cp->dequeue_chunk_poll_pos = cp->dequeue_chunk_pos; + consumer_pos_->dequeue_chunk_poll_pos = consumer_pos_->dequeue_chunk_pos; } diff --git a/deps/v8/src/circular-queue.h b/deps/v8/src/circular-queue.h index 11159e0388..dce7fc2ad9 100644 --- a/deps/v8/src/circular-queue.h +++ b/deps/v8/src/circular-queue.h @@ -76,15 +76,11 @@ class SamplingCircularQueue { int buffer_size_in_chunks); ~SamplingCircularQueue(); - // Executed on the producer (sampler) or application thread. - void SetUpProducer(); // Enqueue returns a pointer to a memory location for storing the next // record. INLINE(void* Enqueue()); - void TearDownProducer(); // Executed on the consumer (analyzer) thread. - void SetUpConsumer(); // StartDequeue returns a pointer to a memory location for retrieving // the next record. After the record had been read by a consumer, // FinishDequeue must be called. Until that moment, subsequent calls @@ -95,7 +91,6 @@ class SamplingCircularQueue { // the queue must be notified whether producing has been finished in order // to process remaining records from the buffer. void FlushResidualRecords(); - void TearDownConsumer(); typedef AtomicWord Cell; // Reserved values for the first cell of a record. @@ -103,6 +98,9 @@ class SamplingCircularQueue { static const Cell kEnd = -1; // Marks the end of the buffer. private: + struct ProducerPosition { + Cell* enqueue_pos; + }; struct ConsumerPosition { Cell* dequeue_chunk_pos; Cell* dequeue_chunk_poll_pos; @@ -118,10 +116,9 @@ class SamplingCircularQueue { const int buffer_size_; const int producer_consumer_distance_; Cell* buffer_; - // Store producer and consumer data in TLS to avoid modifying the - // same CPU cache line from two threads simultaneously. - Thread::LocalStorageKey consumer_key_; - Thread::LocalStorageKey producer_key_; + byte* positions_; + ProducerPosition* producer_pos_; + ConsumerPosition* consumer_pos_; }; diff --git a/deps/v8/src/codegen.cc b/deps/v8/src/codegen.cc index f9913b9c15..56d8f4bb9b 100644 --- a/deps/v8/src/codegen.cc +++ b/deps/v8/src/codegen.cc @@ -66,38 +66,6 @@ Comment::~Comment() { CodeGenerator* CodeGeneratorScope::top_ = NULL; -DeferredCode::DeferredCode() - : masm_(CodeGeneratorScope::Current()->masm()), - statement_position_(masm_->current_statement_position()), - position_(masm_->current_position()) { - ASSERT(statement_position_ != RelocInfo::kNoPosition); - ASSERT(position_ != RelocInfo::kNoPosition); - - CodeGeneratorScope::Current()->AddDeferred(this); -#ifdef DEBUG - comment_ = ""; -#endif - - // Copy the register locations from the code generator's frame. - // These are the registers that will be spilled on entry to the - // deferred code and restored on exit. 
- VirtualFrame* frame = CodeGeneratorScope::Current()->frame(); - int sp_offset = frame->fp_relative(frame->stack_pointer_); - for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) { - int loc = frame->register_location(i); - if (loc == VirtualFrame::kIllegalIndex) { - registers_[i] = kIgnore; - } else if (frame->elements_[loc].is_synced()) { - // Needs to be restored on exit but not saved on entry. - registers_[i] = frame->fp_relative(loc) | kSyncedFlag; - } else { - int offset = frame->fp_relative(loc); - registers_[i] = (offset < sp_offset) ? kPush : offset; - } - } -} - - void CodeGenerator::ProcessDeferred() { while (!deferred_.is_empty()) { DeferredCode* code = deferred_.RemoveLast(); @@ -336,8 +304,8 @@ void CodeGenerator::ProcessDeclarations(ZoneList<Declaration*>* declarations) { array->set_undefined(j++); } } else { - Handle<JSFunction> function = - Compiler::BuildBoilerplate(node->fun(), script(), this); + Handle<SharedFunctionInfo> function = + Compiler::BuildFunctionInfo(node->fun(), script(), this); // Check for stack-overflow exception. if (HasStackOverflow()) return; array->set(j++, *function); diff --git a/deps/v8/src/codegen.h b/deps/v8/src/codegen.h index 40ed6cefe6..4634f4c158 100644 --- a/deps/v8/src/codegen.h +++ b/deps/v8/src/codegen.h @@ -31,7 +31,7 @@ #include "ast.h" #include "code-stubs.h" #include "runtime.h" -#include "number-info.h" +#include "type-info.h" // Include the declaration of the architecture defined class CodeGenerator. // The contract to the shared code is that the the CodeGenerator is a subclass @@ -58,7 +58,7 @@ // ProcessDeferred // Generate // ComputeLazyCompile -// BuildBoilerplate +// BuildFunctionInfo // ComputeCallInitialize // ComputeCallInitializeInLoop // ProcessDeclarations @@ -346,8 +346,13 @@ class CompareStub: public CodeStub { public: CompareStub(Condition cc, bool strict, - NaNInformation nan_info = kBothCouldBeNaN) : - cc_(cc), strict_(strict), never_nan_nan_(nan_info == kCantBothBeNaN) { } + NaNInformation nan_info = kBothCouldBeNaN, + bool include_number_compare = true) : + cc_(cc), + strict_(strict), + never_nan_nan_(nan_info == kCantBothBeNaN), + include_number_compare_(include_number_compare), + name_(NULL) { } void Generate(MacroAssembler* masm); @@ -360,6 +365,16 @@ class CompareStub: public CodeStub { // generating the minor key for other comparisons to avoid creating more // stubs. bool never_nan_nan_; + // Do generate the number comparison code in the stub. Stubs without number + // comparison code is used when the number comparison has been inlined, and + // the stub will be called if one of the operands is not a number. + bool include_number_compare_; + + // Encoding of the minor key CCCCCCCCCCCCCCNS. + class StrictField: public BitField<bool, 0, 1> {}; + class NeverNanNanField: public BitField<bool, 1, 1> {}; + class IncludeNumberCompareField: public BitField<bool, 2, 1> {}; + class ConditionField: public BitField<int, 3, 13> {}; Major MajorKey() { return Compare; } @@ -373,12 +388,16 @@ class CompareStub: public CodeStub { // Unfortunately you have to run without snapshots to see most of these // names in the profile since most compare stubs end up in the snapshot. + char* name_; const char* GetName(); #ifdef DEBUG void Print() { - PrintF("CompareStub (cc %d), (strict %s)\n", + PrintF("CompareStub (cc %d), (strict %s), " + "(never_nan_nan %s), (number_compare %s)\n", static_cast<int>(cc_), - strict_ ? "true" : "false"); + strict_ ? "true" : "false", + never_nan_nan_ ? 
"true" : "false", + include_number_compare_ ? "included" : "not included"); } #endif }; diff --git a/deps/v8/src/compilation-cache.cc b/deps/v8/src/compilation-cache.cc index 378a24e750..f1ab87b085 100644 --- a/deps/v8/src/compilation-cache.cc +++ b/deps/v8/src/compilation-cache.cc @@ -28,6 +28,7 @@ #include "v8.h" #include "compilation-cache.h" +#include "serialize.h" namespace v8 { namespace internal { @@ -101,18 +102,18 @@ class CompilationCacheScript : public CompilationSubCache { explicit CompilationCacheScript(int generations) : CompilationSubCache(generations) { } - Handle<JSFunction> Lookup(Handle<String> source, - Handle<Object> name, - int line_offset, - int column_offset); - void Put(Handle<String> source, Handle<JSFunction> boilerplate); + Handle<SharedFunctionInfo> Lookup(Handle<String> source, + Handle<Object> name, + int line_offset, + int column_offset); + void Put(Handle<String> source, Handle<SharedFunctionInfo> function_info); private: // Note: Returns a new hash table if operation results in expansion. - Handle<CompilationCacheTable> TablePut(Handle<String> source, - Handle<JSFunction> boilerplate); + Handle<CompilationCacheTable> TablePut( + Handle<String> source, Handle<SharedFunctionInfo> function_info); - bool HasOrigin(Handle<JSFunction> boilerplate, + bool HasOrigin(Handle<SharedFunctionInfo> function_info, Handle<Object> name, int line_offset, int column_offset); @@ -127,17 +128,19 @@ class CompilationCacheEval: public CompilationSubCache { explicit CompilationCacheEval(int generations) : CompilationSubCache(generations) { } - Handle<JSFunction> Lookup(Handle<String> source, Handle<Context> context); + Handle<SharedFunctionInfo> Lookup(Handle<String> source, + Handle<Context> context); void Put(Handle<String> source, Handle<Context> context, - Handle<JSFunction> boilerplate); + Handle<SharedFunctionInfo> function_info); private: // Note: Returns a new hash table if operation results in expansion. - Handle<CompilationCacheTable> TablePut(Handle<String> source, - Handle<Context> context, - Handle<JSFunction> boilerplate); + Handle<CompilationCacheTable> TablePut( + Handle<String> source, + Handle<Context> context, + Handle<SharedFunctionInfo> function_info); DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationCacheEval); }; @@ -225,12 +228,13 @@ void CompilationSubCache::Clear() { // We only re-use a cached function for some script source code if the // script originates from the same place. This is to avoid issues // when reporting errors, etc. -bool CompilationCacheScript::HasOrigin(Handle<JSFunction> boilerplate, - Handle<Object> name, - int line_offset, - int column_offset) { +bool CompilationCacheScript::HasOrigin( + Handle<SharedFunctionInfo> function_info, + Handle<Object> name, + int line_offset, + int column_offset) { Handle<Script> script = - Handle<Script>(Script::cast(boilerplate->shared()->script())); + Handle<Script>(Script::cast(function_info->script())); // If the script name isn't set, the boilerplate script should have // an undefined name to have the same origin. if (name.is_null()) { @@ -250,10 +254,10 @@ bool CompilationCacheScript::HasOrigin(Handle<JSFunction> boilerplate, // be cached in the same script generation. Currently the first use // will be cached, but subsequent code from different source / line // won't. 
-Handle<JSFunction> CompilationCacheScript::Lookup(Handle<String> source, - Handle<Object> name, - int line_offset, - int column_offset) { +Handle<SharedFunctionInfo> CompilationCacheScript::Lookup(Handle<String> source, + Handle<Object> name, + int line_offset, + int column_offset) { Object* result = NULL; int generation; @@ -263,12 +267,13 @@ Handle<JSFunction> CompilationCacheScript::Lookup(Handle<String> source, for (generation = 0; generation < generations(); generation++) { Handle<CompilationCacheTable> table = GetTable(generation); Handle<Object> probe(table->Lookup(*source)); - if (probe->IsJSFunction()) { - Handle<JSFunction> boilerplate = Handle<JSFunction>::cast(probe); + if (probe->IsSharedFunctionInfo()) { + Handle<SharedFunctionInfo> function_info = + Handle<SharedFunctionInfo>::cast(probe); // Break when we've found a suitable boilerplate function that // matches the origin. - if (HasOrigin(boilerplate, name, line_offset, column_offset)) { - result = *boilerplate; + if (HasOrigin(function_info, name, line_offset, column_offset)) { + result = *function_info; break; } } @@ -290,38 +295,37 @@ Handle<JSFunction> CompilationCacheScript::Lookup(Handle<String> source, // to see if we actually found a cached script. If so, we return a // handle created in the caller's handle scope. if (result != NULL) { - Handle<JSFunction> boilerplate(JSFunction::cast(result)); - ASSERT(HasOrigin(boilerplate, name, line_offset, column_offset)); + Handle<SharedFunctionInfo> shared(SharedFunctionInfo::cast(result)); + ASSERT(HasOrigin(shared, name, line_offset, column_offset)); // If the script was found in a later generation, we promote it to // the first generation to let it survive longer in the cache. - if (generation != 0) Put(source, boilerplate); + if (generation != 0) Put(source, shared); Counters::compilation_cache_hits.Increment(); - return boilerplate; + return shared; } else { Counters::compilation_cache_misses.Increment(); - return Handle<JSFunction>::null(); + return Handle<SharedFunctionInfo>::null(); } } Handle<CompilationCacheTable> CompilationCacheScript::TablePut( Handle<String> source, - Handle<JSFunction> boilerplate) { - CALL_HEAP_FUNCTION(GetFirstTable()->Put(*source, *boilerplate), + Handle<SharedFunctionInfo> function_info) { + CALL_HEAP_FUNCTION(GetFirstTable()->Put(*source, *function_info), CompilationCacheTable); } void CompilationCacheScript::Put(Handle<String> source, - Handle<JSFunction> boilerplate) { + Handle<SharedFunctionInfo> function_info) { HandleScope scope; - ASSERT(boilerplate->IsBoilerplate()); - SetFirstTable(TablePut(source, boilerplate)); + SetFirstTable(TablePut(source, function_info)); } -Handle<JSFunction> CompilationCacheEval::Lookup(Handle<String> source, - Handle<Context> context) { +Handle<SharedFunctionInfo> CompilationCacheEval::Lookup( + Handle<String> source, Handle<Context> context) { // Make sure not to leak the table into the surrounding handle // scope. Otherwise, we risk keeping old tables around even after // having cleared the cache. 
@@ -331,21 +335,22 @@ Handle<JSFunction> CompilationCacheEval::Lookup(Handle<String> source, for (generation = 0; generation < generations(); generation++) { Handle<CompilationCacheTable> table = GetTable(generation); result = table->LookupEval(*source, *context); - if (result->IsJSFunction()) { + if (result->IsSharedFunctionInfo()) { break; } } } - if (result->IsJSFunction()) { - Handle<JSFunction> boilerplate(JSFunction::cast(result)); + if (result->IsSharedFunctionInfo()) { + Handle<SharedFunctionInfo> + function_info(SharedFunctionInfo::cast(result)); if (generation != 0) { - Put(source, context, boilerplate); + Put(source, context, function_info); } Counters::compilation_cache_hits.Increment(); - return boilerplate; + return function_info; } else { Counters::compilation_cache_misses.Increment(); - return Handle<JSFunction>::null(); + return Handle<SharedFunctionInfo>::null(); } } @@ -353,18 +358,19 @@ Handle<JSFunction> CompilationCacheEval::Lookup(Handle<String> source, Handle<CompilationCacheTable> CompilationCacheEval::TablePut( Handle<String> source, Handle<Context> context, - Handle<JSFunction> boilerplate) { - CALL_HEAP_FUNCTION(GetFirstTable()->PutEval(*source, *context, *boilerplate), + Handle<SharedFunctionInfo> function_info) { + CALL_HEAP_FUNCTION(GetFirstTable()->PutEval(*source, + *context, + *function_info), CompilationCacheTable); } void CompilationCacheEval::Put(Handle<String> source, Handle<Context> context, - Handle<JSFunction> boilerplate) { + Handle<SharedFunctionInfo> function_info) { HandleScope scope; - ASSERT(boilerplate->IsBoilerplate()); - SetFirstTable(TablePut(source, context, boilerplate)); + SetFirstTable(TablePut(source, context, function_info)); } @@ -415,26 +421,26 @@ void CompilationCacheRegExp::Put(Handle<String> source, } -Handle<JSFunction> CompilationCache::LookupScript(Handle<String> source, - Handle<Object> name, - int line_offset, - int column_offset) { +Handle<SharedFunctionInfo> CompilationCache::LookupScript(Handle<String> source, + Handle<Object> name, + int line_offset, + int column_offset) { if (!IsEnabled()) { - return Handle<JSFunction>::null(); + return Handle<SharedFunctionInfo>::null(); } return script.Lookup(source, name, line_offset, column_offset); } -Handle<JSFunction> CompilationCache::LookupEval(Handle<String> source, - Handle<Context> context, - bool is_global) { +Handle<SharedFunctionInfo> CompilationCache::LookupEval(Handle<String> source, + Handle<Context> context, + bool is_global) { if (!IsEnabled()) { - return Handle<JSFunction>::null(); + return Handle<SharedFunctionInfo>::null(); } - Handle<JSFunction> result; + Handle<SharedFunctionInfo> result; if (is_global) { result = eval_global.Lookup(source, context); } else { @@ -455,30 +461,28 @@ Handle<FixedArray> CompilationCache::LookupRegExp(Handle<String> source, void CompilationCache::PutScript(Handle<String> source, - Handle<JSFunction> boilerplate) { + Handle<SharedFunctionInfo> function_info) { if (!IsEnabled()) { return; } - ASSERT(boilerplate->IsBoilerplate()); - script.Put(source, boilerplate); + script.Put(source, function_info); } void CompilationCache::PutEval(Handle<String> source, Handle<Context> context, bool is_global, - Handle<JSFunction> boilerplate) { + Handle<SharedFunctionInfo> function_info) { if (!IsEnabled()) { return; } HandleScope scope; - ASSERT(boilerplate->IsBoilerplate()); if (is_global) { - eval_global.Put(source, context, boilerplate); + eval_global.Put(source, context, function_info); } else { - eval_contextual.Put(source, context, 
boilerplate); + eval_contextual.Put(source, context, function_info); } } diff --git a/deps/v8/src/compilation-cache.h b/deps/v8/src/compilation-cache.h index 3487c08a15..d23182258e 100644 --- a/deps/v8/src/compilation-cache.h +++ b/deps/v8/src/compilation-cache.h @@ -40,17 +40,17 @@ class CompilationCache { // Finds the script function boilerplate for a source // string. Returns an empty handle if the cache doesn't contain a // script for the given source string with the right origin. - static Handle<JSFunction> LookupScript(Handle<String> source, - Handle<Object> name, - int line_offset, - int column_offset); + static Handle<SharedFunctionInfo> LookupScript(Handle<String> source, + Handle<Object> name, + int line_offset, + int column_offset); // Finds the function boilerplate for a source string for eval in a // given context. Returns an empty handle if the cache doesn't // contain a script for the given source string. - static Handle<JSFunction> LookupEval(Handle<String> source, - Handle<Context> context, - bool is_global); + static Handle<SharedFunctionInfo> LookupEval(Handle<String> source, + Handle<Context> context, + bool is_global); // Returns the regexp data associated with the given regexp if it // is in cache, otherwise an empty handle. @@ -60,14 +60,14 @@ class CompilationCache { // Associate the (source, kind) pair to the boilerplate. This may // overwrite an existing mapping. static void PutScript(Handle<String> source, - Handle<JSFunction> boilerplate); + Handle<SharedFunctionInfo> function_info); // Associate the (source, context->closure()->shared(), kind) triple // with the boilerplate. This may overwrite an existing mapping. static void PutEval(Handle<String> source, Handle<Context> context, bool is_global, - Handle<JSFunction> boilerplate); + Handle<SharedFunctionInfo> function_info); // Associate the (source, flags) pair to the given regexp data. // This may overwrite an existing mapping. diff --git a/deps/v8/src/compiler.cc b/deps/v8/src/compiler.cc index 11098bae98..e2021fa2ae 100755 --- a/deps/v8/src/compiler.cc +++ b/deps/v8/src/compiler.cc @@ -1,4 +1,4 @@ -// Copyright 2009 the V8 project authors. All rights reserved. +// Copyright 2010 the V8 project authors. All rights reserved. 
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -89,23 +89,33 @@ static Handle<Code> MakeCode(Handle<Context> context, CompilationInfo* info) { } if (FLAG_use_flow_graph) { - FlowGraphBuilder builder; + int variable_count = + function->num_parameters() + function->scope()->num_stack_slots(); + FlowGraphBuilder builder(variable_count); builder.Build(function); if (!builder.HasStackOverflow()) { - int variable_count = - function->num_parameters() + function->scope()->num_stack_slots(); - if (variable_count > 0 && builder.definitions()->length() > 0) { + if (variable_count > 0) { ReachingDefinitions rd(builder.postorder(), - builder.definitions(), + builder.body_definitions(), variable_count); rd.Compute(); + + TypeAnalyzer ta(builder.postorder(), + builder.body_definitions(), + variable_count, + function->num_parameters()); + ta.Compute(); + + MarkLiveCode(builder.preorder(), + builder.body_definitions(), + variable_count); } } #ifdef DEBUG if (FLAG_print_graph_text && !builder.HasStackOverflow()) { - builder.graph()->PrintText(builder.postorder()); + builder.graph()->PrintText(function, builder.postorder()); } #endif } @@ -156,13 +166,13 @@ Handle<Code> MakeCodeForLiveEdit(CompilationInfo* info) { #endif -static Handle<JSFunction> MakeFunction(bool is_global, - bool is_eval, - Compiler::ValidationState validate, - Handle<Script> script, - Handle<Context> context, - v8::Extension* extension, - ScriptDataImpl* pre_data) { +static Handle<SharedFunctionInfo> MakeFunctionInfo(bool is_global, + bool is_eval, + Compiler::ValidationState validate, + Handle<Script> script, + Handle<Context> context, + v8::Extension* extension, + ScriptDataImpl* pre_data) { CompilationZoneScope zone_scope(DELETE_ON_EXIT); PostponeInterruptsScope postpone; @@ -204,7 +214,7 @@ static Handle<JSFunction> MakeFunction(bool is_global, // Check for parse errors. if (lit == NULL) { ASSERT(Top::has_pending_exception()); - return Handle<JSFunction>::null(); + return Handle<SharedFunctionInfo>::null(); } // Measure how long it takes to do the compilation; only take the @@ -222,7 +232,7 @@ static Handle<JSFunction> MakeFunction(bool is_global, // Check for stack-overflow exceptions. if (code.is_null()) { Top::StackOverflow(); - return Handle<JSFunction>::null(); + return Handle<SharedFunctionInfo>::null(); } #if defined ENABLE_LOGGING_AND_PROFILING || defined ENABLE_OPROFILE_AGENT @@ -248,38 +258,39 @@ static Handle<JSFunction> MakeFunction(bool is_global, #endif // Allocate function. - Handle<JSFunction> fun = - Factory::NewFunctionBoilerplate(lit->name(), - lit->materialized_literal_count(), - code); + Handle<SharedFunctionInfo> result = + Factory::NewSharedFunctionInfo(lit->name(), + lit->materialized_literal_count(), + code); ASSERT_EQ(RelocInfo::kNoPosition, lit->function_token_position()); - Compiler::SetFunctionInfo(fun, lit, true, script); + Compiler::SetFunctionInfo(result, lit, true, script); // Hint to the runtime system used when allocating space for initial // property space by setting the expected number of properties for // the instances of the function. 
- SetExpectedNofPropertiesFromEstimate(fun, lit->expected_property_count()); + SetExpectedNofPropertiesFromEstimate(result, lit->expected_property_count()); #ifdef ENABLE_DEBUGGER_SUPPORT // Notify debugger Debugger::OnAfterCompile(script, Debugger::NO_AFTER_COMPILE_FLAGS); #endif - return fun; + return result; } static StaticResource<SafeStringInputBuffer> safe_string_input_buffer; -Handle<JSFunction> Compiler::Compile(Handle<String> source, - Handle<Object> script_name, - int line_offset, int column_offset, - v8::Extension* extension, - ScriptDataImpl* input_pre_data, - Handle<Object> script_data, - NativesFlag natives) { +Handle<SharedFunctionInfo> Compiler::Compile(Handle<String> source, + Handle<Object> script_name, + int line_offset, + int column_offset, + v8::Extension* extension, + ScriptDataImpl* input_pre_data, + Handle<Object> script_data, + NativesFlag natives) { int source_length = source->length(); Counters::total_load_size.Increment(source_length); Counters::total_compile_size.Increment(source_length); @@ -288,7 +299,7 @@ Handle<JSFunction> Compiler::Compile(Handle<String> source, VMState state(COMPILER); // Do a lookup in the compilation cache but not for extensions. - Handle<JSFunction> result; + Handle<SharedFunctionInfo> result; if (extension == NULL) { result = CompilationCache::LookupScript(source, script_name, @@ -320,13 +331,13 @@ Handle<JSFunction> Compiler::Compile(Handle<String> source, : *script_data); // Compile the function and add it to the cache. - result = MakeFunction(true, - false, - DONT_VALIDATE_JSON, - script, - Handle<Context>::null(), - extension, - pre_data); + result = MakeFunctionInfo(true, + false, + DONT_VALIDATE_JSON, + script, + Handle<Context>::null(), + extension, + pre_data); if (extension == NULL && !result.is_null()) { CompilationCache::PutScript(source, result); } @@ -342,10 +353,10 @@ Handle<JSFunction> Compiler::Compile(Handle<String> source, } -Handle<JSFunction> Compiler::CompileEval(Handle<String> source, - Handle<Context> context, - bool is_global, - ValidationState validate) { +Handle<SharedFunctionInfo> Compiler::CompileEval(Handle<String> source, + Handle<Context> context, + bool is_global, + ValidationState validate) { // Note that if validation is required then no path through this // function is allowed to return a value without validating that // the input is legal json. @@ -361,20 +372,20 @@ Handle<JSFunction> Compiler::CompileEval(Handle<String> source, // invoke the compiler and add the result to the cache. If we're // evaluating json we bypass the cache since we can't be sure a // potential value in the cache has been validated. - Handle<JSFunction> result; + Handle<SharedFunctionInfo> result; if (validate == DONT_VALIDATE_JSON) result = CompilationCache::LookupEval(source, context, is_global); if (result.is_null()) { // Create a script object describing the script to be compiled. Handle<Script> script = Factory::NewScript(source); - result = MakeFunction(is_global, - true, - validate, - script, - context, - NULL, - NULL); + result = MakeFunctionInfo(is_global, + true, + validate, + script, + context, + NULL, + NULL); if (!result.is_null() && validate != VALIDATE_JSON) { // For json it's unlikely that we'll ever see exactly the same // string again so we don't use the compilation cache. 
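Across the compiler changes above, the old JSFunction "boilerplates" are replaced by SharedFunctionInfo objects: one context-independent record per piece of compiled source, from which a live, context-bound function is created via Factory::NewFunctionFromSharedFunctionInfo. A sketch of that ownership split under assumed names (SharedInfo, Ctx, NewFunctionFromSharedInfo are illustrative only):

#include <cstdint>
#include <memory>
#include <string>
#include <vector>

// One per piece of compiled source: context-independent and cacheable.
struct SharedInfo {
  std::string inferred_name;
  std::vector<uint8_t> code;      // generated code (stand-in)
  int formal_parameter_count = 0;
};

struct Ctx {};                    // stand-in for a global context

// One per live closure: pairs the shared, reusable part with a context.
struct Function {
  std::shared_ptr<const SharedInfo> shared;
  Ctx* context;
};

// Mirrors the role of Factory::NewFunctionFromSharedFunctionInfo:
// cheap to call once per context, since compilation happened already.
Function NewFunctionFromSharedInfo(std::shared_ptr<const SharedInfo> shared, Ctx* ctx) {
  return Function{std::move(shared), ctx};
}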
@@ -459,9 +470,9 @@ bool Compiler::CompileLazy(CompilationInfo* info) { } -Handle<JSFunction> Compiler::BuildBoilerplate(FunctionLiteral* literal, - Handle<Script> script, - AstVisitor* caller) { +Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(FunctionLiteral* literal, + Handle<Script> script, + AstVisitor* caller) { #ifdef DEBUG // We should not try to compile the same function literal more than // once. @@ -484,7 +495,7 @@ Handle<JSFunction> Compiler::BuildBoilerplate(FunctionLiteral* literal, // The bodies of function literals have not yet been visited by // the AST optimizer/analyzer. if (!Rewriter::Optimize(literal)) { - return Handle<JSFunction>::null(); + return Handle<SharedFunctionInfo>::null(); } if (literal->scope()->num_parameters() > 0 || @@ -492,28 +503,38 @@ Handle<JSFunction> Compiler::BuildBoilerplate(FunctionLiteral* literal, AssignedVariablesAnalyzer ava(literal); ava.Analyze(); if (ava.HasStackOverflow()) { - return Handle<JSFunction>::null(); + return Handle<SharedFunctionInfo>::null(); } } if (FLAG_use_flow_graph) { - FlowGraphBuilder builder; - builder.Build(literal); - - if (!builder.HasStackOverflow()) { int variable_count = literal->num_parameters() + literal->scope()->num_stack_slots(); - if (variable_count > 0 && builder.definitions()->length() > 0) { - ReachingDefinitions rd(builder.postorder(), - builder.definitions(), - variable_count); - rd.Compute(); + FlowGraphBuilder builder(variable_count); + builder.Build(literal); + + if (!builder.HasStackOverflow()) { + if (variable_count > 0) { + ReachingDefinitions rd(builder.postorder(), + builder.body_definitions(), + variable_count); + rd.Compute(); + + TypeAnalyzer ta(builder.postorder(), + builder.body_definitions(), + variable_count, + literal->num_parameters()); + ta.Compute(); + + MarkLiveCode(builder.preorder(), + builder.body_definitions(), + variable_count); + } } - } #ifdef DEBUG if (FLAG_print_graph_text && !builder.HasStackOverflow()) { - builder.graph()->PrintText(builder.postorder()); + builder.graph()->PrintText(literal, builder.postorder()); } #endif } @@ -553,7 +574,7 @@ Handle<JSFunction> Compiler::BuildBoilerplate(FunctionLiteral* literal, // Check for stack-overflow exception. if (code.is_null()) { caller->SetStackOverflow(); - return Handle<JSFunction>::null(); + return Handle<SharedFunctionInfo>::null(); } // Function compilation complete. @@ -569,22 +590,17 @@ Handle<JSFunction> Compiler::BuildBoilerplate(FunctionLiteral* literal, } // Create a boilerplate function. - Handle<JSFunction> function = - Factory::NewFunctionBoilerplate(literal->name(), - literal->materialized_literal_count(), - code); - SetFunctionInfo(function, literal, false, script); - -#ifdef ENABLE_DEBUGGER_SUPPORT - // Notify debugger that a new function has been added. - Debugger::OnNewFunction(function); -#endif + Handle<SharedFunctionInfo> result = + Factory::NewSharedFunctionInfo(literal->name(), + literal->materialized_literal_count(), + code); + SetFunctionInfo(result, literal, false, script); // Set the expected number of properties for instances and return // the resulting function. - SetExpectedNofPropertiesFromEstimate(function, + SetExpectedNofPropertiesFromEstimate(result, literal->expected_property_count()); - return function; + return result; } @@ -592,23 +608,23 @@ Handle<JSFunction> Compiler::BuildBoilerplate(FunctionLiteral* literal, // The start_position points to the first '(' character after the function name // in the full script source. 
When counting characters in the script source the // the first character is number 0 (not 1). -void Compiler::SetFunctionInfo(Handle<JSFunction> fun, +void Compiler::SetFunctionInfo(Handle<SharedFunctionInfo> function_info, FunctionLiteral* lit, bool is_toplevel, Handle<Script> script) { - fun->shared()->set_length(lit->num_parameters()); - fun->shared()->set_formal_parameter_count(lit->num_parameters()); - fun->shared()->set_script(*script); - fun->shared()->set_function_token_position(lit->function_token_position()); - fun->shared()->set_start_position(lit->start_position()); - fun->shared()->set_end_position(lit->end_position()); - fun->shared()->set_is_expression(lit->is_expression()); - fun->shared()->set_is_toplevel(is_toplevel); - fun->shared()->set_inferred_name(*lit->inferred_name()); - fun->shared()->SetThisPropertyAssignmentsInfo( + function_info->set_length(lit->num_parameters()); + function_info->set_formal_parameter_count(lit->num_parameters()); + function_info->set_script(*script); + function_info->set_function_token_position(lit->function_token_position()); + function_info->set_start_position(lit->start_position()); + function_info->set_end_position(lit->end_position()); + function_info->set_is_expression(lit->is_expression()); + function_info->set_is_toplevel(is_toplevel); + function_info->set_inferred_name(*lit->inferred_name()); + function_info->SetThisPropertyAssignmentsInfo( lit->has_only_simple_this_property_assignments(), *lit->this_property_assignments()); - fun->shared()->set_try_full_codegen(lit->try_full_codegen()); + function_info->set_try_full_codegen(lit->try_full_codegen()); } diff --git a/deps/v8/src/compiler.h b/deps/v8/src/compiler.h index 9492420507..e08e26ebd4 100644 --- a/deps/v8/src/compiler.h +++ b/deps/v8/src/compiler.h @@ -219,9 +219,9 @@ class CompilationInfo BASE_EMBEDDED { // functions, they will be compiled and allocated as part of the compilation // of the source code. -// Please note this interface returns function boilerplates. -// This means you need to call Factory::NewFunctionFromBoilerplate -// before you have a real function with context. +// Please note this interface returns shared function infos. +// This means you need to call Factory::NewFunctionFromSharedFunctionInfo +// before you have a real function with a context. class Compiler : public AllStatic { public: @@ -232,34 +232,35 @@ class Compiler : public AllStatic { // the return handle contains NULL. // Compile a String source within a context. - static Handle<JSFunction> Compile(Handle<String> source, - Handle<Object> script_name, - int line_offset, int column_offset, - v8::Extension* extension, - ScriptDataImpl* pre_data, - Handle<Object> script_data, - NativesFlag is_natives_code); + static Handle<SharedFunctionInfo> Compile(Handle<String> source, + Handle<Object> script_name, + int line_offset, + int column_offset, + v8::Extension* extension, + ScriptDataImpl* pre_data, + Handle<Object> script_data, + NativesFlag is_natives_code); // Compile a String source within a context for Eval. - static Handle<JSFunction> CompileEval(Handle<String> source, - Handle<Context> context, - bool is_global, - ValidationState validation); + static Handle<SharedFunctionInfo> CompileEval(Handle<String> source, + Handle<Context> context, + bool is_global, + ValidationState validation); // Compile from function info (used for lazy compilation). Returns // true on success and false if the compilation resulted in a stack // overflow. 
static bool CompileLazy(CompilationInfo* info); - // Compile a function boilerplate object (the function is possibly + // Compile a shared function info object (the function is possibly // lazily compiled). Called recursively from a backend code - // generator 'caller' to build the boilerplate. - static Handle<JSFunction> BuildBoilerplate(FunctionLiteral* node, - Handle<Script> script, - AstVisitor* caller); + // generator 'caller' to build the shared function info. + static Handle<SharedFunctionInfo> BuildFunctionInfo(FunctionLiteral* node, + Handle<Script> script, + AstVisitor* caller); // Set the function info for a newly compiled function. - static void SetFunctionInfo(Handle<JSFunction> fun, + static void SetFunctionInfo(Handle<SharedFunctionInfo> function_info, FunctionLiteral* lit, bool is_toplevel, Handle<Script> script); diff --git a/deps/v8/src/contexts.h b/deps/v8/src/contexts.h index 499774172a..44c90b6428 100644 --- a/deps/v8/src/contexts.h +++ b/deps/v8/src/contexts.h @@ -86,7 +86,6 @@ enum ContextLookupFlags { V(CALL_AS_FUNCTION_DELEGATE_INDEX, JSFunction, call_as_function_delegate) \ V(CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, JSFunction, \ call_as_constructor_delegate) \ - V(EMPTY_SCRIPT_INDEX, Script, empty_script) \ V(SCRIPT_FUNCTION_INDEX, JSFunction, script_function) \ V(OPAQUE_REFERENCE_FUNCTION_INDEX, JSFunction, opaque_reference_function) \ V(CONTEXT_EXTENSION_FUNCTION_INDEX, JSFunction, context_extension_function) \ @@ -207,7 +206,6 @@ class Context: public FixedArray { RUNTIME_CONTEXT_INDEX, CALL_AS_FUNCTION_DELEGATE_INDEX, CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, - EMPTY_SCRIPT_INDEX, SCRIPT_FUNCTION_INDEX, OPAQUE_REFERENCE_FUNCTION_INDEX, CONTEXT_EXTENSION_FUNCTION_INDEX, diff --git a/deps/v8/src/conversions-inl.h b/deps/v8/src/conversions-inl.h index 3037085141..bf02947585 100644 --- a/deps/v8/src/conversions-inl.h +++ b/deps/v8/src/conversions-inl.h @@ -41,24 +41,6 @@ namespace v8 { namespace internal { -// The fast double-to-int conversion routine does not guarantee -// rounding towards zero. -static inline int FastD2I(double x) { -#ifdef __USE_ISOC99 - // The ISO C99 standard defines the lrint() function which rounds a - // double to an integer according to the current rounding direction. - return lrint(x); -#else - // This is incredibly slow on Intel x86. The reason is that rounding - // towards zero is implied by the C standard. This means that the - // status register of the FPU has to be changed with the 'fldcw' - // instruction. This completely stalls the pipeline and takes many - // hundreds of clock cycles. - return static_cast<int>(x); -#endif -} - - // The fast double-to-unsigned-int conversion routine does not guarantee // rounding towards zero, or any reasonable value if the argument is larger // than what fits in an unsigned 32-bit integer. diff --git a/deps/v8/src/conversions.h b/deps/v8/src/conversions.h index bdc7e44a16..4aaf0c01ba 100644 --- a/deps/v8/src/conversions.h +++ b/deps/v8/src/conversions.h @@ -36,7 +36,12 @@ namespace internal { // rounding towards zero. // The result is unspecified if x is infinite or NaN, or if the rounded // integer value is outside the range of type int. -static inline int FastD2I(double x); +static inline int FastD2I(double x) { + // The static_cast convertion from double to int used to be slow, but + // as new benchmarks show, now it is much faster than lrint(). 
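The conversions change above moves FastD2I into the header and switches it to a plain static_cast, on the grounds that a truncating cast is now faster than lrint(); the result remains unspecified for NaN, infinities and out-of-range values. A small standalone comparison of the two roundings (both calls are standard C++, nothing V8-specific):

#include <cmath>
#include <cstdio>

int main() {
  double x = 2.7;
  int truncated = static_cast<int>(x);  // truncation toward zero, as FastD2I now does: 2
  long nearest = std::lrint(x);         // honours the rounding mode (round-to-nearest): 3
  std::printf("trunc=%d lrint=%ld\n", truncated, nearest);
  // For negative values the difference shows up as well: -2.7 -> -2 vs -3.
  std::printf("trunc=%d lrint=%ld\n", static_cast<int>(-2.7), std::lrint(-2.7));
}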
+ return static_cast<int>(x); +} + static inline unsigned int FastD2UI(double x); diff --git a/deps/v8/src/cpu-profiler.cc b/deps/v8/src/cpu-profiler.cc index d36f511209..d16c17f4c0 100644 --- a/deps/v8/src/cpu-profiler.cc +++ b/deps/v8/src/cpu-profiler.cc @@ -176,7 +176,6 @@ bool ProfilerEventsProcessor::ProcessTicks(unsigned dequeue_order) { void ProfilerEventsProcessor::Run() { - ticks_buffer_.SetUpConsumer(); unsigned dequeue_order = 0; running_ = true; @@ -194,7 +193,6 @@ void ProfilerEventsProcessor::Run() { ticks_buffer_.FlushResidualRecords(); // Perform processing until we have tick events, skip remaining code events. while (ProcessTicks(dequeue_order) && ProcessCodeEvent(&dequeue_order)) { } - ticks_buffer_.TearDownConsumer(); } diff --git a/deps/v8/src/cpu-profiler.h b/deps/v8/src/cpu-profiler.h index ccfac5c5c7..8a7d2fdd31 100644 --- a/deps/v8/src/cpu-profiler.h +++ b/deps/v8/src/cpu-profiler.h @@ -154,14 +154,11 @@ class ProfilerEventsProcessor : public Thread { void FunctionMoveEvent(Address from, Address to); void FunctionDeleteEvent(Address from); - // Tick sampler registration. Called by sampler thread or signal handler. - inline void SetUpSamplesProducer() { ticks_buffer_.SetUpProducer(); } // Tick sample events are filled directly in the buffer of the circular // queue (because the structure is of fixed width, but usually not all // stack frame entries are filled.) This method returns a pointer to the // next record of the buffer. INLINE(TickSample* TickSampleEvent()); - inline void TearDownSamplesProducer() { ticks_buffer_.TearDownProducer(); } private: union CodeEventsContainer { diff --git a/deps/v8/src/d8.cc b/deps/v8/src/d8.cc index dedbd55b4f..73cce46b34 100644 --- a/deps/v8/src/d8.cc +++ b/deps/v8/src/d8.cc @@ -467,9 +467,12 @@ void Shell::Initialize() { // Mark the d8 shell script as native to avoid it showing up as normal source // in the debugger. - i::Handle<i::JSFunction> script_fun = Utils::OpenHandle(*script); - i::Handle<i::Script> script_object = - i::Handle<i::Script>(i::Script::cast(script_fun->shared()->script())); + i::Handle<i::Object> compiled_script = Utils::OpenHandle(*script); + i::Handle<i::Script> script_object = compiled_script->IsJSFunction() + ? i::Handle<i::Script>(i::Script::cast( + i::JSFunction::cast(*compiled_script)->shared()->script())) + : i::Handle<i::Script>(i::Script::cast( + i::SharedFunctionInfo::cast(*compiled_script)->script())); script_object->set_type(i::Smi::FromInt(i::Script::TYPE_NATIVE)); // Create the evaluation context diff --git a/deps/v8/src/data-flow.cc b/deps/v8/src/data-flow.cc index 141718dc86..fe4b3db00e 100644 --- a/deps/v8/src/data-flow.cc +++ b/deps/v8/src/data-flow.cc @@ -195,6 +195,81 @@ void FlowGraphBuilder::Build(FunctionLiteral* lit) { } +// This function peels off one iteration of a for-loop. The return value +// is either a block statement containing the peeled loop or NULL in case +// there is a stack overflow. +static Statement* PeelForLoop(ForStatement* stmt) { + // Mark this for-statement as processed. + stmt->set_peel_this_loop(false); + + // Create new block containing the init statement of the for-loop and + // an if-statement containing the peeled iteration and the original + // loop without the init-statement. + Block* block = new Block(NULL, 2, false); + if (stmt->init() != NULL) { + Statement* init = stmt->init(); + // The init statement gets the statement position of the for-loop + // to make debugging of peeled loops possible. 
+ init->set_statement_pos(stmt->statement_pos()); + block->AddStatement(init); + } + + // Copy the condition. + CopyAstVisitor copy_visitor; + Expression* cond_copy = stmt->cond() != NULL + ? copy_visitor.DeepCopyExpr(stmt->cond()) + : new Literal(Factory::true_value()); + if (copy_visitor.HasStackOverflow()) return NULL; + + // Construct a block with the peeled body and the rest of the for-loop. + Statement* body_copy = copy_visitor.DeepCopyStmt(stmt->body()); + if (copy_visitor.HasStackOverflow()) return NULL; + + Statement* next_copy = stmt->next() != NULL + ? copy_visitor.DeepCopyStmt(stmt->next()) + : new EmptyStatement(); + if (copy_visitor.HasStackOverflow()) return NULL; + + Block* peeled_body = new Block(NULL, 3, false); + peeled_body->AddStatement(body_copy); + peeled_body->AddStatement(next_copy); + peeled_body->AddStatement(stmt); + + // Remove the duplicated init statement from the for-statement. + stmt->set_init(NULL); + + // Create new test at the top and add it to the newly created block. + IfStatement* test = new IfStatement(cond_copy, + peeled_body, + new EmptyStatement()); + block->AddStatement(test); + return block; +} + + +void FlowGraphBuilder::VisitStatements(ZoneList<Statement*>* stmts) { + for (int i = 0, len = stmts->length(); i < len; i++) { + stmts->at(i) = ProcessStatement(stmts->at(i)); + } +} + + +Statement* FlowGraphBuilder::ProcessStatement(Statement* stmt) { + if (FLAG_loop_peeling && + stmt->AsForStatement() != NULL && + stmt->AsForStatement()->peel_this_loop()) { + Statement* tmp_stmt = PeelForLoop(stmt->AsForStatement()); + if (tmp_stmt == NULL) { + SetStackOverflow(); + } else { + stmt = tmp_stmt; + } + } + Visit(stmt); + return stmt; +} + + void FlowGraphBuilder::VisitDeclaration(Declaration* decl) { UNREACHABLE(); } @@ -221,11 +296,11 @@ void FlowGraphBuilder::VisitIfStatement(IfStatement* stmt) { BranchNode* branch = new BranchNode(); FlowGraph original = graph_; graph_ = FlowGraph::Empty(); - Visit(stmt->then_statement()); + stmt->set_then_statement(ProcessStatement(stmt->then_statement())); FlowGraph left = graph_; graph_ = FlowGraph::Empty(); - Visit(stmt->else_statement()); + stmt->set_else_statement(ProcessStatement(stmt->else_statement())); if (HasStackOverflow()) return; JoinNode* join = new JoinNode(); @@ -275,7 +350,7 @@ void FlowGraphBuilder::VisitWhileStatement(WhileStatement* stmt) { void FlowGraphBuilder::VisitForStatement(ForStatement* stmt) { - if (stmt->init() != NULL) Visit(stmt->init()); + if (stmt->init() != NULL) stmt->set_init(ProcessStatement(stmt->init())); JoinNode* join = new JoinNode(); FlowGraph original = graph_; @@ -285,9 +360,9 @@ void FlowGraphBuilder::VisitForStatement(ForStatement* stmt) { BranchNode* branch = new BranchNode(); FlowGraph condition = graph_; graph_ = FlowGraph::Empty(); - Visit(stmt->body()); + stmt->set_body(ProcessStatement(stmt->body())); - if (stmt->next() != NULL) Visit(stmt->next()); + if (stmt->next() != NULL) stmt->set_next(ProcessStatement(stmt->next())); if (HasStackOverflow()) return; original.Loop(join, &condition, branch, &graph_); @@ -320,8 +395,8 @@ void FlowGraphBuilder::VisitFunctionLiteral(FunctionLiteral* expr) { } -void FlowGraphBuilder::VisitFunctionBoilerplateLiteral( - FunctionBoilerplateLiteral* expr) { +void FlowGraphBuilder::VisitSharedFunctionInfoLiteral( + SharedFunctionInfoLiteral* expr) { SetStackOverflow(); } @@ -376,8 +451,10 @@ void FlowGraphBuilder::VisitAssignment(Assignment* expr) { if (expr->is_compound()) Visit(expr->target()); Visit(expr->value()); if 
(var->IsStackAllocated()) { - expr->set_num(definitions_.length()); - definitions_.Add(expr); + // The first definition in the body is numbered n, where n is the + // number of parameters and stack-allocated locals. + expr->set_num(body_definitions_.length() + variable_count_); + body_definitions_.Add(expr); } } else if (prop != NULL) { @@ -454,8 +531,10 @@ void FlowGraphBuilder::VisitCountOperation(CountOperation* expr) { Visit(expr->expression()); Variable* var = expr->expression()->AsVariableProxy()->AsVariable(); if (var != NULL && var->IsStackAllocated()) { - expr->set_num(definitions_.length()); - definitions_.Add(expr); + // The first definition in the body is numbered n, where n is the number + // of parameters and stack-allocated locals. + expr->set_num(body_definitions_.length() + variable_count_); + body_definitions_.Add(expr); } if (HasStackOverflow()) return; @@ -638,8 +717,8 @@ void AstLabeler::VisitFunctionLiteral(FunctionLiteral* expr) { } -void AstLabeler::VisitFunctionBoilerplateLiteral( - FunctionBoilerplateLiteral* expr) { +void AstLabeler::VisitSharedFunctionInfoLiteral( + SharedFunctionInfoLiteral* expr) { UNREACHABLE(); } @@ -1015,8 +1094,8 @@ void AssignedVariablesAnalyzer::VisitFunctionLiteral(FunctionLiteral* expr) { } -void AssignedVariablesAnalyzer::VisitFunctionBoilerplateLiteral( - FunctionBoilerplateLiteral* expr) { +void AssignedVariablesAnalyzer::VisitSharedFunctionInfoLiteral( + SharedFunctionInfoLiteral* expr) { // Nothing to do. ASSERT(av_.IsEmpty()); } @@ -1342,9 +1421,9 @@ void TextInstructionPrinter::VisitFunctionLiteral(FunctionLiteral* expr) { } -void TextInstructionPrinter::VisitFunctionBoilerplateLiteral( - FunctionBoilerplateLiteral* expr) { - PrintF("FunctionBoilerplateLiteral"); +void TextInstructionPrinter::VisitSharedFunctionInfoLiteral( + SharedFunctionInfoLiteral* expr) { + PrintF("SharedFunctionInfoLiteral"); } @@ -1584,9 +1663,16 @@ void BlockNode::PrintText() { PrintF("L%d: Block\n", number()); TextInstructionPrinter printer; for (int i = 0, len = instructions_.length(); i < len; i++) { + AstNode* instr = instructions_[i]; + // Print a star next to dead instructions. + if (instr->AsExpression() != NULL && instr->AsExpression()->is_live()) { + PrintF(" "); + } else { + PrintF("* "); + } PrintF("%d ", printer.NextNumber()); - printer.Visit(instructions_[i]); - printer.AssignNumber(instructions_[i]); + printer.Visit(instr); + printer.AssignNumber(instr); PrintF("\n"); } PrintF("goto L%d\n\n", successor_->number()); @@ -1611,8 +1697,9 @@ void JoinNode::PrintText() { } -void FlowGraph::PrintText(ZoneList<Node*>* postorder) { +void FlowGraph::PrintText(FunctionLiteral* fun, ZoneList<Node*>* postorder) { PrintF("\n========\n"); + PrintF("name = %s\n", *fun->name()->ToCString()); // Number nodes and instructions in reverse postorder. node_count = 0; @@ -1664,11 +1751,16 @@ void BlockNode::InitializeReachingDefinitions(int definition_count, int variable_count = variables->length(); rd_.Initialize(definition_count); + // The RD_in set for the entry node has a definition for each parameter + // and local. + if (predecessor_ == NULL) { + for (int i = 0; i < variable_count; i++) rd_.rd_in()->Add(i); + } for (int i = 0; i < instruction_count; i++) { Expression* expr = instructions_[i]->AsExpression(); if (expr == NULL) continue; - Variable* var = expr->AssignedVar(); + Variable* var = expr->AssignedVariable(); if (var == NULL || !var->IsStackAllocated()) continue; // All definitions of this variable are killed. 
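A hand-worked example of the numbering described above (the slot order of parameters and locals is whatever IndexFor assigns; the function itself is illustrative):

    function f(a, b) {   // variable_count_ is 3: parameters a, b and local x
      var x = 1;         // first body definition  -> numbered 3 (0 + variable_count_)
      x = a + b;         // second body definition -> numbered 4
      x++;               // third body definition  -> numbered 5
      return x;          // a use of x; its reaching definitions are drawn
                         // from the numbers above
    }
    // The implicit definitions of a, b and x at function entry take the
    // numbers 0, 1 and 2, which is why RD_in of the entry block is {0, 1, 2}
    // and the first explicit definition in the body starts at 3.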
@@ -1845,7 +1937,7 @@ void BlockNode::PropagateReachingDefinitions(List<BitVector*>* variables) { // It may instead (or also) be a definition. If so update the running // value of reaching definitions for the block. - Variable* var = expr->AssignedVar(); + Variable* var = expr->AssignedVariable(); if (var == NULL || !var->IsStackAllocated()) continue; // All definitions of this variable are killed. @@ -1859,40 +1951,25 @@ void BlockNode::PropagateReachingDefinitions(List<BitVector*>* variables) { void ReachingDefinitions::Compute() { - ASSERT(!definitions_->is_empty()); - - int variable_count = variables_.length(); - int definition_count = definitions_->length(); + // The definitions in the body plus an implicit definition for each + // variable at function entry. + int definition_count = body_definitions_->length() + variable_count_; int node_count = postorder_->length(); - // Step 1: For each variable, identify the set of all its definitions in - // the body. - for (int i = 0; i < definition_count; i++) { - Variable* var = definitions_->at(i)->AssignedVar(); - variables_[IndexFor(var, variable_count)]->Add(i); - } - - if (FLAG_print_graph_text) { - for (int i = 0; i < variable_count; i++) { - BitVector* def_set = variables_[i]; - if (!def_set->IsEmpty()) { - // At least one definition. - bool first = true; - for (int j = 0; j < definition_count; j++) { - if (def_set->Contains(j)) { - if (first) { - Variable* var = definitions_->at(j)->AssignedVar(); - ASSERT(var != NULL); - PrintF("Def[%s] = {%d", *var->name()->ToCString(), j); - first = false; - } else { - PrintF(",%d", j); - } - } - } - PrintF("}\n"); - } - } + // Step 1: For each stack-allocated variable, identify the set of all its + // definitions. + List<BitVector*> variables; + for (int i = 0; i < variable_count_; i++) { + // Add the initial definition for each variable. + BitVector* initial = new BitVector(definition_count); + initial->Add(i); + variables.Add(initial); + } + for (int i = 0, len = body_definitions_->length(); i < len; i++) { + // Account for each definition in the body as a definition of the + // defined variable. + Variable* var = body_definitions_->at(i)->AssignedVariable(); + variables[IndexFor(var, variable_count_)]->Add(i + variable_count_); } // Step 2: Compute KILL and GEN for each block node, initialize RD_in for @@ -1902,7 +1979,7 @@ void ReachingDefinitions::Compute() { WorkList<Node> worklist(node_count); for (int i = node_count - 1; i >= 0; i--) { postorder_->at(i)->InitializeReachingDefinitions(definition_count, - &variables_, + &variables, &worklist, mark); } @@ -1919,7 +1996,105 @@ void ReachingDefinitions::Compute() { // Step 4: Based on RD_in for block nodes, propagate reaching definitions // to all variable uses in the block. 
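For the same small function, Step 1 above ends up with one bit vector per stack-allocated variable; written out by hand (definition numbers as in the earlier sketch):

    // Def[a] = { 0 }            only the implicit entry definition of a
    // Def[b] = { 1 }
    // Def[x] = { 2, 3, 4, 5 }   entry definition of x plus the three body definitions
    //
    // Within a block, each new definition of x kills the other members of
    // Def[x]; that is what the KILL/GEN sets computed in Step 2 record,
    // and Steps 3 and 4 then push the surviving definitions to every use.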
for (int i = 0; i < node_count; i++) { - postorder_->at(i)->PropagateReachingDefinitions(&variables_); + postorder_->at(i)->PropagateReachingDefinitions(&variables); + } +} + + +bool TypeAnalyzer::IsPrimitiveDef(int def_num) { + if (def_num < param_count_) return false; + if (def_num < variable_count_) return true; + return body_definitions_->at(def_num - variable_count_)->IsPrimitive(); +} + + +void TypeAnalyzer::Compute() { + bool changed; + int count = 0; + + do { + changed = false; + + if (FLAG_print_graph_text) { + PrintF("TypeAnalyzer::Compute - iteration %d\n", count++); + } + + for (int i = postorder_->length() - 1; i >= 0; --i) { + Node* node = postorder_->at(i); + if (node->IsBlockNode()) { + BlockNode* block = BlockNode::cast(node); + for (int j = 0; j < block->instructions()->length(); j++) { + Expression* expr = block->instructions()->at(j)->AsExpression(); + if (expr != NULL) { + // For variable uses: Compute new type from reaching definitions. + VariableProxy* proxy = expr->AsVariableProxy(); + if (proxy != NULL && proxy->reaching_definitions() != NULL) { + BitVector* rd = proxy->reaching_definitions(); + bool prim_type = true; + // TODO(fsc): A sparse set representation of reaching + // definitions would speed up iterating here. + for (int k = 0; k < rd->length(); k++) { + if (rd->Contains(k) && !IsPrimitiveDef(k)) { + prim_type = false; + break; + } + } + // Reset changed flag if new type information was computed. + if (prim_type != proxy->IsPrimitive()) { + changed = true; + proxy->SetIsPrimitive(prim_type); + } + } + } + } + } + } + } while (changed); +} + + +void Node::MarkCriticalInstructions( + List<AstNode*>* stack, + ZoneList<Expression*>* body_definitions, + int variable_count) { +} + + +void BlockNode::MarkCriticalInstructions( + List<AstNode*>* stack, + ZoneList<Expression*>* body_definitions, + int variable_count) { + for (int i = instructions_.length() - 1; i >= 0; i--) { + // Only expressions can appear in the flow graph for now. + Expression* expr = instructions_[i]->AsExpression(); + if (expr != NULL && !expr->is_live() && + (expr->is_loop_condition() || expr->IsCritical())) { + expr->mark_as_live(); + expr->ProcessNonLiveChildren(stack, body_definitions, variable_count); + } + } +} + + +void MarkLiveCode(ZoneList<Node*>* nodes, + ZoneList<Expression*>* body_definitions, + int variable_count) { + List<AstNode*> stack(20); + + // Mark the critical AST nodes as live; mark their dependencies and + // add them to the marking stack. + for (int i = nodes->length() - 1; i >= 0; i--) { + nodes->at(i)->MarkCriticalInstructions(&stack, body_definitions, + variable_count); + } + + // Continue marking dependencies until no more. + while (!stack.is_empty()) { + // Only expressions can appear in the flow graph for now. + Expression* expr = stack.RemoveLast()->AsExpression(); + if (expr != NULL) { + expr->ProcessNonLiveChildren(&stack, body_definitions, variable_count); + } } } diff --git a/deps/v8/src/data-flow.h b/deps/v8/src/data-flow.h index 74a370c0da..8046e4228c 100644 --- a/deps/v8/src/data-flow.h +++ b/deps/v8/src/data-flow.h @@ -241,6 +241,12 @@ class Node: public ZoneObject { virtual void UpdateRDIn(WorkList<Node>* worklist, bool mark) = 0; virtual void PropagateReachingDefinitions(List<BitVector*>* variables); + // Functions used by dead-code elimination. 
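MarkCriticalInstructions and MarkLiveCode above implement a simple mark phase: critical expressions and loop conditions are marked live, and ProcessNonLiveChildren then pulls in everything they depend on. Which expressions count as critical is decided by Expression::IsCritical, which is not part of this hunk; the sketch below assumes a returned value qualifies:

    function k(a) {
      var unused = a + 1;  // nothing live ever depends on this assignment, so it
                           // is never marked and shows up with a leading '*' in
                           // the BlockNode::PrintText output
      var x = a * 2;       // the returned value is critical, so marking it live
                           // also marks this assignment and the use of 'a'
      return x;
    }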
+ virtual void MarkCriticalInstructions( + List<AstNode*>* stack, + ZoneList<Expression*>* body_definitions, + int variable_count); + #ifdef DEBUG void AssignNodeNumber(); void PrintReachingDefinitions(); @@ -263,24 +269,24 @@ class ExitNode: public Node { public: ExitNode() : predecessors_(4) {} - bool IsExitNode() { return true; } + virtual bool IsExitNode() { return true; } - void AddPredecessor(Node* predecessor) { + virtual void AddPredecessor(Node* predecessor) { ASSERT(predecessor != NULL); predecessors_.Add(predecessor); } - void AddSuccessor(Node* successor) { UNREACHABLE(); } + virtual void AddSuccessor(Node* successor) { UNREACHABLE(); } - void Traverse(bool mark, - ZoneList<Node*>* preorder, - ZoneList<Node*>* postorder); + virtual void Traverse(bool mark, + ZoneList<Node*>* preorder, + ZoneList<Node*>* postorder); - void ComputeRDOut(BitVector* result); - void UpdateRDIn(WorkList<Node>* worklist, bool mark); + virtual void ComputeRDOut(BitVector* result); + virtual void UpdateRDIn(WorkList<Node>* worklist, bool mark); #ifdef DEBUG - void PrintText(); + virtual void PrintText(); #endif private: @@ -301,16 +307,18 @@ class BlockNode: public Node { return reinterpret_cast<BlockNode*>(node); } - bool IsBlockNode() { return true; } + virtual bool IsBlockNode() { return true; } bool is_empty() { return instructions_.is_empty(); } - void AddPredecessor(Node* predecessor) { + ZoneList<AstNode*>* instructions() { return &instructions_; } + + virtual void AddPredecessor(Node* predecessor) { ASSERT(predecessor_ == NULL && predecessor != NULL); predecessor_ = predecessor; } - void AddSuccessor(Node* successor) { + virtual void AddSuccessor(Node* successor) { ASSERT(successor_ == NULL && successor != NULL); successor_ = successor; } @@ -319,20 +327,25 @@ class BlockNode: public Node { instructions_.Add(instruction); } - void Traverse(bool mark, - ZoneList<Node*>* preorder, - ZoneList<Node*>* postorder); + virtual void Traverse(bool mark, + ZoneList<Node*>* preorder, + ZoneList<Node*>* postorder); - void InitializeReachingDefinitions(int definition_count, - List<BitVector*>* variables, - WorkList<Node>* worklist, - bool mark); - void ComputeRDOut(BitVector* result); - void UpdateRDIn(WorkList<Node>* worklist, bool mark); - void PropagateReachingDefinitions(List<BitVector*>* variables); + virtual void InitializeReachingDefinitions(int definition_count, + List<BitVector*>* variables, + WorkList<Node>* worklist, + bool mark); + virtual void ComputeRDOut(BitVector* result); + virtual void UpdateRDIn(WorkList<Node>* worklist, bool mark); + virtual void PropagateReachingDefinitions(List<BitVector*>* variables); + + virtual void MarkCriticalInstructions( + List<AstNode*>* stack, + ZoneList<Expression*>* body_definitions, + int variable_count); #ifdef DEBUG - void PrintText(); + virtual void PrintText(); #endif private: @@ -349,14 +362,14 @@ class BranchNode: public Node { public: BranchNode() : predecessor_(NULL), successor0_(NULL), successor1_(NULL) {} - bool IsBranchNode() { return true; } + virtual bool IsBranchNode() { return true; } - void AddPredecessor(Node* predecessor) { + virtual void AddPredecessor(Node* predecessor) { ASSERT(predecessor_ == NULL && predecessor != NULL); predecessor_ = predecessor; } - void AddSuccessor(Node* successor) { + virtual void AddSuccessor(Node* successor) { ASSERT(successor1_ == NULL && successor != NULL); if (successor0_ == NULL) { successor0_ = successor; @@ -365,15 +378,15 @@ class BranchNode: public Node { } } - void Traverse(bool mark, - ZoneList<Node*>* 
preorder, - ZoneList<Node*>* postorder); + virtual void Traverse(bool mark, + ZoneList<Node*>* preorder, + ZoneList<Node*>* postorder); - void ComputeRDOut(BitVector* result); - void UpdateRDIn(WorkList<Node>* worklist, bool mark); + virtual void ComputeRDOut(BitVector* result); + virtual void UpdateRDIn(WorkList<Node>* worklist, bool mark); #ifdef DEBUG - void PrintText(); + virtual void PrintText(); #endif private: @@ -395,27 +408,27 @@ class JoinNode: public Node { return reinterpret_cast<JoinNode*>(node); } - bool IsJoinNode() { return true; } + virtual bool IsJoinNode() { return true; } - void AddPredecessor(Node* predecessor) { + virtual void AddPredecessor(Node* predecessor) { ASSERT(predecessor != NULL); predecessors_.Add(predecessor); } - void AddSuccessor(Node* successor) { + virtual void AddSuccessor(Node* successor) { ASSERT(successor_ == NULL && successor != NULL); successor_ = successor; } - void Traverse(bool mark, - ZoneList<Node*>* preorder, - ZoneList<Node*>* postorder); + virtual void Traverse(bool mark, + ZoneList<Node*>* preorder, + ZoneList<Node*>* postorder); - void ComputeRDOut(BitVector* result); - void UpdateRDIn(WorkList<Node>* worklist, bool mark); + virtual void ComputeRDOut(BitVector* result); + virtual void UpdateRDIn(WorkList<Node>* worklist, bool mark); #ifdef DEBUG - void PrintText(); + virtual void PrintText(); #endif private: @@ -470,7 +483,7 @@ class FlowGraph BASE_EMBEDDED { FlowGraph* body); #ifdef DEBUG - void PrintText(ZoneList<Node*>* postorder); + void PrintText(FunctionLiteral* fun, ZoneList<Node*>* postorder); #endif private: @@ -485,23 +498,29 @@ class FlowGraph BASE_EMBEDDED { // traversal orders as a byproduct. class FlowGraphBuilder: public AstVisitor { public: - FlowGraphBuilder() + explicit FlowGraphBuilder(int variable_count) : graph_(FlowGraph::Empty()), global_exit_(NULL), preorder_(4), postorder_(4), - definitions_(4) { + variable_count_(variable_count), + body_definitions_(4) { } void Build(FunctionLiteral* lit); FlowGraph* graph() { return &graph_; } + ZoneList<Node*>* preorder() { return &preorder_; } ZoneList<Node*>* postorder() { return &postorder_; } - ZoneList<Expression*>* definitions() { return &definitions_; } + ZoneList<Expression*>* body_definitions() { return &body_definitions_; } private: ExitNode* global_exit() { return global_exit_; } + // Helpers to allow tranforming the ast during flow graph construction. + void VisitStatements(ZoneList<Statement*>* stmts); + Statement* ProcessStatement(Statement* stmt); + // AST node visit functions. #define DECLARE_VISIT(type) virtual void Visit##type(type* node); AST_NODE_LIST(DECLARE_VISIT) @@ -512,11 +531,13 @@ class FlowGraphBuilder: public AstVisitor { ZoneList<Node*> preorder_; ZoneList<Node*> postorder_; - // The flow graph builder collects a list of definitions (assignments and - // count operations) to stack-allocated variables to use for reaching - // definitions analysis. AST node numbers in the AST are used to refer - // into this list. - ZoneList<Expression*> definitions_; + // The flow graph builder collects a list of explicit definitions + // (assignments and count operations) to stack-allocated variables to use + // for reaching definitions analysis. It does not count the implicit + // definition at function entry. AST node numbers in the AST are used to + // refer into this list. 
+ int variable_count_; + ZoneList<Expression*> body_definitions_; DISALLOW_COPY_AND_ASSIGN(FlowGraphBuilder); }; @@ -589,15 +610,11 @@ class AssignedVariablesAnalyzer : public AstVisitor { class ReachingDefinitions BASE_EMBEDDED { public: ReachingDefinitions(ZoneList<Node*>* postorder, - ZoneList<Expression*>* definitions, + ZoneList<Expression*>* body_definitions, int variable_count) : postorder_(postorder), - definitions_(definitions), - variables_(variable_count) { - int definition_count = definitions->length(); - for (int i = 0; i < variable_count; i++) { - variables_.Add(new BitVector(definition_count)); - } + body_definitions_(body_definitions), + variable_count_(variable_count) { } static int IndexFor(Variable* var, int variable_count); @@ -609,15 +626,46 @@ class ReachingDefinitions BASE_EMBEDDED { ZoneList<Node*>* postorder_; // A list of all the definitions in the body. - ZoneList<Expression*>* definitions_; + ZoneList<Expression*>* body_definitions_; - // For each variable, the set of all its definitions. - List<BitVector*> variables_; + int variable_count_; DISALLOW_COPY_AND_ASSIGN(ReachingDefinitions); }; +class TypeAnalyzer BASE_EMBEDDED { + public: + TypeAnalyzer(ZoneList<Node*>* postorder, + ZoneList<Expression*>* body_definitions, + int variable_count, + int param_count) + : postorder_(postorder), + body_definitions_(body_definitions), + variable_count_(variable_count), + param_count_(param_count) {} + + void Compute(); + + private: + // Get the primitity of definition number i. Definitions are numbered + // by the flow graph builder. + bool IsPrimitiveDef(int def_num); + + ZoneList<Node*>* postorder_; + ZoneList<Expression*>* body_definitions_; + int variable_count_; + int param_count_; + + DISALLOW_COPY_AND_ASSIGN(TypeAnalyzer); +}; + + +void MarkLiveCode(ZoneList<Node*>* nodes, + ZoneList<Expression*>* body_definitions, + int variable_count); + + } } // namespace v8::internal diff --git a/deps/v8/src/date.js b/deps/v8/src/date.js new file mode 100644 index 0000000000..c7c39406fa --- /dev/null +++ b/deps/v8/src/date.js @@ -0,0 +1,1090 @@ +// Copyright 2006-2008 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +// This file relies on the fact that the following declarations have been made +// in v8natives.js: +// const $isFinite = GlobalIsFinite; + +// ------------------------------------------------------------------- + +// This file contains date support implemented in JavaScript. + + +// Keep reference to original values of some global properties. This +// has the added benefit that the code in this file is isolated from +// changes to these properties. +const $Date = global.Date; + +// Helper function to throw error. +function ThrowDateTypeError() { + throw new $TypeError('this is not a Date object.'); +} + +// ECMA 262 - 5.2 +function Modulo(value, remainder) { + var mod = value % remainder; + // Guard against returning -0. + if (mod == 0) return 0; + return mod >= 0 ? mod : mod + remainder; +} + + +function TimeWithinDay(time) { + return Modulo(time, msPerDay); +} + + +// ECMA 262 - 15.9.1.3 +function DaysInYear(year) { + if (year % 4 != 0) return 365; + if ((year % 100 == 0) && (year % 400 != 0)) return 365; + return 366; +} + + +function DayFromYear(year) { + return 365 * (year-1970) + + FLOOR((year-1969)/4) + - FLOOR((year-1901)/100) + + FLOOR((year-1601)/400); +} + + +function TimeFromYear(year) { + return msPerDay * DayFromYear(year); +} + + +function InLeapYear(time) { + return DaysInYear(YEAR_FROM_TIME(time)) == 366 ? 1 : 0; +} + + +function DayWithinYear(time) { + return DAY(time) - DayFromYear(YEAR_FROM_TIME(time)); +} + + +// ECMA 262 - 15.9.1.9 +function EquivalentYear(year) { + // Returns an equivalent year in the range [2008-2035] matching + // - leap year. + // - week day of first day. + var time = TimeFromYear(year); + var recent_year = (InLeapYear(time) == 0 ? 1967 : 1956) + + (WeekDay(time) * 12) % 28; + // Find the year in the range 2008..2037 that is equivalent mod 28. + // Add 3*28 to give a positive argument to the modulus operator. + return 2008 + (recent_year + 3*28 - 2008) % 28; +} + + +function EquivalentTime(t) { + // The issue here is that some library calls don't work right for dates + // that cannot be represented using a non-negative signed 32 bit integer + // (measured in whole seconds based on the 1970 epoch). + // We solve this by mapping the time to a year with same leap-year-ness + // and same starting day for the year. The ECMAscript specification says + // we must do this, but for compatibility with other browsers, we use + // the actual year if it is in the range 1970..2037 + if (t >= 0 && t <= 2.1e12) return t; + + var day = MakeDay(EquivalentYear(YEAR_FROM_TIME(t)), + MONTH_FROM_TIME(t), + DATE_FROM_TIME(t)); + return MakeDate(day, TimeWithinDay(t)); +} + + +// Because computing the DST offset is a pretty expensive operation +// we keep a cache of last computed offset along with a time interval +// where we know the cache is valid. +var DST_offset_cache = { + // Cached DST offset. + offset: 0, + // Time interval where the cached offset is valid. + start: 0, end: -1, + // Size of next interval expansion. 
+ increment: 0 +}; + + +// NOTE: The implementation relies on the fact that no time zones have +// more than one daylight savings offset change per month. +// If this function is called with NaN it returns NaN. +function DaylightSavingsOffset(t) { + // Load the cache object from the builtins object. + var cache = DST_offset_cache; + + // Cache the start and the end in local variables for fast access. + var start = cache.start; + var end = cache.end; + + if (start <= t) { + // If the time fits in the cached interval, return the cached offset. + if (t <= end) return cache.offset; + + // Compute a possible new interval end. + var new_end = end + cache.increment; + + if (t <= new_end) { + var end_offset = %DateDaylightSavingsOffset(EquivalentTime(new_end)); + if (cache.offset == end_offset) { + // If the offset at the end of the new interval still matches + // the offset in the cache, we grow the cached time interval + // and return the offset. + cache.end = new_end; + cache.increment = msPerMonth; + return end_offset; + } else { + var offset = %DateDaylightSavingsOffset(EquivalentTime(t)); + if (offset == end_offset) { + // The offset at the given time is equal to the offset at the + // new end of the interval, so that means that we've just skipped + // the point in time where the DST offset change occurred. Updated + // the interval to reflect this and reset the increment. + cache.start = t; + cache.end = new_end; + cache.increment = msPerMonth; + } else { + // The interval contains a DST offset change and the given time is + // before it. Adjust the increment to avoid a linear search for + // the offset change point and change the end of the interval. + cache.increment /= 3; + cache.end = t; + } + // Update the offset in the cache and return it. + cache.offset = offset; + return offset; + } + } + } + + // Compute the DST offset for the time and shrink the cache interval + // to only contain the time. This allows fast repeated DST offset + // computations for the same time. + var offset = %DateDaylightSavingsOffset(EquivalentTime(t)); + cache.offset = offset; + cache.start = cache.end = t; + cache.increment = msPerMonth; + return offset; +} + + +var timezone_cache_time = $NaN; +var timezone_cache_timezone; + +function LocalTimezone(t) { + if (NUMBER_IS_NAN(t)) return ""; + if (t == timezone_cache_time) { + return timezone_cache_timezone; + } + var timezone = %DateLocalTimezone(EquivalentTime(t)); + timezone_cache_time = t; + timezone_cache_timezone = timezone; + return timezone; +} + + +function WeekDay(time) { + return Modulo(DAY(time) + 4, 7); +} + +var local_time_offset = %DateLocalTimeOffset(); + +function LocalTime(time) { + if (NUMBER_IS_NAN(time)) return time; + return time + local_time_offset + DaylightSavingsOffset(time); +} + +function LocalTimeNoCheck(time) { + if (time < -MAX_TIME_MS || time > MAX_TIME_MS) { + return $NaN; + } + + // Inline the DST offset cache checks for speed. 
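The cache pays off whenever many nearby time values are converted to local time, which is exactly what repeated Date formatting does. An illustrative (not benchmarked) usage pattern:

    // In a zone that observes DST, the first toString() probes the runtime
    // through %DateDaylightSavingsOffset and seeds DST_offset_cache; later
    // calls either hit the cached [start, end] interval directly or grow it
    // with a single extra probe.
    var base = Date.UTC(2010, 5, 1);   // an arbitrary moment, assumed inside a DST period
    var formatted = [];
    for (var i = 0; i < 1000; i++) {
      formatted.push(new Date(base + i * 1000).toString());
    }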
+ var cache = DST_offset_cache; + if (cache.start <= time && time <= cache.end) { + var dst_offset = cache.offset; + } else { + var dst_offset = DaylightSavingsOffset(time); + } + return time + local_time_offset + dst_offset; +} + + +function UTC(time) { + if (NUMBER_IS_NAN(time)) return time; + var tmp = time - local_time_offset; + return tmp - DaylightSavingsOffset(tmp); +} + + +// ECMA 262 - 15.9.1.11 +function MakeTime(hour, min, sec, ms) { + if (!$isFinite(hour)) return $NaN; + if (!$isFinite(min)) return $NaN; + if (!$isFinite(sec)) return $NaN; + if (!$isFinite(ms)) return $NaN; + return TO_INTEGER(hour) * msPerHour + + TO_INTEGER(min) * msPerMinute + + TO_INTEGER(sec) * msPerSecond + + TO_INTEGER(ms); +} + + +// ECMA 262 - 15.9.1.12 +function TimeInYear(year) { + return DaysInYear(year) * msPerDay; +} + + +var ymd_from_time_cache = [$NaN, $NaN, $NaN]; +var ymd_from_time_cached_time = $NaN; + +function YearFromTime(t) { + if (t !== ymd_from_time_cached_time) { + if (!$isFinite(t)) { + return $NaN; + } + + %DateYMDFromTime(t, ymd_from_time_cache); + ymd_from_time_cached_time = t + } + + return ymd_from_time_cache[0]; +} + +function MonthFromTime(t) { + if (t !== ymd_from_time_cached_time) { + if (!$isFinite(t)) { + return $NaN; + } + %DateYMDFromTime(t, ymd_from_time_cache); + ymd_from_time_cached_time = t + } + + return ymd_from_time_cache[1]; +} + +function DateFromTime(t) { + if (t !== ymd_from_time_cached_time) { + if (!$isFinite(t)) { + return $NaN; + } + + %DateYMDFromTime(t, ymd_from_time_cache); + ymd_from_time_cached_time = t + } + + return ymd_from_time_cache[2]; +} + + +// Compute number of days given a year, month, date. +// Note that month and date can lie outside the normal range. +// For example: +// MakeDay(2007, -4, 20) --> MakeDay(2006, 8, 20) +// MakeDay(2007, -33, 1) --> MakeDay(2004, 3, 1) +// MakeDay(2007, 14, -50) --> MakeDay(2007, 8, 11) +function MakeDay(year, month, date) { + if (!$isFinite(year) || !$isFinite(month) || !$isFinite(date)) return $NaN; + + year = TO_INTEGER(year); + month = TO_INTEGER(month); + date = TO_INTEGER(date); + + if (year < kMinYear || year > kMaxYear || + month < kMinMonth || month > kMaxMonth || + date < kMinDate || date > kMaxDate) { + return $NaN; + } + + // Now we rely on year, month and date being SMIs. + return %DateMakeDay(year, month, date); +} + + +// ECMA 262 - 15.9.1.13 +function MakeDate(day, time) { + if (!$isFinite(day)) return $NaN; + if (!$isFinite(time)) return $NaN; + return day * msPerDay + time; +} + + +// ECMA 262 - 15.9.1.14 +function TimeClip(time) { + if (!$isFinite(time)) return $NaN; + if ($abs(time) > 8.64E15) return $NaN; + return TO_INTEGER(time); +} + + +// The Date cache is used to limit the cost of parsing the same Date +// strings over and over again. +var Date_cache = { + // Cached time value. + time: $NaN, + // Cached year when interpreting the time as a local time. Only + // valid when the time matches cached time. + year: $NaN, + // String input for which the cached time is valid. + string: null +}; + + +%SetCode($Date, function(year, month, date, hours, minutes, seconds, ms) { + if (!%_IsConstructCall()) { + // ECMA 262 - 15.9.2 + return (new $Date()).toString(); + } + + // ECMA 262 - 15.9.3 + var argc = %_ArgumentsLength(); + var value; + if (argc == 0) { + value = %DateCurrentTime(); + + } else if (argc == 1) { + if (IS_NUMBER(year)) { + value = TimeClip(year); + + } else if (IS_STRING(year)) { + // Probe the Date cache. 
If we already have a time value for the + // given time, we re-use that instead of parsing the string again. + var cache = Date_cache; + if (cache.string === year) { + value = cache.time; + } else { + value = DateParse(year); + if (!NUMBER_IS_NAN(value)) { + cache.time = value; + cache.year = YEAR_FROM_TIME(LocalTimeNoCheck(value)); + cache.string = year; + } + } + + } else { + // According to ECMA 262, no hint should be given for this + // conversion. However, ToPrimitive defaults to STRING_HINT for + // Date objects which will lose precision when the Date + // constructor is called with another Date object as its + // argument. We therefore use NUMBER_HINT for the conversion, + // which is the default for everything else than Date objects. + // This makes us behave like KJS and SpiderMonkey. + var time = ToPrimitive(year, NUMBER_HINT); + value = IS_STRING(time) ? DateParse(time) : TimeClip(ToNumber(time)); + } + + } else { + year = ToNumber(year); + month = ToNumber(month); + date = argc > 2 ? ToNumber(date) : 1; + hours = argc > 3 ? ToNumber(hours) : 0; + minutes = argc > 4 ? ToNumber(minutes) : 0; + seconds = argc > 5 ? ToNumber(seconds) : 0; + ms = argc > 6 ? ToNumber(ms) : 0; + year = (!NUMBER_IS_NAN(year) && 0 <= TO_INTEGER(year) && TO_INTEGER(year) <= 99) + ? 1900 + TO_INTEGER(year) : year; + var day = MakeDay(year, month, date); + var time = MakeTime(hours, minutes, seconds, ms); + value = TimeClip(UTC(MakeDate(day, time))); + } + %_SetValueOf(this, value); +}); + + +// Helper functions. +function GetTimeFrom(aDate) { + return DATE_VALUE(aDate); +} + +function GetMillisecondsFrom(aDate) { + var t = DATE_VALUE(aDate); + if (NUMBER_IS_NAN(t)) return t; + return MS_FROM_TIME(LocalTimeNoCheck(t)); +} + + +function GetUTCMillisecondsFrom(aDate) { + var t = DATE_VALUE(aDate); + if (NUMBER_IS_NAN(t)) return t; + return MS_FROM_TIME(t); +} + + +function GetSecondsFrom(aDate) { + var t = DATE_VALUE(aDate); + if (NUMBER_IS_NAN(t)) return t; + return SEC_FROM_TIME(LocalTimeNoCheck(t)); +} + + +function GetUTCSecondsFrom(aDate) { + var t = DATE_VALUE(aDate); + if (NUMBER_IS_NAN(t)) return t; + return SEC_FROM_TIME(t); +} + + +function GetMinutesFrom(aDate) { + var t = DATE_VALUE(aDate); + if (NUMBER_IS_NAN(t)) return t; + return MIN_FROM_TIME(LocalTimeNoCheck(t)); +} + + +function GetUTCMinutesFrom(aDate) { + var t = DATE_VALUE(aDate); + if (NUMBER_IS_NAN(t)) return t; + return MIN_FROM_TIME(t); +} + + +function GetHoursFrom(aDate) { + var t = DATE_VALUE(aDate); + if (NUMBER_IS_NAN(t)) return t; + return HOUR_FROM_TIME(LocalTimeNoCheck(t)); +} + + +function GetUTCHoursFrom(aDate) { + var t = DATE_VALUE(aDate); + if (NUMBER_IS_NAN(t)) return t; + return HOUR_FROM_TIME(t); +} + + +function GetFullYearFrom(aDate) { + var t = DATE_VALUE(aDate); + if (NUMBER_IS_NAN(t)) return t; + var cache = Date_cache; + if (cache.time === t) return cache.year; + return YEAR_FROM_TIME(LocalTimeNoCheck(t)); +} + + +function GetUTCFullYearFrom(aDate) { + var t = DATE_VALUE(aDate); + if (NUMBER_IS_NAN(t)) return t; + return YEAR_FROM_TIME(t); +} + + +function GetMonthFrom(aDate) { + var t = DATE_VALUE(aDate); + if (NUMBER_IS_NAN(t)) return t; + return MONTH_FROM_TIME(LocalTimeNoCheck(t)); +} + + +function GetUTCMonthFrom(aDate) { + var t = DATE_VALUE(aDate); + if (NUMBER_IS_NAN(t)) return t; + return MONTH_FROM_TIME(t); +} + + +function GetDateFrom(aDate) { + var t = DATE_VALUE(aDate); + if (NUMBER_IS_NAN(t)) return t; + return DATE_FROM_TIME(LocalTimeNoCheck(t)); +} + + +function GetUTCDateFrom(aDate) { + var t = 
DATE_VALUE(aDate); + if (NUMBER_IS_NAN(t)) return t; + return DATE_FROM_TIME(t); +} + + +%FunctionSetPrototype($Date, new $Date($NaN)); + + +var WeekDays = ['Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat']; +var Months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']; + + +function TwoDigitString(value) { + return value < 10 ? "0" + value : "" + value; +} + + +function DateString(time) { + return WeekDays[WeekDay(time)] + ' ' + + Months[MonthFromTime(time)] + ' ' + + TwoDigitString(DateFromTime(time)) + ' ' + + YearFromTime(time); +} + + +var LongWeekDays = ['Sunday', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday']; +var LongMonths = ['January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December']; + + +function LongDateString(time) { + return LongWeekDays[WeekDay(time)] + ', ' + + LongMonths[MonthFromTime(time)] + ' ' + + TwoDigitString(DateFromTime(time)) + ', ' + + YearFromTime(time); +} + + +function TimeString(time) { + return TwoDigitString(HOUR_FROM_TIME(time)) + ':' + + TwoDigitString(MIN_FROM_TIME(time)) + ':' + + TwoDigitString(SEC_FROM_TIME(time)); +} + + +function LocalTimezoneString(time) { + var timezoneOffset = + (local_time_offset + DaylightSavingsOffset(time)) / msPerMinute; + var sign = (timezoneOffset >= 0) ? 1 : -1; + var hours = FLOOR((sign * timezoneOffset)/60); + var min = FLOOR((sign * timezoneOffset)%60); + var gmt = ' GMT' + ((sign == 1) ? '+' : '-') + + TwoDigitString(hours) + TwoDigitString(min); + return gmt + ' (' + LocalTimezone(time) + ')'; +} + + +function DatePrintString(time) { + return DateString(time) + ' ' + TimeString(time); +} + +// ------------------------------------------------------------------- + +// Reused output buffer. Used when parsing date strings. +var parse_buffer = $Array(7); + +// ECMA 262 - 15.9.4.2 +function DateParse(string) { + var arr = %DateParseString(ToString(string), parse_buffer); + if (IS_NULL(arr)) return $NaN; + + var day = MakeDay(arr[0], arr[1], arr[2]); + var time = MakeTime(arr[3], arr[4], arr[5], 0); + var date = MakeDate(day, time); + + if (IS_NULL(arr[6])) { + return TimeClip(UTC(date)); + } else { + return TimeClip(date - arr[6] * 1000); + } +} + + +// ECMA 262 - 15.9.4.3 +function DateUTC(year, month, date, hours, minutes, seconds, ms) { + year = ToNumber(year); + month = ToNumber(month); + var argc = %_ArgumentsLength(); + date = argc > 2 ? ToNumber(date) : 1; + hours = argc > 3 ? ToNumber(hours) : 0; + minutes = argc > 4 ? ToNumber(minutes) : 0; + seconds = argc > 5 ? ToNumber(seconds) : 0; + ms = argc > 6 ? ToNumber(ms) : 0; + year = (!NUMBER_IS_NAN(year) && 0 <= TO_INTEGER(year) && TO_INTEGER(year) <= 99) + ? 1900 + TO_INTEGER(year) : year; + var day = MakeDay(year, month, date); + var time = MakeTime(hours, minutes, seconds, ms); + return %_SetValueOf(this, TimeClip(MakeDate(day, time))); +} + + +// Mozilla-specific extension. Returns the number of milliseconds +// elapsed since 1 January 1970 00:00:00 UTC. 
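DateUTC above applies the same 0..99 to 1900+year adjustment as the Date constructor, so the rule is directly observable from script:

    new Date(95, 0, 1).getFullYear();               // 1995: 95 maps to 1995
    new Date(Date.UTC(95, 0, 1)).getUTCFullYear();  // 1995: same rule in Date.UTC
    new Date(100, 0, 1).getFullYear();              // 100:  values outside 0..99 pass through
    new Date(-1, 0, 1).getFullYear();               // -1:   negative years are not adjusted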
+function DateNow() { + return %DateCurrentTime(); +} + + +// ECMA 262 - 15.9.5.2 +function DateToString() { + var t = DATE_VALUE(this); + if (NUMBER_IS_NAN(t)) return kInvalidDate; + return DatePrintString(LocalTimeNoCheck(t)) + LocalTimezoneString(t); +} + + +// ECMA 262 - 15.9.5.3 +function DateToDateString() { + var t = DATE_VALUE(this); + if (NUMBER_IS_NAN(t)) return kInvalidDate; + return DateString(LocalTimeNoCheck(t)); +} + + +// ECMA 262 - 15.9.5.4 +function DateToTimeString() { + var t = DATE_VALUE(this); + if (NUMBER_IS_NAN(t)) return kInvalidDate; + var lt = LocalTimeNoCheck(t); + return TimeString(lt) + LocalTimezoneString(lt); +} + + +// ECMA 262 - 15.9.5.5 +function DateToLocaleString() { + return DateToString.call(this); +} + + +// ECMA 262 - 15.9.5.6 +function DateToLocaleDateString() { + var t = DATE_VALUE(this); + if (NUMBER_IS_NAN(t)) return kInvalidDate; + return LongDateString(LocalTimeNoCheck(t)); +} + + +// ECMA 262 - 15.9.5.7 +function DateToLocaleTimeString() { + var t = DATE_VALUE(this); + if (NUMBER_IS_NAN(t)) return kInvalidDate; + var lt = LocalTimeNoCheck(t); + return TimeString(lt); +} + + +// ECMA 262 - 15.9.5.8 +function DateValueOf() { + return DATE_VALUE(this); +} + + +// ECMA 262 - 15.9.5.9 +function DateGetTime() { + return DATE_VALUE(this); +} + + +// ECMA 262 - 15.9.5.10 +function DateGetFullYear() { + return GetFullYearFrom(this) +} + + +// ECMA 262 - 15.9.5.11 +function DateGetUTCFullYear() { + return GetUTCFullYearFrom(this) +} + + +// ECMA 262 - 15.9.5.12 +function DateGetMonth() { + return GetMonthFrom(this); +} + + +// ECMA 262 - 15.9.5.13 +function DateGetUTCMonth() { + return GetUTCMonthFrom(this); +} + + +// ECMA 262 - 15.9.5.14 +function DateGetDate() { + return GetDateFrom(this); +} + + +// ECMA 262 - 15.9.5.15 +function DateGetUTCDate() { + return GetUTCDateFrom(this); +} + + +// ECMA 262 - 15.9.5.16 +function DateGetDay() { + var t = %_ValueOf(this); + if (NUMBER_IS_NAN(t)) return t; + return WeekDay(LocalTimeNoCheck(t)); +} + + +// ECMA 262 - 15.9.5.17 +function DateGetUTCDay() { + var t = %_ValueOf(this); + if (NUMBER_IS_NAN(t)) return t; + return WeekDay(t); +} + + +// ECMA 262 - 15.9.5.18 +function DateGetHours() { + return GetHoursFrom(this); +} + + +// ECMA 262 - 15.9.5.19 +function DateGetUTCHours() { + return GetUTCHoursFrom(this); +} + + +// ECMA 262 - 15.9.5.20 +function DateGetMinutes() { + return GetMinutesFrom(this); +} + + +// ECMA 262 - 15.9.5.21 +function DateGetUTCMinutes() { + return GetUTCMinutesFrom(this); +} + + +// ECMA 262 - 15.9.5.22 +function DateGetSeconds() { + return GetSecondsFrom(this); +} + + +// ECMA 262 - 15.9.5.23 +function DateGetUTCSeconds() { + return GetUTCSecondsFrom(this); +} + + +// ECMA 262 - 15.9.5.24 +function DateGetMilliseconds() { + return GetMillisecondsFrom(this); +} + + +// ECMA 262 - 15.9.5.25 +function DateGetUTCMilliseconds() { + return GetUTCMillisecondsFrom(this); +} + + +// ECMA 262 - 15.9.5.26 +function DateGetTimezoneOffset() { + var t = DATE_VALUE(this); + if (NUMBER_IS_NAN(t)) return t; + return (t - LocalTimeNoCheck(t)) / msPerMinute; +} + + +// ECMA 262 - 15.9.5.27 +function DateSetTime(ms) { + if (!IS_DATE(this)) ThrowDateTypeError(); + return %_SetValueOf(this, TimeClip(ToNumber(ms))); +} + + +// ECMA 262 - 15.9.5.28 +function DateSetMilliseconds(ms) { + var t = LocalTime(DATE_VALUE(this)); + ms = ToNumber(ms); + var time = MakeTime(HOUR_FROM_TIME(t), MIN_FROM_TIME(t), SEC_FROM_TIME(t), ms); + return %_SetValueOf(this, TimeClip(UTC(MakeDate(DAY(t), time)))); +} + + +// ECMA 
262 - 15.9.5.29 +function DateSetUTCMilliseconds(ms) { + var t = DATE_VALUE(this); + ms = ToNumber(ms); + var time = MakeTime(HOUR_FROM_TIME(t), MIN_FROM_TIME(t), SEC_FROM_TIME(t), ms); + return %_SetValueOf(this, TimeClip(MakeDate(DAY(t), time))); +} + + +// ECMA 262 - 15.9.5.30 +function DateSetSeconds(sec, ms) { + var t = LocalTime(DATE_VALUE(this)); + sec = ToNumber(sec); + ms = %_ArgumentsLength() < 2 ? GetMillisecondsFrom(this) : ToNumber(ms); + var time = MakeTime(HOUR_FROM_TIME(t), MIN_FROM_TIME(t), sec, ms); + return %_SetValueOf(this, TimeClip(UTC(MakeDate(DAY(t), time)))); +} + + +// ECMA 262 - 15.9.5.31 +function DateSetUTCSeconds(sec, ms) { + var t = DATE_VALUE(this); + sec = ToNumber(sec); + ms = %_ArgumentsLength() < 2 ? GetUTCMillisecondsFrom(this) : ToNumber(ms); + var time = MakeTime(HOUR_FROM_TIME(t), MIN_FROM_TIME(t), sec, ms); + return %_SetValueOf(this, TimeClip(MakeDate(DAY(t), time))); +} + + +// ECMA 262 - 15.9.5.33 +function DateSetMinutes(min, sec, ms) { + var t = LocalTime(DATE_VALUE(this)); + min = ToNumber(min); + var argc = %_ArgumentsLength(); + sec = argc < 2 ? GetSecondsFrom(this) : ToNumber(sec); + ms = argc < 3 ? GetMillisecondsFrom(this) : ToNumber(ms); + var time = MakeTime(HOUR_FROM_TIME(t), min, sec, ms); + return %_SetValueOf(this, TimeClip(UTC(MakeDate(DAY(t), time)))); +} + + +// ECMA 262 - 15.9.5.34 +function DateSetUTCMinutes(min, sec, ms) { + var t = DATE_VALUE(this); + min = ToNumber(min); + var argc = %_ArgumentsLength(); + sec = argc < 2 ? GetUTCSecondsFrom(this) : ToNumber(sec); + ms = argc < 3 ? GetUTCMillisecondsFrom(this) : ToNumber(ms); + var time = MakeTime(HOUR_FROM_TIME(t), min, sec, ms); + return %_SetValueOf(this, TimeClip(MakeDate(DAY(t), time))); +} + + +// ECMA 262 - 15.9.5.35 +function DateSetHours(hour, min, sec, ms) { + var t = LocalTime(DATE_VALUE(this)); + hour = ToNumber(hour); + var argc = %_ArgumentsLength(); + min = argc < 2 ? GetMinutesFrom(this) : ToNumber(min); + sec = argc < 3 ? GetSecondsFrom(this) : ToNumber(sec); + ms = argc < 4 ? GetMillisecondsFrom(this) : ToNumber(ms); + var time = MakeTime(hour, min, sec, ms); + return %_SetValueOf(this, TimeClip(UTC(MakeDate(DAY(t), time)))); +} + + +// ECMA 262 - 15.9.5.34 +function DateSetUTCHours(hour, min, sec, ms) { + var t = DATE_VALUE(this); + hour = ToNumber(hour); + var argc = %_ArgumentsLength(); + min = argc < 2 ? GetUTCMinutesFrom(this) : ToNumber(min); + sec = argc < 3 ? GetUTCSecondsFrom(this) : ToNumber(sec); + ms = argc < 4 ? GetUTCMillisecondsFrom(this) : ToNumber(ms); + var time = MakeTime(hour, min, sec, ms); + return %_SetValueOf(this, TimeClip(MakeDate(DAY(t), time))); +} + + +// ECMA 262 - 15.9.5.36 +function DateSetDate(date) { + var t = LocalTime(DATE_VALUE(this)); + date = ToNumber(date); + var day = MakeDay(YEAR_FROM_TIME(t), MONTH_FROM_TIME(t), date); + return %_SetValueOf(this, TimeClip(UTC(MakeDate(day, TimeWithinDay(t))))); +} + + +// ECMA 262 - 15.9.5.37 +function DateSetUTCDate(date) { + var t = DATE_VALUE(this); + date = ToNumber(date); + var day = MakeDay(YEAR_FROM_TIME(t), MONTH_FROM_TIME(t), date); + return %_SetValueOf(this, TimeClip(MakeDate(day, TimeWithinDay(t)))); +} + + +// ECMA 262 - 15.9.5.38 +function DateSetMonth(month, date) { + var t = LocalTime(DATE_VALUE(this)); + month = ToNumber(month); + date = %_ArgumentsLength() < 2 ? 
GetDateFrom(this) : ToNumber(date); + var day = MakeDay(YEAR_FROM_TIME(t), month, date); + return %_SetValueOf(this, TimeClip(UTC(MakeDate(day, TimeWithinDay(t))))); +} + + +// ECMA 262 - 15.9.5.39 +function DateSetUTCMonth(month, date) { + var t = DATE_VALUE(this); + month = ToNumber(month); + date = %_ArgumentsLength() < 2 ? GetUTCDateFrom(this) : ToNumber(date); + var day = MakeDay(YEAR_FROM_TIME(t), month, date); + return %_SetValueOf(this, TimeClip(MakeDate(day, TimeWithinDay(t)))); +} + + +// ECMA 262 - 15.9.5.40 +function DateSetFullYear(year, month, date) { + var t = DATE_VALUE(this); + t = NUMBER_IS_NAN(t) ? 0 : LocalTimeNoCheck(t); + year = ToNumber(year); + var argc = %_ArgumentsLength(); + month = argc < 2 ? MONTH_FROM_TIME(t) : ToNumber(month); + date = argc < 3 ? DATE_FROM_TIME(t) : ToNumber(date); + var day = MakeDay(year, month, date); + return %_SetValueOf(this, TimeClip(UTC(MakeDate(day, TimeWithinDay(t))))); +} + + +// ECMA 262 - 15.9.5.41 +function DateSetUTCFullYear(year, month, date) { + var t = DATE_VALUE(this); + if (NUMBER_IS_NAN(t)) t = 0; + var argc = %_ArgumentsLength(); + year = ToNumber(year); + month = argc < 2 ? MONTH_FROM_TIME(t) : ToNumber(month); + date = argc < 3 ? DATE_FROM_TIME(t) : ToNumber(date); + var day = MakeDay(year, month, date); + return %_SetValueOf(this, TimeClip(MakeDate(day, TimeWithinDay(t)))); +} + + +// ECMA 262 - 15.9.5.42 +function DateToUTCString() { + var t = DATE_VALUE(this); + if (NUMBER_IS_NAN(t)) return kInvalidDate; + // Return UTC string of the form: Sat, 31 Jan 1970 23:00:00 GMT + return WeekDays[WeekDay(t)] + ', ' + + TwoDigitString(DATE_FROM_TIME(t)) + ' ' + + Months[MONTH_FROM_TIME(t)] + ' ' + + YEAR_FROM_TIME(t) + ' ' + + TimeString(t) + ' GMT'; +} + + +// ECMA 262 - B.2.4 +function DateGetYear() { + var t = DATE_VALUE(this); + if (NUMBER_IS_NAN(t)) return $NaN; + return YEAR_FROM_TIME(LocalTimeNoCheck(t)) - 1900; +} + + +// ECMA 262 - B.2.5 +function DateSetYear(year) { + var t = LocalTime(DATE_VALUE(this)); + if (NUMBER_IS_NAN(t)) t = 0; + year = ToNumber(year); + if (NUMBER_IS_NAN(year)) return %_SetValueOf(this, $NaN); + year = (0 <= TO_INTEGER(year) && TO_INTEGER(year) <= 99) + ? 1900 + TO_INTEGER(year) : year; + var day = MakeDay(year, MONTH_FROM_TIME(t), DATE_FROM_TIME(t)); + return %_SetValueOf(this, TimeClip(UTC(MakeDate(day, TimeWithinDay(t))))); +} + + +// ECMA 262 - B.2.6 +// +// Notice that this does not follow ECMA 262 completely. ECMA 262 +// says that toGMTString should be the same Function object as +// toUTCString. JSC does not do this, so for compatibility we do not +// do that either. Instead, we create a new function whose name +// property will return toGMTString. +function DateToGMTString() { + return DateToUTCString.call(this); +} + + +function PadInt(n, digits) { + if (digits == 1) return n; + return n < MathPow(10, digits - 1) ? '0' + PadInt(n, digits - 1) : n; +} + + +function DateToISOString() { + var t = DATE_VALUE(this); + if (NUMBER_IS_NAN(t)) return kInvalidDate; + return this.getUTCFullYear() + '-' + PadInt(this.getUTCMonth() + 1, 2) + + '-' + PadInt(this.getUTCDate(), 2) + 'T' + PadInt(this.getUTCHours(), 2) + + ':' + PadInt(this.getUTCMinutes(), 2) + ':' + PadInt(this.getUTCSeconds(), 2) + + '.' 
+ PadInt(this.getUTCMilliseconds(), 3) + + 'Z'; +} + + +function DateToJSON(key) { + return CheckJSONPrimitive(this.toISOString()); +} + + +// ------------------------------------------------------------------- + +function SetupDate() { + // Setup non-enumerable properties of the Date object itself. + InstallFunctions($Date, DONT_ENUM, $Array( + "UTC", DateUTC, + "parse", DateParse, + "now", DateNow + )); + + // Setup non-enumerable constructor property of the Date prototype object. + %SetProperty($Date.prototype, "constructor", $Date, DONT_ENUM); + + // Setup non-enumerable functions of the Date prototype object and + // set their names. + InstallFunctionsOnHiddenPrototype($Date.prototype, DONT_ENUM, $Array( + "toString", DateToString, + "toDateString", DateToDateString, + "toTimeString", DateToTimeString, + "toLocaleString", DateToLocaleString, + "toLocaleDateString", DateToLocaleDateString, + "toLocaleTimeString", DateToLocaleTimeString, + "valueOf", DateValueOf, + "getTime", DateGetTime, + "getFullYear", DateGetFullYear, + "getUTCFullYear", DateGetUTCFullYear, + "getMonth", DateGetMonth, + "getUTCMonth", DateGetUTCMonth, + "getDate", DateGetDate, + "getUTCDate", DateGetUTCDate, + "getDay", DateGetDay, + "getUTCDay", DateGetUTCDay, + "getHours", DateGetHours, + "getUTCHours", DateGetUTCHours, + "getMinutes", DateGetMinutes, + "getUTCMinutes", DateGetUTCMinutes, + "getSeconds", DateGetSeconds, + "getUTCSeconds", DateGetUTCSeconds, + "getMilliseconds", DateGetMilliseconds, + "getUTCMilliseconds", DateGetUTCMilliseconds, + "getTimezoneOffset", DateGetTimezoneOffset, + "setTime", DateSetTime, + "setMilliseconds", DateSetMilliseconds, + "setUTCMilliseconds", DateSetUTCMilliseconds, + "setSeconds", DateSetSeconds, + "setUTCSeconds", DateSetUTCSeconds, + "setMinutes", DateSetMinutes, + "setUTCMinutes", DateSetUTCMinutes, + "setHours", DateSetHours, + "setUTCHours", DateSetUTCHours, + "setDate", DateSetDate, + "setUTCDate", DateSetUTCDate, + "setMonth", DateSetMonth, + "setUTCMonth", DateSetUTCMonth, + "setFullYear", DateSetFullYear, + "setUTCFullYear", DateSetUTCFullYear, + "toGMTString", DateToGMTString, + "toUTCString", DateToUTCString, + "getYear", DateGetYear, + "setYear", DateSetYear, + "toISOString", DateToISOString, + "toJSON", DateToJSON + )); +} + +SetupDate(); diff --git a/deps/v8/src/debug-debugger.js b/deps/v8/src/debug-debugger.js new file mode 100644 index 0000000000..a81530e843 --- /dev/null +++ b/deps/v8/src/debug-debugger.js @@ -0,0 +1,2132 @@ +// Copyright 2006-2008 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Default number of frames to include in the response to backtrace request. +const kDefaultBacktraceLength = 10; + +const Debug = {}; + +// Regular expression to skip "crud" at the beginning of a source line which is +// not really code. Currently the regular expression matches whitespace and +// comments. +const sourceLineBeginningSkip = /^(?:\s*(?:\/\*.*?\*\/)*)*/; + +// Debug events which can occour in the V8 JavaScript engine. These originate +// from the API include file debug.h. +Debug.DebugEvent = { Break: 1, + Exception: 2, + NewFunction: 3, + BeforeCompile: 4, + AfterCompile: 5, + ScriptCollected: 6 }; + +// Types of exceptions that can be broken upon. +Debug.ExceptionBreak = { All : 0, + Uncaught: 1 }; + +// The different types of steps. +Debug.StepAction = { StepOut: 0, + StepNext: 1, + StepIn: 2, + StepMin: 3, + StepInMin: 4 }; + +// The different types of scripts matching enum ScriptType in objects.h. +Debug.ScriptType = { Native: 0, + Extension: 1, + Normal: 2 }; + +// The different types of script compilations matching enum +// Script::CompilationType in objects.h. +Debug.ScriptCompilationType = { Host: 0, + Eval: 1, + JSON: 2 }; + +// The different script break point types. +Debug.ScriptBreakPointType = { ScriptId: 0, + ScriptName: 1 }; + +function ScriptTypeFlag(type) { + return (1 << type); +} + +// Globals. +var next_response_seq = 0; +var next_break_point_number = 1; +var break_points = []; +var script_break_points = []; + + +// Create a new break point object and add it to the list of break points. +function MakeBreakPoint(source_position, opt_line, opt_column, opt_script_break_point) { + var break_point = new BreakPoint(source_position, opt_line, opt_column, opt_script_break_point); + break_points.push(break_point); + return break_point; +} + + +// Object representing a break point. +// NOTE: This object does not have a reference to the function having break +// point as this would cause function not to be garbage collected when it is +// not used any more. We do not want break points to keep functions alive. 
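sourceLineBeginningSkip, defined near the top of this file, is what later turns a line-only break point into a concrete column: the length of its match is the first column holding real code. A quick check against a made-up source line:

    var skipCrud = /^(?:\s*(?:\/\*.*?\*\/)*)*/;     // local copy of sourceLineBeginningSkip
    var line = "  /* setup */  total += price;";
    var column = line.match(skipCrud)[0].length;
    // column === 15: two spaces, the block comment, two more spaces; the
    // break point lands on 'total' instead of in the leading whitespace.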
+function BreakPoint(source_position, opt_line, opt_column, opt_script_break_point) { + this.source_position_ = source_position; + this.source_line_ = opt_line; + this.source_column_ = opt_column; + if (opt_script_break_point) { + this.script_break_point_ = opt_script_break_point; + } else { + this.number_ = next_break_point_number++; + } + this.hit_count_ = 0; + this.active_ = true; + this.condition_ = null; + this.ignoreCount_ = 0; +} + + +BreakPoint.prototype.number = function() { + return this.number_; +}; + + +BreakPoint.prototype.func = function() { + return this.func_; +}; + + +BreakPoint.prototype.source_position = function() { + return this.source_position_; +}; + + +BreakPoint.prototype.hit_count = function() { + return this.hit_count_; +}; + + +BreakPoint.prototype.active = function() { + if (this.script_break_point()) { + return this.script_break_point().active(); + } + return this.active_; +}; + + +BreakPoint.prototype.condition = function() { + if (this.script_break_point() && this.script_break_point().condition()) { + return this.script_break_point().condition(); + } + return this.condition_; +}; + + +BreakPoint.prototype.ignoreCount = function() { + return this.ignoreCount_; +}; + + +BreakPoint.prototype.script_break_point = function() { + return this.script_break_point_; +}; + + +BreakPoint.prototype.enable = function() { + this.active_ = true; +}; + + +BreakPoint.prototype.disable = function() { + this.active_ = false; +}; + + +BreakPoint.prototype.setCondition = function(condition) { + this.condition_ = condition; +}; + + +BreakPoint.prototype.setIgnoreCount = function(ignoreCount) { + this.ignoreCount_ = ignoreCount; +}; + + +BreakPoint.prototype.isTriggered = function(exec_state) { + // Break point not active - not triggered. + if (!this.active()) return false; + + // Check for conditional break point. + if (this.condition()) { + // If break point has condition try to evaluate it in the top frame. + try { + var mirror = exec_state.frame(0).evaluate(this.condition()); + // If no sensible mirror or non true value break point not triggered. + if (!(mirror instanceof ValueMirror) || !%ToBoolean(mirror.value_)) { + return false; + } + } catch (e) { + // Exception evaluating condition counts as not triggered. + return false; + } + } + + // Update the hit count. + this.hit_count_++; + if (this.script_break_point_) { + this.script_break_point_.hit_count_++; + } + + // If the break point has an ignore count it is not triggered. + if (this.ignoreCount_ > 0) { + this.ignoreCount_--; + return false; + } + + // Break point triggered. + return true; +}; + + +// Function called from the runtime when a break point is hit. Returns true if +// the break point is triggered and supposed to break execution. +function IsBreakPointTriggered(break_id, break_point) { + return break_point.isTriggered(MakeExecutionState(break_id)); +} + + +// Object representing a script break point. The script is referenced by its +// script name or script id and the break point is represented as line and +// column. 
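isTriggered, defined a few lines up, checks the condition before it touches the hit or ignore counters, so a hit on which the condition fails does not consume the ignore count. A stand-alone sketch of those rules (it mirrors the logic above rather than calling into the debugger):

    function simulateHit(bp, conditionHolds) {
      if (!bp.active) return false;
      if (bp.condition !== null && !conditionHolds) return false;  // checked first
      bp.hitCount++;                     // counted even when the hit is ignored
      if (bp.ignoreCount > 0) {          // ignore count consumed only after the
        bp.ignoreCount--;                // condition has held
        return false;
      }
      return true;                       // break execution
    }
    var bp = { active: true, condition: "x > 10", hitCount: 0, ignoreCount: 2 };
    simulateHit(bp, false);  // false: condition failed, ignoreCount still 2
    simulateHit(bp, true);   // false: first ignored hit  (hitCount 1, ignoreCount 1)
    simulateHit(bp, true);   // false: second ignored hit (hitCount 2, ignoreCount 0)
    simulateHit(bp, true);   // true:  the break point finally triggers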
+function ScriptBreakPoint(type, script_id_or_name, opt_line, opt_column, + opt_groupId) { + this.type_ = type; + if (type == Debug.ScriptBreakPointType.ScriptId) { + this.script_id_ = script_id_or_name; + } else { // type == Debug.ScriptBreakPointType.ScriptName + this.script_name_ = script_id_or_name; + } + this.line_ = opt_line || 0; + this.column_ = opt_column; + this.groupId_ = opt_groupId; + this.hit_count_ = 0; + this.active_ = true; + this.condition_ = null; + this.ignoreCount_ = 0; +} + + +ScriptBreakPoint.prototype.number = function() { + return this.number_; +}; + + +ScriptBreakPoint.prototype.groupId = function() { + return this.groupId_; +}; + + +ScriptBreakPoint.prototype.type = function() { + return this.type_; +}; + + +ScriptBreakPoint.prototype.script_id = function() { + return this.script_id_; +}; + + +ScriptBreakPoint.prototype.script_name = function() { + return this.script_name_; +}; + + +ScriptBreakPoint.prototype.line = function() { + return this.line_; +}; + + +ScriptBreakPoint.prototype.column = function() { + return this.column_; +}; + + +ScriptBreakPoint.prototype.hit_count = function() { + return this.hit_count_; +}; + + +ScriptBreakPoint.prototype.active = function() { + return this.active_; +}; + + +ScriptBreakPoint.prototype.condition = function() { + return this.condition_; +}; + + +ScriptBreakPoint.prototype.ignoreCount = function() { + return this.ignoreCount_; +}; + + +ScriptBreakPoint.prototype.enable = function() { + this.active_ = true; +}; + + +ScriptBreakPoint.prototype.disable = function() { + this.active_ = false; +}; + + +ScriptBreakPoint.prototype.setCondition = function(condition) { + this.condition_ = condition; +}; + + +ScriptBreakPoint.prototype.setIgnoreCount = function(ignoreCount) { + this.ignoreCount_ = ignoreCount; + + // Set ignore count on all break points created from this script break point. + for (var i = 0; i < break_points.length; i++) { + if (break_points[i].script_break_point() === this) { + break_points[i].setIgnoreCount(ignoreCount); + } + } +}; + + +// Check whether a script matches this script break point. Currently this is +// only based on script name. +ScriptBreakPoint.prototype.matchesScript = function(script) { + if (this.type_ == Debug.ScriptBreakPointType.ScriptId) { + return this.script_id_ == script.id; + } else { // this.type_ == Debug.ScriptBreakPointType.ScriptName + return this.script_name_ == script.name && + script.line_offset <= this.line_ && + this.line_ < script.line_offset + script.lineCount(); + } +}; + + +// Set the script break point in a script. +ScriptBreakPoint.prototype.set = function (script) { + var column = this.column(); + var line = this.line(); + // If the column is undefined the break is on the line. To help locate the + // first piece of breakable code on the line try to find the column on the + // line which contains some source. + if (IS_UNDEFINED(column)) { + var source_line = script.sourceLine(this.line()); + + // Allocate array for caching the columns where the actual source starts. + if (!script.sourceColumnStart_) { + script.sourceColumnStart_ = new Array(script.lineCount()); + } + + // Fill cache if needed and get column where the actual source starts. + if (IS_UNDEFINED(script.sourceColumnStart_[line])) { + script.sourceColumnStart_[line] = + source_line.match(sourceLineBeginningSkip)[0].length; + } + column = script.sourceColumnStart_[line]; + } + + // Convert the line and column into an absolute position within the script. 
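The column discovery step above uses the sourceLineBeginningSkip regular expression declared at the top of this file to skip whitespace and block comments, so that a line break point lands on the first real token. A standalone check of what it matches:

    // Standalone check of the sourceLineBeginningSkip regexp used above.
    var sourceLineBeginningSkip = /^(?:\s*(?:\/\*.*?\*\/)*)*/;
    var line = '   /* init */  var x = 1;';
    var column = line.match(sourceLineBeginningSkip)[0].length;
    console.log(column);              // 15 - offset of the first real token
    console.log(line.slice(column));  // 'var x = 1;'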
+ var pos = Debug.findScriptSourcePosition(script, this.line(), column); + + // If the position is not found in the script (the script might be shorter + // than it used to be) just ignore it. + if (pos === null) return; + + // Create a break point object and set the break point. + break_point = MakeBreakPoint(pos, this.line(), this.column(), this); + break_point.setIgnoreCount(this.ignoreCount()); + %SetScriptBreakPoint(script, pos, break_point); + + return break_point; +}; + + +// Clear all the break points created from this script break point +ScriptBreakPoint.prototype.clear = function () { + var remaining_break_points = []; + for (var i = 0; i < break_points.length; i++) { + if (break_points[i].script_break_point() && + break_points[i].script_break_point() === this) { + %ClearBreakPoint(break_points[i]); + } else { + remaining_break_points.push(break_points[i]); + } + } + break_points = remaining_break_points; +}; + + +// Function called from runtime when a new script is compiled to set any script +// break points set in this script. +function UpdateScriptBreakPoints(script) { + for (var i = 0; i < script_break_points.length; i++) { + if (script_break_points[i].type() == Debug.ScriptBreakPointType.ScriptName && + script_break_points[i].matchesScript(script)) { + script_break_points[i].set(script); + } + } +} + + +Debug.setListener = function(listener, opt_data) { + if (!IS_FUNCTION(listener) && !IS_UNDEFINED(listener) && !IS_NULL(listener)) { + throw new Error('Parameters have wrong types.'); + } + %SetDebugEventListener(listener, opt_data); +}; + + +Debug.breakExecution = function(f) { + %Break(); +}; + +Debug.breakLocations = function(f) { + if (!IS_FUNCTION(f)) throw new Error('Parameters have wrong types.'); + return %GetBreakLocations(f); +}; + +// Returns a Script object. If the parameter is a function the return value +// is the script in which the function is defined. If the parameter is a string +// the return value is the script for which the script name has that string +// value. If it is a regexp and there is a unique script whose name matches +// we return that, otherwise undefined. +Debug.findScript = function(func_or_script_name) { + if (IS_FUNCTION(func_or_script_name)) { + return %FunctionGetScript(func_or_script_name); + } else if (IS_REGEXP(func_or_script_name)) { + var scripts = Debug.scripts(); + var last_result = null; + var result_count = 0; + for (var i in scripts) { + var script = scripts[i]; + if (func_or_script_name.test(script.name)) { + last_result = script; + result_count++; + } + } + // Return the unique script matching the regexp. If there are more + // than one we don't return a value since there is no good way to + // decide which one to return. Returning a "random" one, say the + // first, would introduce nondeterminism (or something close to it) + // because the order is the heap iteration order. + if (result_count == 1) { + return last_result; + } else { + return undefined; + } + } else { + return %GetScript(func_or_script_name); + } +}; + +// Returns the script source. If the parameter is a function the return value +// is the script source for the script in which the function is defined. If the +// parameter is a string the return value is the script for which the script +// name has that string value. 
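When Debug.findScript above is given a RegExp it only returns a script if exactly one name matches; otherwise it returns undefined, because heap iteration order would make any other choice nondeterministic. A standalone model of that rule, using made-up script names:

    // Standalone model of the "unique regexp match" rule in Debug.findScript.
    function findByRegExp(names, re) {
      var last = undefined, count = 0;
      for (var i = 0; i < names.length; i++) {
        if (re.test(names[i])) { last = names[i]; count++; }
      }
      return count === 1 ? last : undefined;
    }

    var names = ['app.js', 'lib/util.js', 'lib/util.min.js'];
    console.log(findByRegExp(names, /app/));   // 'app.js'
    console.log(findByRegExp(names, /util/));  // undefined - two scripts match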
+Debug.scriptSource = function(func_or_script_name) { + return this.findScript(func_or_script_name).source; +}; + +Debug.source = function(f) { + if (!IS_FUNCTION(f)) throw new Error('Parameters have wrong types.'); + return %FunctionGetSourceCode(f); +}; + +Debug.disassemble = function(f) { + if (!IS_FUNCTION(f)) throw new Error('Parameters have wrong types.'); + return %DebugDisassembleFunction(f); +}; + +Debug.disassembleConstructor = function(f) { + if (!IS_FUNCTION(f)) throw new Error('Parameters have wrong types.'); + return %DebugDisassembleConstructor(f); +}; + +Debug.sourcePosition = function(f) { + if (!IS_FUNCTION(f)) throw new Error('Parameters have wrong types.'); + return %FunctionGetScriptSourcePosition(f); +}; + + +Debug.findFunctionSourceLocation = function(func, opt_line, opt_column) { + var script = %FunctionGetScript(func); + var script_offset = %FunctionGetScriptSourcePosition(func); + return script.locationFromLine(opt_line, opt_column, script_offset); +} + + +// Returns the character position in a script based on a line number and an +// optional position within that line. +Debug.findScriptSourcePosition = function(script, opt_line, opt_column) { + var location = script.locationFromLine(opt_line, opt_column); + return location ? location.position : null; +} + + +Debug.findBreakPoint = function(break_point_number, remove) { + var break_point; + for (var i = 0; i < break_points.length; i++) { + if (break_points[i].number() == break_point_number) { + break_point = break_points[i]; + // Remove the break point from the list if requested. + if (remove) { + break_points.splice(i, 1); + } + break; + } + } + if (break_point) { + return break_point; + } else { + return this.findScriptBreakPoint(break_point_number, remove); + } +}; + + +Debug.setBreakPoint = function(func, opt_line, opt_column, opt_condition) { + if (!IS_FUNCTION(func)) throw new Error('Parameters have wrong types.'); + // Break points in API functions are not supported. + if (%FunctionIsAPIFunction(func)) { + throw new Error('Cannot set break point in native code.'); + } + // Find source position relative to start of the function + var break_position = + this.findFunctionSourceLocation(func, opt_line, opt_column).position; + var source_position = break_position - this.sourcePosition(func); + // Find the script for the function. + var script = %FunctionGetScript(func); + // Break in builtin JavaScript code is not supported. + if (script.type == Debug.ScriptType.Native) { + throw new Error('Cannot set break point in native code.'); + } + // If the script for the function has a name convert this to a script break + // point. + if (script && script.id) { + // Adjust the source position to be script relative. + source_position += %FunctionGetScriptSourcePosition(func); + // Find line and column for the position in the script and set a script + // break point from that. + var location = script.locationFromPosition(source_position, false); + return this.setScriptBreakPointById(script.id, + location.line, location.column, + opt_condition); + } else { + // Set a break point directly on the function. 
+ var break_point = MakeBreakPoint(source_position, opt_line, opt_column); + %SetFunctionBreakPoint(func, source_position, break_point); + break_point.setCondition(opt_condition); + return break_point.number(); + } +}; + + +Debug.enableBreakPoint = function(break_point_number) { + var break_point = this.findBreakPoint(break_point_number, false); + break_point.enable(); +}; + + +Debug.disableBreakPoint = function(break_point_number) { + var break_point = this.findBreakPoint(break_point_number, false); + break_point.disable(); +}; + + +Debug.changeBreakPointCondition = function(break_point_number, condition) { + var break_point = this.findBreakPoint(break_point_number, false); + break_point.setCondition(condition); +}; + + +Debug.changeBreakPointIgnoreCount = function(break_point_number, ignoreCount) { + if (ignoreCount < 0) { + throw new Error('Invalid argument'); + } + var break_point = this.findBreakPoint(break_point_number, false); + break_point.setIgnoreCount(ignoreCount); +}; + + +Debug.clearBreakPoint = function(break_point_number) { + var break_point = this.findBreakPoint(break_point_number, true); + if (break_point) { + return %ClearBreakPoint(break_point); + } else { + break_point = this.findScriptBreakPoint(break_point_number, true); + if (!break_point) { + throw new Error('Invalid breakpoint'); + } + } +}; + + +Debug.clearAllBreakPoints = function() { + for (var i = 0; i < break_points.length; i++) { + break_point = break_points[i]; + %ClearBreakPoint(break_point); + } + break_points = []; +}; + + +Debug.findScriptBreakPoint = function(break_point_number, remove) { + var script_break_point; + for (var i = 0; i < script_break_points.length; i++) { + if (script_break_points[i].number() == break_point_number) { + script_break_point = script_break_points[i]; + // Remove the break point from the list if requested. + if (remove) { + script_break_point.clear(); + script_break_points.splice(i,1); + } + break; + } + } + return script_break_point; +} + + +// Sets a breakpoint in a script identified through id or name at the +// specified source line and column within that line. +Debug.setScriptBreakPoint = function(type, script_id_or_name, + opt_line, opt_column, opt_condition, + opt_groupId) { + // Create script break point object. + var script_break_point = + new ScriptBreakPoint(type, script_id_or_name, opt_line, opt_column, + opt_groupId); + + // Assign number to the new script break point and add it. + script_break_point.number_ = next_break_point_number++; + script_break_point.setCondition(opt_condition); + script_break_points.push(script_break_point); + + // Run through all scripts to see if this script break point matches any + // loaded scripts. 
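Taken together, the hunk above gives a small break point management API: setBreakPoint returns a break point number which the enable/disable, condition and ignore-count helpers accept. A hypothetical usage sketch, assuming V8 is started with --expose-debug-as=debug (the flag is visible in the flag-definitions.h hunk later in this diff) and that the exposed object carries the Debug namespace built here; the function, line numbers and condition are illustrative only:

    // Hypothetical usage, assuming the debug object is exposed to user code.
    function compute(a, b) {
      var sum = a + b;
      return sum * 2;
    }

    var Debug = debug.Debug;  // shape of the exposed object may differ per version
    var bp = Debug.setBreakPoint(compute, 1, 0, 'a > 10');  // conditional break point
    Debug.changeBreakPointIgnoreCount(bp, 2);  // skip the first two hits
    Debug.disableBreakPoint(bp);               // temporarily switch it off
    Debug.enableBreakPoint(bp);
    Debug.clearBreakPoint(bp);                 // remove it entirely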
+ var scripts = this.scripts(); + for (var i = 0; i < scripts.length; i++) { + if (script_break_point.matchesScript(scripts[i])) { + script_break_point.set(scripts[i]); + } + } + + return script_break_point.number(); +} + + +Debug.setScriptBreakPointById = function(script_id, + opt_line, opt_column, + opt_condition, opt_groupId) { + return this.setScriptBreakPoint(Debug.ScriptBreakPointType.ScriptId, + script_id, opt_line, opt_column, + opt_condition, opt_groupId); +} + + +Debug.setScriptBreakPointByName = function(script_name, + opt_line, opt_column, + opt_condition, opt_groupId) { + return this.setScriptBreakPoint(Debug.ScriptBreakPointType.ScriptName, + script_name, opt_line, opt_column, + opt_condition, opt_groupId); +} + + +Debug.enableScriptBreakPoint = function(break_point_number) { + var script_break_point = this.findScriptBreakPoint(break_point_number, false); + script_break_point.enable(); +}; + + +Debug.disableScriptBreakPoint = function(break_point_number) { + var script_break_point = this.findScriptBreakPoint(break_point_number, false); + script_break_point.disable(); +}; + + +Debug.changeScriptBreakPointCondition = function(break_point_number, condition) { + var script_break_point = this.findScriptBreakPoint(break_point_number, false); + script_break_point.setCondition(condition); +}; + + +Debug.changeScriptBreakPointIgnoreCount = function(break_point_number, ignoreCount) { + if (ignoreCount < 0) { + throw new Error('Invalid argument'); + } + var script_break_point = this.findScriptBreakPoint(break_point_number, false); + script_break_point.setIgnoreCount(ignoreCount); +}; + + +Debug.scriptBreakPoints = function() { + return script_break_points; +} + + +Debug.clearStepping = function() { + %ClearStepping(); +} + +Debug.setBreakOnException = function() { + return %ChangeBreakOnException(Debug.ExceptionBreak.All, true); +}; + +Debug.clearBreakOnException = function() { + return %ChangeBreakOnException(Debug.ExceptionBreak.All, false); +}; + +Debug.setBreakOnUncaughtException = function() { + return %ChangeBreakOnException(Debug.ExceptionBreak.Uncaught, true); +}; + +Debug.clearBreakOnUncaughtException = function() { + return %ChangeBreakOnException(Debug.ExceptionBreak.Uncaught, false); +}; + +Debug.showBreakPoints = function(f, full) { + if (!IS_FUNCTION(f)) throw new Error('Parameters have wrong types.'); + var source = full ? this.scriptSource(f) : this.source(f); + var offset = full ? this.sourcePosition(f) : 0; + var locations = this.breakLocations(f); + if (!locations) return source; + locations.sort(function(x, y) { return x - y; }); + var result = ""; + var prev_pos = 0; + var pos; + for (var i = 0; i < locations.length; i++) { + pos = locations[i] - offset; + result += source.slice(prev_pos, pos); + result += "[B" + i + "]"; + prev_pos = pos; + } + pos = source.length; + result += source.substring(prev_pos, pos); + return result; +}; + + +// Get all the scripts currently loaded. Locating all the scripts is based on +// scanning the heap. +Debug.scripts = function() { + // Collect all scripts in the heap. + return %DebugGetLoadedScripts(); +} + +function MakeExecutionState(break_id) { + return new ExecutionState(break_id); +} + +function ExecutionState(break_id) { + this.break_id = break_id; + this.selected_frame = 0; +} + +ExecutionState.prototype.prepareStep = function(opt_action, opt_count) { + var action = Debug.StepAction.StepIn; + if (!IS_UNDEFINED(opt_action)) action = %ToNumber(opt_action); + var count = opt_count ? 
%ToNumber(opt_count) : 1; + + return %PrepareStep(this.break_id, action, count); +} + +ExecutionState.prototype.evaluateGlobal = function(source, disable_break) { + return MakeMirror( + %DebugEvaluateGlobal(this.break_id, source, Boolean(disable_break))); +}; + +ExecutionState.prototype.frameCount = function() { + return %GetFrameCount(this.break_id); +}; + +ExecutionState.prototype.threadCount = function() { + return %GetThreadCount(this.break_id); +}; + +ExecutionState.prototype.frame = function(opt_index) { + // If no index supplied return the selected frame. + if (opt_index == null) opt_index = this.selected_frame; + if (opt_index < 0 || opt_index >= this.frameCount()) + throw new Error('Illegal frame index.'); + return new FrameMirror(this.break_id, opt_index); +}; + +ExecutionState.prototype.cframesValue = function(opt_from_index, opt_to_index) { + return %GetCFrames(this.break_id); +}; + +ExecutionState.prototype.setSelectedFrame = function(index) { + var i = %ToNumber(index); + if (i < 0 || i >= this.frameCount()) throw new Error('Illegal frame index.'); + this.selected_frame = i; +}; + +ExecutionState.prototype.selectedFrame = function() { + return this.selected_frame; +}; + +ExecutionState.prototype.debugCommandProcessor = function(opt_is_running) { + return new DebugCommandProcessor(this, opt_is_running); +}; + + +function MakeBreakEvent(exec_state, break_points_hit) { + return new BreakEvent(exec_state, break_points_hit); +} + + +function BreakEvent(exec_state, break_points_hit) { + this.exec_state_ = exec_state; + this.break_points_hit_ = break_points_hit; +} + + +BreakEvent.prototype.executionState = function() { + return this.exec_state_; +}; + + +BreakEvent.prototype.eventType = function() { + return Debug.DebugEvent.Break; +}; + + +BreakEvent.prototype.func = function() { + return this.exec_state_.frame(0).func(); +}; + + +BreakEvent.prototype.sourceLine = function() { + return this.exec_state_.frame(0).sourceLine(); +}; + + +BreakEvent.prototype.sourceColumn = function() { + return this.exec_state_.frame(0).sourceColumn(); +}; + + +BreakEvent.prototype.sourceLineText = function() { + return this.exec_state_.frame(0).sourceLineText(); +}; + + +BreakEvent.prototype.breakPointsHit = function() { + return this.break_points_hit_; +}; + + +BreakEvent.prototype.toJSONProtocol = function() { + var o = { seq: next_response_seq++, + type: "event", + event: "break", + body: { invocationText: this.exec_state_.frame(0).invocationText(), + } + }; + + // Add script related information to the event if available. + var script = this.func().script(); + if (script) { + o.body.sourceLine = this.sourceLine(), + o.body.sourceColumn = this.sourceColumn(), + o.body.sourceLineText = this.sourceLineText(), + o.body.script = MakeScriptObject_(script, false); + } + + // Add an Array of break points hit if any. + if (this.breakPointsHit()) { + o.body.breakpoints = []; + for (var i = 0; i < this.breakPointsHit().length; i++) { + // Find the break point number. For break points originating from a + // script break point supply the script break point number. 
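The loop this comment belongs to reduces each hit to its user-visible number, preferring the script break point number when the hit originated from one. Once serialized, the break event has roughly the following shape; every value below is made up:

    // Illustrative shape of a serialized "break" event (values are made up).
    var breakEvent = {
      seq: 17,                          // from the shared next_response_seq counter
      type: 'event',
      event: 'break',
      body: {
        invocationText: 'compute(a=12, b=3)',
        sourceLine: 1,
        sourceColumn: 2,
        sourceLineText: '  var sum = a + b;',
        script: { id: 42, name: 'app.js', lineOffset: 0,
                  columnOffset: 0, lineCount: 10 },
        breakpoints: [1]                // script break point number when one originated the hit
      }
    };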
+ var breakpoint = this.breakPointsHit()[i]; + var script_break_point = breakpoint.script_break_point(); + var number; + if (script_break_point) { + number = script_break_point.number(); + } else { + number = breakpoint.number(); + } + o.body.breakpoints.push(number); + } + } + return JSON.stringify(ObjectToProtocolObject_(o)); +}; + + +function MakeExceptionEvent(exec_state, exception, uncaught) { + return new ExceptionEvent(exec_state, exception, uncaught); +} + + +function ExceptionEvent(exec_state, exception, uncaught) { + this.exec_state_ = exec_state; + this.exception_ = exception; + this.uncaught_ = uncaught; +} + + +ExceptionEvent.prototype.executionState = function() { + return this.exec_state_; +}; + + +ExceptionEvent.prototype.eventType = function() { + return Debug.DebugEvent.Exception; +}; + + +ExceptionEvent.prototype.exception = function() { + return this.exception_; +} + + +ExceptionEvent.prototype.uncaught = function() { + return this.uncaught_; +} + + +ExceptionEvent.prototype.func = function() { + return this.exec_state_.frame(0).func(); +}; + + +ExceptionEvent.prototype.sourceLine = function() { + return this.exec_state_.frame(0).sourceLine(); +}; + + +ExceptionEvent.prototype.sourceColumn = function() { + return this.exec_state_.frame(0).sourceColumn(); +}; + + +ExceptionEvent.prototype.sourceLineText = function() { + return this.exec_state_.frame(0).sourceLineText(); +}; + + +ExceptionEvent.prototype.toJSONProtocol = function() { + var o = new ProtocolMessage(); + o.event = "exception"; + o.body = { uncaught: this.uncaught_, + exception: MakeMirror(this.exception_) + }; + + // Exceptions might happen whithout any JavaScript frames. + if (this.exec_state_.frameCount() > 0) { + o.body.sourceLine = this.sourceLine(); + o.body.sourceColumn = this.sourceColumn(); + o.body.sourceLineText = this.sourceLineText(); + + // Add script information to the event if available. 
+ var script = this.func().script(); + if (script) { + o.body.script = MakeScriptObject_(script, false); + } + } else { + o.body.sourceLine = -1; + } + + return o.toJSONProtocol(); +}; + + +function MakeCompileEvent(exec_state, script, before) { + return new CompileEvent(exec_state, script, before); +} + + +function CompileEvent(exec_state, script, before) { + this.exec_state_ = exec_state; + this.script_ = MakeMirror(script); + this.before_ = before; +} + + +CompileEvent.prototype.executionState = function() { + return this.exec_state_; +}; + + +CompileEvent.prototype.eventType = function() { + if (this.before_) { + return Debug.DebugEvent.BeforeCompile; + } else { + return Debug.DebugEvent.AfterCompile; + } +}; + + +CompileEvent.prototype.script = function() { + return this.script_; +}; + + +CompileEvent.prototype.toJSONProtocol = function() { + var o = new ProtocolMessage(); + o.running = true; + if (this.before_) { + o.event = "beforeCompile"; + } else { + o.event = "afterCompile"; + } + o.body = {}; + o.body.script = this.script_; + + return o.toJSONProtocol(); +} + + +function MakeNewFunctionEvent(func) { + return new NewFunctionEvent(func); +} + + +function NewFunctionEvent(func) { + this.func = func; +} + + +NewFunctionEvent.prototype.eventType = function() { + return Debug.DebugEvent.NewFunction; +}; + + +NewFunctionEvent.prototype.name = function() { + return this.func.name; +}; + + +NewFunctionEvent.prototype.setBreakPoint = function(p) { + Debug.setBreakPoint(this.func, p || 0); +}; + + +function MakeScriptCollectedEvent(exec_state, id) { + return new ScriptCollectedEvent(exec_state, id); +} + + +function ScriptCollectedEvent(exec_state, id) { + this.exec_state_ = exec_state; + this.id_ = id; +} + + +ScriptCollectedEvent.prototype.id = function() { + return this.id_; +}; + + +ScriptCollectedEvent.prototype.executionState = function() { + return this.exec_state_; +}; + + +ScriptCollectedEvent.prototype.toJSONProtocol = function() { + var o = new ProtocolMessage(); + o.running = true; + o.event = "scriptCollected"; + o.body = {}; + o.body.script = { id: this.id() }; + return o.toJSONProtocol(); +} + + +function MakeScriptObject_(script, include_source) { + var o = { id: script.id(), + name: script.name(), + lineOffset: script.lineOffset(), + columnOffset: script.columnOffset(), + lineCount: script.lineCount(), + }; + if (!IS_UNDEFINED(script.data())) { + o.data = script.data(); + } + if (include_source) { + o.source = script.source(); + } + return o; +}; + + +function DebugCommandProcessor(exec_state, opt_is_running) { + this.exec_state_ = exec_state; + this.running_ = opt_is_running || false; +}; + + +DebugCommandProcessor.prototype.processDebugRequest = function (request) { + return this.processDebugJSONRequest(request); +} + + +function ProtocolMessage(request) { + // Update sequence number. + this.seq = next_response_seq++; + + if (request) { + // If message is based on a request this is a response. Fill the initial + // response from the request. + this.type = 'response'; + this.request_seq = request.seq; + this.command = request.command; + } else { + // If message is not based on a request it is a dabugger generated event. + this.type = 'event'; + } + this.success = true; + // Handler may set this field to control debugger state. 
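The running field initialized here is the hook through which individual request handlers control whether the VM resumes once the response has been sent; processDebugJSONRequest later copies it into the command processor's running state. A minimal sketch of the two ways handlers in this file use it:

    // Minimal sketch of the 'running' field contract (mirrors continueRequest_
    // and suspendRequest_ further down): leave it undefined to keep the current
    // state, set it to resume or suspend the VM.
    function continueHandler(request, response) {
      response.running = true;    // VM resumes after this response is sent
    }
    function suspendHandler(request, response) {
      response.running = false;   // VM stays suspended after the response
    }

    var response = {};
    continueHandler(null, response);
    console.log(response.running);  // true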
+ this.running = undefined; +} + + +ProtocolMessage.prototype.setOption = function(name, value) { + if (!this.options_) { + this.options_ = {}; + } + this.options_[name] = value; +} + + +ProtocolMessage.prototype.failed = function(message) { + this.success = false; + this.message = message; +} + + +ProtocolMessage.prototype.toJSONProtocol = function() { + // Encode the protocol header. + var json = {}; + json.seq= this.seq; + if (this.request_seq) { + json.request_seq = this.request_seq; + } + json.type = this.type; + if (this.event) { + json.event = this.event; + } + if (this.command) { + json.command = this.command; + } + if (this.success) { + json.success = this.success; + } else { + json.success = false; + } + if (this.body) { + // Encode the body part. + var bodyJson; + var serializer = MakeMirrorSerializer(true, this.options_); + if (this.body instanceof Mirror) { + bodyJson = serializer.serializeValue(this.body); + } else if (this.body instanceof Array) { + bodyJson = []; + for (var i = 0; i < this.body.length; i++) { + if (this.body[i] instanceof Mirror) { + bodyJson.push(serializer.serializeValue(this.body[i])); + } else { + bodyJson.push(ObjectToProtocolObject_(this.body[i], serializer)); + } + } + } else { + bodyJson = ObjectToProtocolObject_(this.body, serializer); + } + json.body = bodyJson; + json.refs = serializer.serializeReferencedObjects(); + } + if (this.message) { + json.message = this.message; + } + json.running = this.running; + return JSON.stringify(json); +} + + +DebugCommandProcessor.prototype.createResponse = function(request) { + return new ProtocolMessage(request); +}; + + +DebugCommandProcessor.prototype.processDebugJSONRequest = function(json_request) { + var request; // Current request. + var response; // Generated response. + try { + try { + // Convert the JSON string to an object. + request = %CompileString('(' + json_request + ')', false)(); + + // Create an initial response. 
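For reference, an abridged request/response pair as correlated by ProtocolMessage: the response echoes the command, carries request_seq so the client can match it to its request, and takes its own seq from the shared counter. Values are made up and the body and refs fields are omitted:

    // Abridged, illustrative request/response pair (values are made up).
    var request  = '{"seq":1,"type":"request","command":"version"}';
    var response = '{"seq":5,"request_seq":1,"type":"response",' +
                   '"command":"version","success":true,"running":true}';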
+ response = this.createResponse(request); + + if (!request.type) { + throw new Error('Type not specified'); + } + + if (request.type != 'request') { + throw new Error("Illegal type '" + request.type + "' in request"); + } + + if (!request.command) { + throw new Error('Command not specified'); + } + + if (request.arguments) { + var args = request.arguments; + // TODO(yurys): remove request.arguments.compactFormat check once + // ChromeDevTools are switched to 'inlineRefs' + if (args.inlineRefs || args.compactFormat) { + response.setOption('inlineRefs', true); + } + if (!IS_UNDEFINED(args.maxStringLength)) { + response.setOption('maxStringLength', args.maxStringLength); + } + } + + if (request.command == 'continue') { + this.continueRequest_(request, response); + } else if (request.command == 'break') { + this.breakRequest_(request, response); + } else if (request.command == 'setbreakpoint') { + this.setBreakPointRequest_(request, response); + } else if (request.command == 'changebreakpoint') { + this.changeBreakPointRequest_(request, response); + } else if (request.command == 'clearbreakpoint') { + this.clearBreakPointRequest_(request, response); + } else if (request.command == 'clearbreakpointgroup') { + this.clearBreakPointGroupRequest_(request, response); + } else if (request.command == 'backtrace') { + this.backtraceRequest_(request, response); + } else if (request.command == 'frame') { + this.frameRequest_(request, response); + } else if (request.command == 'scopes') { + this.scopesRequest_(request, response); + } else if (request.command == 'scope') { + this.scopeRequest_(request, response); + } else if (request.command == 'evaluate') { + this.evaluateRequest_(request, response); + } else if (request.command == 'lookup') { + this.lookupRequest_(request, response); + } else if (request.command == 'references') { + this.referencesRequest_(request, response); + } else if (request.command == 'source') { + this.sourceRequest_(request, response); + } else if (request.command == 'scripts') { + this.scriptsRequest_(request, response); + } else if (request.command == 'threads') { + this.threadsRequest_(request, response); + } else if (request.command == 'suspend') { + this.suspendRequest_(request, response); + } else if (request.command == 'version') { + this.versionRequest_(request, response); + } else if (request.command == 'profile') { + this.profileRequest_(request, response); + } else if (request.command == 'changelive') { + this.changeLiveRequest_(request, response); + } else { + throw new Error('Unknown command "' + request.command + '" in request'); + } + } catch (e) { + // If there is no response object created one (without command). + if (!response) { + response = this.createResponse(); + } + response.success = false; + response.message = %ToString(e); + } + + // Return the response as a JSON encoded string. + try { + if (!IS_UNDEFINED(response.running)) { + // Response controls running state. + this.running_ = response.running; + } + response.running = this.running_; + return response.toJSONProtocol(); + } catch (e) { + // Failed to generate response - return generic error. + return '{"seq":' + response.seq + ',' + + '"request_seq":' + request.seq + ',' + + '"type":"response",' + + '"success":false,' + + '"message":"Internal error: ' + %ToString(e) + '"}'; + } + } catch (e) { + // Failed in one of the catch blocks above - most generic error. 
+ return '{"seq":0,"type":"response","success":false,"message":"Internal error"}'; + } +}; + + +DebugCommandProcessor.prototype.continueRequest_ = function(request, response) { + // Check for arguments for continue. + if (request.arguments) { + var count = 1; + var action = Debug.StepAction.StepIn; + + // Pull out arguments. + var stepaction = request.arguments.stepaction; + var stepcount = request.arguments.stepcount; + + // Get the stepcount argument if any. + if (stepcount) { + count = %ToNumber(stepcount); + if (count < 0) { + throw new Error('Invalid stepcount argument "' + stepcount + '".'); + } + } + + // Get the stepaction argument. + if (stepaction) { + if (stepaction == 'in') { + action = Debug.StepAction.StepIn; + } else if (stepaction == 'min') { + action = Debug.StepAction.StepMin; + } else if (stepaction == 'next') { + action = Debug.StepAction.StepNext; + } else if (stepaction == 'out') { + action = Debug.StepAction.StepOut; + } else { + throw new Error('Invalid stepaction argument "' + stepaction + '".'); + } + } + + // Setup the VM for stepping. + this.exec_state_.prepareStep(action, count); + } + + // VM should be running after executing this request. + response.running = true; +}; + + +DebugCommandProcessor.prototype.breakRequest_ = function(request, response) { + // Ignore as break command does not do anything when broken. +}; + + +DebugCommandProcessor.prototype.setBreakPointRequest_ = + function(request, response) { + // Check for legal request. + if (!request.arguments) { + response.failed('Missing arguments'); + return; + } + + // Pull out arguments. + var type = request.arguments.type; + var target = request.arguments.target; + var line = request.arguments.line; + var column = request.arguments.column; + var enabled = IS_UNDEFINED(request.arguments.enabled) ? + true : request.arguments.enabled; + var condition = request.arguments.condition; + var ignoreCount = request.arguments.ignoreCount; + var groupId = request.arguments.groupId; + + // Check for legal arguments. + if (!type || IS_UNDEFINED(target)) { + response.failed('Missing argument "type" or "target"'); + return; + } + if (type != 'function' && type != 'handle' && + type != 'script' && type != 'scriptId') { + response.failed('Illegal type "' + type + '"'); + return; + } + + // Either function or script break point. + var break_point_number; + if (type == 'function') { + // Handle function break point. + if (!IS_STRING(target)) { + response.failed('Argument "target" is not a string value'); + return; + } + var f; + try { + // Find the function through a global evaluate. + f = this.exec_state_.evaluateGlobal(target).value(); + } catch (e) { + response.failed('Error: "' + %ToString(e) + + '" evaluating "' + target + '"'); + return; + } + if (!IS_FUNCTION(f)) { + response.failed('"' + target + '" does not evaluate to a function'); + return; + } + + // Set function break point. + break_point_number = Debug.setBreakPoint(f, line, column, condition); + } else if (type == 'handle') { + // Find the object pointed by the specified handle. + var handle = parseInt(target, 10); + var mirror = LookupMirror(handle); + if (!mirror) { + return response.failed('Object #' + handle + '# not found'); + } + if (!mirror.isFunction()) { + return response.failed('Object #' + handle + '# is not a function'); + } + + // Set function break point. + break_point_number = Debug.setBreakPoint(mirror.value(), + line, column, condition); + } else if (type == 'script') { + // set script break point. 
+ break_point_number = + Debug.setScriptBreakPointByName(target, line, column, condition, + groupId); + } else { // type == 'scriptId. + break_point_number = + Debug.setScriptBreakPointById(target, line, column, condition, groupId); + } + + // Set additional break point properties. + var break_point = Debug.findBreakPoint(break_point_number); + if (ignoreCount) { + Debug.changeBreakPointIgnoreCount(break_point_number, ignoreCount); + } + if (!enabled) { + Debug.disableBreakPoint(break_point_number); + } + + // Add the break point number to the response. + response.body = { type: type, + breakpoint: break_point_number } + + // Add break point information to the response. + if (break_point instanceof ScriptBreakPoint) { + if (break_point.type() == Debug.ScriptBreakPointType.ScriptId) { + response.body.type = 'scriptId'; + response.body.script_id = break_point.script_id(); + } else { + response.body.type = 'scriptName'; + response.body.script_name = break_point.script_name(); + } + response.body.line = break_point.line(); + response.body.column = break_point.column(); + } else { + response.body.type = 'function'; + } +}; + + +DebugCommandProcessor.prototype.changeBreakPointRequest_ = function(request, response) { + // Check for legal request. + if (!request.arguments) { + response.failed('Missing arguments'); + return; + } + + // Pull out arguments. + var break_point = %ToNumber(request.arguments.breakpoint); + var enabled = request.arguments.enabled; + var condition = request.arguments.condition; + var ignoreCount = request.arguments.ignoreCount; + + // Check for legal arguments. + if (!break_point) { + response.failed('Missing argument "breakpoint"'); + return; + } + + // Change enabled state if supplied. + if (!IS_UNDEFINED(enabled)) { + if (enabled) { + Debug.enableBreakPoint(break_point); + } else { + Debug.disableBreakPoint(break_point); + } + } + + // Change condition if supplied + if (!IS_UNDEFINED(condition)) { + Debug.changeBreakPointCondition(break_point, condition); + } + + // Change ignore count if supplied + if (!IS_UNDEFINED(ignoreCount)) { + Debug.changeBreakPointIgnoreCount(break_point, ignoreCount); + } +} + + +DebugCommandProcessor.prototype.clearBreakPointGroupRequest_ = function(request, response) { + // Check for legal request. + if (!request.arguments) { + response.failed('Missing arguments'); + return; + } + + // Pull out arguments. + var group_id = request.arguments.groupId; + + // Check for legal arguments. + if (!group_id) { + response.failed('Missing argument "groupId"'); + return; + } + + var cleared_break_points = []; + var new_script_break_points = []; + for (var i = 0; i < script_break_points.length; i++) { + var next_break_point = script_break_points[i]; + if (next_break_point.groupId() == group_id) { + cleared_break_points.push(next_break_point.number()); + next_break_point.clear(); + } else { + new_script_break_points.push(next_break_point); + } + } + script_break_points = new_script_break_points; + + // Add the cleared break point numbers to the response. + response.body = { breakpoints: cleared_break_points }; +} + + +DebugCommandProcessor.prototype.clearBreakPointRequest_ = function(request, response) { + // Check for legal request. + if (!request.arguments) { + response.failed('Missing arguments'); + return; + } + + // Pull out arguments. + var break_point = %ToNumber(request.arguments.breakpoint); + + // Check for legal arguments. + if (!break_point) { + response.failed('Missing argument "breakpoint"'); + return; + } + + // Clear break point. 
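An illustrative request pair for the two handlers around this point: a 'setbreakpoint' request that creates a script break point by name with a condition, ignore count and group id, followed by a 'clearbreakpoint' request using the number returned in the response. All values are made up:

    // Illustrative 'setbreakpoint' and 'clearbreakpoint' requests.
    var setRequest = JSON.stringify({
      seq: 3, type: 'request', command: 'setbreakpoint',
      arguments: { type: 'script', target: 'app.js', line: 4,
                   condition: 'x > 10', ignoreCount: 2, groupId: 1 }
    });
    var clearRequest = JSON.stringify({
      seq: 4, type: 'request', command: 'clearbreakpoint',
      arguments: { breakpoint: 1 }   // number returned by the setbreakpoint response
    });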
+ Debug.clearBreakPoint(break_point); + + // Add the cleared break point number to the response. + response.body = { breakpoint: break_point } +} + + +DebugCommandProcessor.prototype.backtraceRequest_ = function(request, response) { + // Get the number of frames. + var total_frames = this.exec_state_.frameCount(); + + // Create simple response if there are no frames. + if (total_frames == 0) { + response.body = { + totalFrames: total_frames + } + return; + } + + // Default frame range to include in backtrace. + var from_index = 0 + var to_index = kDefaultBacktraceLength; + + // Get the range from the arguments. + if (request.arguments) { + if (request.arguments.fromFrame) { + from_index = request.arguments.fromFrame; + } + if (request.arguments.toFrame) { + to_index = request.arguments.toFrame; + } + if (request.arguments.bottom) { + var tmp_index = total_frames - from_index; + from_index = total_frames - to_index + to_index = tmp_index; + } + if (from_index < 0 || to_index < 0) { + return response.failed('Invalid frame number'); + } + } + + // Adjust the index. + to_index = Math.min(total_frames, to_index); + + if (to_index <= from_index) { + var error = 'Invalid frame range'; + return response.failed(error); + } + + // Create the response body. + var frames = []; + for (var i = from_index; i < to_index; i++) { + frames.push(this.exec_state_.frame(i)); + } + response.body = { + fromFrame: from_index, + toFrame: to_index, + totalFrames: total_frames, + frames: frames + } +}; + + +DebugCommandProcessor.prototype.backtracec = function(cmd, args) { + return this.exec_state_.cframesValue(); +}; + + +DebugCommandProcessor.prototype.frameRequest_ = function(request, response) { + // No frames no source. + if (this.exec_state_.frameCount() == 0) { + return response.failed('No frames'); + } + + // With no arguments just keep the selected frame. + if (request.arguments) { + var index = request.arguments.number; + if (index < 0 || this.exec_state_.frameCount() <= index) { + return response.failed('Invalid frame number'); + } + + this.exec_state_.setSelectedFrame(request.arguments.number); + } + response.body = this.exec_state_.frame(); +}; + + +DebugCommandProcessor.prototype.frameForScopeRequest_ = function(request) { + // Get the frame for which the scope or scopes are requested. With no frameNumber + // argument use the currently selected frame. + if (request.arguments && !IS_UNDEFINED(request.arguments.frameNumber)) { + frame_index = request.arguments.frameNumber; + if (frame_index < 0 || this.exec_state_.frameCount() <= frame_index) { + return response.failed('Invalid frame number'); + } + return this.exec_state_.frame(frame_index); + } else { + return this.exec_state_.frame(); + } +} + + +DebugCommandProcessor.prototype.scopesRequest_ = function(request, response) { + // No frames no scopes. + if (this.exec_state_.frameCount() == 0) { + return response.failed('No scopes'); + } + + // Get the frame for which the scopes are requested. + var frame = this.frameForScopeRequest_(request); + + // Fill all scopes for this frame. + var total_scopes = frame.scopeCount(); + var scopes = []; + for (var i = 0; i < total_scopes; i++) { + scopes.push(frame.scope(i)); + } + response.body = { + fromScope: 0, + toScope: total_scopes, + totalScopes: total_scopes, + scopes: scopes + } +}; + + +DebugCommandProcessor.prototype.scopeRequest_ = function(request, response) { + // No frames no scopes. 
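The frame-range arithmetic in backtraceRequest_ above, restated as a standalone model: fromFrame and toFrame default to the newest kDefaultBacktraceLength frames, and bottom: true counts the range from the oldest frame instead:

    // Standalone model of the frame-range arithmetic in backtraceRequest_.
    function backtraceRange(totalFrames, fromFrame, toFrame, bottom) {
      var from = fromFrame || 0;
      var to = toFrame || 10;            // kDefaultBacktraceLength
      if (bottom) {
        var tmp = totalFrames - from;
        from = totalFrames - to;
        to = tmp;
      }
      to = Math.min(totalFrames, to);
      return { from: from, to: to };
    }

    console.log(backtraceRange(25, 0, 10, false));  // { from: 0, to: 10 }  newest 10 frames
    console.log(backtraceRange(25, 0, 10, true));   // { from: 15, to: 25 } oldest 10 frames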
+ if (this.exec_state_.frameCount() == 0) { + return response.failed('No scopes'); + } + + // Get the frame for which the scope is requested. + var frame = this.frameForScopeRequest_(request); + + // With no scope argument just return top scope. + var scope_index = 0; + if (request.arguments && !IS_UNDEFINED(request.arguments.number)) { + scope_index = %ToNumber(request.arguments.number); + if (scope_index < 0 || frame.scopeCount() <= scope_index) { + return response.failed('Invalid scope number'); + } + } + + response.body = frame.scope(scope_index); +}; + + +DebugCommandProcessor.prototype.evaluateRequest_ = function(request, response) { + if (!request.arguments) { + return response.failed('Missing arguments'); + } + + // Pull out arguments. + var expression = request.arguments.expression; + var frame = request.arguments.frame; + var global = request.arguments.global; + var disable_break = request.arguments.disable_break; + + // The expression argument could be an integer so we convert it to a + // string. + try { + expression = String(expression); + } catch(e) { + return response.failed('Failed to convert expression argument to string'); + } + + // Check for legal arguments. + if (!IS_UNDEFINED(frame) && global) { + return response.failed('Arguments "frame" and "global" are exclusive'); + } + + // Global evaluate. + if (global) { + // Evaluate in the global context. + response.body = + this.exec_state_.evaluateGlobal(expression, Boolean(disable_break)); + return; + } + + // Default value for disable_break is true. + if (IS_UNDEFINED(disable_break)) { + disable_break = true; + } + + // No frames no evaluate in frame. + if (this.exec_state_.frameCount() == 0) { + return response.failed('No frames'); + } + + // Check whether a frame was specified. + if (!IS_UNDEFINED(frame)) { + var frame_number = %ToNumber(frame); + if (frame_number < 0 || frame_number >= this.exec_state_.frameCount()) { + return response.failed('Invalid frame "' + frame + '"'); + } + // Evaluate in the specified frame. + response.body = this.exec_state_.frame(frame_number).evaluate( + expression, Boolean(disable_break)); + return; + } else { + // Evaluate in the selected frame. + response.body = this.exec_state_.frame().evaluate( + expression, Boolean(disable_break)); + return; + } +}; + + +DebugCommandProcessor.prototype.lookupRequest_ = function(request, response) { + if (!request.arguments) { + return response.failed('Missing arguments'); + } + + // Pull out arguments. + var handles = request.arguments.handles; + + // Check for legal arguments. + if (IS_UNDEFINED(handles)) { + return response.failed('Argument "handles" missing'); + } + + // Set 'includeSource' option for script lookup. + if (!IS_UNDEFINED(request.arguments.includeSource)) { + includeSource = %ToBoolean(request.arguments.includeSource); + response.setOption('includeSource', includeSource); + } + + // Lookup handles. + var mirrors = {}; + for (var i = 0; i < handles.length; i++) { + var handle = handles[i]; + var mirror = LookupMirror(handle); + if (!mirror) { + return response.failed('Object #' + handle + '# not found'); + } + mirrors[handle] = mirror; + } + response.body = mirrors; +}; + + +DebugCommandProcessor.prototype.referencesRequest_ = + function(request, response) { + if (!request.arguments) { + return response.failed('Missing arguments'); + } + + // Pull out arguments. + var type = request.arguments.type; + var handle = request.arguments.handle; + + // Check for legal arguments. 
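Two illustrative 'evaluate' requests for the handler above, one evaluated in a specific stack frame and one in the global context; the frame and global arguments are mutually exclusive, and disable_break defaults to true for frame evaluation. Values are made up:

    // Illustrative 'evaluate' requests (expressions and seq numbers are made up).
    var inFrame = JSON.stringify({
      seq: 8, type: 'request', command: 'evaluate',
      arguments: { expression: 'sum + 1', frame: 0, disable_break: true }
    });
    var inGlobal = JSON.stringify({
      seq: 9, type: 'request', command: 'evaluate',
      arguments: { expression: '1 + 2', global: true }
    });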
+ if (IS_UNDEFINED(type)) { + return response.failed('Argument "type" missing'); + } + if (IS_UNDEFINED(handle)) { + return response.failed('Argument "handle" missing'); + } + if (type != 'referencedBy' && type != 'constructedBy') { + return response.failed('Invalid type "' + type + '"'); + } + + // Lookup handle and return objects with references the object. + var mirror = LookupMirror(handle); + if (mirror) { + if (type == 'referencedBy') { + response.body = mirror.referencedBy(); + } else { + response.body = mirror.constructedBy(); + } + } else { + return response.failed('Object #' + handle + '# not found'); + } +}; + + +DebugCommandProcessor.prototype.sourceRequest_ = function(request, response) { + // No frames no source. + if (this.exec_state_.frameCount() == 0) { + return response.failed('No source'); + } + + var from_line; + var to_line; + var frame = this.exec_state_.frame(); + if (request.arguments) { + // Pull out arguments. + from_line = request.arguments.fromLine; + to_line = request.arguments.toLine; + + if (!IS_UNDEFINED(request.arguments.frame)) { + var frame_number = %ToNumber(request.arguments.frame); + if (frame_number < 0 || frame_number >= this.exec_state_.frameCount()) { + return response.failed('Invalid frame "' + frame + '"'); + } + frame = this.exec_state_.frame(frame_number); + } + } + + // Get the script selected. + var script = frame.func().script(); + if (!script) { + return response.failed('No source'); + } + + // Get the source slice and fill it into the response. + var slice = script.sourceSlice(from_line, to_line); + if (!slice) { + return response.failed('Invalid line interval'); + } + response.body = {}; + response.body.source = slice.sourceText(); + response.body.fromLine = slice.from_line; + response.body.toLine = slice.to_line; + response.body.fromPosition = slice.from_position; + response.body.toPosition = slice.to_position; + response.body.totalLines = script.lineCount(); +}; + + +DebugCommandProcessor.prototype.scriptsRequest_ = function(request, response) { + var types = ScriptTypeFlag(Debug.ScriptType.Normal); + var includeSource = false; + var idsToInclude = null; + if (request.arguments) { + // Pull out arguments. + if (!IS_UNDEFINED(request.arguments.types)) { + types = %ToNumber(request.arguments.types); + if (isNaN(types) || types < 0) { + return response.failed('Invalid types "' + request.arguments.types + '"'); + } + } + + if (!IS_UNDEFINED(request.arguments.includeSource)) { + includeSource = %ToBoolean(request.arguments.includeSource); + response.setOption('includeSource', includeSource); + } + + if (IS_ARRAY(request.arguments.ids)) { + idsToInclude = {}; + var ids = request.arguments.ids; + for (var i = 0; i < ids.length; i++) { + idsToInclude[ids[i]] = true; + } + } + } + + // Collect all scripts in the heap. + var scripts = %DebugGetLoadedScripts(); + + response.body = []; + + for (var i = 0; i < scripts.length; i++) { + if (idsToInclude && !idsToInclude[scripts[i].id]) { + continue; + } + if (types & ScriptTypeFlag(scripts[i].type)) { + response.body.push(MakeMirror(scripts[i])); + } + } +}; + + +DebugCommandProcessor.prototype.threadsRequest_ = function(request, response) { + // Get the number of threads. + var total_threads = this.exec_state_.threadCount(); + + // Get information for all threads. 
+ var threads = []; + for (var i = 0; i < total_threads; i++) { + var details = %GetThreadDetails(this.exec_state_.break_id, i); + var thread_info = { current: details[0], + id: details[1] + } + threads.push(thread_info); + } + + // Create the response body. + response.body = { + totalThreads: total_threads, + threads: threads + } +}; + + +DebugCommandProcessor.prototype.suspendRequest_ = function(request, response) { + response.running = false; +}; + + +DebugCommandProcessor.prototype.versionRequest_ = function(request, response) { + response.body = { + V8Version: %GetV8Version() + } +}; + + +DebugCommandProcessor.prototype.profileRequest_ = function(request, response) { + if (!request.arguments) { + return response.failed('Missing arguments'); + } + var modules = parseInt(request.arguments.modules); + if (isNaN(modules)) { + return response.failed('Modules is not an integer'); + } + var tag = parseInt(request.arguments.tag); + if (isNaN(tag)) { + tag = 0; + } + if (request.arguments.command == 'resume') { + %ProfilerResume(modules, tag); + } else if (request.arguments.command == 'pause') { + %ProfilerPause(modules, tag); + } else { + return response.failed('Unknown command'); + } + response.body = {}; +}; + + +DebugCommandProcessor.prototype.changeLiveRequest_ = function(request, response) { + if (!Debug.LiveEditChangeScript) { + return response.failed('LiveEdit feature is not supported'); + } + if (!request.arguments) { + return response.failed('Missing arguments'); + } + var script_id = request.arguments.script_id; + var change_pos = parseInt(request.arguments.change_pos); + var change_len = parseInt(request.arguments.change_len); + var new_string = request.arguments.new_string; + if (!IS_STRING(new_string)) { + response.failed('Argument "new_string" is not a string value'); + return; + } + + var scripts = %DebugGetLoadedScripts(); + + var the_script = null; + for (var i = 0; i < scripts.length; i++) { + if (scripts[i].id == script_id) { + the_script = scripts[i]; + } + } + if (!the_script) { + response.failed('Script not found'); + return; + } + + var change_log = new Array(); + try { + Debug.LiveEditChangeScript(the_script, change_pos, change_len, new_string, + change_log); + } catch (e) { + if (e instanceof Debug.LiveEditChangeScript.Failure) { + // Let's treat it as a "success" so that body with change_log will be + // sent back. "change_log" will have "failure" field set. + change_log.push( { failure: true } ); + } else { + throw e; + } + } + response.body = {change_log: change_log}; +}; + + +// Check whether the previously processed command caused the VM to become +// running. 
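An illustrative 'changelive' request for the LiveEdit handler above; the argument names come from that handler, while the script id, positions and replacement text are made up:

    // Illustrative 'changelive' request (values are made up).
    var changeLiveRequest = JSON.stringify({
      seq: 11, type: 'request', command: 'changelive',
      arguments: { script_id: 42, change_pos: 120, change_len: 15,
                   new_string: 'var sum = a + b + c;' }
    });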
+DebugCommandProcessor.prototype.isRunning = function() { + return this.running_; +} + + +DebugCommandProcessor.prototype.systemBreak = function(cmd, args) { + return %SystemBreak(); +}; + + +function NumberToHex8Str(n) { + var r = ""; + for (var i = 0; i < 8; ++i) { + var c = hexCharArray[n & 0x0F]; // hexCharArray is defined in uri.js + r = c + r; + n = n >>> 4; + } + return r; +}; + +DebugCommandProcessor.prototype.formatCFrames = function(cframes_value) { + var result = ""; + if (cframes_value == null || cframes_value.length == 0) { + result += "(stack empty)"; + } else { + for (var i = 0; i < cframes_value.length; ++i) { + if (i != 0) result += "\n"; + result += this.formatCFrame(cframes_value[i]); + } + } + return result; +}; + + +DebugCommandProcessor.prototype.formatCFrame = function(cframe_value) { + var result = ""; + result += "0x" + NumberToHex8Str(cframe_value.address); + if (!IS_UNDEFINED(cframe_value.text)) { + result += " " + cframe_value.text; + } + return result; +} + + +/** + * Convert an Object to its debugger protocol representation. The representation + * may be serilized to a JSON object using JSON.stringify(). + * This implementation simply runs through all string property names, converts + * each property value to a protocol value and adds the property to the result + * object. For type "object" the function will be called recursively. Note that + * circular structures will cause infinite recursion. + * @param {Object} object The object to format as protocol object. + * @param {MirrorSerializer} mirror_serializer The serializer to use if any + * mirror objects are encountered. + * @return {Object} Protocol object value. + */ +function ObjectToProtocolObject_(object, mirror_serializer) { + var content = {}; + for (var key in object) { + // Only consider string keys. + if (typeof key == 'string') { + // Format the value based on its type. + var property_value_json = ValueToProtocolValue_(object[key], + mirror_serializer); + // Add the property if relevant. + if (!IS_UNDEFINED(property_value_json)) { + content[key] = property_value_json; + } + } + } + + return content; +} + + +/** + * Convert an array to its debugger protocol representation. It will convert + * each array element to a protocol value. + * @param {Array} array The array to format as protocol array. + * @param {MirrorSerializer} mirror_serializer The serializer to use if any + * mirror objects are encountered. + * @return {Array} Protocol array value. + */ +function ArrayToProtocolArray_(array, mirror_serializer) { + var json = []; + for (var i = 0; i < array.length; i++) { + json.push(ValueToProtocolValue_(array[i], mirror_serializer)); + } + return json; +} + + +/** + * Convert a value to its debugger protocol representation. + * @param {*} value The value to format as protocol value. + * @param {MirrorSerializer} mirror_serializer The serializer to use if any + * mirror objects are encountered. + * @return {*} Protocol value. + */ +function ValueToProtocolValue_(value, mirror_serializer) { + // Format the value based on its type. 
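NumberToHex8Str above depends on hexCharArray from uri.js; for reference, a self-contained equivalent that formats a 32-bit value as eight hex digits:

    // Self-contained equivalent of NumberToHex8Str (own hex table instead of
    // the hexCharArray defined in uri.js).
    function numberToHex8Str(n) {
      var hex = '0123456789ABCDEF';
      var r = '';
      for (var i = 0; i < 8; ++i) {
        r = hex.charAt(n & 0x0F) + r;
        n = n >>> 4;
      }
      return r;
    }

    console.log(numberToHex8Str(0xBEEF));  // '0000BEEF'
    console.log(numberToHex8Str(-1));      // 'FFFFFFFF'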
+ var json; + switch (typeof value) { + case 'object': + if (value instanceof Mirror) { + json = mirror_serializer.serializeValue(value); + } else if (IS_ARRAY(value)){ + json = ArrayToProtocolArray_(value, mirror_serializer); + } else { + json = ObjectToProtocolObject_(value, mirror_serializer); + } + break; + + case 'boolean': + case 'string': + case 'number': + json = value; + break + + default: + json = null; + } + return json; +} diff --git a/deps/v8/src/debug.cc b/deps/v8/src/debug.cc index 2a7a9c8290..4dce4cff05 100644 --- a/deps/v8/src/debug.cc +++ b/deps/v8/src/debug.cc @@ -685,29 +685,26 @@ bool Debug::CompileDebuggerScript(int index) { // Compile the script. bool allow_natives_syntax = FLAG_allow_natives_syntax; FLAG_allow_natives_syntax = true; - Handle<JSFunction> boilerplate; - boilerplate = Compiler::Compile(source_code, - script_name, - 0, - 0, - NULL, - NULL, - Handle<String>::null(), - NATIVES_CODE); + Handle<SharedFunctionInfo> function_info; + function_info = Compiler::Compile(source_code, + script_name, + 0, 0, NULL, NULL, + Handle<String>::null(), + NATIVES_CODE); FLAG_allow_natives_syntax = allow_natives_syntax; // Silently ignore stack overflows during compilation. - if (boilerplate.is_null()) { + if (function_info.is_null()) { ASSERT(Top::has_pending_exception()); Top::clear_pending_exception(); return false; } - // Execute the boilerplate function in the debugger context. + // Execute the shared function in the debugger context. Handle<Context> context = Top::global_context(); bool caught_exception = false; Handle<JSFunction> function = - Factory::NewFunctionFromBoilerplate(boilerplate, context); + Factory::NewFunctionFromSharedFunctionInfo(function_info, context); Handle<Object> result = Execution::TryCall(function, Handle<Object>(context->global()), 0, NULL, &caught_exception); @@ -1685,7 +1682,7 @@ void Debug::CreateScriptCache() { // Perform two GCs to get rid of all unreferenced scripts. The first GC gets // rid of all the cached script wrappers and the second gets rid of the - // scripts which is no longer referenced. + // scripts which are no longer referenced. Heap::CollectAllGarbage(false); Heap::CollectAllGarbage(false); @@ -1999,7 +1996,7 @@ void Debugger::OnAfterCompile(Handle<Script> script, // If debugging there might be script break points registered for this // script. Make sure that these break points are set. - // Get the function UpdateScriptBreakPoints (defined in debug-delay.js). + // Get the function UpdateScriptBreakPoints (defined in debug-debugger.js). Handle<Object> update_script_break_points = Handle<Object>(Debug::debug_context()->global()->GetProperty( *Factory::LookupAsciiSymbol("UpdateScriptBreakPoints"))); @@ -2042,31 +2039,6 @@ void Debugger::OnAfterCompile(Handle<Script> script, } -void Debugger::OnNewFunction(Handle<JSFunction> function) { - return; - HandleScope scope; - - // Bail out based on state or if there is no listener for this event - if (Debug::InDebugger()) return; - if (compiling_natives()) return; - if (!Debugger::EventActive(v8::NewFunction)) return; - - // Enter the debugger. - EnterDebugger debugger; - if (debugger.FailedToEnter()) return; - - // Create the event object. - bool caught_exception = false; - Handle<Object> event_data = MakeNewFunctionEvent(function, &caught_exception); - // Bail out and don't call debugger if exception. - if (caught_exception) { - return; - } - // Process debug event. 
- ProcessDebugEvent(v8::NewFunction, Handle<JSObject>::cast(event_data), true); -} - - void Debugger::OnScriptCollected(int id) { HandleScope scope; @@ -2476,7 +2448,7 @@ Handle<Object> Debugger::Call(Handle<JSFunction> fun, // Enter the debugger. EnterDebugger debugger; - if (debugger.FailedToEnter() || !debugger.HasJavaScriptFrames()) { + if (debugger.FailedToEnter()) { return Factory::undefined_value(); } @@ -2489,8 +2461,12 @@ Handle<Object> Debugger::Call(Handle<JSFunction> fun, static const int kArgc = 2; Object** argv[kArgc] = { exec_state.location(), data.location() }; - Handle<Object> result = Execution::Call(fun, Factory::undefined_value(), - kArgc, argv, pending_exception); + Handle<Object> result = Execution::Call( + fun, + Handle<Object>(Debug::debug_context_->global_proxy()), + kArgc, + argv, + pending_exception); return result; } diff --git a/deps/v8/src/factory.cc b/deps/v8/src/factory.cc index 45124e6451..dbcb4ec8d3 100644 --- a/deps/v8/src/factory.cc +++ b/deps/v8/src/factory.cc @@ -282,31 +282,26 @@ Handle<FixedArray> Factory::CopyFixedArray(Handle<FixedArray> array) { } -Handle<JSFunction> Factory::BaseNewFunctionFromBoilerplate( - Handle<JSFunction> boilerplate, +Handle<JSFunction> Factory::BaseNewFunctionFromSharedFunctionInfo( + Handle<SharedFunctionInfo> function_info, Handle<Map> function_map, PretenureFlag pretenure) { - ASSERT(boilerplate->IsBoilerplate()); - ASSERT(!boilerplate->has_initial_map()); - ASSERT(!boilerplate->has_prototype()); - ASSERT(boilerplate->properties() == Heap::empty_fixed_array()); - ASSERT(boilerplate->elements() == Heap::empty_fixed_array()); CALL_HEAP_FUNCTION(Heap::AllocateFunction(*function_map, - boilerplate->shared(), + *function_info, Heap::the_hole_value(), pretenure), JSFunction); } -Handle<JSFunction> Factory::NewFunctionFromBoilerplate( - Handle<JSFunction> boilerplate, +Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo( + Handle<SharedFunctionInfo> function_info, Handle<Context> context, PretenureFlag pretenure) { - Handle<JSFunction> result = BaseNewFunctionFromBoilerplate( - boilerplate, Top::function_map(), pretenure); + Handle<JSFunction> result = BaseNewFunctionFromSharedFunctionInfo( + function_info, Top::function_map(), pretenure); result->set_context(*context); - int number_of_literals = boilerplate->NumberOfLiterals(); + int number_of_literals = function_info->num_literals(); Handle<FixedArray> literals = Factory::NewFixedArray(number_of_literals, pretenure); if (number_of_literals > 0) { @@ -490,36 +485,6 @@ Handle<JSFunction> Factory::NewFunction(Handle<String> name, } -Handle<JSFunction> Factory::NewFunctionBoilerplate(Handle<String> name, - int number_of_literals, - Handle<Code> code) { - Handle<JSFunction> function = NewFunctionBoilerplate(name); - function->set_code(*code); - int literals_array_size = number_of_literals; - // If the function contains object, regexp or array literals, - // allocate extra space for a literals array prefix containing the - // object, regexp and array constructor functions. 
- if (number_of_literals > 0) { - literals_array_size += JSFunction::kLiteralsPrefixSize; - } - Handle<FixedArray> literals = - Factory::NewFixedArray(literals_array_size, TENURED); - function->set_literals(*literals); - ASSERT(!function->has_initial_map()); - ASSERT(!function->has_prototype()); - return function; -} - - -Handle<JSFunction> Factory::NewFunctionBoilerplate(Handle<String> name) { - Handle<SharedFunctionInfo> shared = NewSharedFunctionInfo(name); - CALL_HEAP_FUNCTION(Heap::AllocateFunction(Heap::boilerplate_function_map(), - *shared, - Heap::the_hole_value()), - JSFunction); -} - - Handle<JSFunction> Factory::NewFunctionWithPrototype(Handle<String> name, InstanceType type, int instance_size, @@ -686,6 +651,22 @@ Handle<JSArray> Factory::NewJSArrayWithElements(Handle<FixedArray> elements, } +Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo( + Handle<String> name, int number_of_literals, Handle<Code> code) { + Handle<SharedFunctionInfo> shared = NewSharedFunctionInfo(name); + shared->set_code(*code); + int literals_array_size = number_of_literals; + // If the function contains object, regexp or array literals, + // allocate extra space for a literals array prefix containing the + // context. + if (number_of_literals > 0) { + literals_array_size += JSFunction::kLiteralsPrefixSize; + } + shared->set_num_literals(literals_array_size); + return shared; +} + + Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(Handle<String> name) { CALL_HEAP_FUNCTION(Heap::AllocateSharedFunctionInfo(*name), SharedFunctionInfo); diff --git a/deps/v8/src/factory.h b/deps/v8/src/factory.h index cc96e11a89..4307289dbd 100644 --- a/deps/v8/src/factory.h +++ b/deps/v8/src/factory.h @@ -218,8 +218,13 @@ class Factory : public AllStatic { static Handle<JSFunction> NewFunction(Handle<Object> super, bool is_global); - static Handle<JSFunction> NewFunctionFromBoilerplate( - Handle<JSFunction> boilerplate, + static Handle<JSFunction> BaseNewFunctionFromSharedFunctionInfo( + Handle<SharedFunctionInfo> function_info, + Handle<Map> function_map, + PretenureFlag pretenure); + + static Handle<JSFunction> NewFunctionFromSharedFunctionInfo( + Handle<SharedFunctionInfo> function_info, Handle<Context> context, PretenureFlag pretenure = TENURED); @@ -273,12 +278,6 @@ class Factory : public AllStatic { Handle<Code> code, bool force_initial_map); - static Handle<JSFunction> NewFunctionBoilerplate(Handle<String> name, - int number_of_literals, - Handle<Code> code); - - static Handle<JSFunction> NewFunctionBoilerplate(Handle<String> name); - static Handle<JSFunction> NewFunction(Handle<Map> function_map, Handle<SharedFunctionInfo> shared, Handle<Object> prototype); @@ -337,6 +336,8 @@ class Factory : public AllStatic { return Handle<String>(&Heap::hidden_symbol_); } + static Handle<SharedFunctionInfo> NewSharedFunctionInfo( + Handle<String> name, int number_of_literals, Handle<Code> code); static Handle<SharedFunctionInfo> NewSharedFunctionInfo(Handle<String> name); static Handle<NumberDictionary> DictionaryAtNumberPut( @@ -377,11 +378,6 @@ class Factory : public AllStatic { Handle<DescriptorArray> array, Handle<Object> descriptors); - static Handle<JSFunction> BaseNewFunctionFromBoilerplate( - Handle<JSFunction> boilerplate, - Handle<Map> function_map, - PretenureFlag pretenure); - // Create a new map cache. 
static Handle<MapCache> NewMapCache(int at_least_space_for); diff --git a/deps/v8/src/fast-codegen.cc b/deps/v8/src/fast-codegen.cc index 5d0b9c1dbb..832cf7465f 100644 --- a/deps/v8/src/fast-codegen.cc +++ b/deps/v8/src/fast-codegen.cc @@ -195,9 +195,9 @@ void FastCodeGenSyntaxChecker::VisitFunctionLiteral(FunctionLiteral* expr) { } -void FastCodeGenSyntaxChecker::VisitFunctionBoilerplateLiteral( - FunctionBoilerplateLiteral* expr) { - BAILOUT("FunctionBoilerplateLiteral"); +void FastCodeGenSyntaxChecker::VisitSharedFunctionInfoLiteral( + SharedFunctionInfoLiteral* expr) { + BAILOUT("SharedFunctionInfoLiteral"); } @@ -560,8 +560,8 @@ void FastCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) { } -void FastCodeGenerator::VisitFunctionBoilerplateLiteral( - FunctionBoilerplateLiteral* expr) { +void FastCodeGenerator::VisitSharedFunctionInfoLiteral( + SharedFunctionInfoLiteral* expr) { UNREACHABLE(); } diff --git a/deps/v8/src/flag-definitions.h b/deps/v8/src/flag-definitions.h index 021362dea5..b32ee9fb87 100644 --- a/deps/v8/src/flag-definitions.h +++ b/deps/v8/src/flag-definitions.h @@ -122,9 +122,9 @@ DEFINE_bool(enable_armv7, true, // bootstrapper.cc DEFINE_string(expose_natives_as, NULL, "expose natives in global object") DEFINE_string(expose_debug_as, NULL, "expose debug in global object") -DEFINE_string(natives_file, NULL, "alternative natives file") DEFINE_bool(expose_gc, false, "expose gc extension") DEFINE_int(stack_trace_limit, 10, "number of stack frames to capture") +DEFINE_bool(disable_native_files, false, "disable builtin natives files") // builtins-ia32.cc DEFINE_bool(inline_new, true, "use fast inline allocation") @@ -160,6 +160,9 @@ DEFINE_bool(use_flow_graph, false, "perform flow-graph based optimizations") // compilation-cache.cc DEFINE_bool(compilation_cache, true, "enable compilation cache") +// data-flow.cc +DEFINE_bool(loop_peeling, false, "Peel off the first iteration of loops.") + // debug.cc DEFINE_bool(remote_debugging, false, "enable remote debugging") DEFINE_bool(trace_debug_json, false, "trace debugging JSON request/response") @@ -202,6 +205,9 @@ DEFINE_bool(cleanup_ics_at_gc, true, "Flush inline caches prior to mark compact collection.") DEFINE_bool(cleanup_caches_in_maps_at_gc, true, "Flush code caches in maps during mark compact cycle.") +DEFINE_int(random_seed, 0, + "Default seed for initializing random generator " + "(0, the default, means to use system random).") DEFINE_bool(canonicalize_object_literal_maps, true, "Canonicalize maps for object literals.") diff --git a/deps/v8/src/frame-element.h b/deps/v8/src/frame-element.h index 2bbc2884e6..83db5c3342 100644 --- a/deps/v8/src/frame-element.h +++ b/deps/v8/src/frame-element.h @@ -28,7 +28,7 @@ #ifndef V8_FRAME_ELEMENT_H_ #define V8_FRAME_ELEMENT_H_ -#include "number-info.h" +#include "type-info-inl.h" #include "macro-assembler.h" #include "zone.h" @@ -54,25 +54,19 @@ class FrameElement BASE_EMBEDDED { SYNCED }; - inline NumberInfo number_info() { - // Copied elements do not have number info. Instead + inline TypeInfo type_info() { + // Copied elements do not have type info. Instead // we have to inspect their backing element in the frame. 
ASSERT(!is_copy()); - if (!is_constant()) { - return NumberInfo::FromInt(NumberInfoField::decode(value_)); - } - Handle<Object> value = handle(); - if (value->IsSmi()) return NumberInfo::Smi(); - if (value->IsHeapNumber()) return NumberInfo::HeapNumber(); - return NumberInfo::Unknown(); + return TypeInfo::FromInt(TypeInfoField::decode(value_)); } - inline void set_number_info(NumberInfo info) { - // Copied elements do not have number info. Instead + inline void set_type_info(TypeInfo info) { + // Copied elements do not have type info. Instead // we have to inspect their backing element in the frame. ASSERT(!is_copy()); - value_ = value_ & ~NumberInfoField::mask(); - value_ = value_ | NumberInfoField::encode(info.ToInt()); + value_ = value_ & ~TypeInfoField::mask(); + value_ = value_ | TypeInfoField::encode(info.ToInt()); } // The default constructor creates an invalid frame element. @@ -80,7 +74,7 @@ class FrameElement BASE_EMBEDDED { value_ = TypeField::encode(INVALID) | CopiedField::encode(false) | SyncedField::encode(false) - | NumberInfoField::encode(NumberInfo::Uninitialized().ToInt()) + | TypeInfoField::encode(TypeInfo::Uninitialized().ToInt()) | DataField::encode(0); } @@ -91,7 +85,7 @@ class FrameElement BASE_EMBEDDED { } // Factory function to construct an in-memory frame element. - static FrameElement MemoryElement(NumberInfo info) { + static FrameElement MemoryElement(TypeInfo info) { FrameElement result(MEMORY, no_reg, SYNCED, info); return result; } @@ -99,7 +93,7 @@ class FrameElement BASE_EMBEDDED { // Factory function to construct an in-register frame element. static FrameElement RegisterElement(Register reg, SyncFlag is_synced, - NumberInfo info) { + TypeInfo info) { return FrameElement(REGISTER, reg, is_synced, info); } @@ -107,7 +101,8 @@ class FrameElement BASE_EMBEDDED { // compile time. static FrameElement ConstantElement(Handle<Object> value, SyncFlag is_synced) { - FrameElement result(value, is_synced); + TypeInfo info = TypeInfo::TypeFromValue(value); + FrameElement result(value, is_synced, info); return result; } @@ -223,20 +218,20 @@ class FrameElement BASE_EMBEDDED { FrameElement(Type type, Register reg, SyncFlag is_synced, - NumberInfo info) { + TypeInfo info) { value_ = TypeField::encode(type) | CopiedField::encode(false) | SyncedField::encode(is_synced != NOT_SYNCED) - | NumberInfoField::encode(info.ToInt()) + | TypeInfoField::encode(info.ToInt()) | DataField::encode(reg.code_ > 0 ? reg.code_ : 0); } // Used to construct constant elements. - FrameElement(Handle<Object> value, SyncFlag is_synced) { + FrameElement(Handle<Object> value, SyncFlag is_synced, TypeInfo info) { value_ = TypeField::encode(CONSTANT) | CopiedField::encode(false) | SyncedField::encode(is_synced != NOT_SYNCED) - | NumberInfoField::encode(NumberInfo::Uninitialized().ToInt()) + | TypeInfoField::encode(info.ToInt()) | DataField::encode(ConstantList()->length()); ConstantList()->Add(value); } @@ -262,12 +257,13 @@ class FrameElement BASE_EMBEDDED { // Encode type, copied, synced and data in one 32 bit integer. uint32_t value_; + // Declare BitFields with template parameters <type, start, size>. 
class TypeField: public BitField<Type, 0, 3> {}; class CopiedField: public BitField<bool, 3, 1> {}; class SyncedField: public BitField<bool, 4, 1> {}; class UntaggedInt32Field: public BitField<bool, 5, 1> {}; - class NumberInfoField: public BitField<int, 6, 4> {}; - class DataField: public BitField<uint32_t, 10, 32 - 10> {}; + class TypeInfoField: public BitField<int, 6, 6> {}; + class DataField: public BitField<uint32_t, 12, 32 - 12> {}; friend class VirtualFrame; }; diff --git a/deps/v8/src/frames.cc b/deps/v8/src/frames.cc index 24550a2ed0..5e81a54d8d 100644 --- a/deps/v8/src/frames.cc +++ b/deps/v8/src/frames.cc @@ -346,6 +346,7 @@ void StackFrame::UncookFramesForThread(ThreadLocalTop* thread) { void StackFrame::Cook() { Code* code = this->code(); + ASSERT(code->IsCode()); for (StackHandlerIterator it(this, top_handler()); !it.done(); it.Advance()) { it.handler()->Cook(code); } @@ -356,6 +357,7 @@ void StackFrame::Cook() { void StackFrame::Uncook() { Code* code = this->code(); + ASSERT(code->IsCode()); for (StackHandlerIterator it(this, top_handler()); !it.done(); it.Advance()) { it.handler()->Uncook(code); } diff --git a/deps/v8/src/full-codegen.cc b/deps/v8/src/full-codegen.cc index 6e9a3ff00e..2d6deb3247 100644 --- a/deps/v8/src/full-codegen.cc +++ b/deps/v8/src/full-codegen.cc @@ -212,9 +212,9 @@ void FullCodeGenSyntaxChecker::VisitFunctionLiteral(FunctionLiteral* expr) { } -void FullCodeGenSyntaxChecker::VisitFunctionBoilerplateLiteral( - FunctionBoilerplateLiteral* expr) { - BAILOUT("FunctionBoilerplateLiteral"); +void FullCodeGenSyntaxChecker::VisitSharedFunctionInfoLiteral( + SharedFunctionInfoLiteral* expr) { + BAILOUT("SharedFunctionInfoLiteral"); } @@ -524,8 +524,8 @@ void FullCodeGenerator::VisitDeclarations( array->set_undefined(j++); } } else { - Handle<JSFunction> function = - Compiler::BuildBoilerplate(decl->fun(), script(), this); + Handle<SharedFunctionInfo> function = + Compiler::BuildFunctionInfo(decl->fun(), script(), this); // Check for stack-overflow exception. if (HasStackOverflow()) return; array->set(j++, *function); @@ -998,8 +998,8 @@ void FullCodeGenerator::VisitDebuggerStatement(DebuggerStatement* stmt) { } -void FullCodeGenerator::VisitFunctionBoilerplateLiteral( - FunctionBoilerplateLiteral* expr) { +void FullCodeGenerator::VisitSharedFunctionInfoLiteral( + SharedFunctionInfoLiteral* expr) { UNREACHABLE(); } diff --git a/deps/v8/src/globals.h b/deps/v8/src/globals.h index cb7f27ee7d..b85e19d517 100644 --- a/deps/v8/src/globals.h +++ b/deps/v8/src/globals.h @@ -174,6 +174,15 @@ const int kBitsPerByteLog2 = 3; const int kBitsPerPointer = kPointerSize * kBitsPerByte; const int kBitsPerInt = kIntSize * kBitsPerByte; +// IEEE 754 single precision floating point number bit layout. +const uint32_t kBinary32SignMask = 0x80000000u; +const uint32_t kBinary32ExponentMask = 0x7f800000u; +const uint32_t kBinary32MantissaMask = 0x007fffffu; +const int kBinary32ExponentBias = 127; +const int kBinary32MaxExponent = 0xFE; +const int kBinary32MinExponent = 0x01; +const int kBinary32MantissaBits = 23; +const int kBinary32ExponentShift = 23; // Zap-value: The value used for zapping dead objects. // Should be a recognizable hex value tagged as a heap object pointer. @@ -195,6 +204,10 @@ const Address kFromSpaceZapValue = reinterpret_cast<Address>(0xbeefdad); // gives 8K bytes per page. const int kPageSizeBits = 13; +// On Intel architecture, cache line size is 64 bytes. 
+// On ARM it may be less (32 bytes), but as far as this constant is +// used for aligning data, it doesn't hurt to align on a greater value. +const int kProcessorCacheLineSize = 64; // Constants relevant to double precision floating point numbers. @@ -321,7 +334,6 @@ enum Executability { NOT_EXECUTABLE, EXECUTABLE }; enum VisitMode { VISIT_ALL, VISIT_ALL_IN_SCAVENGE, VISIT_ONLY_STRONG }; - // Flag indicating whether code is built into the VM (one of the natives files). enum NativesFlag { NOT_NATIVES_CODE, NATIVES_CODE }; @@ -404,7 +416,7 @@ enum CallFunctionFlags { // Type of properties. // Order of properties is significant. // Must fit in the BitField PropertyDetails::TypeField. -// A copy of this is in mirror-delay.js. +// A copy of this is in mirror-debugger.js. enum PropertyType { NORMAL = 0, // only in slow mode FIELD = 1, // only in fast mode diff --git a/deps/v8/src/handles.cc b/deps/v8/src/handles.cc index 4ebeaa79c0..f8a679b5a5 100644 --- a/deps/v8/src/handles.cc +++ b/deps/v8/src/handles.cc @@ -174,13 +174,6 @@ void SetExpectedNofPropertiesFromEstimate(Handle<SharedFunctionInfo> shared, } -void SetExpectedNofPropertiesFromEstimate(Handle<JSFunction> func, - int estimate) { - SetExpectedNofProperties( - func, ExpectedNofPropertiesFromEstimate(estimate)); -} - - void NormalizeProperties(Handle<JSObject> object, PropertyNormalizationMode mode, int expected_additional_properties) { @@ -243,6 +236,15 @@ Handle<Object> ForceSetProperty(Handle<JSObject> object, } +Handle<Object> SetNormalizedProperty(Handle<JSObject> object, + Handle<String> key, + Handle<Object> value, + PropertyDetails details) { + CALL_HEAP_FUNCTION(object->SetNormalizedProperty(*key, *value, details), + Object); +} + + Handle<Object> ForceDeleteProperty(Handle<JSObject> object, Handle<Object> key) { CALL_HEAP_FUNCTION(Runtime::ForceDeleteObjectProperty(object, key), Object); @@ -784,88 +786,4 @@ OptimizedObjectForAddingMultipleProperties:: } } - -void LoadLazy(Handle<JSObject> obj, bool* pending_exception) { - HandleScope scope; - Handle<FixedArray> info(FixedArray::cast(obj->map()->constructor())); - int index = Smi::cast(info->get(0))->value(); - ASSERT(index >= 0); - Handle<Context> compile_context(Context::cast(info->get(1))); - Handle<Context> function_context(Context::cast(info->get(2))); - Handle<Object> receiver(compile_context->global()->builtins()); - - Vector<const char> name = Natives::GetScriptName(index); - - Handle<JSFunction> boilerplate; - - if (!Bootstrapper::NativesCacheLookup(name, &boilerplate)) { - Handle<String> source_code = Bootstrapper::NativesSourceLookup(index); - Handle<String> script_name = Factory::NewStringFromAscii(name); - bool allow_natives_syntax = FLAG_allow_natives_syntax; - FLAG_allow_natives_syntax = true; - boilerplate = Compiler::Compile(source_code, script_name, 0, 0, NULL, NULL, - Handle<String>::null(), NATIVES_CODE); - FLAG_allow_natives_syntax = allow_natives_syntax; - // If the compilation failed (possibly due to stack overflows), we - // should never enter the result in the natives cache. Instead we - // return from the function without marking the function as having - // been lazily loaded. - if (boilerplate.is_null()) { - *pending_exception = true; - return; - } - Bootstrapper::NativesCacheAdd(name, boilerplate); - } - - // We shouldn't get here if compiling the script failed. - ASSERT(!boilerplate.is_null()); - -#ifdef ENABLE_DEBUGGER_SUPPORT - // When the debugger running in its own context touches lazy loaded - // functions loading can be triggered.
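As an aside on the kBinary32* constants introduced in globals.h above: they describe the standard IEEE 754 single-precision layout (1 sign bit, 8 exponent bits with a bias of 127, 23 mantissa bits). The routine below is purely illustrative and not code from this patch; it only shows how the masks and shift defined above carve up a raw 32-bit pattern, assuming those constants are in scope.

// Illustrative only: split a raw binary32 bit pattern into sign, biased
// exponent and mantissa using the kBinary32* masks defined above.
static int Binary32UnbiasedExponent(uint32_t bits) {
  bool negative = (bits & kBinary32SignMask) != 0;
  int biased = static_cast<int>((bits & kBinary32ExponentMask) >>
                                kBinary32ExponentShift);
  uint32_t mantissa = bits & kBinary32MantissaMask;
  // A normalized value (kBinary32MinExponent <= biased <= kBinary32MaxExponent)
  // represents (-1)^sign * (1 + mantissa / 2^kBinary32MantissaBits) *
  // 2^(biased - kBinary32ExponentBias).
  (void) negative;
  (void) mantissa;
  return biased - kBinary32ExponentBias;
}
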
In that case ensure that the - // execution of the boilerplate is in the correct context. - SaveContext save; - if (!Debug::debug_context().is_null() && - Top::context() == *Debug::debug_context()) { - Top::set_context(*compile_context); - } -#endif - - // Reset the lazy load data before running the script to make sure - // not to get recursive lazy loading. - obj->map()->set_needs_loading(false); - obj->map()->set_constructor(info->get(3)); - - // Run the script. - Handle<JSFunction> script_fun( - Factory::NewFunctionFromBoilerplate(boilerplate, function_context)); - Execution::Call(script_fun, receiver, 0, NULL, pending_exception); - - // If lazy loading failed, restore the unloaded state of obj. - if (*pending_exception) { - obj->map()->set_needs_loading(true); - obj->map()->set_constructor(*info); - } -} - - -void SetupLazy(Handle<JSObject> obj, - int index, - Handle<Context> compile_context, - Handle<Context> function_context) { - Handle<FixedArray> arr = Factory::NewFixedArray(4); - arr->set(0, Smi::FromInt(index)); - arr->set(1, *compile_context); // Compile in this context - arr->set(2, *function_context); // Set function context to this - arr->set(3, obj->map()->constructor()); // Remember the constructor - Handle<Map> old_map(obj->map()); - Handle<Map> new_map = Factory::CopyMapDropTransitions(old_map); - obj->set_map(*new_map); - new_map->set_needs_loading(true); - // Store the lazy loading info in the constructor field. We'll - // reestablish the constructor from the fixed array after loading. - new_map->set_constructor(*arr); - ASSERT(!obj->IsLoaded()); -} - } } // namespace v8::internal diff --git a/deps/v8/src/handles.h b/deps/v8/src/handles.h index f241da2036..54c3b45f75 100644 --- a/deps/v8/src/handles.h +++ b/deps/v8/src/handles.h @@ -210,6 +210,11 @@ Handle<Object> ForceSetProperty(Handle<JSObject> object, Handle<Object> value, PropertyAttributes attributes); +Handle<Object> SetNormalizedProperty(Handle<JSObject> object, + Handle<String> key, + Handle<Object> value, + PropertyDetails details); + Handle<Object> ForceDeleteProperty(Handle<JSObject> object, Handle<Object> key); @@ -307,8 +312,6 @@ void SetPrototypeProperty(Handle<JSFunction> func, Handle<JSObject> value); // Sets the expected number of properties based on estimate from compiler. void SetExpectedNofPropertiesFromEstimate(Handle<SharedFunctionInfo> shared, int estimate); -void SetExpectedNofPropertiesFromEstimate(Handle<JSFunction> func, - int estimate); Handle<JSGlobalProxy> ReinitializeJSGlobalProxy( @@ -340,13 +343,6 @@ bool CompileLazyInLoop(Handle<JSFunction> function, // Returns the lazy compilation stub for argc arguments. Handle<Code> ComputeLazyCompile(int argc); -// These deal with lazily loaded properties. 
-void SetupLazy(Handle<JSObject> obj, - int index, - Handle<Context> compile_context, - Handle<Context> function_context); -void LoadLazy(Handle<JSObject> obj, bool* pending_exception); - class NoHandleAllocation BASE_EMBEDDED { public: #ifndef DEBUG diff --git a/deps/v8/src/heap-inl.h b/deps/v8/src/heap-inl.h index c4676fd74c..892c2892ba 100644 --- a/deps/v8/src/heap-inl.h +++ b/deps/v8/src/heap-inl.h @@ -283,11 +283,11 @@ Object* Heap::PrepareForCompare(String* str) { const int length = str->length(); Object* obj = str->TryFlatten(); if (length <= kMaxAlwaysFlattenLength || - unflattended_strings_length_ >= kFlattenLongThreshold) { + unflattened_strings_length_ >= kFlattenLongThreshold) { return obj; } if (obj->IsFailure()) { - unflattended_strings_length_ += length; + unflattened_strings_length_ += length; } return str; } diff --git a/deps/v8/src/heap.cc b/deps/v8/src/heap.cc index b477b45e07..a9754ce057 100644 --- a/deps/v8/src/heap.cc +++ b/deps/v8/src/heap.cc @@ -98,6 +98,9 @@ size_t Heap::code_range_size_ = 0; // set up by ConfigureHeap otherwise. int Heap::reserved_semispace_size_ = Heap::max_semispace_size_; +List<Heap::GCPrologueCallbackPair> Heap::gc_prologue_callbacks_; +List<Heap::GCEpilogueCallbackPair> Heap::gc_epilogue_callbacks_; + GCCallback Heap::global_gc_prologue_callback_ = NULL; GCCallback Heap::global_gc_epilogue_callback_ = NULL; @@ -114,7 +117,7 @@ Heap::HeapState Heap::gc_state_ = NOT_IN_GC; int Heap::mc_count_ = 0; int Heap::gc_count_ = 0; -int Heap::unflattended_strings_length_ = 0; +int Heap::unflattened_strings_length_ = 0; int Heap::always_allocate_scope_depth_ = 0; int Heap::linear_allocation_scope_depth_ = 0; @@ -304,7 +307,7 @@ void Heap::ReportStatisticsAfterGC() { void Heap::GarbageCollectionPrologue() { TranscendentalCache::Clear(); gc_count_++; - unflattended_strings_length_ = 0; + unflattened_strings_length_ = 0; #ifdef DEBUG ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC); allow_allocation(false); @@ -547,6 +550,16 @@ void Heap::PerformGarbageCollection(AllocationSpace space, GCTracer::ExternalScope scope(tracer); global_gc_prologue_callback_(); } + + GCType gc_type = + collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge; + + for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) { + if (gc_type & gc_prologue_callbacks_[i].gc_type) { + gc_prologue_callbacks_[i].callback(gc_type, kNoGCCallbackFlags); + } + } + EnsureFromSpaceIsCommitted(); // Perform mark-sweep with optional compaction. @@ -585,6 +598,15 @@ void Heap::PerformGarbageCollection(AllocationSpace space, amount_of_external_allocated_memory_; } + GCCallbackFlags callback_flags = tracer->is_compacting() + ? kGCCallbackFlagCompacted + : kNoGCCallbackFlags; + for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) { + if (gc_type & gc_epilogue_callbacks_[i].gc_type) { + gc_epilogue_callbacks_[i].callback(gc_type, callback_flags); + } + } + if (collector == MARK_COMPACTOR && global_gc_epilogue_callback_) { ASSERT(!allocation_allowed_); GCTracer::ExternalScope scope(tracer); @@ -1269,7 +1291,7 @@ bool Heap::CreateInitialMaps() { if (obj->IsFailure()) return false; set_oddball_map(Map::cast(obj)); - // Allocate the empty array + // Allocate the empty array. 
obj = AllocateEmptyFixedArray(); if (obj->IsFailure()) return false; set_empty_fixed_array(FixedArray::cast(obj)); @@ -1415,7 +1437,8 @@ bool Heap::CreateInitialMaps() { if (obj->IsFailure()) return false; set_boilerplate_function_map(Map::cast(obj)); - obj = AllocateMap(SHARED_FUNCTION_INFO_TYPE, SharedFunctionInfo::kSize); + obj = AllocateMap(SHARED_FUNCTION_INFO_TYPE, + SharedFunctionInfo::kAlignedSize); if (obj->IsFailure()) return false; set_shared_function_info_map(Map::cast(obj)); @@ -2625,7 +2648,7 @@ Object* Heap::CopyJSObject(JSObject* source) { // Update write barrier for all fields that lie beyond the header. RecordWrites(clone_address, JSObject::kHeaderSize, - object_size - JSObject::kHeaderSize); + (object_size - JSObject::kHeaderSize) / kPointerSize); } else { clone = new_space_.AllocateRaw(object_size); if (clone->IsFailure()) return clone; @@ -3786,6 +3809,46 @@ void Heap::Unprotect() { #endif +void Heap::AddGCPrologueCallback(GCPrologueCallback callback, GCType gc_type) { + ASSERT(callback != NULL); + GCPrologueCallbackPair pair(callback, gc_type); + ASSERT(!gc_prologue_callbacks_.Contains(pair)); + return gc_prologue_callbacks_.Add(pair); +} + + +void Heap::RemoveGCPrologueCallback(GCPrologueCallback callback) { + ASSERT(callback != NULL); + for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) { + if (gc_prologue_callbacks_[i].callback == callback) { + gc_prologue_callbacks_.Remove(i); + return; + } + } + UNREACHABLE(); +} + + +void Heap::AddGCEpilogueCallback(GCEpilogueCallback callback, GCType gc_type) { + ASSERT(callback != NULL); + GCEpilogueCallbackPair pair(callback, gc_type); + ASSERT(!gc_epilogue_callbacks_.Contains(pair)); + return gc_epilogue_callbacks_.Add(pair); +} + + +void Heap::RemoveGCEpilogueCallback(GCEpilogueCallback callback) { + ASSERT(callback != NULL); + for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) { + if (gc_epilogue_callbacks_[i].callback == callback) { + gc_epilogue_callbacks_.Remove(i); + return; + } + } + UNREACHABLE(); +} + + #ifdef DEBUG class PrintHandleVisitor: public ObjectVisitor { diff --git a/deps/v8/src/heap.h b/deps/v8/src/heap.h index 1aee8749a1..d37641ccf3 100644 --- a/deps/v8/src/heap.h +++ b/deps/v8/src/heap.h @@ -108,6 +108,7 @@ class ZoneScopeInfo; V(FixedArray, single_character_string_cache, SingleCharacterStringCache) \ V(FixedArray, natives_source_cache, NativesSourceCache) \ V(Object, last_script_id, LastScriptId) \ + V(Script, empty_script, EmptyScript) \ V(Smi, real_stack_limit, RealStackLimit) \ #if V8_TARGET_ARCH_ARM && V8_NATIVE_REGEXP @@ -673,10 +674,20 @@ class Heap : public AllStatic { static bool GarbageCollectionGreedyCheck(); #endif + static void AddGCPrologueCallback( + GCEpilogueCallback callback, GCType gc_type_filter); + static void RemoveGCPrologueCallback(GCEpilogueCallback callback); + + static void AddGCEpilogueCallback( + GCEpilogueCallback callback, GCType gc_type_filter); + static void RemoveGCEpilogueCallback(GCEpilogueCallback callback); + static void SetGlobalGCPrologueCallback(GCCallback callback) { + ASSERT((callback == NULL) ^ (global_gc_prologue_callback_ == NULL)); global_gc_prologue_callback_ = callback; } static void SetGlobalGCEpilogueCallback(GCCallback callback) { + ASSERT((callback == NULL) ^ (global_gc_epilogue_callback_ == NULL)); global_gc_epilogue_callback_ = callback; } @@ -758,6 +769,10 @@ class Heap : public AllStatic { roots_[kNonMonomorphicCacheRootIndex] = value; } + static void public_set_empty_script(Script* script) { + roots_[kEmptyScriptRootIndex] = script; 
+ } + // Update the next script id. static inline void SetLastScriptId(Object* last_script_id); @@ -965,7 +980,7 @@ class Heap : public AllStatic { static int gc_count_; // how many gc happened // Total length of the strings we failed to flatten since the last GC. - static int unflattended_strings_length_; + static int unflattened_strings_length_; #define ROOT_ACCESSOR(type, name, camel_name) \ static inline void set_##name(type* value) { \ @@ -1041,6 +1056,30 @@ class Heap : public AllStatic { // GC callback function, called before and after mark-compact GC. // Allocations in the callback function are disallowed. + struct GCPrologueCallbackPair { + GCPrologueCallbackPair(GCPrologueCallback callback, GCType gc_type) + : callback(callback), gc_type(gc_type) { + } + bool operator==(const GCPrologueCallbackPair& pair) const { + return pair.callback == callback; + } + GCPrologueCallback callback; + GCType gc_type; + }; + static List<GCPrologueCallbackPair> gc_prologue_callbacks_; + + struct GCEpilogueCallbackPair { + GCEpilogueCallbackPair(GCEpilogueCallback callback, GCType gc_type) + : callback(callback), gc_type(gc_type) { + } + bool operator==(const GCEpilogueCallbackPair& pair) const { + return pair.callback == callback; + } + GCEpilogueCallback callback; + GCType gc_type; + }; + static List<GCEpilogueCallbackPair> gc_epilogue_callbacks_; + static GCCallback global_gc_prologue_callback_; static GCCallback global_gc_epilogue_callback_; @@ -1583,6 +1622,7 @@ class GCTracer BASE_EMBEDDED { // Sets the flag that this is a compacting full GC. void set_is_compacting() { is_compacting_ = true; } + bool is_compacting() const { return is_compacting_; } // Increment and decrement the count of marked objects. void increment_marked_count() { ++marked_count_; } diff --git a/deps/v8/src/ia32/codegen-ia32.cc b/deps/v8/src/ia32/codegen-ia32.cc index 67c9cc1302..20b64636d9 100644 --- a/deps/v8/src/ia32/codegen-ia32.cc +++ b/deps/v8/src/ia32/codegen-ia32.cc @@ -490,7 +490,11 @@ void CodeGenerator::LoadInSafeInt32Mode(Expression* expr, Load(expr); Result value = frame_->Pop(); ASSERT(frame_->HasNoUntaggedInt32Elements()); - ConvertInt32ResultToNumber(&value); + if (expr->GuaranteedSmiResult()) { + ConvertInt32ResultToSmi(&value); + } else { + ConvertInt32ResultToNumber(&value); + } set_in_safe_int32_mode(false); set_unsafe_bailout(NULL); frame_->Push(&value); @@ -504,6 +508,19 @@ void CodeGenerator::LoadWithSafeInt32ModeDisabled(Expression* expr) { } +void CodeGenerator::ConvertInt32ResultToSmi(Result* value) { + ASSERT(value->is_untagged_int32()); + if (value->is_register()) { + __ add(value->reg(), Operand(value->reg())); + } else { + ASSERT(value->is_constant()); + ASSERT(value->handle()->IsSmi()); + } + value->set_untagged_int32(false); + value->set_type_info(TypeInfo::Smi()); +} + + void CodeGenerator::ConvertInt32ResultToNumber(Result* value) { ASSERT(value->is_untagged_int32()); if (value->is_register()) { @@ -552,6 +569,7 @@ void CodeGenerator::ConvertInt32ResultToNumber(Result* value) { ASSERT(value->is_constant()); } value->set_untagged_int32(false); + value->set_type_info(TypeInfo::Integer32()); } @@ -850,7 +868,7 @@ void CodeGenerator::ToBoolean(ControlDestination* dest) { dest->Split(not_zero); } else if (value.is_number()) { Comment cmnt(masm_, "ONLY_NUMBER"); - // Fast case if NumberInfo indicates only numbers. + // Fast case if TypeInfo indicates only numbers. 
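The Heap::AddGCPrologueCallback / AddGCEpilogueCallback machinery added above dispatches each registered callback whose GCType filter matches the collector that just ran, and passes kGCCallbackFlagCompacted to epilogue callbacks after a compacting full GC. The sketch below shows how such a hook might be wired up; the function names are hypothetical, and it assumes the internal heap.h declarations shown above together with the (GCType, GCCallbackFlags) callback signature used by PerformGarbageCollection.

// Hypothetical hook; signature matches how PerformGarbageCollection
// invokes registered epilogue callbacks.
static int compacted_gc_count = 0;

static void OnFullGCFinished(GCType type, GCCallbackFlags flags) {
  // Registered with a kGCTypeMarkSweepCompact filter, so scavenges are
  // filtered out before this function is reached.
  if ((flags & kGCCallbackFlagCompacted) != 0) {
    compacted_gc_count++;  // a compacting mark-sweep cycle just completed
  }
}

static void InstallGCHook() {
  Heap::AddGCEpilogueCallback(OnFullGCFinished, kGCTypeMarkSweepCompact);
}

static void RemoveGCHook() {
  Heap::RemoveGCEpilogueCallback(OnFullGCFinished);
}

Because the callback-pair structs compare only the function pointer, the same callback cannot be registered twice, and removal is by pointer identity.
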
if (FLAG_debug_code) { __ AbortIfNotNumber(value.reg()); } @@ -911,6 +929,7 @@ class FloatingPointHelper : public AllStatic { // operand in register number. Returns operand as floating point number // on FPU stack. static void LoadFloatOperand(MacroAssembler* masm, Register number); + // Code pattern for loading floating point values. Input values must // be either smi or heap number objects (fp values). Requirements: // operand_1 on TOS+1 or in edx, operand_2 on TOS+2 or in eax. @@ -929,14 +948,15 @@ class FloatingPointHelper : public AllStatic { static void CheckFloatOperands(MacroAssembler* masm, Label* non_float, Register scratch); + // Takes the operands in edx and eax and loads them as integers in eax // and ecx. static void LoadAsIntegers(MacroAssembler* masm, - NumberInfo number_info, + TypeInfo type_info, bool use_sse3, Label* operand_conversion_failure); static void LoadNumbersAsIntegers(MacroAssembler* masm, - NumberInfo number_info, + TypeInfo type_info, bool use_sse3, Label* operand_conversion_failure); static void LoadUnknownsAsIntegers(MacroAssembler* masm, @@ -947,6 +967,7 @@ class FloatingPointHelper : public AllStatic { // into xmm0 and xmm1 if they are. Operands are in edx and eax. // Leaves operands unchanged. static void LoadSSE2Operands(MacroAssembler* masm); + // Test if operands are numbers (smi or HeapNumber objects), and load // them into xmm0 and xmm1 if they are. Jump to label not_numbers if // either operand is not a number. Operands are in edx and eax. @@ -993,8 +1014,8 @@ class DeferredInlineBinaryOperation: public DeferredCode { Register dst, Register left, Register right, - NumberInfo left_info, - NumberInfo right_info, + TypeInfo left_info, + TypeInfo right_info, OverwriteMode mode) : op_(op), dst_(dst), left_(left), right_(right), left_info_(left_info), right_info_(right_info), mode_(mode) { @@ -1008,8 +1029,8 @@ class DeferredInlineBinaryOperation: public DeferredCode { Register dst_; Register left_; Register right_; - NumberInfo left_info_; - NumberInfo right_info_; + TypeInfo left_info_; + TypeInfo right_info_; OverwriteMode mode_; }; @@ -1103,23 +1124,23 @@ void DeferredInlineBinaryOperation::Generate() { GenericBinaryOpStub stub(op_, mode_, NO_SMI_CODE_IN_STUB, - NumberInfo::Combine(left_info_, right_info_)); + TypeInfo::Combine(left_info_, right_info_)); stub.GenerateCall(masm_, left_, right_); if (!dst_.is(eax)) __ mov(dst_, eax); __ bind(&done); } -static NumberInfo CalculateNumberInfo(NumberInfo operands_type, +static TypeInfo CalculateTypeInfo(TypeInfo operands_type, Token::Value op, const Result& right, const Result& left) { - // Set NumberInfo of result according to the operation performed. + // Set TypeInfo of result according to the operation performed. // Rely on the fact that smis have a 31 bit payload on ia32. ASSERT(kSmiValueSize == 31); switch (op) { case Token::COMMA: - return right.number_info(); + return right.type_info(); case Token::OR: case Token::AND: // Result type can be either of the two input types. @@ -1128,74 +1149,74 @@ static NumberInfo CalculateNumberInfo(NumberInfo operands_type, // Anding with positive Smis will give you a Smi. if (right.is_constant() && right.handle()->IsSmi() && Smi::cast(*right.handle())->value() >= 0) { - return NumberInfo::Smi(); + return TypeInfo::Smi(); } else if (left.is_constant() && left.handle()->IsSmi() && Smi::cast(*left.handle())->value() >= 0) { - return NumberInfo::Smi(); + return TypeInfo::Smi(); } return (operands_type.IsSmi()) - ? NumberInfo::Smi() - : NumberInfo::Integer32(); + ? 
TypeInfo::Smi() + : TypeInfo::Integer32(); } case Token::BIT_OR: { // Oring with negative Smis will give you a Smi. if (right.is_constant() && right.handle()->IsSmi() && Smi::cast(*right.handle())->value() < 0) { - return NumberInfo::Smi(); + return TypeInfo::Smi(); } else if (left.is_constant() && left.handle()->IsSmi() && Smi::cast(*left.handle())->value() < 0) { - return NumberInfo::Smi(); + return TypeInfo::Smi(); } return (operands_type.IsSmi()) - ? NumberInfo::Smi() - : NumberInfo::Integer32(); + ? TypeInfo::Smi() + : TypeInfo::Integer32(); } case Token::BIT_XOR: // Result is always a 32 bit integer. Smi property of inputs is preserved. return (operands_type.IsSmi()) - ? NumberInfo::Smi() - : NumberInfo::Integer32(); + ? TypeInfo::Smi() + : TypeInfo::Integer32(); case Token::SAR: - if (left.is_smi()) return NumberInfo::Smi(); + if (left.is_smi()) return TypeInfo::Smi(); // Result is a smi if we shift by a constant >= 1, otherwise an integer32. return (right.is_constant() && right.handle()->IsSmi() && Smi::cast(*right.handle())->value() >= 1) - ? NumberInfo::Smi() - : NumberInfo::Integer32(); + ? TypeInfo::Smi() + : TypeInfo::Integer32(); case Token::SHR: // Result is a smi if we shift by a constant >= 2, otherwise an integer32. return (right.is_constant() && right.handle()->IsSmi() && Smi::cast(*right.handle())->value() >= 2) - ? NumberInfo::Smi() - : NumberInfo::Integer32(); + ? TypeInfo::Smi() + : TypeInfo::Integer32(); case Token::ADD: if (operands_type.IsSmi()) { // The Integer32 range is big enough to take the sum of any two Smis. - return NumberInfo::Integer32(); + return TypeInfo::Integer32(); } else { // Result could be a string or a number. Check types of inputs. return operands_type.IsNumber() - ? NumberInfo::Number() - : NumberInfo::Unknown(); + ? TypeInfo::Number() + : TypeInfo::Unknown(); } case Token::SHL: - return NumberInfo::Integer32(); + return TypeInfo::Integer32(); case Token::SUB: // The Integer32 range is big enough to take the difference of any two // Smis. return (operands_type.IsSmi()) ? - NumberInfo::Integer32() : - NumberInfo::Number(); + TypeInfo::Integer32() : + TypeInfo::Number(); case Token::MUL: case Token::DIV: case Token::MOD: // Result is always a number. - return NumberInfo::Number(); + return TypeInfo::Number(); default: UNREACHABLE(); } UNREACHABLE(); - return NumberInfo::Unknown(); + return TypeInfo::Unknown(); } @@ -1255,10 +1276,10 @@ void CodeGenerator::GenericBinaryOperation(Token::Value op, } // Get number type of left and right sub-expressions. 
- NumberInfo operands_type = - NumberInfo::Combine(left.number_info(), right.number_info()); + TypeInfo operands_type = + TypeInfo::Combine(left.type_info(), right.type_info()); - NumberInfo result_type = CalculateNumberInfo(operands_type, op, right, left); + TypeInfo result_type = CalculateTypeInfo(operands_type, op, right, left); Result answer; if (left_is_non_smi_constant || right_is_non_smi_constant) { @@ -1297,7 +1318,7 @@ void CodeGenerator::GenericBinaryOperation(Token::Value op, } } - answer.set_number_info(result_type); + answer.set_type_info(result_type); frame_->Push(&answer); } @@ -1385,7 +1406,7 @@ bool CodeGenerator::FoldConstantSmis(Token::Value op, int left, int right) { static void CheckTwoForSminess(MacroAssembler* masm, Register left, Register right, Register scratch, - NumberInfo left_info, NumberInfo right_info, + TypeInfo left_info, TypeInfo right_info, DeferredInlineBinaryOperation* deferred); @@ -1470,8 +1491,8 @@ Result CodeGenerator::LikelySmiBinaryOperation(Token::Value op, (op == Token::DIV) ? eax : edx, left->reg(), right->reg(), - left->number_info(), - right->number_info(), + left->type_info(), + right->type_info(), overwrite_mode); if (left->reg().is(right->reg())) { __ test(left->reg(), Immediate(kSmiTagMask)); @@ -1574,18 +1595,18 @@ Result CodeGenerator::LikelySmiBinaryOperation(Token::Value op, answer.reg(), left->reg(), ecx, - left->number_info(), - right->number_info(), + left->type_info(), + right->type_info(), overwrite_mode); Label do_op, left_nonsmi; // If right is a smi we make a fast case if left is either a smi // or a heapnumber. - if (CpuFeatures::IsSupported(SSE2) && right->number_info().IsSmi()) { + if (CpuFeatures::IsSupported(SSE2) && right->type_info().IsSmi()) { CpuFeatures::Scope use_sse2(SSE2); __ mov(answer.reg(), left->reg()); // Fast case - both are actually smis. - if (!left->number_info().IsSmi()) { + if (!left->type_info().IsSmi()) { __ test(answer.reg(), Immediate(kSmiTagMask)); __ j(not_zero, &left_nonsmi); } else { @@ -1609,7 +1630,7 @@ Result CodeGenerator::LikelySmiBinaryOperation(Token::Value op, deferred->Branch(negative); } else { CheckTwoForSminess(masm_, left->reg(), right->reg(), answer.reg(), - left->number_info(), right->number_info(), deferred); + left->type_info(), right->type_info(), deferred); // Untag both operands. 
__ mov(answer.reg(), left->reg()); @@ -1682,11 +1703,11 @@ Result CodeGenerator::LikelySmiBinaryOperation(Token::Value op, answer.reg(), left->reg(), right->reg(), - left->number_info(), - right->number_info(), + left->type_info(), + right->type_info(), overwrite_mode); CheckTwoForSminess(masm_, left->reg(), right->reg(), answer.reg(), - left->number_info(), right->number_info(), deferred); + left->type_info(), right->type_info(), deferred); __ mov(answer.reg(), left->reg()); switch (op) { @@ -1758,16 +1779,16 @@ class DeferredInlineSmiOperation: public DeferredCode { DeferredInlineSmiOperation(Token::Value op, Register dst, Register src, - NumberInfo number_info, + TypeInfo type_info, Smi* value, OverwriteMode overwrite_mode) : op_(op), dst_(dst), src_(src), - number_info_(number_info), + type_info_(type_info), value_(value), overwrite_mode_(overwrite_mode) { - if (number_info.IsSmi()) overwrite_mode_ = NO_OVERWRITE; + if (type_info.IsSmi()) overwrite_mode_ = NO_OVERWRITE; set_comment("[ DeferredInlineSmiOperation"); } @@ -1777,7 +1798,7 @@ class DeferredInlineSmiOperation: public DeferredCode { Token::Value op_; Register dst_; Register src_; - NumberInfo number_info_; + TypeInfo type_info_; Smi* value_; OverwriteMode overwrite_mode_; }; @@ -1789,7 +1810,7 @@ void DeferredInlineSmiOperation::Generate() { op_, overwrite_mode_, (op_ == Token::MOD) ? NO_GENERIC_BINARY_FLAGS : NO_SMI_CODE_IN_STUB, - NumberInfo::Combine(NumberInfo::Smi(), number_info_)); + TypeInfo::Combine(TypeInfo::Smi(), type_info_)); stub.GenerateCall(masm_, src_, value_); if (!dst_.is(eax)) __ mov(dst_, eax); } @@ -1803,11 +1824,11 @@ class DeferredInlineSmiOperationReversed: public DeferredCode { Register dst, Smi* value, Register src, - NumberInfo number_info, + TypeInfo type_info, OverwriteMode overwrite_mode) : op_(op), dst_(dst), - number_info_(number_info), + type_info_(type_info), value_(value), src_(src), overwrite_mode_(overwrite_mode) { @@ -1819,7 +1840,7 @@ class DeferredInlineSmiOperationReversed: public DeferredCode { private: Token::Value op_; Register dst_; - NumberInfo number_info_; + TypeInfo type_info_; Smi* value_; Register src_; OverwriteMode overwrite_mode_; @@ -1831,7 +1852,7 @@ void DeferredInlineSmiOperationReversed::Generate() { op_, overwrite_mode_, NO_SMI_CODE_IN_STUB, - NumberInfo::Combine(NumberInfo::Smi(), number_info_)); + TypeInfo::Combine(TypeInfo::Smi(), type_info_)); igostub.GenerateCall(masm_, value_, src_); if (!dst_.is(eax)) __ mov(dst_, eax); } @@ -1843,14 +1864,14 @@ void DeferredInlineSmiOperationReversed::Generate() { class DeferredInlineSmiAdd: public DeferredCode { public: DeferredInlineSmiAdd(Register dst, - NumberInfo number_info, + TypeInfo type_info, Smi* value, OverwriteMode overwrite_mode) : dst_(dst), - number_info_(number_info), + type_info_(type_info), value_(value), overwrite_mode_(overwrite_mode) { - if (number_info_.IsSmi()) overwrite_mode_ = NO_OVERWRITE; + if (type_info_.IsSmi()) overwrite_mode_ = NO_OVERWRITE; set_comment("[ DeferredInlineSmiAdd"); } @@ -1858,7 +1879,7 @@ class DeferredInlineSmiAdd: public DeferredCode { private: Register dst_; - NumberInfo number_info_; + TypeInfo type_info_; Smi* value_; OverwriteMode overwrite_mode_; }; @@ -1871,7 +1892,7 @@ void DeferredInlineSmiAdd::Generate() { Token::ADD, overwrite_mode_, NO_SMI_CODE_IN_STUB, - NumberInfo::Combine(NumberInfo::Smi(), number_info_)); + TypeInfo::Combine(TypeInfo::Smi(), type_info_)); igostub.GenerateCall(masm_, dst_, value_); if (!dst_.is(eax)) __ mov(dst_, eax); } @@ -1883,11 +1904,11 @@ 
void DeferredInlineSmiAdd::Generate() { class DeferredInlineSmiAddReversed: public DeferredCode { public: DeferredInlineSmiAddReversed(Register dst, - NumberInfo number_info, + TypeInfo type_info, Smi* value, OverwriteMode overwrite_mode) : dst_(dst), - number_info_(number_info), + type_info_(type_info), value_(value), overwrite_mode_(overwrite_mode) { set_comment("[ DeferredInlineSmiAddReversed"); @@ -1897,7 +1918,7 @@ class DeferredInlineSmiAddReversed: public DeferredCode { private: Register dst_; - NumberInfo number_info_; + TypeInfo type_info_; Smi* value_; OverwriteMode overwrite_mode_; }; @@ -1910,7 +1931,7 @@ void DeferredInlineSmiAddReversed::Generate() { Token::ADD, overwrite_mode_, NO_SMI_CODE_IN_STUB, - NumberInfo::Combine(NumberInfo::Smi(), number_info_)); + TypeInfo::Combine(TypeInfo::Smi(), type_info_)); igostub.GenerateCall(masm_, value_, dst_); if (!dst_.is(eax)) __ mov(dst_, eax); } @@ -1923,14 +1944,14 @@ void DeferredInlineSmiAddReversed::Generate() { class DeferredInlineSmiSub: public DeferredCode { public: DeferredInlineSmiSub(Register dst, - NumberInfo number_info, + TypeInfo type_info, Smi* value, OverwriteMode overwrite_mode) : dst_(dst), - number_info_(number_info), + type_info_(type_info), value_(value), overwrite_mode_(overwrite_mode) { - if (number_info.IsSmi()) overwrite_mode_ = NO_OVERWRITE; + if (type_info.IsSmi()) overwrite_mode_ = NO_OVERWRITE; set_comment("[ DeferredInlineSmiSub"); } @@ -1938,7 +1959,7 @@ class DeferredInlineSmiSub: public DeferredCode { private: Register dst_; - NumberInfo number_info_; + TypeInfo type_info_; Smi* value_; OverwriteMode overwrite_mode_; }; @@ -1951,7 +1972,7 @@ void DeferredInlineSmiSub::Generate() { Token::SUB, overwrite_mode_, NO_SMI_CODE_IN_STUB, - NumberInfo::Combine(NumberInfo::Smi(), number_info_)); + TypeInfo::Combine(TypeInfo::Smi(), type_info_)); igostub.GenerateCall(masm_, dst_, value_); if (!dst_.is(eax)) __ mov(dst_, eax); } @@ -1996,18 +2017,18 @@ Result CodeGenerator::ConstantSmiBinaryOperation(Token::Value op, DeferredCode* deferred = NULL; if (reversed) { deferred = new DeferredInlineSmiAddReversed(operand->reg(), - operand->number_info(), + operand->type_info(), smi_value, overwrite_mode); } else { deferred = new DeferredInlineSmiAdd(operand->reg(), - operand->number_info(), + operand->type_info(), smi_value, overwrite_mode); } __ add(Operand(operand->reg()), Immediate(value)); deferred->Branch(overflow); - if (!operand->number_info().IsSmi()) { + if (!operand->type_info().IsSmi()) { __ test(operand->reg(), Immediate(kSmiTagMask)); deferred->Branch(not_zero); } else { @@ -2032,7 +2053,7 @@ Result CodeGenerator::ConstantSmiBinaryOperation(Token::Value op, answer.reg(), smi_value, operand->reg(), - operand->number_info(), + operand->type_info(), overwrite_mode); __ sub(answer.reg(), Operand(operand->reg())); } else { @@ -2040,13 +2061,13 @@ Result CodeGenerator::ConstantSmiBinaryOperation(Token::Value op, frame_->Spill(operand->reg()); answer = *operand; deferred = new DeferredInlineSmiSub(operand->reg(), - operand->number_info(), + operand->type_info(), smi_value, overwrite_mode); __ sub(Operand(operand->reg()), Immediate(value)); } deferred->Branch(overflow); - if (!operand->number_info().IsSmi()) { + if (!operand->type_info().IsSmi()) { __ test(answer.reg(), Immediate(kSmiTagMask)); deferred->Branch(not_zero); } else { @@ -2068,12 +2089,12 @@ Result CodeGenerator::ConstantSmiBinaryOperation(Token::Value op, int shift_value = int_value & 0x1f; operand->ToRegister(); frame_->Spill(operand->reg()); - if 
(!operand->number_info().IsSmi()) { + if (!operand->type_info().IsSmi()) { DeferredInlineSmiOperation* deferred = new DeferredInlineSmiOperation(op, operand->reg(), operand->reg(), - operand->number_info(), + operand->type_info(), smi_value, overwrite_mode); __ test(operand->reg(), Immediate(kSmiTagMask)); @@ -2110,10 +2131,10 @@ Result CodeGenerator::ConstantSmiBinaryOperation(Token::Value op, new DeferredInlineSmiOperation(op, answer.reg(), operand->reg(), - operand->number_info(), + operand->type_info(), smi_value, overwrite_mode); - if (!operand->number_info().IsSmi()) { + if (!operand->type_info().IsSmi()) { __ test(operand->reg(), Immediate(kSmiTagMask)); deferred->Branch(not_zero); } else { @@ -2160,11 +2181,11 @@ Result CodeGenerator::ConstantSmiBinaryOperation(Token::Value op, answer.reg(), smi_value, right.reg(), - right.number_info(), + right.type_info(), overwrite_mode); __ mov(answer.reg(), Immediate(int_value)); __ sar(ecx, kSmiTagSize); - if (!right.number_info().IsSmi()) { + if (!right.type_info().IsSmi()) { deferred->Branch(carry); } else { if (FLAG_debug_code) __ AbortIfNotSmi(right.reg()); @@ -2187,7 +2208,7 @@ Result CodeGenerator::ConstantSmiBinaryOperation(Token::Value op, new DeferredInlineSmiOperation(op, operand->reg(), operand->reg(), - operand->number_info(), + operand->type_info(), smi_value, overwrite_mode); __ test(operand->reg(), Immediate(kSmiTagMask)); @@ -2202,10 +2223,10 @@ Result CodeGenerator::ConstantSmiBinaryOperation(Token::Value op, new DeferredInlineSmiOperation(op, answer.reg(), operand->reg(), - operand->number_info(), + operand->type_info(), smi_value, overwrite_mode); - if (!operand->number_info().IsSmi()) { + if (!operand->type_info().IsSmi()) { __ test(operand->reg(), Immediate(kSmiTagMask)); deferred->Branch(not_zero); } else { @@ -2239,17 +2260,17 @@ Result CodeGenerator::ConstantSmiBinaryOperation(Token::Value op, operand->reg(), smi_value, operand->reg(), - operand->number_info(), + operand->type_info(), overwrite_mode); } else { deferred = new DeferredInlineSmiOperation(op, operand->reg(), operand->reg(), - operand->number_info(), + operand->type_info(), smi_value, overwrite_mode); } - if (!operand->number_info().IsSmi()) { + if (!operand->type_info().IsSmi()) { __ test(operand->reg(), Immediate(kSmiTagMask)); deferred->Branch(not_zero); } else { @@ -2281,7 +2302,7 @@ Result CodeGenerator::ConstantSmiBinaryOperation(Token::Value op, new DeferredInlineSmiOperation(op, operand->reg(), operand->reg(), - operand->number_info(), + operand->type_info(), smi_value, overwrite_mode); // Check that lowest log2(value) bits of operand are zero, and test @@ -2317,7 +2338,7 @@ Result CodeGenerator::ConstantSmiBinaryOperation(Token::Value op, new DeferredInlineSmiOperation(op, operand->reg(), operand->reg(), - operand->number_info(), + operand->type_info(), smi_value, overwrite_mode); // Check for negative or non-Smi left hand side. @@ -2353,14 +2374,30 @@ Result CodeGenerator::ConstantSmiBinaryOperation(Token::Value op, static bool CouldBeNaN(const Result& result) { - if (result.number_info().IsSmi()) return false; - if (result.number_info().IsInteger32()) return false; + if (result.type_info().IsSmi()) return false; + if (result.type_info().IsInteger32()) return false; if (!result.is_constant()) return true; if (!result.handle()->IsHeapNumber()) return false; return isnan(HeapNumber::cast(*result.handle())->value()); } +// Convert from signed to unsigned comparison to match the way EFLAGS are set +// by FPU and XMM compare instructions. 
+static Condition DoubleCondition(Condition cc) { + switch (cc) { + case less: return below; + case equal: return equal; + case less_equal: return below_equal; + case greater: return above; + case greater_equal: return above_equal; + default: UNREACHABLE(); + } + UNREACHABLE(); + return equal; +} + + void CodeGenerator::Comparison(AstNode* node, Condition cc, bool strict, @@ -2431,7 +2468,7 @@ void CodeGenerator::Comparison(AstNode* node, left_side = right_side; right_side = temp; cc = ReverseCondition(cc); - // This may reintroduce greater or less_equal as the value of cc. + // This may re-introduce greater or less_equal as the value of cc. // CompareStub and the inline code both support all values of cc. } // Implement comparison against a constant Smi, inlining the case @@ -2453,11 +2490,11 @@ void CodeGenerator::Comparison(AstNode* node, __ test(left_side.reg(), Immediate(kSmiTagMask)); is_smi.Branch(zero, taken); - bool is_for_loop_compare = (node->AsCompareOperation() != NULL) - && node->AsCompareOperation()->is_for_loop_condition(); - if (!is_for_loop_compare - && CpuFeatures::IsSupported(SSE2) - && right_val->IsSmi()) { + bool is_loop_condition = (node->AsExpression() != NULL) && + node->AsExpression()->is_loop_condition(); + if (!is_loop_condition && + CpuFeatures::IsSupported(SSE2) && + right_val->IsSmi()) { // Right side is a constant smi and left side has been checked // not to be a smi. CpuFeatures::Scope use_sse2(SSE2); @@ -2480,16 +2517,7 @@ void CodeGenerator::Comparison(AstNode* node, // Jump to builtin for NaN. not_number.Branch(parity_even, &left_side); left_side.Unuse(); - Condition double_cc = cc; - switch (cc) { - case less: double_cc = below; break; - case equal: double_cc = equal; break; - case less_equal: double_cc = below_equal; break; - case greater: double_cc = above; break; - case greater_equal: double_cc = above_equal; break; - default: UNREACHABLE(); - } - dest->true_target()->Branch(double_cc); + dest->true_target()->Branch(DoubleCondition(cc)); dest->false_target()->Jump(); not_number.Bind(&left_side); } @@ -2688,21 +2716,53 @@ void CodeGenerator::Comparison(AstNode* node, dest->Split(cc); } } else { - // Neither side is a constant Smi or null. - // If either side is a non-smi constant, skip the smi check. + // Neither side is a constant Smi, constant 1-char string or constant null. + // If either side is a non-smi constant, or known to be a heap number skip + // the smi check. bool known_non_smi = (left_side.is_constant() && !left_side.handle()->IsSmi()) || - (right_side.is_constant() && !right_side.handle()->IsSmi()); + (right_side.is_constant() && !right_side.handle()->IsSmi()) || + left_side.type_info().IsDouble() || + right_side.type_info().IsDouble(); NaNInformation nan_info = (CouldBeNaN(left_side) && CouldBeNaN(right_side)) ? kBothCouldBeNaN : kCantBothBeNaN; + + // Inline number comparison handling any combination of smi's and heap + // numbers if: + // code is in a loop + // the compare operation is different from equal + // compare is not a for-loop comparison + // The reason for excluding equal is that it will most likely be done + // with smi's (not heap numbers) and the code to comparing smi's is inlined + // separately. The same reason applies for for-loop comparison which will + // also most likely be smi comparisons. 
+ bool is_loop_condition = (node->AsExpression() != NULL) + && node->AsExpression()->is_loop_condition(); + bool inline_number_compare = + loop_nesting() > 0 && cc != equal && !is_loop_condition; + + // Left and right needed in registers for the following code. left_side.ToRegister(); right_side.ToRegister(); if (known_non_smi) { - // When non-smi, call out to the compare stub. - CompareStub stub(cc, strict, nan_info); + // Inline the equality check if both operands can't be a NaN. If both + // objects are the same they are equal. + if (nan_info == kCantBothBeNaN && cc == equal) { + __ cmp(left_side.reg(), Operand(right_side.reg())); + dest->true_target()->Branch(equal); + } + + // Inline number comparison. + if (inline_number_compare) { + GenerateInlineNumberComparison(&left_side, &right_side, cc, dest); + } + + // End of in-line compare, call out to the compare stub. Don't include + // number comparison in the stub if it was inlined. + CompareStub stub(cc, strict, nan_info, !inline_number_compare); Result answer = frame_->CallStub(&stub, &left_side, &right_side); if (cc == equal) { __ test(answer.reg(), Operand(answer.reg())); @@ -2721,6 +2781,7 @@ void CodeGenerator::Comparison(AstNode* node, Register left_reg = left_side.reg(); Register right_reg = right_side.reg(); + // In-line check for comparing two smis. Result temp = allocator_->Allocate(); ASSERT(temp.is_valid()); __ mov(temp.reg(), left_side.reg()); @@ -2728,8 +2789,22 @@ void CodeGenerator::Comparison(AstNode* node, __ test(temp.reg(), Immediate(kSmiTagMask)); temp.Unuse(); is_smi.Branch(zero, taken); - // When non-smi, call out to the compare stub. - CompareStub stub(cc, strict, nan_info); + + // Inline the equality check if both operands can't be a NaN. If both + // objects are the same they are equal. + if (nan_info == kCantBothBeNaN && cc == equal) { + __ cmp(left_side.reg(), Operand(right_side.reg())); + dest->true_target()->Branch(equal); + } + + // Inline number comparison. + if (inline_number_compare) { + GenerateInlineNumberComparison(&left_side, &right_side, cc, dest); + } + + // End of in-line compare, call out to the compare stub. Don't include + // number comparison in the stub if it was inlined. + CompareStub stub(cc, strict, nan_info, !inline_number_compare); Result answer = frame_->CallStub(&stub, &left_side, &right_side); if (cc == equal) { __ test(answer.reg(), Operand(answer.reg())); @@ -2752,6 +2827,148 @@ void CodeGenerator::Comparison(AstNode* node, } +// Check that the comparison operand is a number. Jump to not_numbers jump +// target passing the left and right result if the operand is not a number. +static void CheckComparisonOperand(MacroAssembler* masm_, + Result* operand, + Result* left_side, + Result* right_side, + JumpTarget* not_numbers) { + // Perform check if operand is not known to be a number. + if (!operand->type_info().IsNumber()) { + Label done; + __ test(operand->reg(), Immediate(kSmiTagMask)); + __ j(zero, &done); + __ cmp(FieldOperand(operand->reg(), HeapObject::kMapOffset), + Immediate(Factory::heap_number_map())); + not_numbers->Branch(not_equal, left_side, right_side, not_taken); + __ bind(&done); + } +} + + +// Load a comparison operand to the FPU stack. This assumes that the operand has +// already been checked and is a number. +static void LoadComparisonOperand(MacroAssembler* masm_, + Result* operand) { + Label done; + if (operand->type_info().IsDouble()) { + // Operand is known to be a heap number, just load it. 
+ __ fld_d(FieldOperand(operand->reg(), HeapNumber::kValueOffset)); + } else if (operand->type_info().IsSmi()) { + // Operand is known to be a smi. Convert it to double and keep the original + // smi. + __ SmiUntag(operand->reg()); + __ push(operand->reg()); + __ fild_s(Operand(esp, 0)); + __ pop(operand->reg()); + __ SmiTag(operand->reg()); + } else { + // Operand type not known, check for smi otherwise assume heap number. + Label smi; + __ test(operand->reg(), Immediate(kSmiTagMask)); + __ j(zero, &smi); + __ fld_d(FieldOperand(operand->reg(), HeapNumber::kValueOffset)); + __ jmp(&done); + __ bind(&smi); + __ SmiUntag(operand->reg()); + __ push(operand->reg()); + __ fild_s(Operand(esp, 0)); + __ pop(operand->reg()); + __ SmiTag(operand->reg()); + __ jmp(&done); + } + __ bind(&done); +} + + +// Load a comparison operand into an XMM register. Jump to not_numbers jump +// target passing the left and right result if the operand is not a number. +static void LoadComparisonOperandSSE2(MacroAssembler* masm_, + Result* operand, + XMMRegister reg, + Result* left_side, + Result* right_side, + JumpTarget* not_numbers) { + Label done; + if (operand->type_info().IsDouble()) { + // Operand is known to be a heap number, just load it. + __ movdbl(reg, FieldOperand(operand->reg(), HeapNumber::kValueOffset)); + } else if (operand->type_info().IsSmi()) { + // Operand is known to be a smi. Convert it to double and keep the original + // smi. + __ SmiUntag(operand->reg()); + __ cvtsi2sd(reg, Operand(operand->reg())); + __ SmiTag(operand->reg()); + } else { + // Operand type not known, check for smi or heap number. + Label smi; + __ test(operand->reg(), Immediate(kSmiTagMask)); + __ j(zero, &smi); + if (!operand->type_info().IsNumber()) { + __ cmp(FieldOperand(operand->reg(), HeapObject::kMapOffset), + Immediate(Factory::heap_number_map())); + not_numbers->Branch(not_equal, left_side, right_side, taken); + } + __ movdbl(reg, FieldOperand(operand->reg(), HeapNumber::kValueOffset)); + __ jmp(&done); + + __ bind(&smi); + // Convert smi to float and keep the original smi. + __ SmiUntag(operand->reg()); + __ cvtsi2sd(reg, Operand(operand->reg())); + __ SmiTag(operand->reg()); + __ jmp(&done); + } + __ bind(&done); +} + + +void CodeGenerator::GenerateInlineNumberComparison(Result* left_side, + Result* right_side, + Condition cc, + ControlDestination* dest) { + ASSERT(left_side->is_register()); + ASSERT(right_side->is_register()); + + JumpTarget not_numbers; + if (CpuFeatures::IsSupported(SSE2)) { + CpuFeatures::Scope use_sse2(SSE2); + + // Load left and right operand into registers xmm0 and xmm1 and compare. + LoadComparisonOperandSSE2(masm_, left_side, xmm0, left_side, right_side, + &not_numbers); + LoadComparisonOperandSSE2(masm_, right_side, xmm1, left_side, right_side, + &not_numbers); + __ comisd(xmm0, xmm1); + } else { + Label check_right, compare; + + // Make sure that both comparison operands are numbers. + CheckComparisonOperand(masm_, left_side, left_side, right_side, + &not_numbers); + CheckComparisonOperand(masm_, right_side, left_side, right_side, + &not_numbers); + + // Load right and left operand to FPU stack and compare. + LoadComparisonOperand(masm_, right_side); + LoadComparisonOperand(masm_, left_side); + __ FCmp(); + } + + // Bail out if a NaN is involved. + not_numbers.Branch(parity_even, left_side, right_side, not_taken); + + // Split to destination targets based on comparison.
+ left_side->Unuse(); + right_side->Unuse(); + dest->true_target()->Branch(DoubleCondition(cc)); + dest->false_target()->Jump(); + + not_numbers.Bind(left_side, right_side); +} + + // Call the function just below TOS on the stack with the given // arguments. The receiver is the TOS. void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args, @@ -3652,7 +3869,7 @@ void CodeGenerator::VisitWhileStatement(WhileStatement* node) { } -void CodeGenerator::SetTypeForStackSlot(Slot* slot, NumberInfo info) { +void CodeGenerator::SetTypeForStackSlot(Slot* slot, TypeInfo info) { ASSERT(slot->type() == Slot::LOCAL || slot->type() == Slot::PARAMETER); if (slot->type() == Slot::LOCAL) { frame_->SetTypeForLocalAt(slot->index(), info); @@ -3772,7 +3989,7 @@ void CodeGenerator::VisitForStatement(ForStatement* node) { // the bottom check of the loop condition. if (node->is_fast_smi_loop()) { // Set number type of the loop variable to smi. - SetTypeForStackSlot(node->loop_variable()->slot(), NumberInfo::Smi()); + SetTypeForStackSlot(node->loop_variable()->slot(), TypeInfo::Smi()); } Visit(node->body()); @@ -3798,7 +4015,7 @@ void CodeGenerator::VisitForStatement(ForStatement* node) { // expression if we are in a fast smi loop condition. if (node->is_fast_smi_loop() && has_valid_frame()) { // Set number type of the loop variable to smi. - SetTypeForStackSlot(node->loop_variable()->slot(), NumberInfo::Smi()); + SetTypeForStackSlot(node->loop_variable()->slot(), TypeInfo::Smi()); } // Based on the condition analysis, compile the backward jump as @@ -4421,9 +4638,8 @@ void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) { } -Result CodeGenerator::InstantiateBoilerplate(Handle<JSFunction> boilerplate) { - ASSERT(boilerplate->IsBoilerplate()); - +Result CodeGenerator::InstantiateFunction( + Handle<SharedFunctionInfo> function_info) { // The inevitable call will sync frame elements to memory anyway, so // we do it eagerly to allow us to push the arguments directly into // place. @@ -4431,15 +4647,15 @@ Result CodeGenerator::InstantiateBoilerplate(Handle<JSFunction> boilerplate) { // Use the fast case closure allocation code that allocates in new // space for nested functions that don't need literals cloning. - if (scope()->is_function_scope() && boilerplate->NumberOfLiterals() == 0) { + if (scope()->is_function_scope() && function_info->num_literals() == 0) { FastNewClosureStub stub; - frame()->EmitPush(Immediate(boilerplate)); + frame()->EmitPush(Immediate(function_info)); return frame()->CallStub(&stub, 1); } else { // Call the runtime to instantiate the function boilerplate // object. frame()->EmitPush(esi); - frame()->EmitPush(Immediate(boilerplate)); + frame()->EmitPush(Immediate(function_info)); return frame()->CallRuntime(Runtime::kNewClosure, 2); } } @@ -4448,21 +4664,21 @@ Result CodeGenerator::InstantiateBoilerplate(Handle<JSFunction> boilerplate) { void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) { Comment cmnt(masm_, "[ FunctionLiteral"); ASSERT(!in_safe_int32_mode()); - // Build the function boilerplate and instantiate it. - Handle<JSFunction> boilerplate = - Compiler::BuildBoilerplate(node, script(), this); + // Build the function info and instantiate it. + Handle<SharedFunctionInfo> function_info = + Compiler::BuildFunctionInfo(node, script(), this); // Check for stack-overflow exception. 
if (HasStackOverflow()) return; - Result result = InstantiateBoilerplate(boilerplate); + Result result = InstantiateFunction(function_info); frame()->Push(&result); } -void CodeGenerator::VisitFunctionBoilerplateLiteral( - FunctionBoilerplateLiteral* node) { +void CodeGenerator::VisitSharedFunctionInfoLiteral( + SharedFunctionInfoLiteral* node) { ASSERT(!in_safe_int32_mode()); - Comment cmnt(masm_, "[ FunctionBoilerplateLiteral"); - Result result = InstantiateBoilerplate(node->boilerplate()); + Comment cmnt(masm_, "[ SharedFunctionInfoLiteral"); + Result result = InstantiateFunction(node->shared_function_info()); frame()->Push(&result); } @@ -6702,7 +6918,7 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) { GenericUnaryOpStub stub(Token::SUB, overwrite); Result operand = frame_->Pop(); Result answer = frame_->CallStub(&stub, &operand); - answer.set_number_info(NumberInfo::Number()); + answer.set_type_info(TypeInfo::Number()); frame_->Push(&answer); break; } @@ -6711,7 +6927,7 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) { JumpTarget smi_label; JumpTarget continue_label; Result operand = frame_->Pop(); - NumberInfo operand_info = operand.number_info(); + TypeInfo operand_info = operand.type_info(); operand.ToRegister(); if (operand_info.IsSmi()) { if (FLAG_debug_code) __ AbortIfNotSmi(operand.reg()); @@ -6720,7 +6936,7 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) { __ lea(operand.reg(), Operand(operand.reg(), kSmiTagMask)); __ not_(operand.reg()); Result answer = operand; - answer.set_number_info(NumberInfo::Smi()); + answer.set_type_info(TypeInfo::Smi()); frame_->Push(&answer); } else { __ test(operand.reg(), Immediate(kSmiTagMask)); @@ -6739,9 +6955,9 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) { continue_label.Bind(&answer); if (operand_info.IsInteger32()) { - answer.set_number_info(NumberInfo::Integer32()); + answer.set_type_info(TypeInfo::Integer32()); } else { - answer.set_number_info(NumberInfo::Number()); + answer.set_type_info(TypeInfo::Number()); } frame_->Push(&answer); } @@ -6751,7 +6967,7 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) { // Smi check. JumpTarget continue_label; Result operand = frame_->Pop(); - NumberInfo operand_info = operand.number_info(); + TypeInfo operand_info = operand.type_info(); operand.ToRegister(); __ test(operand.reg(), Immediate(kSmiTagMask)); continue_label.Branch(zero, &operand, taken); @@ -6762,11 +6978,11 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) { continue_label.Bind(&answer); if (operand_info.IsSmi()) { - answer.set_number_info(NumberInfo::Smi()); + answer.set_type_info(TypeInfo::Smi()); } else if (operand_info.IsInteger32()) { - answer.set_number_info(NumberInfo::Integer32()); + answer.set_type_info(TypeInfo::Integer32()); } else { - answer.set_number_info(NumberInfo::Number()); + answer.set_type_info(TypeInfo::Number()); } frame_->Push(&answer); break; @@ -6906,7 +7122,7 @@ void CodeGenerator::VisitCountOperation(CountOperation* node) { // The return value for postfix operations is the // same as the input, and has the same number info. - old_value.set_number_info(new_value.number_info()); + old_value.set_type_info(new_value.type_info()); } // Ensure the new value is writable. @@ -6973,9 +7189,9 @@ void CodeGenerator::VisitCountOperation(CountOperation* node) { // The result of ++ or -- is an Integer32 if the // input is a smi. Otherwise it is a number. 
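(On ia32 a smi carries 31 value bits, so incrementing the largest smi, 2^30 - 1, leaves the smi range while the result still fits in a signed 32-bit integer; the type is therefore widened to Integer32 rather than kept as Smi.)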
if (new_value.is_smi()) { - new_value.set_number_info(NumberInfo::Integer32()); + new_value.set_type_info(TypeInfo::Integer32()); } else { - new_value.set_number_info(NumberInfo::Number()); + new_value.set_type_info(TypeInfo::Number()); } // Postfix: store the old value in the allocated slot under the @@ -7023,15 +7239,39 @@ void CodeGenerator::Int32BinaryOperation(BinaryOperation* node) { case Token::BIT_OR: case Token::BIT_XOR: case Token::BIT_AND: - left.ToRegister(); - right.ToRegister(); - if (op == Token::BIT_OR) { - __ or_(left.reg(), Operand(right.reg())); - } else if (op == Token::BIT_XOR) { - __ xor_(left.reg(), Operand(right.reg())); + if (left.is_constant() || right.is_constant()) { + int32_t value; // Put constant in value, non-constant in left. + // Constants are known to be int32 values, from static analysis, + // or else will be converted to int32 by implicit ECMA [[ToInt32]]. + if (left.is_constant()) { + ASSERT(left.handle()->IsSmi() || left.handle()->IsHeapNumber()); + value = NumberToInt32(*left.handle()); + left = right; + } else { + ASSERT(right.handle()->IsSmi() || right.handle()->IsHeapNumber()); + value = NumberToInt32(*right.handle()); + } + + left.ToRegister(); + if (op == Token::BIT_OR) { + __ or_(Operand(left.reg()), Immediate(value)); + } else if (op == Token::BIT_XOR) { + __ xor_(Operand(left.reg()), Immediate(value)); + } else { + ASSERT(op == Token::BIT_AND); + __ and_(Operand(left.reg()), Immediate(value)); + } } else { - ASSERT(op == Token::BIT_AND); - __ and_(left.reg(), Operand(right.reg())); + ASSERT(left.is_register()); + ASSERT(right.is_register()); + if (op == Token::BIT_OR) { + __ or_(left.reg(), Operand(right.reg())); + } else if (op == Token::BIT_XOR) { + __ xor_(left.reg(), Operand(right.reg())); + } else { + ASSERT(op == Token::BIT_AND); + __ and_(left.reg(), Operand(right.reg())); + } } frame_->Push(&left); right.Unuse(); @@ -7090,16 +7330,39 @@ void CodeGenerator::Int32BinaryOperation(BinaryOperation* node) { case Token::ADD: case Token::SUB: case Token::MUL: - left.ToRegister(); - right.ToRegister(); - if (op == Token::ADD) { - __ add(left.reg(), Operand(right.reg())); - } else if (op == Token::SUB) { - __ sub(left.reg(), Operand(right.reg())); + if ((left.is_constant() && op != Token::SUB) || right.is_constant()) { + int32_t value; // Put constant in value, non-constant in left. + if (right.is_constant()) { + ASSERT(right.handle()->IsSmi() || right.handle()->IsHeapNumber()); + value = NumberToInt32(*right.handle()); + } else { + ASSERT(left.handle()->IsSmi() || left.handle()->IsHeapNumber()); + value = NumberToInt32(*left.handle()); + left = right; + } + + left.ToRegister(); + if (op == Token::ADD) { + __ add(Operand(left.reg()), Immediate(value)); + } else if (op == Token::SUB) { + __ sub(Operand(left.reg()), Immediate(value)); + } else { + ASSERT(op == Token::MUL); + __ imul(left.reg(), left.reg(), value); + } } else { - ASSERT(op == Token::MUL); - // We have statically verified that a negative zero can be ignored. - __ imul(left.reg(), Operand(right.reg())); + left.ToRegister(); + ASSERT(left.is_register()); + ASSERT(right.is_register()); + if (op == Token::ADD) { + __ add(left.reg(), Operand(right.reg())); + } else if (op == Token::SUB) { + __ sub(left.reg(), Operand(right.reg())); + } else { + ASSERT(op == Token::MUL); + // We have statically verified that a negative zero can be ignored. 
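(In JavaScript -1 * 0 evaluates to -0, which has no int32 representation, so this untagged multiply is emitted only when static analysis proved that a negative zero result cannot be observed; the modulo case below makes the corresponding check at run time when it cannot be proved.)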
+ __ imul(left.reg(), Operand(right.reg())); + } } right.Unuse(); frame_->Push(&left); @@ -7133,14 +7396,23 @@ void CodeGenerator::Int32BinaryOperation(BinaryOperation* node) { __ cdq(); // Sign-extend eax into edx:eax __ idiv(right_reg); if (op == Token::MOD) { - Result edx_result(edx, NumberInfo::Integer32()); + // Negative zero can arise as a negative divident with a zero result. + if (!node->no_negative_zero()) { + Label not_negative_zero; + __ test(edx, Operand(edx)); + __ j(not_zero, ¬_negative_zero); + __ test(eax, Operand(eax)); + unsafe_bailout_->Branch(negative); + __ bind(¬_negative_zero); + } + Result edx_result(edx, TypeInfo::Integer32()); edx_result.set_untagged_int32(true); frame_->Push(&edx_result); } else { ASSERT(op == Token::DIV); __ test(edx, Operand(edx)); unsafe_bailout_->Branch(not_equal); - Result eax_result(eax, NumberInfo::Integer32()); + Result eax_result(eax, TypeInfo::Integer32()); eax_result.set_untagged_int32(true); frame_->Push(&eax_result); } @@ -8027,7 +8299,7 @@ Result CodeGenerator::EmitKeyedStore(StaticType* key_type) { static void CheckTwoForSminess(MacroAssembler* masm, Register left, Register right, Register scratch, - NumberInfo left_info, NumberInfo right_info, + TypeInfo left_info, TypeInfo right_info, DeferredInlineBinaryOperation* deferred) { if (left.is(right)) { if (!left_info.IsSmi()) { @@ -8205,12 +8477,12 @@ void Reference::SetValue(InitState init_state) { void FastNewClosureStub::Generate(MacroAssembler* masm) { - // Clone the boilerplate in new space. Set the context to the - // current context in esi. + // Create a new closure from the given function info in new + // space. Set the context to the current context in esi. Label gc; __ AllocateInNewSpace(JSFunction::kSize, eax, ebx, ecx, &gc, TAG_OBJECT); - // Get the boilerplate function from the stack. + // Get the function info from the stack. __ mov(edx, Operand(esp, 1 * kPointerSize)); // Compute the function map in the current global context and set that @@ -8220,18 +8492,16 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) { __ mov(ecx, Operand(ecx, Context::SlotOffset(Context::FUNCTION_MAP_INDEX))); __ mov(FieldOperand(eax, JSObject::kMapOffset), ecx); - // Clone the rest of the boilerplate fields. We don't have to update - // the write barrier because the allocated object is in new space. - for (int offset = kPointerSize; - offset < JSFunction::kSize; - offset += kPointerSize) { - if (offset == JSFunction::kContextOffset) { - __ mov(FieldOperand(eax, offset), esi); - } else { - __ mov(ebx, FieldOperand(edx, offset)); - __ mov(FieldOperand(eax, offset), ebx); - } - } + // Initialize the rest of the function. We don't have to update the + // write barrier because the allocated object is in new space. + __ mov(ebx, Immediate(Factory::empty_fixed_array())); + __ mov(FieldOperand(eax, JSObject::kPropertiesOffset), ebx); + __ mov(FieldOperand(eax, JSObject::kElementsOffset), ebx); + __ mov(FieldOperand(eax, JSFunction::kPrototypeOrInitialMapOffset), + Immediate(Factory::the_hole_value())); + __ mov(FieldOperand(eax, JSFunction::kSharedFunctionInfoOffset), edx); + __ mov(FieldOperand(eax, JSFunction::kContextOffset), esi); + __ mov(FieldOperand(eax, JSFunction::kLiteralsOffset), ebx); // Return and remove the on-stack parameter. __ ret(1 * kPointerSize); @@ -9530,14 +9800,14 @@ void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm) { // trashed registers. 
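IntegerConvert below produces the untagged 32-bit integer that the bitwise and shift operators need; the language applies ECMA [[ToInt32]] to such operands (so, for example, 4294967295 | 0 === -1 in JavaScript), and inputs the fast path cannot handle branch to conversion_failure.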
void IntegerConvert(MacroAssembler* masm, Register source, - NumberInfo number_info, + TypeInfo type_info, bool use_sse3, Label* conversion_failure) { ASSERT(!source.is(ecx) && !source.is(edi) && !source.is(ebx)); Label done, right_exponent, normal_exponent; Register scratch = ebx; Register scratch2 = edi; - if (!number_info.IsInteger32() || !use_sse3) { + if (!type_info.IsInteger32() || !use_sse3) { // Get exponent word. __ mov(scratch, FieldOperand(source, HeapNumber::kExponentOffset)); // Get exponent alone in scratch2. @@ -9546,7 +9816,7 @@ void IntegerConvert(MacroAssembler* masm, } if (use_sse3) { CpuFeatures::Scope scope(SSE3); - if (!number_info.IsInteger32()) { + if (!type_info.IsInteger32()) { // Check whether the exponent is too big for a 64 bit signed integer. static const uint32_t kTooBigExponent = (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift; @@ -9667,7 +9937,7 @@ void IntegerConvert(MacroAssembler* masm, // Input: edx, eax are the left and right objects of a bit op. // Output: eax, ecx are left and right integers for a bit op. void FloatingPointHelper::LoadNumbersAsIntegers(MacroAssembler* masm, - NumberInfo number_info, + TypeInfo type_info, bool use_sse3, Label* conversion_failure) { // Check float operands. @@ -9675,8 +9945,8 @@ void FloatingPointHelper::LoadNumbersAsIntegers(MacroAssembler* masm, Label arg2_is_object, check_undefined_arg2; Label load_arg2, done; - if (!number_info.IsHeapNumber()) { - if (!number_info.IsSmi()) { + if (!type_info.IsDouble()) { + if (!type_info.IsSmi()) { __ test(edx, Immediate(kSmiTagMask)); __ j(not_zero, &arg1_is_object); } else { @@ -9689,14 +9959,14 @@ void FloatingPointHelper::LoadNumbersAsIntegers(MacroAssembler* masm, __ bind(&arg1_is_object); // Get the untagged integer version of the edx heap number in ecx. - IntegerConvert(masm, edx, number_info, use_sse3, conversion_failure); + IntegerConvert(masm, edx, type_info, use_sse3, conversion_failure); __ mov(edx, ecx); // Here edx has the untagged integer, eax has a Smi or a heap number. __ bind(&load_arg2); - if (!number_info.IsHeapNumber()) { + if (!type_info.IsDouble()) { // Test if arg2 is a Smi. - if (!number_info.IsSmi()) { + if (!type_info.IsSmi()) { __ test(eax, Immediate(kSmiTagMask)); __ j(not_zero, &arg2_is_object); } else { @@ -9710,7 +9980,7 @@ void FloatingPointHelper::LoadNumbersAsIntegers(MacroAssembler* masm, __ bind(&arg2_is_object); // Get the untagged integer version of the eax heap number in ecx. - IntegerConvert(masm, eax, number_info, use_sse3, conversion_failure); + IntegerConvert(masm, eax, type_info, use_sse3, conversion_failure); __ bind(&done); __ mov(eax, edx); } @@ -9748,7 +10018,7 @@ void FloatingPointHelper::LoadUnknownsAsIntegers(MacroAssembler* masm, // Get the untagged integer version of the edx heap number in ecx. IntegerConvert(masm, edx, - NumberInfo::Unknown(), + TypeInfo::Unknown(), use_sse3, conversion_failure); __ mov(edx, ecx); @@ -9779,7 +10049,7 @@ void FloatingPointHelper::LoadUnknownsAsIntegers(MacroAssembler* masm, // Get the untagged integer version of the eax heap number in ecx. 
IntegerConvert(masm, eax, - NumberInfo::Unknown(), + TypeInfo::Unknown(), use_sse3, conversion_failure); __ bind(&done); @@ -9788,11 +10058,11 @@ void FloatingPointHelper::LoadUnknownsAsIntegers(MacroAssembler* masm, void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm, - NumberInfo number_info, + TypeInfo type_info, bool use_sse3, Label* conversion_failure) { - if (number_info.IsNumber()) { - LoadNumbersAsIntegers(masm, number_info, use_sse3, conversion_failure); + if (type_info.IsNumber()) { + LoadNumbersAsIntegers(masm, type_info, use_sse3, conversion_failure); } else { LoadUnknownsAsIntegers(masm, use_sse3, conversion_failure); } @@ -10037,7 +10307,7 @@ void GenericUnaryOpStub::Generate(MacroAssembler* masm) { // Convert the heap number in eax to an untagged integer in ecx. IntegerConvert(masm, eax, - NumberInfo::Unknown(), + TypeInfo::Unknown(), CpuFeatures::IsSupported(SSE3), &slow); @@ -10824,63 +11094,70 @@ void CompareStub::Generate(MacroAssembler* masm) { __ push(edx); __ push(ecx); - // Inlined floating point compare. - // Call builtin if operands are not floating point or smi. - Label check_for_symbols; - Label unordered; - if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatures::Scope use_sse2(SSE2); - CpuFeatures::Scope use_cmov(CMOV); - - FloatingPointHelper::LoadSSE2Operands(masm, &check_for_symbols); - __ comisd(xmm0, xmm1); + // Generate the number comparison code. + if (include_number_compare_) { + Label non_number_comparison; + Label unordered; + if (CpuFeatures::IsSupported(SSE2)) { + CpuFeatures::Scope use_sse2(SSE2); + CpuFeatures::Scope use_cmov(CMOV); + + FloatingPointHelper::LoadSSE2Operands(masm, &non_number_comparison); + __ comisd(xmm0, xmm1); + + // Don't base result on EFLAGS when a NaN is involved. + __ j(parity_even, &unordered, not_taken); + // Return a result of -1, 0, or 1, based on EFLAGS. + __ mov(eax, 0); // equal + __ mov(ecx, Immediate(Smi::FromInt(1))); + __ cmov(above, eax, Operand(ecx)); + __ mov(ecx, Immediate(Smi::FromInt(-1))); + __ cmov(below, eax, Operand(ecx)); + __ ret(2 * kPointerSize); + } else { + FloatingPointHelper::CheckFloatOperands( + masm, &non_number_comparison, ebx); + FloatingPointHelper::LoadFloatOperands(masm, ecx); + __ FCmp(); - // Jump to builtin for NaN. - __ j(parity_even, &unordered, not_taken); - __ mov(eax, 0); // equal - __ mov(ecx, Immediate(Smi::FromInt(1))); - __ cmov(above, eax, Operand(ecx)); - __ mov(ecx, Immediate(Smi::FromInt(-1))); - __ cmov(below, eax, Operand(ecx)); - __ ret(2 * kPointerSize); - } else { - FloatingPointHelper::CheckFloatOperands(masm, &check_for_symbols, ebx); - FloatingPointHelper::LoadFloatOperands(masm, ecx); - __ FCmp(); + // Don't base result on EFLAGS when a NaN is involved. + __ j(parity_even, &unordered, not_taken); - // Jump to builtin for NaN. - __ j(parity_even, &unordered, not_taken); + Label below_label, above_label; + // Return a result of -1, 0, or 1, based on EFLAGS. In all cases remove + // two arguments from the stack as they have been pushed in preparation + // of a possible runtime call. + __ j(below, &below_label, not_taken); + __ j(above, &above_label, not_taken); - Label below_lbl, above_lbl; - // Return a result of -1, 0, or 1, to indicate result of comparison. - __ j(below, &below_lbl, not_taken); - __ j(above, &above_lbl, not_taken); + __ xor_(eax, Operand(eax)); + __ ret(2 * kPointerSize); - __ xor_(eax, Operand(eax)); // equal - // Both arguments were pushed in case a runtime call was needed. 
- __ ret(2 * kPointerSize); + __ bind(&below_label); + __ mov(eax, Immediate(Smi::FromInt(-1))); + __ ret(2 * kPointerSize); - __ bind(&below_lbl); - __ mov(eax, Immediate(Smi::FromInt(-1))); - __ ret(2 * kPointerSize); + __ bind(&above_label); + __ mov(eax, Immediate(Smi::FromInt(1))); + __ ret(2 * kPointerSize); + } - __ bind(&above_lbl); - __ mov(eax, Immediate(Smi::FromInt(1))); + // If one of the numbers was NaN, then the result is always false. + // The cc is never not-equal. + __ bind(&unordered); + ASSERT(cc_ != not_equal); + if (cc_ == less || cc_ == less_equal) { + __ mov(eax, Immediate(Smi::FromInt(1))); + } else { + __ mov(eax, Immediate(Smi::FromInt(-1))); + } __ ret(2 * kPointerSize); // eax, edx were pushed + + // The number comparison code did not provide a valid result. + __ bind(&non_number_comparison); } - // If one of the numbers was NaN, then the result is always false. - // The cc is never not-equal. - __ bind(&unordered); - ASSERT(cc_ != not_equal); - if (cc_ == less || cc_ == less_equal) { - __ mov(eax, Immediate(Smi::FromInt(1))); - } else { - __ mov(eax, Immediate(Smi::FromInt(-1))); - } - __ ret(2 * kPointerSize); // eax, edx were pushed // Fast negative check for symbol-to-symbol equality. - __ bind(&check_for_symbols); Label check_for_strings; if (cc_ == equal) { BranchIfNonSymbol(masm, &check_for_strings, eax, ecx); @@ -11490,55 +11767,59 @@ void InstanceofStub::Generate(MacroAssembler* masm) { } +int CompareStub::MinorKey() { + // Encode the three parameters in a unique 16 bit value. To avoid duplicate + // stubs the never NaN NaN condition is only taken into account if the + // condition is equals. + ASSERT(static_cast<unsigned>(cc_) < (1 << 13)); + return ConditionField::encode(static_cast<unsigned>(cc_)) + | StrictField::encode(strict_) + | NeverNanNanField::encode(cc_ == equal ? never_nan_nan_ : false) + | IncludeNumberCompareField::encode(include_number_compare_); +} + + // Unfortunately you have to run without snapshots to see most of these // names in the profile since most compare stubs end up in the snapshot. 
const char* CompareStub::GetName() { + if (name_ != NULL) return name_; + const int kMaxNameLength = 100; + name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength); + if (name_ == NULL) return "OOM"; + + const char* cc_name; switch (cc_) { - case less: return "CompareStub_LT"; - case greater: return "CompareStub_GT"; - case less_equal: return "CompareStub_LE"; - case greater_equal: return "CompareStub_GE"; - case not_equal: { - if (strict_) { - if (never_nan_nan_) { - return "CompareStub_NE_STRICT_NO_NAN"; - } else { - return "CompareStub_NE_STRICT"; - } - } else { - if (never_nan_nan_) { - return "CompareStub_NE_NO_NAN"; - } else { - return "CompareStub_NE"; - } - } - } - case equal: { - if (strict_) { - if (never_nan_nan_) { - return "CompareStub_EQ_STRICT_NO_NAN"; - } else { - return "CompareStub_EQ_STRICT"; - } - } else { - if (never_nan_nan_) { - return "CompareStub_EQ_NO_NAN"; - } else { - return "CompareStub_EQ"; - } - } - } - default: return "CompareStub"; + case less: cc_name = "LT"; break; + case greater: cc_name = "GT"; break; + case less_equal: cc_name = "LE"; break; + case greater_equal: cc_name = "GE"; break; + case equal: cc_name = "EQ"; break; + case not_equal: cc_name = "NE"; break; + default: cc_name = "UnknownCondition"; break; } -} + const char* strict_name = ""; + if (strict_ && (cc_ == equal || cc_ == not_equal)) { + strict_name = "_STRICT"; + } -int CompareStub::MinorKey() { - // Encode the three parameters in a unique 16 bit value. - ASSERT(static_cast<unsigned>(cc_) < (1 << 14)); - int nnn_value = (never_nan_nan_ ? 2 : 0); - if (cc_ != equal) nnn_value = 0; // Avoid duplicate stubs. - return (static_cast<unsigned>(cc_) << 2) | nnn_value | (strict_ ? 1 : 0); + const char* never_nan_nan_name = ""; + if (never_nan_nan_ && (cc_ == equal || cc_ == not_equal)) { + never_nan_nan_name = "_NO_NAN"; + } + + const char* include_number_compare_name = ""; + if (!include_number_compare_) { + include_number_compare_name = "_NO_NUMBER"; + } + + OS::SNPrintF(Vector<char>(name_, kMaxNameLength), + "CompareStub_%s%s%s%s", + cc_name, + strict_name, + never_nan_nan_name, + include_number_compare_name); + return name_; } @@ -12172,6 +12453,9 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm, Label result_not_equal; Label result_greater; Label compare_lengths; + + __ IncrementCounter(&Counters::string_compare_native, 1); + // Find minimum length. Label left_shorter; __ mov(scratch1, FieldOperand(left, String::kLengthOffset)); @@ -12269,7 +12553,6 @@ void StringCompareStub::Generate(MacroAssembler* masm) { __ JumpIfNotBothSequentialAsciiStrings(edx, eax, ecx, ebx, &runtime); // Compare flat ascii strings. - __ IncrementCounter(&Counters::string_compare_native, 1); GenerateCompareFlatAsciiStrings(masm, edx, eax, ecx, ebx, edi); // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater) diff --git a/deps/v8/src/ia32/codegen-ia32.h b/deps/v8/src/ia32/codegen-ia32.h index c7ff2e8753..ca4a44b85e 100644 --- a/deps/v8/src/ia32/codegen-ia32.h +++ b/deps/v8/src/ia32/codegen-ia32.h @@ -373,6 +373,7 @@ class CodeGenerator: public AstVisitor { // Take the Result that is an untagged int32, and convert it to a tagged // Smi or HeapNumber. Remove the untagged_int32 flag from the result. void ConvertInt32ResultToNumber(Result* value); + void ConvertInt32ResultToSmi(Result* value); // Track loop nesting level. 
int loop_nesting() const { return loop_nesting_; } @@ -528,6 +529,10 @@ class CodeGenerator: public AstVisitor { Condition cc, bool strict, ControlDestination* destination); + void GenerateInlineNumberComparison(Result* left_side, + Result* right_side, + Condition cc, + ControlDestination* dest); // To prevent long attacker-controlled byte sequences, integer constants // from the JavaScript source are loaded in two parts if they are larger @@ -574,8 +579,8 @@ class CodeGenerator: public AstVisitor { // name/value pairs. void DeclareGlobals(Handle<FixedArray> pairs); - // Instantiate the function boilerplate. - Result InstantiateBoilerplate(Handle<JSFunction> boilerplate); + // Instantiate the function based on the shared function info. + Result InstantiateFunction(Handle<SharedFunctionInfo> function_info); // Support for type checks. void GenerateIsSmi(ZoneList<Expression*>* args); @@ -652,7 +657,7 @@ class CodeGenerator: public AstVisitor { void CodeForDoWhileConditionPosition(DoWhileStatement* stmt); void CodeForSourcePosition(int pos); - void SetTypeForStackSlot(Slot* slot, NumberInfo info); + void SetTypeForStackSlot(Slot* slot, TypeInfo info); #ifdef DEBUG // True if the registers are valid for entry to a block. There should @@ -736,7 +741,7 @@ class GenericBinaryOpStub: public CodeStub { GenericBinaryOpStub(Token::Value op, OverwriteMode mode, GenericBinaryFlags flags, - NumberInfo operands_type) + TypeInfo operands_type) : op_(op), mode_(mode), flags_(flags), @@ -759,7 +764,7 @@ class GenericBinaryOpStub: public CodeStub { args_in_registers_(ArgsInRegistersBits::decode(key)), args_reversed_(ArgsReversedBits::decode(key)), use_sse3_(SSE3Bits::decode(key)), - static_operands_type_(NumberInfo::ExpandedRepresentation( + static_operands_type_(TypeInfo::ExpandedRepresentation( StaticTypeInfoBits::decode(key))), runtime_operands_type_(runtime_operands_type), name_(NULL) { @@ -786,7 +791,7 @@ class GenericBinaryOpStub: public CodeStub { bool use_sse3_; // Number type information of operands, determined by code generator. - NumberInfo static_operands_type_; + TypeInfo static_operands_type_; // Operand type information determined at runtime. 
BinaryOpIC::TypeInfo runtime_operands_type_; @@ -798,7 +803,7 @@ class GenericBinaryOpStub: public CodeStub { #ifdef DEBUG void Print() { PrintF("GenericBinaryOpStub %d (op %s), " - "(mode %d, flags %d, registers %d, reversed %d, number_info %s)\n", + "(mode %d, flags %d, registers %d, reversed %d, type_info %s)\n", MinorKey(), Token::String(op_), static_cast<int>(mode_), diff --git a/deps/v8/src/ia32/fast-codegen-ia32.cc b/deps/v8/src/ia32/fast-codegen-ia32.cc index 01190a5985..61e2b5edfc 100644 --- a/deps/v8/src/ia32/fast-codegen-ia32.cc +++ b/deps/v8/src/ia32/fast-codegen-ia32.cc @@ -195,9 +195,9 @@ void FastCodeGenSyntaxChecker::VisitFunctionLiteral(FunctionLiteral* expr) { } -void FastCodeGenSyntaxChecker::VisitFunctionBoilerplateLiteral( - FunctionBoilerplateLiteral* expr) { - BAILOUT("FunctionBoilerplateLiteral"); +void FastCodeGenSyntaxChecker::VisitSharedFunctionInfoLiteral( + SharedFunctionInfoLiteral* expr) { + BAILOUT("SharedFunctionInfoLiteral"); } @@ -764,8 +764,8 @@ void FastCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) { } -void FastCodeGenerator::VisitFunctionBoilerplateLiteral( - FunctionBoilerplateLiteral* expr) { +void FastCodeGenerator::VisitSharedFunctionInfoLiteral( + SharedFunctionInfoLiteral* expr) { UNREACHABLE(); } diff --git a/deps/v8/src/ia32/full-codegen-ia32.cc b/deps/v8/src/ia32/full-codegen-ia32.cc index cedf9c95fd..e59dc512df 100644 --- a/deps/v8/src/ia32/full-codegen-ia32.cc +++ b/deps/v8/src/ia32/full-codegen-ia32.cc @@ -777,15 +777,13 @@ void FullCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) { Comment cmnt(masm_, "[ FunctionLiteral"); // Build the function boilerplate and instantiate it. - Handle<JSFunction> boilerplate = - Compiler::BuildBoilerplate(expr, script(), this); + Handle<SharedFunctionInfo> function_info = + Compiler::BuildFunctionInfo(expr, script(), this); if (HasStackOverflow()) return; - ASSERT(boilerplate->IsBoilerplate()); - // Create a new closure. __ push(esi); - __ push(Immediate(boilerplate)); + __ push(Immediate(function_info)); __ CallRuntime(Runtime::kNewClosure, 2); Apply(context_, eax); } @@ -1132,7 +1130,7 @@ void FullCodeGenerator::EmitBinaryOp(Token::Value op, GenericBinaryOpStub stub(op, NO_OVERWRITE, NO_GENERIC_BINARY_FLAGS, - NumberInfo::Unknown()); + TypeInfo::Unknown()); __ CallStub(&stub); Apply(context, eax); } @@ -1747,7 +1745,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) { GenericBinaryOpStub stub(expr->binary_op(), NO_OVERWRITE, NO_GENERIC_BINARY_FLAGS, - NumberInfo::Unknown()); + TypeInfo::Unknown()); stub.GenerateCall(masm(), eax, Smi::FromInt(1)); __ bind(&done); diff --git a/deps/v8/src/ia32/ic-ia32.cc b/deps/v8/src/ia32/ic-ia32.cc index 3928661e9e..8d6c346b31 100644 --- a/deps/v8/src/ia32/ic-ia32.cc +++ b/deps/v8/src/ia32/ic-ia32.cc @@ -254,23 +254,6 @@ static void GenerateNumberDictionaryLoad(MacroAssembler* masm, } -// Helper function used to check that a value is either not an object -// or is loaded if it is an object. -static void GenerateCheckNonObjectOrLoaded(MacroAssembler* masm, Label* miss, - Register value, Register scratch) { - Label done; - // Check if the value is a Smi. - __ test(value, Immediate(kSmiTagMask)); - __ j(zero, &done, not_taken); - // Check if the object has been loaded. 
- __ mov(scratch, FieldOperand(value, JSFunction::kMapOffset)); - __ mov(scratch, FieldOperand(scratch, Map::kBitField2Offset)); - __ test(scratch, Immediate(1 << Map::kNeedsLoading)); - __ j(not_zero, miss, not_taken); - __ bind(&done); -} - - // The offset from the inlined patch site to the start of the // inlined load instruction. It is 7 bytes (test eax, imm) plus // 6 bytes (jne slow_label). @@ -495,7 +478,6 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { ecx, edi, DICTIONARY_CHECK_DONE); - GenerateCheckNonObjectOrLoaded(masm, &slow, ecx, ebx); __ mov(eax, ecx); __ IncrementCounter(&Counters::keyed_load_generic_symbol, 1); __ ret(0); @@ -1146,11 +1128,6 @@ static void GenerateNormalHelper(MacroAssembler* masm, __ CmpObjectType(edi, JS_FUNCTION_TYPE, eax); __ j(not_equal, miss, not_taken); - // Check that the function has been loaded. eax holds function's map. - __ mov(eax, FieldOperand(eax, Map::kBitField2Offset)); - __ test(eax, Immediate(1 << Map::kNeedsLoading)); - __ j(not_zero, miss, not_taken); - // Patch the receiver on stack with the global proxy if necessary. if (is_global_object) { __ mov(edx, FieldOperand(edx, GlobalObject::kGlobalReceiverOffset)); @@ -1341,7 +1318,6 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) { edi, ebx, CHECK_DICTIONARY); - GenerateCheckNonObjectOrLoaded(masm, &miss, edi, edx); __ mov(eax, edi); __ ret(0); diff --git a/deps/v8/src/ia32/register-allocator-ia32.cc b/deps/v8/src/ia32/register-allocator-ia32.cc index 0129314c87..73fefb3bbf 100644 --- a/deps/v8/src/ia32/register-allocator-ia32.cc +++ b/deps/v8/src/ia32/register-allocator-ia32.cc @@ -75,7 +75,7 @@ void Result::ToRegister() { Immediate(handle())); } // This result becomes a copy of the fresh one. - fresh.set_number_info(number_info()); + fresh.set_type_info(type_info()); *this = fresh; } ASSERT(is_register()); @@ -122,7 +122,7 @@ void Result::ToRegister(Register target) { } } } - fresh.set_number_info(number_info()); + fresh.set_type_info(type_info()); fresh.set_untagged_int32(is_untagged_int32()); *this = fresh; } else if (is_register() && reg().is(target)) { diff --git a/deps/v8/src/ia32/virtual-frame-ia32.cc b/deps/v8/src/ia32/virtual-frame-ia32.cc index de12b64f2c..bc27e1de7a 100644 --- a/deps/v8/src/ia32/virtual-frame-ia32.cc +++ b/deps/v8/src/ia32/virtual-frame-ia32.cc @@ -162,7 +162,7 @@ void VirtualFrame::MakeMergable() { if (element.is_constant() || element.is_copy()) { if (element.is_synced()) { // Just spill. - elements_[i] = FrameElement::MemoryElement(NumberInfo::Unknown()); + elements_[i] = FrameElement::MemoryElement(TypeInfo::Unknown()); } else { // Allocate to a register. FrameElement backing_element; // Invalid if not a copy. @@ -174,7 +174,7 @@ void VirtualFrame::MakeMergable() { elements_[i] = FrameElement::RegisterElement(fresh.reg(), FrameElement::NOT_SYNCED, - NumberInfo::Unknown()); + TypeInfo::Unknown()); Use(fresh.reg(), i); // Emit a move. @@ -207,7 +207,7 @@ void VirtualFrame::MakeMergable() { // The copy flag is not relied on before the end of this loop, // including when registers are spilled. 
elements_[i].clear_copied(); - elements_[i].set_number_info(NumberInfo::Unknown()); + elements_[i].set_type_info(TypeInfo::Unknown()); } } } @@ -597,12 +597,12 @@ int VirtualFrame::InvalidateFrameSlotAt(int index) { elements_[new_backing_index] = FrameElement::RegisterElement(backing_reg, FrameElement::SYNCED, - original.number_info()); + original.type_info()); } else { elements_[new_backing_index] = FrameElement::RegisterElement(backing_reg, FrameElement::NOT_SYNCED, - original.number_info()); + original.type_info()); } // Update the other copies. for (int i = new_backing_index + 1; i < element_count(); i++) { @@ -634,7 +634,7 @@ void VirtualFrame::TakeFrameSlotAt(int index) { FrameElement new_element = FrameElement::RegisterElement(fresh.reg(), FrameElement::NOT_SYNCED, - original.number_info()); + original.type_info()); Use(fresh.reg(), element_count()); elements_.Add(new_element); __ mov(fresh.reg(), Operand(ebp, fp_relative(index))); @@ -796,7 +796,7 @@ void VirtualFrame::UntaggedPushFrameSlotAt(int index) { FrameElement new_element = FrameElement::RegisterElement(fresh_reg, FrameElement::NOT_SYNCED, - original.number_info()); + original.type_info()); new_element.set_untagged_int32(true); Use(fresh_reg, element_count()); fresh.Unuse(); // BreakTarget does not handle a live Result well. @@ -808,7 +808,7 @@ void VirtualFrame::UntaggedPushFrameSlotAt(int index) { __ mov(fresh_reg, Operand(ebp, fp_relative(index))); } // Now convert the value to int32, or bail out. - if (original.number_info().IsSmi()) { + if (original.type_info().IsSmi()) { __ SmiUntag(fresh_reg); // Pushing the element is completely done. } else { @@ -819,7 +819,7 @@ void VirtualFrame::UntaggedPushFrameSlotAt(int index) { __ jmp(&done); __ bind(¬_smi); - if (!original.number_info().IsNumber()) { + if (!original.type_info().IsNumber()) { __ cmp(FieldOperand(fresh_reg, HeapObject::kMapOffset), Factory::heap_number_map()); cgen()->unsafe_bailout_->Branch(not_equal); @@ -1040,18 +1040,23 @@ Result VirtualFrame::CallKeyedStoreIC() { PrepareForCall(0, 0); if (!cgen()->allocator()->is_used(eax) || (value.is_register() && value.reg().is(eax))) { - value.ToRegister(eax); // No effect if value is in eax already. + if (!cgen()->allocator()->is_used(eax)) { + value.ToRegister(eax); + } MoveResultsToRegisters(&key, &receiver, ecx, edx); value.Unuse(); } else if (!cgen()->allocator()->is_used(ecx) || (key.is_register() && key.reg().is(ecx))) { - // Receiver and/or key are in eax. - key.ToRegister(ecx); + if (!cgen()->allocator()->is_used(ecx)) { + key.ToRegister(ecx); + } MoveResultsToRegisters(&value, &receiver, eax, edx); key.Unuse(); } else if (!cgen()->allocator()->is_used(edx) || (receiver.is_register() && receiver.reg().is(edx))) { - receiver.ToRegister(edx); + if (!cgen()->allocator()->is_used(edx)) { + receiver.ToRegister(edx); + } MoveResultsToRegisters(&key, &value, ecx, eax); receiver.Unuse(); } else { @@ -1146,11 +1151,11 @@ Result VirtualFrame::Pop() { ASSERT(element.is_untagged_int32() == cgen()->in_safe_int32_mode()); // Get number type information of the result. 
- NumberInfo info; + TypeInfo info; if (!element.is_copy()) { - info = element.number_info(); + info = element.type_info(); } else { - info = elements_[element.index()].number_info(); + info = elements_[element.index()].type_info(); } bool pop_needed = (stack_pointer_ == index); @@ -1160,7 +1165,7 @@ Result VirtualFrame::Pop() { Result temp = cgen()->allocator()->Allocate(); ASSERT(temp.is_valid()); __ pop(temp.reg()); - temp.set_number_info(info); + temp.set_type_info(info); temp.set_untagged_int32(element.is_untagged_int32()); return temp; } @@ -1193,7 +1198,7 @@ Result VirtualFrame::Pop() { FrameElement new_element = FrameElement::RegisterElement(temp.reg(), FrameElement::SYNCED, - element.number_info()); + element.type_info()); // Preserve the copy flag on the element. if (element.is_copied()) new_element.set_copied(); elements_[index] = new_element; @@ -1228,7 +1233,7 @@ void VirtualFrame::EmitPop(Operand operand) { } -void VirtualFrame::EmitPush(Register reg, NumberInfo info) { +void VirtualFrame::EmitPush(Register reg, TypeInfo info) { ASSERT(stack_pointer_ == element_count() - 1); elements_.Add(FrameElement::MemoryElement(info)); stack_pointer_++; @@ -1236,7 +1241,7 @@ void VirtualFrame::EmitPush(Register reg, NumberInfo info) { } -void VirtualFrame::EmitPush(Operand operand, NumberInfo info) { +void VirtualFrame::EmitPush(Operand operand, TypeInfo info) { ASSERT(stack_pointer_ == element_count() - 1); elements_.Add(FrameElement::MemoryElement(info)); stack_pointer_++; @@ -1244,7 +1249,7 @@ void VirtualFrame::EmitPush(Operand operand, NumberInfo info) { } -void VirtualFrame::EmitPush(Immediate immediate, NumberInfo info) { +void VirtualFrame::EmitPush(Immediate immediate, TypeInfo info) { ASSERT(stack_pointer_ == element_count() - 1); elements_.Add(FrameElement::MemoryElement(info)); stack_pointer_++; diff --git a/deps/v8/src/ia32/virtual-frame-ia32.h b/deps/v8/src/ia32/virtual-frame-ia32.h index e622cdfe0a..9b6892a51b 100644 --- a/deps/v8/src/ia32/virtual-frame-ia32.h +++ b/deps/v8/src/ia32/virtual-frame-ia32.h @@ -28,7 +28,7 @@ #ifndef V8_IA32_VIRTUAL_FRAME_IA32_H_ #define V8_IA32_VIRTUAL_FRAME_IA32_H_ -#include "number-info.h" +#include "type-info.h" #include "register-allocator.h" #include "scopes.h" @@ -84,7 +84,7 @@ class VirtualFrame: public ZoneObject { // Create a duplicate of an existing valid frame element. FrameElement CopyElementAt(int index, - NumberInfo info = NumberInfo::Uninitialized()); + TypeInfo info = TypeInfo::Uninitialized()); // The number of elements on the virtual frame. int element_count() { return elements_.length(); } @@ -138,7 +138,7 @@ class VirtualFrame: public ZoneObject { void ForgetElements(int count); // Spill all values from the frame to memory. - void SpillAll(); + inline void SpillAll(); // Spill all occurrences of a specific register from the frame. void Spill(Register reg) { @@ -199,7 +199,7 @@ class VirtualFrame: public ZoneObject { // Prepare for returning from the frame by spilling locals. This // avoids generating unnecessary merge code when jumping to the // shared return site. Emits code for spills. - void PrepareForReturn(); + inline void PrepareForReturn(); // Number of local variables after when we use a loop for allocating. static const int kLocalVarBound = 10; @@ -398,14 +398,14 @@ class VirtualFrame: public ZoneObject { // Push an element on top of the expression stack and emit a // corresponding push instruction. 
void EmitPush(Register reg, - NumberInfo info = NumberInfo::Unknown()); + TypeInfo info = TypeInfo::Unknown()); void EmitPush(Operand operand, - NumberInfo info = NumberInfo::Unknown()); + TypeInfo info = TypeInfo::Unknown()); void EmitPush(Immediate immediate, - NumberInfo info = NumberInfo::Unknown()); + TypeInfo info = TypeInfo::Unknown()); // Push an element on the virtual frame. - inline void Push(Register reg, NumberInfo info = NumberInfo::Unknown()); + inline void Push(Register reg, TypeInfo info = TypeInfo::Unknown()); inline void Push(Handle<Object> value); inline void Push(Smi* value); @@ -417,7 +417,7 @@ class VirtualFrame: public ZoneObject { // This assert will trigger if you try to push the same value twice. ASSERT(result->is_valid()); if (result->is_register()) { - Push(result->reg(), result->number_info()); + Push(result->reg(), result->type_info()); } else { ASSERT(result->is_constant()); Push(result->handle()); @@ -447,8 +447,8 @@ class VirtualFrame: public ZoneObject { } // Update the type information of a variable frame element directly. - inline void SetTypeForLocalAt(int index, NumberInfo info); - inline void SetTypeForParamAt(int index, NumberInfo info); + inline void SetTypeForLocalAt(int index, TypeInfo info); + inline void SetTypeForParamAt(int index, TypeInfo info); private: static const int kLocal0Offset = JavaScriptFrameConstants::kLocal0Offset; diff --git a/deps/v8/src/ic.cc b/deps/v8/src/ic.cc index 2b97a8bf78..a6d2020b44 100644 --- a/deps/v8/src/ic.cc +++ b/deps/v8/src/ic.cc @@ -436,7 +436,7 @@ Object* CallIC::LoadFunction(State state, } // Lookup is valid: Update inline cache and stub cache. - if (FLAG_use_ic && lookup.IsLoaded()) { + if (FLAG_use_ic) { UpdateCaches(&lookup, state, object, name); } @@ -484,7 +484,6 @@ void CallIC::UpdateCaches(LookupResult* lookup, State state, Handle<Object> object, Handle<String> name) { - ASSERT(lookup->IsLoaded()); // Bail out if we didn't find a result. if (!lookup->IsProperty() || !lookup->IsCacheable()) return; @@ -647,7 +646,6 @@ Object* LoadIC::Load(State state, Handle<Object> object, Handle<String> name) { FLAG_use_ic && state == PREMONOMORPHIC && lookup.IsProperty() && - lookup.IsLoaded() && lookup.IsCacheable() && lookup.holder() == *object && lookup.type() == FIELD && @@ -669,7 +667,7 @@ Object* LoadIC::Load(State state, Handle<Object> object, Handle<String> name) { } // Update inline cache and stub cache. - if (FLAG_use_ic && lookup.IsLoaded()) { + if (FLAG_use_ic) { UpdateCaches(&lookup, state, object, name); } @@ -695,7 +693,6 @@ void LoadIC::UpdateCaches(LookupResult* lookup, State state, Handle<Object> object, Handle<String> name) { - ASSERT(lookup->IsLoaded()); // Bail out if we didn't find a result. if (!lookup->IsProperty() || !lookup->IsCacheable()) return; @@ -857,7 +854,7 @@ Object* KeyedLoadIC::Load(State state, } } - if (FLAG_use_ic && lookup.IsLoaded()) { + if (FLAG_use_ic) { UpdateCaches(&lookup, state, object, name); } @@ -912,7 +909,6 @@ Object* KeyedLoadIC::Load(State state, void KeyedLoadIC::UpdateCaches(LookupResult* lookup, State state, Handle<Object> object, Handle<String> name) { - ASSERT(lookup->IsLoaded()); // Bail out if we didn't find a result. if (!lookup->IsProperty() || !lookup->IsCacheable()) return; @@ -993,8 +989,6 @@ static bool StoreICableLookup(LookupResult* lookup) { // state. 
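(The lookup->IsLoaded() checks removed here and throughout ic.cc, together with the Map::kNeedsLoading tests dropped above, appear to belong to V8's lazy library loading support, which this change removes; the inline caches no longer verify that a function's library has been loaded before caching it.)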
if (lookup->IsReadOnly()) return false; - if (!lookup->IsLoaded()) return false; - return true; } @@ -1073,7 +1067,6 @@ void StoreIC::UpdateCaches(LookupResult* lookup, Handle<JSObject> receiver, Handle<String> name, Handle<Object> value) { - ASSERT(lookup->IsLoaded()); // Skip JSGlobalProxy. ASSERT(!receiver->IsJSGlobalProxy()); @@ -1181,7 +1174,7 @@ Object* KeyedStoreIC::Store(State state, receiver->LocalLookup(*name, &lookup); // Update inline cache and stub cache. - if (FLAG_use_ic && lookup.IsLoaded()) { + if (FLAG_use_ic) { UpdateCaches(&lookup, state, receiver, name, value); } @@ -1215,8 +1208,6 @@ void KeyedStoreIC::UpdateCaches(LookupResult* lookup, Handle<JSObject> receiver, Handle<String> name, Handle<Object> value) { - ASSERT(lookup->IsLoaded()); - // Skip JSGlobalProxy. if (receiver->IsJSGlobalProxy()) return; diff --git a/deps/v8/src/json.js b/deps/v8/src/json.js new file mode 100644 index 0000000000..3e42d36609 --- /dev/null +++ b/deps/v8/src/json.js @@ -0,0 +1,268 @@ +// Copyright 2009 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
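For reference, a minimal sketch of how the JSON implementation below behaves (illustrative values only; the variable names are made up):

  var o = { a: 1 };
  JSON.stringify(o, null, 2);    // '{\n  "a": 1\n}'
  JSON.stringify(o, null, 100);  // a numeric space argument is clamped to 10
  o.self = o;
  // JSON.stringify(o);          // would throw a TypeError ('circular_structure')
  JSON.parse('{"a":1,"b":2}', function(key, val) {
    // The reviver visits every property bottom-up; returning undefined drops it.
    return typeof val === 'number' ? val * 2 : val;
  });                            // { a: 2, b: 4 }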
+ +var $JSON = global.JSON; + +function ParseJSONUnfiltered(text) { + var s = $String(text); + var f = %CompileString(text, true); + return f(); +} + +function Revive(holder, name, reviver) { + var val = holder[name]; + if (IS_OBJECT(val)) { + if (IS_ARRAY(val)) { + var length = val.length; + for (var i = 0; i < length; i++) { + var newElement = Revive(val, $String(i), reviver); + val[i] = newElement; + } + } else { + for (var p in val) { + if (ObjectHasOwnProperty.call(val, p)) { + var newElement = Revive(val, p, reviver); + if (IS_UNDEFINED(newElement)) { + delete val[p]; + } else { + val[p] = newElement; + } + } + } + } + } + return reviver.call(holder, name, val); +} + +function JSONParse(text, reviver) { + var unfiltered = ParseJSONUnfiltered(text); + if (IS_FUNCTION(reviver)) { + return Revive({'': unfiltered}, '', reviver); + } else { + return unfiltered; + } +} + +var characterQuoteCache = { + '\"': '\\"', + '\\': '\\\\', + '/': '\\/', + '\b': '\\b', + '\f': '\\f', + '\n': '\\n', + '\r': '\\r', + '\t': '\\t', + '\x0B': '\\u000b' +}; + +function QuoteSingleJSONCharacter(c) { + if (c in characterQuoteCache) { + return characterQuoteCache[c]; + } + var charCode = c.charCodeAt(0); + var result; + if (charCode < 16) result = '\\u000'; + else if (charCode < 256) result = '\\u00'; + else if (charCode < 4096) result = '\\u0'; + else result = '\\u'; + result += charCode.toString(16); + characterQuoteCache[c] = result; + return result; +} + +function QuoteJSONString(str) { + var quotable = /[\\\"\x00-\x1f\x80-\uffff]/g; + return '"' + str.replace(quotable, QuoteSingleJSONCharacter) + '"'; +} + +function StackContains(stack, val) { + var length = stack.length; + for (var i = 0; i < length; i++) { + if (stack[i] === val) { + return true; + } + } + return false; +} + +function SerializeArray(value, replacer, stack, indent, gap) { + if (StackContains(stack, value)) { + throw MakeTypeError('circular_structure', []); + } + stack.push(value); + var stepback = indent; + indent += gap; + var partial = []; + var len = value.length; + for (var i = 0; i < len; i++) { + var strP = JSONSerialize($String(i), value, replacer, stack, + indent, gap); + if (IS_UNDEFINED(strP)) { + strP = "null"; + } + partial.push(strP); + } + var final; + if (gap == "") { + final = "[" + partial.join(",") + "]"; + } else if (partial.length > 0) { + var separator = ",\n" + indent; + final = "[\n" + indent + partial.join(separator) + "\n" + + stepback + "]"; + } else { + final = "[]"; + } + stack.pop(); + return final; +} + +function SerializeObject(value, replacer, stack, indent, gap) { + if (StackContains(stack, value)) { + throw MakeTypeError('circular_structure', []); + } + stack.push(value); + var stepback = indent; + indent += gap; + var partial = []; + if (IS_ARRAY(replacer)) { + var length = replacer.length; + for (var i = 0; i < length; i++) { + if (ObjectHasOwnProperty.call(replacer, i)) { + var p = replacer[i]; + var strP = JSONSerialize(p, value, replacer, stack, indent, gap); + if (!IS_UNDEFINED(strP)) { + var member = QuoteJSONString(p) + ":"; + if (gap != "") member += " "; + member += strP; + partial.push(member); + } + } + } + } else { + for (var p in value) { + if (ObjectHasOwnProperty.call(value, p)) { + var strP = JSONSerialize(p, value, replacer, stack, indent, gap); + if (!IS_UNDEFINED(strP)) { + var member = QuoteJSONString(p) + ":"; + if (gap != "") member += " "; + member += strP; + partial.push(member); + } + } + } + } + var final; + if (gap == "") { + final = "{" + partial.join(",") + "}"; + } else if 
(partial.length > 0) { + var separator = ",\n" + indent; + final = "{\n" + indent + partial.join(separator) + "\n" + + stepback + "}"; + } else { + final = "{}"; + } + stack.pop(); + return final; +} + +function JSONSerialize(key, holder, replacer, stack, indent, gap) { + var value = holder[key]; + if (IS_OBJECT(value) && value) { + var toJSON = value.toJSON; + if (IS_FUNCTION(toJSON)) { + value = toJSON.call(value, key); + } + } + if (IS_FUNCTION(replacer)) { + value = replacer.call(holder, key, value); + } + // Unwrap value if necessary + if (IS_OBJECT(value)) { + if (IS_NUMBER_WRAPPER(value)) { + value = $Number(value); + } else if (IS_STRING_WRAPPER(value)) { + value = $String(value); + } else if (IS_BOOLEAN_WRAPPER(value)) { + value = $Boolean(value); + } + } + switch (typeof value) { + case "string": + return QuoteJSONString(value); + case "object": + if (!value) { + return "null"; + } else if (IS_ARRAY(value)) { + return SerializeArray(value, replacer, stack, indent, gap); + } else { + return SerializeObject(value, replacer, stack, indent, gap); + } + case "number": + return $isFinite(value) ? $String(value) : "null"; + case "boolean": + return value ? "true" : "false"; + } +} + +function JSONStringify(value, replacer, space) { + var stack = []; + var indent = ""; + if (IS_OBJECT(space)) { + // Unwrap 'space' if it is wrapped + if (IS_NUMBER_WRAPPER(space)) { + space = $Number(space); + } else if (IS_STRING_WRAPPER(space)) { + space = $String(space); + } + } + var gap; + if (IS_NUMBER(space)) { + space = $Math.min(space, 10); + gap = ""; + for (var i = 0; i < space; i++) { + gap += " "; + } + } else if (IS_STRING(space)) { + if (space.length > 10) { + gap = space.substring(0, 10); + } else { + gap = space; + } + } else { + gap = ""; + } + return JSONSerialize('', {'': value}, replacer, stack, indent, gap); +} + +function SetupJSON() { + InstallFunctions($JSON, DONT_ENUM, $Array( + "parse", JSONParse, + "stringify", JSONStringify + )); +} + +SetupJSON(); diff --git a/deps/v8/src/jsregexp.cc b/deps/v8/src/jsregexp.cc index f3c801c7e0..dfd9ef64fd 100644 --- a/deps/v8/src/jsregexp.cc +++ b/deps/v8/src/jsregexp.cc @@ -66,11 +66,6 @@ Handle<Object> RegExpImpl::CreateRegExpLiteral(Handle<JSFunction> constructor, Handle<String> pattern, Handle<String> flags, bool* has_pending_exception) { - // Ensure that the constructor function has been loaded. - if (!constructor->IsLoaded()) { - LoadLazy(constructor, has_pending_exception); - if (*has_pending_exception) return Handle<Object>(); - } // Call the construct code with 2 arguments. Object** argv[2] = { Handle<Object>::cast(pattern).location(), Handle<Object>::cast(flags).location() }; @@ -4992,7 +4987,9 @@ int AssertionNode::ComputeFirstCharacterSet(int budget) { case AFTER_WORD_CHARACTER: { ASSERT_NOT_NULL(on_success()); budget = on_success()->ComputeFirstCharacterSet(budget); - set_first_character_set(on_success()->first_character_set()); + if (budget >= 0) { + set_first_character_set(on_success()->first_character_set()); + } break; } } @@ -5018,6 +5015,10 @@ int ActionNode::ComputeFirstCharacterSet(int budget) { int BackReferenceNode::ComputeFirstCharacterSet(int budget) { // We don't know anything about the first character of a backreference // at this point. + // The potential first characters are the first characters of the capture, + // and the first characters of the on_success node, depending on whether the + // capture can be empty and whether it is known to be participating or known + // not to be. 
return kComputeFirstCharacterSetFail; } @@ -5037,8 +5038,11 @@ int TextNode::ComputeFirstCharacterSet(int budget) { } else { ASSERT(text.type == TextElement::CHAR_CLASS); RegExpCharacterClass* char_class = text.data.u_char_class; + ZoneList<CharacterRange>* ranges = char_class->ranges(); + // TODO(lrn): Canonicalize ranges when they are created + // instead of waiting until now. + CharacterRange::Canonicalize(ranges); if (char_class->is_negated()) { - ZoneList<CharacterRange>* ranges = char_class->ranges(); int length = ranges->length(); int new_length = length + 1; if (length > 0) { @@ -5052,7 +5056,7 @@ int TextNode::ComputeFirstCharacterSet(int budget) { CharacterRange::Negate(ranges, negated_ranges); set_first_character_set(negated_ranges); } else { - set_first_character_set(char_class->ranges()); + set_first_character_set(ranges); } } } diff --git a/deps/v8/src/jump-target-heavy-inl.h b/deps/v8/src/jump-target-heavy-inl.h new file mode 100644 index 0000000000..0a2a5691f5 --- /dev/null +++ b/deps/v8/src/jump-target-heavy-inl.h @@ -0,0 +1,51 @@ +// Copyright 2010 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
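The new jump-target-heavy-inl.h and jump-target-heavy.cc below carry the virtual-frame-based JumpTarget code: the Jump/Branch/Bind overloads that take Results, and ComputeEntryFrame, which merges all reaching frames into a single entry frame. For BIDIRECTIONAL targets the per-element type information is reset to TypeInfo::Unknown, since a backward edge may reach the target with values of a different type.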
+ +#ifndef V8_JUMP_TARGET_HEAVY_INL_H_ +#define V8_JUMP_TARGET_HEAVY_INL_H_ + +#include "virtual-frame-inl.h" + +namespace v8 { +namespace internal { + +void JumpTarget::InitializeEntryElement(int index, FrameElement* target) { + FrameElement* element = &entry_frame_->elements_[index]; + element->clear_copied(); + if (target->is_register()) { + entry_frame_->set_register_location(target->reg(), index); + } else if (target->is_copy()) { + entry_frame_->elements_[target->index()].set_copied(); + } + if (direction_ == BIDIRECTIONAL && !target->is_copy()) { + element->set_type_info(TypeInfo::Unknown()); + } +} + +} } // namespace v8::internal + +#endif // V8_JUMP_TARGET_HEAVY_INL_H_ diff --git a/deps/v8/src/jump-target-heavy.cc b/deps/v8/src/jump-target-heavy.cc new file mode 100644 index 0000000000..85620a2d96 --- /dev/null +++ b/deps/v8/src/jump-target-heavy.cc @@ -0,0 +1,363 @@ +// Copyright 2010 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#include "v8.h" + +#include "codegen-inl.h" +#include "jump-target-inl.h" +#include "register-allocator-inl.h" + +namespace v8 { +namespace internal { + + +void JumpTarget::Jump(Result* arg) { + ASSERT(cgen()->has_valid_frame()); + + cgen()->frame()->Push(arg); + DoJump(); +} + + +void JumpTarget::Branch(Condition cc, Result* arg, Hint hint) { + ASSERT(cgen()->has_valid_frame()); + + // We want to check that non-frame registers at the call site stay in + // the same registers on the fall-through branch. +#ifdef DEBUG + Result::Type arg_type = arg->type(); + Register arg_reg = arg->is_register() ? arg->reg() : no_reg; +#endif + + cgen()->frame()->Push(arg); + DoBranch(cc, hint); + *arg = cgen()->frame()->Pop(); + + ASSERT(arg->type() == arg_type); + ASSERT(!arg->is_register() || arg->reg().is(arg_reg)); +} + + +void JumpTarget::Branch(Condition cc, Result* arg0, Result* arg1, Hint hint) { + ASSERT(cgen()->has_valid_frame()); + + // We want to check that non-frame registers at the call site stay in + // the same registers on the fall-through branch. 
+#ifdef DEBUG + Result::Type arg0_type = arg0->type(); + Register arg0_reg = arg0->is_register() ? arg0->reg() : no_reg; + Result::Type arg1_type = arg1->type(); + Register arg1_reg = arg1->is_register() ? arg1->reg() : no_reg; +#endif + + cgen()->frame()->Push(arg0); + cgen()->frame()->Push(arg1); + DoBranch(cc, hint); + *arg1 = cgen()->frame()->Pop(); + *arg0 = cgen()->frame()->Pop(); + + ASSERT(arg0->type() == arg0_type); + ASSERT(!arg0->is_register() || arg0->reg().is(arg0_reg)); + ASSERT(arg1->type() == arg1_type); + ASSERT(!arg1->is_register() || arg1->reg().is(arg1_reg)); +} + + +void BreakTarget::Branch(Condition cc, Result* arg, Hint hint) { + ASSERT(cgen()->has_valid_frame()); + + int count = cgen()->frame()->height() - expected_height_; + if (count > 0) { + // We negate and branch here rather than using DoBranch's negate + // and branch. This gives us a hook to remove statement state + // from the frame. + JumpTarget fall_through; + // Branch to fall through will not negate, because it is a + // forward-only target. + fall_through.Branch(NegateCondition(cc), NegateHint(hint)); + Jump(arg); // May emit merge code here. + fall_through.Bind(); + } else { +#ifdef DEBUG + Result::Type arg_type = arg->type(); + Register arg_reg = arg->is_register() ? arg->reg() : no_reg; +#endif + cgen()->frame()->Push(arg); + DoBranch(cc, hint); + *arg = cgen()->frame()->Pop(); + ASSERT(arg->type() == arg_type); + ASSERT(!arg->is_register() || arg->reg().is(arg_reg)); + } +} + + +void JumpTarget::Bind(Result* arg) { + if (cgen()->has_valid_frame()) { + cgen()->frame()->Push(arg); + } + DoBind(); + *arg = cgen()->frame()->Pop(); +} + + +void JumpTarget::Bind(Result* arg0, Result* arg1) { + if (cgen()->has_valid_frame()) { + cgen()->frame()->Push(arg0); + cgen()->frame()->Push(arg1); + } + DoBind(); + *arg1 = cgen()->frame()->Pop(); + *arg0 = cgen()->frame()->Pop(); +} + + +void JumpTarget::ComputeEntryFrame() { + // Given: a collection of frames reaching by forward CFG edges and + // the directionality of the block. Compute: an entry frame for the + // block. + + Counters::compute_entry_frame.Increment(); +#ifdef DEBUG + if (compiling_deferred_code_) { + ASSERT(reaching_frames_.length() > 1); + VirtualFrame* frame = reaching_frames_[0]; + bool all_identical = true; + for (int i = 1; i < reaching_frames_.length(); i++) { + if (!frame->Equals(reaching_frames_[i])) { + all_identical = false; + break; + } + } + ASSERT(!all_identical || all_identical); + } +#endif + + // Choose an initial frame. + VirtualFrame* initial_frame = reaching_frames_[0]; + + // A list of pointers to frame elements in the entry frame. NULL + // indicates that the element has not yet been determined. + int length = initial_frame->element_count(); + ZoneList<FrameElement*> elements(length); + + // Initially populate the list of elements based on the initial + // frame. + for (int i = 0; i < length; i++) { + FrameElement element = initial_frame->elements_[i]; + // We do not allow copies or constants in bidirectional frames. + if (direction_ == BIDIRECTIONAL) { + if (element.is_constant() || element.is_copy()) { + elements.Add(NULL); + continue; + } + } + elements.Add(&initial_frame->elements_[i]); + } + + // Compute elements based on the other reaching frames. 
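The loop that follows combines each stack slot's description across all reaching frames. Conceptually it is a per-slot meet: if every incoming frame agrees on where the value lives, that description survives; otherwise the slot becomes undetermined (NULL) and is assigned to memory or a free register further down. A rough sketch of such a meet over a simplified element type (illustrative only, not V8's FrameElement::Combine):

// Simplified description of where a value lives at a merge point.
struct SlotDesc {
  enum Kind { MEMORY, REGISTER, CONSTANT };
  Kind kind;
  int reg;  // meaningful only when kind == REGISTER
  bool operator==(const SlotDesc& other) const {
    return kind == other.kind && (kind != REGISTER || reg == other.reg);
  }
};

// Meet of two incoming descriptions: keep one only if both frames agree;
// returning 0 means "undetermined", to be resolved later.
const SlotDesc* Meet(const SlotDesc* a, const SlotDesc* b) {
  if (a == 0 || b == 0) return 0;
  return (*a == *b) ? a : 0;
}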
+ if (reaching_frames_.length() > 1) { + for (int i = 0; i < length; i++) { + FrameElement* element = elements[i]; + for (int j = 1; j < reaching_frames_.length(); j++) { + // Element computation is monotonic: new information will not + // change our decision about undetermined or invalid elements. + if (element == NULL || !element->is_valid()) break; + + FrameElement* other = &reaching_frames_[j]->elements_[i]; + element = element->Combine(other); + if (element != NULL && !element->is_copy()) { + ASSERT(other != NULL); + // We overwrite the number information of one of the incoming frames. + // This is safe because we only use the frame for emitting merge code. + // The number information of incoming frames is not used anymore. + element->set_type_info(TypeInfo::Combine(element->type_info(), + other->type_info())); + } + } + elements[i] = element; + } + } + + // Build the new frame. A freshly allocated frame has memory elements + // for the parameters and some platform-dependent elements (e.g., + // return address). Replace those first. + entry_frame_ = new VirtualFrame(); + int index = 0; + for (; index < entry_frame_->element_count(); index++) { + FrameElement* target = elements[index]; + // If the element is determined, set it now. Count registers. Mark + // elements as copied exactly when they have a copy. Undetermined + // elements are initially recorded as if in memory. + if (target != NULL) { + entry_frame_->elements_[index] = *target; + InitializeEntryElement(index, target); + } + } + // Then fill in the rest of the frame with new elements. + for (; index < length; index++) { + FrameElement* target = elements[index]; + if (target == NULL) { + entry_frame_->elements_.Add( + FrameElement::MemoryElement(TypeInfo::Uninitialized())); + } else { + entry_frame_->elements_.Add(*target); + InitializeEntryElement(index, target); + } + } + + // Allocate any still-undetermined frame elements to registers or + // memory, from the top down. + for (int i = length - 1; i >= 0; i--) { + if (elements[i] == NULL) { + // Loop over all the reaching frames to check whether the element + // is synced on all frames and to count the registers it occupies. + bool is_synced = true; + RegisterFile candidate_registers; + int best_count = kMinInt; + int best_reg_num = RegisterAllocator::kInvalidRegister; + TypeInfo info = TypeInfo::Uninitialized(); + + for (int j = 0; j < reaching_frames_.length(); j++) { + FrameElement element = reaching_frames_[j]->elements_[i]; + if (direction_ == BIDIRECTIONAL) { + info = TypeInfo::Unknown(); + } else if (!element.is_copy()) { + info = TypeInfo::Combine(info, element.type_info()); + } else { + // New elements will not be copies, so get number information from + // backing element in the reaching frame. + info = TypeInfo::Combine(info, + reaching_frames_[j]->elements_[element.index()].type_info()); + } + is_synced = is_synced && element.is_synced(); + if (element.is_register() && !entry_frame_->is_used(element.reg())) { + // Count the register occurrence and remember it if better + // than the previous best. + int num = RegisterAllocator::ToNumber(element.reg()); + candidate_registers.Use(num); + if (candidate_registers.count(num) > best_count) { + best_count = candidate_registers.count(num); + best_reg_num = num; + } + } + } + + // We must have a number type information now (not for copied elements). + ASSERT(entry_frame_->elements_[i].is_copy() + || !info.IsUninitialized()); + + // If the value is synced on all frames, put it in memory. 
This + // costs nothing at the merge code but will incur a + // memory-to-register move when the value is needed later. + if (is_synced) { + // Already recorded as a memory element. + // Set combined number info. + entry_frame_->elements_[i].set_type_info(info); + continue; + } + + // Try to put it in a register. If there was no best choice + // consider any free register. + if (best_reg_num == RegisterAllocator::kInvalidRegister) { + for (int j = 0; j < RegisterAllocator::kNumRegisters; j++) { + if (!entry_frame_->is_used(j)) { + best_reg_num = j; + break; + } + } + } + + if (best_reg_num != RegisterAllocator::kInvalidRegister) { + // If there was a register choice, use it. Preserve the copied + // flag on the element. + bool is_copied = entry_frame_->elements_[i].is_copied(); + Register reg = RegisterAllocator::ToRegister(best_reg_num); + entry_frame_->elements_[i] = + FrameElement::RegisterElement(reg, FrameElement::NOT_SYNCED, + TypeInfo::Uninitialized()); + if (is_copied) entry_frame_->elements_[i].set_copied(); + entry_frame_->set_register_location(reg, i); + } + // Set combined number info. + entry_frame_->elements_[i].set_type_info(info); + } + } + + // If we have incoming backward edges assert we forget all number information. +#ifdef DEBUG + if (direction_ == BIDIRECTIONAL) { + for (int i = 0; i < length; ++i) { + if (!entry_frame_->elements_[i].is_copy()) { + ASSERT(entry_frame_->elements_[i].type_info().IsUnknown()); + } + } + } +#endif + + // The stack pointer is at the highest synced element or the base of + // the expression stack. + int stack_pointer = length - 1; + while (stack_pointer >= entry_frame_->expression_base_index() && + !entry_frame_->elements_[stack_pointer].is_synced()) { + stack_pointer--; + } + entry_frame_->stack_pointer_ = stack_pointer; +} + + +DeferredCode::DeferredCode() + : masm_(CodeGeneratorScope::Current()->masm()), + statement_position_(masm_->current_statement_position()), + position_(masm_->current_position()) { + ASSERT(statement_position_ != RelocInfo::kNoPosition); + ASSERT(position_ != RelocInfo::kNoPosition); + + CodeGeneratorScope::Current()->AddDeferred(this); +#ifdef DEBUG + comment_ = ""; +#endif + + // Copy the register locations from the code generator's frame. + // These are the registers that will be spilled on entry to the + // deferred code and restored on exit. + VirtualFrame* frame = CodeGeneratorScope::Current()->frame(); + int sp_offset = frame->fp_relative(frame->stack_pointer_); + for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) { + int loc = frame->register_location(i); + if (loc == VirtualFrame::kIllegalIndex) { + registers_[i] = kIgnore; + } else if (frame->elements_[loc].is_synced()) { + // Needs to be restored on exit but not saved on entry. + registers_[i] = frame->fp_relative(loc) | kSyncedFlag; + } else { + int offset = frame->fp_relative(loc); + registers_[i] = (offset < sp_offset) ? 
kPush : offset; + } + } +} + +} } // namespace v8::internal diff --git a/deps/v8/src/jump-target-inl.h b/deps/v8/src/jump-target-inl.h index 6db0081536..4c9ee5bc43 100644 --- a/deps/v8/src/jump-target-inl.h +++ b/deps/v8/src/jump-target-inl.h @@ -30,6 +30,12 @@ #include "virtual-frame-inl.h" +#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 +#include "jump-target-heavy-inl.h" +#else +#include "jump-target-light-inl.h" +#endif + namespace v8 { namespace internal { @@ -37,19 +43,6 @@ CodeGenerator* JumpTarget::cgen() { return CodeGeneratorScope::Current(); } -void JumpTarget::InitializeEntryElement(int index, FrameElement* target) { - FrameElement* element = &entry_frame_->elements_[index]; - element->clear_copied(); - if (target->is_register()) { - entry_frame_->set_register_location(target->reg(), index); - } else if (target->is_copy()) { - entry_frame_->elements_[target->index()].set_copied(); - } - if (direction_ == BIDIRECTIONAL && !target->is_copy()) { - element->set_number_info(NumberInfo::Unknown()); - } -} - } } // namespace v8::internal #endif // V8_JUMP_TARGET_INL_H_ diff --git a/deps/v8/src/jump-target-light-inl.h b/deps/v8/src/jump-target-light-inl.h new file mode 100644 index 0000000000..8d6c3ac516 --- /dev/null +++ b/deps/v8/src/jump-target-light-inl.h @@ -0,0 +1,42 @@ +// Copyright 2010 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef V8_JUMP_TARGET_LIGHT_INL_H_ +#define V8_JUMP_TARGET_LIGHT_INL_H_ + +#include "virtual-frame-inl.h" + +namespace v8 { +namespace internal { + +void JumpTarget::InitializeEntryElement(int index, FrameElement* target) { + UNIMPLEMENTED(); +} + +} } // namespace v8::internal + +#endif // V8_JUMP_TARGET_LIGHT_INL_H_ diff --git a/deps/v8/src/jump-target-light.cc b/deps/v8/src/jump-target-light.cc new file mode 100644 index 0000000000..098d97deef --- /dev/null +++ b/deps/v8/src/jump-target-light.cc @@ -0,0 +1,99 @@ +// Copyright 2010 the V8 project authors. All rights reserved. 
+// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#include "v8.h" + +#include "codegen-inl.h" +#include "jump-target-inl.h" + +namespace v8 { +namespace internal { + + +void JumpTarget::Jump(Result* arg) { + UNIMPLEMENTED(); +} + + +void JumpTarget::Branch(Condition cc, Result* arg, Hint hint) { + UNIMPLEMENTED(); +} + + +void JumpTarget::Branch(Condition cc, Result* arg0, Result* arg1, Hint hint) { + UNIMPLEMENTED(); +} + + +void BreakTarget::Branch(Condition cc, Result* arg, Hint hint) { + UNIMPLEMENTED(); +} + + +void JumpTarget::Bind(Result* arg) { + UNIMPLEMENTED(); +} + + +void JumpTarget::Bind(Result* arg0, Result* arg1) { + UNIMPLEMENTED(); +} + + +void JumpTarget::ComputeEntryFrame() { + UNIMPLEMENTED(); +} + + +DeferredCode::DeferredCode() + : masm_(CodeGeneratorScope::Current()->masm()), + statement_position_(masm_->current_statement_position()), + position_(masm_->current_position()) { + ASSERT(statement_position_ != RelocInfo::kNoPosition); + ASSERT(position_ != RelocInfo::kNoPosition); + + CodeGeneratorScope::Current()->AddDeferred(this); +#ifdef DEBUG + comment_ = ""; +#endif + + // Copy the register locations from the code generator's frame. + // These are the registers that will be spilled on entry to the + // deferred code and restored on exit. + VirtualFrame* frame = CodeGeneratorScope::Current()->frame(); + for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) { + int loc = frame->register_location(i); + if (loc == VirtualFrame::kIllegalIndex) { + registers_[i] = kIgnore; + } else { + // Needs to be restored on exit but not saved on entry. + registers_[i] = frame->fp_relative(loc) | kSyncedFlag; + } + } +} + +} } // namespace v8::internal diff --git a/deps/v8/src/jump-target.cc b/deps/v8/src/jump-target.cc index 7b1ced7eb3..8b2999549e 100644 --- a/deps/v8/src/jump-target.cc +++ b/deps/v8/src/jump-target.cc @@ -48,289 +48,21 @@ void JumpTarget::Unuse() { } -void JumpTarget::ComputeEntryFrame() { - // Given: a collection of frames reaching by forward CFG edges and - // the directionality of the block. 
Compute: an entry frame for the - // block. - - Counters::compute_entry_frame.Increment(); -#ifdef DEBUG - if (compiling_deferred_code_) { - ASSERT(reaching_frames_.length() > 1); - VirtualFrame* frame = reaching_frames_[0]; - bool all_identical = true; - for (int i = 1; i < reaching_frames_.length(); i++) { - if (!frame->Equals(reaching_frames_[i])) { - all_identical = false; - break; - } - } - ASSERT(!all_identical || all_identical); - } -#endif - - // Choose an initial frame. - VirtualFrame* initial_frame = reaching_frames_[0]; - - // A list of pointers to frame elements in the entry frame. NULL - // indicates that the element has not yet been determined. - int length = initial_frame->element_count(); - ZoneList<FrameElement*> elements(length); - - // Initially populate the list of elements based on the initial - // frame. - for (int i = 0; i < length; i++) { - FrameElement element = initial_frame->elements_[i]; - // We do not allow copies or constants in bidirectional frames. - if (direction_ == BIDIRECTIONAL) { - if (element.is_constant() || element.is_copy()) { - elements.Add(NULL); - continue; - } - } - elements.Add(&initial_frame->elements_[i]); - } - - // Compute elements based on the other reaching frames. - if (reaching_frames_.length() > 1) { - for (int i = 0; i < length; i++) { - FrameElement* element = elements[i]; - for (int j = 1; j < reaching_frames_.length(); j++) { - // Element computation is monotonic: new information will not - // change our decision about undetermined or invalid elements. - if (element == NULL || !element->is_valid()) break; - - FrameElement* other = &reaching_frames_[j]->elements_[i]; - element = element->Combine(other); - if (element != NULL && !element->is_copy()) { - ASSERT(other != NULL); - // We overwrite the number information of one of the incoming frames. - // This is safe because we only use the frame for emitting merge code. - // The number information of incoming frames is not used anymore. - element->set_number_info(NumberInfo::Combine(element->number_info(), - other->number_info())); - } - } - elements[i] = element; - } - } - - // Build the new frame. A freshly allocated frame has memory elements - // for the parameters and some platform-dependent elements (e.g., - // return address). Replace those first. - entry_frame_ = new VirtualFrame(); - int index = 0; - for (; index < entry_frame_->element_count(); index++) { - FrameElement* target = elements[index]; - // If the element is determined, set it now. Count registers. Mark - // elements as copied exactly when they have a copy. Undetermined - // elements are initially recorded as if in memory. - if (target != NULL) { - entry_frame_->elements_[index] = *target; - InitializeEntryElement(index, target); - } - } - // Then fill in the rest of the frame with new elements. - for (; index < length; index++) { - FrameElement* target = elements[index]; - if (target == NULL) { - entry_frame_->elements_.Add( - FrameElement::MemoryElement(NumberInfo::Uninitialized())); - } else { - entry_frame_->elements_.Add(*target); - InitializeEntryElement(index, target); - } - } - - // Allocate any still-undetermined frame elements to registers or - // memory, from the top down. - for (int i = length - 1; i >= 0; i--) { - if (elements[i] == NULL) { - // Loop over all the reaching frames to check whether the element - // is synced on all frames and to count the registers it occupies. 
- bool is_synced = true; - RegisterFile candidate_registers; - int best_count = kMinInt; - int best_reg_num = RegisterAllocator::kInvalidRegister; - NumberInfo info = NumberInfo::Uninitialized(); - - for (int j = 0; j < reaching_frames_.length(); j++) { - FrameElement element = reaching_frames_[j]->elements_[i]; - if (direction_ == BIDIRECTIONAL) { - info = NumberInfo::Unknown(); - } else if (!element.is_copy()) { - info = NumberInfo::Combine(info, element.number_info()); - } else { - // New elements will not be copies, so get number information from - // backing element in the reaching frame. - info = NumberInfo::Combine(info, - reaching_frames_[j]->elements_[element.index()].number_info()); - } - is_synced = is_synced && element.is_synced(); - if (element.is_register() && !entry_frame_->is_used(element.reg())) { - // Count the register occurrence and remember it if better - // than the previous best. - int num = RegisterAllocator::ToNumber(element.reg()); - candidate_registers.Use(num); - if (candidate_registers.count(num) > best_count) { - best_count = candidate_registers.count(num); - best_reg_num = num; - } - } - } - - // We must have a number type information now (not for copied elements). - ASSERT(entry_frame_->elements_[i].is_copy() - || !info.IsUninitialized()); - - // If the value is synced on all frames, put it in memory. This - // costs nothing at the merge code but will incur a - // memory-to-register move when the value is needed later. - if (is_synced) { - // Already recorded as a memory element. - // Set combined number info. - entry_frame_->elements_[i].set_number_info(info); - continue; - } - - // Try to put it in a register. If there was no best choice - // consider any free register. - if (best_reg_num == RegisterAllocator::kInvalidRegister) { - for (int j = 0; j < RegisterAllocator::kNumRegisters; j++) { - if (!entry_frame_->is_used(j)) { - best_reg_num = j; - break; - } - } - } - - if (best_reg_num != RegisterAllocator::kInvalidRegister) { - // If there was a register choice, use it. Preserve the copied - // flag on the element. - bool is_copied = entry_frame_->elements_[i].is_copied(); - Register reg = RegisterAllocator::ToRegister(best_reg_num); - entry_frame_->elements_[i] = - FrameElement::RegisterElement(reg, FrameElement::NOT_SYNCED, - NumberInfo::Uninitialized()); - if (is_copied) entry_frame_->elements_[i].set_copied(); - entry_frame_->set_register_location(reg, i); - } - // Set combined number info. - entry_frame_->elements_[i].set_number_info(info); - } - } - - // If we have incoming backward edges assert we forget all number information. -#ifdef DEBUG - if (direction_ == BIDIRECTIONAL) { - for (int i = 0; i < length; ++i) { - if (!entry_frame_->elements_[i].is_copy()) { - ASSERT(entry_frame_->elements_[i].number_info().IsUnknown()); - } - } - } -#endif - - // The stack pointer is at the highest synced element or the base of - // the expression stack. - int stack_pointer = length - 1; - while (stack_pointer >= entry_frame_->expression_base_index() && - !entry_frame_->elements_[stack_pointer].is_synced()) { - stack_pointer--; - } - entry_frame_->stack_pointer_ = stack_pointer; -} - - void JumpTarget::Jump() { DoJump(); } -void JumpTarget::Jump(Result* arg) { - ASSERT(cgen()->has_valid_frame()); - - cgen()->frame()->Push(arg); - DoJump(); -} - - void JumpTarget::Branch(Condition cc, Hint hint) { DoBranch(cc, hint); } -#ifdef DEBUG -#define DECLARE_ARGCHECK_VARS(name) \ - Result::Type name##_type = name->type(); \ - Register name##_reg = name->is_register() ? 
name->reg() : no_reg - -#define ASSERT_ARGCHECK(name) \ - ASSERT(name->type() == name##_type); \ - ASSERT(!name->is_register() || name->reg().is(name##_reg)) - -#else -#define DECLARE_ARGCHECK_VARS(name) do {} while (false) - -#define ASSERT_ARGCHECK(name) do {} while (false) -#endif - -void JumpTarget::Branch(Condition cc, Result* arg, Hint hint) { - ASSERT(cgen()->has_valid_frame()); - - // We want to check that non-frame registers at the call site stay in - // the same registers on the fall-through branch. - DECLARE_ARGCHECK_VARS(arg); - - cgen()->frame()->Push(arg); - DoBranch(cc, hint); - *arg = cgen()->frame()->Pop(); - - ASSERT_ARGCHECK(arg); -} - - -void BreakTarget::Branch(Condition cc, Result* arg, Hint hint) { - ASSERT(cgen()->has_valid_frame()); - - int count = cgen()->frame()->height() - expected_height_; - if (count > 0) { - // We negate and branch here rather than using DoBranch's negate - // and branch. This gives us a hook to remove statement state - // from the frame. - JumpTarget fall_through; - // Branch to fall through will not negate, because it is a - // forward-only target. - fall_through.Branch(NegateCondition(cc), NegateHint(hint)); - Jump(arg); // May emit merge code here. - fall_through.Bind(); - } else { - DECLARE_ARGCHECK_VARS(arg); - cgen()->frame()->Push(arg); - DoBranch(cc, hint); - *arg = cgen()->frame()->Pop(); - ASSERT_ARGCHECK(arg); - } -} - -#undef DECLARE_ARGCHECK_VARS -#undef ASSERT_ARGCHECK - - void JumpTarget::Bind() { DoBind(); } -void JumpTarget::Bind(Result* arg) { - if (cgen()->has_valid_frame()) { - cgen()->frame()->Push(arg); - } - DoBind(); - *arg = cgen()->frame()->Pop(); -} - - void JumpTarget::AddReachingFrame(VirtualFrame* frame) { ASSERT(reaching_frames_.length() == merge_labels_.length()); ASSERT(entry_frame_ == NULL); diff --git a/deps/v8/src/jump-target.h b/deps/v8/src/jump-target.h index db7c115538..db523b55ba 100644 --- a/deps/v8/src/jump-target.h +++ b/deps/v8/src/jump-target.h @@ -117,12 +117,17 @@ class JumpTarget : public ZoneObject { // Shadows are dynamically allocated. // the target and the fall-through. virtual void Branch(Condition cc, Hint hint = no_hint); virtual void Branch(Condition cc, Result* arg, Hint hint = no_hint); + virtual void Branch(Condition cc, + Result* arg0, + Result* arg1, + Hint hint = no_hint); // Bind a jump target. If there is no current frame at the binding // site, there must be at least one frame reaching via a forward // jump. virtual void Bind(); virtual void Bind(Result* arg); + virtual void Bind(Result* arg0, Result* arg1); // Emit a call to a jump target. There must be a current frame at // the call. The frame at the target is the same as the current diff --git a/deps/v8/src/liveedit-debugger.js b/deps/v8/src/liveedit-debugger.js new file mode 100644 index 0000000000..e336db71b9 --- /dev/null +++ b/deps/v8/src/liveedit-debugger.js @@ -0,0 +1,431 @@ +// Copyright 2010 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. 
nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// LiveEdit feature implementation. The script should be executed after +// debug-debugger.js. + + +// Changes script text and recompiles all relevant functions if possible. +// The change is always a substring (change_pos, change_pos + change_len) +// being replaced with a completely different string new_str. +// +// Only one function will have its Code changed in result of this function. +// All nested functions (should they have any instances at the moment) are left +// unchanged and re-linked to a newly created script instance representing old +// version of the source. (Generally speaking, +// during the change all nested functions are erased and completely different +// set of nested functions are introduced.) All other functions just have +// their positions updated. +// +// @param {Script} script that is being changed +// @param {Array} change_log a list that collects engineer-readable description +// of what happened. +Debug.LiveEditChangeScript = function(script, change_pos, change_len, new_str, + change_log) { + + // So far the function works as namespace. + var liveedit = Debug.LiveEditChangeScript; + var Assert = liveedit.Assert; + + // Fully compiles source string as a script. Returns Array of + // FunctionCompileInfo -- a descriptions of all functions of the script. + // Elements of array are ordered by start positions of functions (from top + // to bottom) in the source. Fields outer_index and next_sibling_index help + // to navigate the nesting structure of functions. + // + // The script is used for compilation, because it produces code that + // needs to be linked with some particular script (for nested functions). + function DebugGatherCompileInfo(source) { + // Get function info, elements are partially sorted (it is a tree + // of nested functions serialized as parent followed by serialized children. + var raw_compile_info = %LiveEditGatherCompileInfo(script, source); + + // Sort function infos by start position field. 
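The code that follows orders the FunctionCompileInfo records by start_position while maintaining a parallel old_index_map so the original, pre-sort indexes stay recoverable. The same ordering step can be expressed with a comparator over (position, original index) pairs; a C++ sketch for illustration only, not the patch's JavaScript:

#include <algorithm>
#include <utility>
#include <vector>

// Pair each record's start position with its original index and sort by
// position; the second members afterwards play the role of old_index_map.
std::vector<std::pair<int, int> > SortByStartPosition(
    const std::vector<int>& start_positions) {
  std::vector<std::pair<int, int> > order;
  for (int i = 0; i < static_cast<int>(start_positions.size()); i++) {
    order.push_back(std::make_pair(start_positions[i], i));
  }
  std::sort(order.begin(), order.end());  // sorts by start position first
  return order;
}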
+ var compile_info = new Array(); + var old_index_map = new Array(); + for (var i = 0; i < raw_compile_info.length; i++) { + compile_info.push(new liveedit.FunctionCompileInfo(raw_compile_info[i])); + old_index_map.push(i); + } + + for (var i = 0; i < compile_info.length; i++) { + var k = i; + for (var j = i + 1; j < compile_info.length; j++) { + if (compile_info[k].start_position > compile_info[j].start_position) { + k = j; + } + } + if (k != i) { + var temp_info = compile_info[k]; + var temp_index = old_index_map[k]; + compile_info[k] = compile_info[i]; + old_index_map[k] = old_index_map[i]; + compile_info[i] = temp_info; + old_index_map[i] = temp_index; + } + } + + // After sorting update outer_inder field using old_index_map. Also + // set next_sibling_index field. + var current_index = 0; + + // The recursive function, that goes over all children of a particular + // node (i.e. function info). + function ResetIndexes(new_parent_index, old_parent_index) { + var previous_sibling = -1; + while (current_index < compile_info.length && + compile_info[current_index].outer_index == old_parent_index) { + var saved_index = current_index; + compile_info[saved_index].outer_index = new_parent_index; + if (previous_sibling != -1) { + compile_info[previous_sibling].next_sibling_index = saved_index; + } + previous_sibling = saved_index; + current_index++; + ResetIndexes(saved_index, old_index_map[saved_index]); + } + if (previous_sibling != -1) { + compile_info[previous_sibling].next_sibling_index = -1; + } + } + + ResetIndexes(-1, -1); + Assert(current_index == compile_info.length); + + return compile_info; + } + + // Given a positions, finds a function that fully includes the entire change. + function FindChangedFunction(compile_info, offset, len) { + // First condition: function should start before the change region. + // Function #0 (whole-script function) always does, but we want + // one, that is later in this list. + var index = 0; + while (index + 1 < compile_info.length && + compile_info[index + 1].start_position <= offset) { + index++; + } + // Now we are at the last function that begins before the change + // region. The function that covers entire change region is either + // this function or the enclosing one. + for (; compile_info[index].end_position < offset + len; + index = compile_info[index].outer_index) { + Assert(index != -1); + } + return index; + } + + // Variable forward declarations. Preprocessor "Minifier" needs them. + var old_compile_info; + var shared_infos; + // Finds SharedFunctionInfo that corresponds compile info with index + // in old version of the script. + function FindFunctionInfo(index) { + var old_info = old_compile_info[index]; + for (var i = 0; i < shared_infos.length; i++) { + var info = shared_infos[i]; + if (info.start_position == old_info.start_position && + info.end_position == old_info.end_position) { + return info; + } + } + } + + // Replaces function's Code. + function PatchCode(new_info, shared_info) { + %LiveEditReplaceFunctionCode(new_info.raw_array, shared_info.raw_array); + + change_log.push( {function_patched: new_info.function_name} ); + } + + var change_len_old; + var change_len_new; + // Translate position in old version of script into position in new + // version of script. 
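The PosTranslator defined next implements this mapping: positions before the change are unchanged, positions at or past the end of the replaced region shift by the length difference, and positions inside the replaced region have no counterpart (it returns -1 for those). A self-contained worked example of the same arithmetic; the function name and sample numbers are illustrative, not part of the patch:

#include <cassert>

// Map a position in the old source to the new source, where old_len
// characters starting at change_pos were replaced by new_len characters.
// Positions inside the replaced region have no counterpart: return -1.
int TranslatePosition(int old_pos, int change_pos, int old_len, int new_len) {
  if (old_pos <= change_pos) return old_pos;
  if (old_pos >= change_pos + old_len) return old_pos + new_len - old_len;
  return -1;
}

int main() {
  // A 5-character span at offset 100 was replaced by 12 characters.
  assert(TranslatePosition(40, 100, 5, 12) == 40);    // before the change
  assert(TranslatePosition(200, 100, 5, 12) == 207);  // shifted by +7
  assert(TranslatePosition(102, 100, 5, 12) == -1);   // inside the change
  return 0;
}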
+ function PosTranslator(old_pos) { + if (old_pos <= change_pos) { + return old_pos; + } + if (old_pos >= change_pos + change_len_old) { + return old_pos + change_len_new - change_len_old; + } + return -1; + } + + var position_change_array; + var position_patch_report; + function PatchPositions(new_info, shared_info) { + if (!shared_info) { + // TODO: explain what is happening. + return; + } + %LiveEditPatchFunctionPositions(shared_info.raw_array, + position_change_array); + position_patch_report.push( { name: new_info.function_name } ); + } + + var link_to_old_script_report; + var old_script; + // Makes a function associated with another instance of a script (the + // one representing its old version). This way the function still + // may access its own text. + function LinkToOldScript(shared_info) { + %LiveEditRelinkFunctionToScript(shared_info.raw_array, old_script); + + link_to_old_script_report.push( { name: shared_info.function_name } ); + } + + + + var old_source = script.source; + var change_len_old = change_len; + var change_len_new = new_str.length; + + // Prepare new source string. + var new_source = old_source.substring(0, change_pos) + + new_str + old_source.substring(change_pos + change_len); + + // Find all SharedFunctionInfo's that are compiled from this script. + var shared_raw_list = %LiveEditFindSharedFunctionInfosForScript(script); + + var shared_infos = new Array(); + + for (var i = 0; i < shared_raw_list.length; i++) { + shared_infos.push(new liveedit.SharedInfoWrapper(shared_raw_list[i])); + } + + // Gather compile information about old version of script. + var old_compile_info = DebugGatherCompileInfo(old_source); + + // Gather compile information about new version of script. + var new_compile_info; + try { + new_compile_info = DebugGatherCompileInfo(new_source); + } catch (e) { + throw new liveedit.Failure("Failed to compile new version of script: " + e); + } + + // An index of a single function, that is going to have its code replaced. + var function_being_patched = + FindChangedFunction(old_compile_info, change_pos, change_len_old); + + // In old and new script versions function with a change should have the + // same indexes. + var function_being_patched2 = + FindChangedFunction(new_compile_info, change_pos, change_len_new); + Assert(function_being_patched == function_being_patched2, + "inconsistent old/new compile info"); + + // Check that function being patched has the same expectations in a new + // version. Otherwise we cannot safely patch its behavior and should + // choose the outer function instead. + while (!liveedit.CompareFunctionExpectations( + old_compile_info[function_being_patched], + new_compile_info[function_being_patched])) { + + Assert(old_compile_info[function_being_patched].outer_index == + new_compile_info[function_being_patched].outer_index); + function_being_patched = + old_compile_info[function_being_patched].outer_index; + Assert(function_being_patched != -1); + } + + // Check that function being patched is not currently on stack. + liveedit.CheckStackActivations( + [ FindFunctionInfo(function_being_patched) ], change_log ); + + + // Committing all changes. + var old_script_name = liveedit.CreateNameForOldScript(script); + + // Update the script text and create a new script representing an old + // version of the script. 
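The new_source used in the call below was built earlier by splicing new_str over the changed span of the old source. The operation is a plain string splice; for reference, in C++ terms (a sketch, not the patch's code):

#include <string>

// Replace the change_len characters starting at change_pos with new_str.
std::string SpliceSource(const std::string& old_source, int change_pos,
                         int change_len, const std::string& new_str) {
  return old_source.substr(0, change_pos) + new_str +
         old_source.substr(change_pos + change_len);
}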
+ var old_script = %LiveEditReplaceScript(script, new_source, old_script_name); + + PatchCode(new_compile_info[function_being_patched], + FindFunctionInfo(function_being_patched)); + + var position_patch_report = new Array(); + change_log.push( {position_patched: position_patch_report} ); + + var position_change_array = [ change_pos, + change_pos + change_len_old, + change_pos + change_len_new ]; + + // Update positions of all outer functions (i.e. all functions, that + // are partially below the function being patched). + for (var i = new_compile_info[function_being_patched].outer_index; + i != -1; + i = new_compile_info[i].outer_index) { + PatchPositions(new_compile_info[i], FindFunctionInfo(i)); + } + + // Update positions of all functions that are fully below the function + // being patched. + var old_next_sibling = + old_compile_info[function_being_patched].next_sibling_index; + var new_next_sibling = + new_compile_info[function_being_patched].next_sibling_index; + + // We simply go over the tail of both old and new lists. Their tails should + // have an identical structure. + if (old_next_sibling == -1) { + Assert(new_next_sibling == -1); + } else { + Assert(old_compile_info.length - old_next_sibling == + new_compile_info.length - new_next_sibling); + + for (var i = old_next_sibling, j = new_next_sibling; + i < old_compile_info.length; i++, j++) { + PatchPositions(new_compile_info[j], FindFunctionInfo(i)); + } + } + + var link_to_old_script_report = new Array(); + change_log.push( { linked_to_old_script: link_to_old_script_report } ); + + // We need to link to old script all former nested functions. + for (var i = function_being_patched + 1; i < old_next_sibling; i++) { + LinkToOldScript(FindFunctionInfo(i), old_script); + } +} + +Debug.LiveEditChangeScript.Assert = function(condition, message) { + if (!condition) { + if (message) { + throw "Assert " + message; + } else { + throw "Assert"; + } + } +} + +// An object describing function compilation details. Its index fields +// apply to indexes inside array that stores these objects. +Debug.LiveEditChangeScript.FunctionCompileInfo = function(raw_array) { + this.function_name = raw_array[0]; + this.start_position = raw_array[1]; + this.end_position = raw_array[2]; + this.param_num = raw_array[3]; + this.code = raw_array[4]; + this.scope_info = raw_array[5]; + this.outer_index = raw_array[6]; + this.next_sibling_index = null; + this.raw_array = raw_array; +} + +// A structure describing SharedFunctionInfo. +Debug.LiveEditChangeScript.SharedInfoWrapper = function(raw_array) { + this.function_name = raw_array[0]; + this.start_position = raw_array[1]; + this.end_position = raw_array[2]; + this.info = raw_array[3]; + this.raw_array = raw_array; +} + +// Adds a suffix to script name to mark that it is old version. +Debug.LiveEditChangeScript.CreateNameForOldScript = function(script) { + // TODO(635): try better than this; support several changes. + return script.name + " (old)"; +} + +// Compares a function interface old and new version, whether it +// changed or not. +Debug.LiveEditChangeScript.CompareFunctionExpectations = + function(function_info1, function_info2) { + // Check that function has the same number of parameters (there may exist + // an adapter, that won't survive function parameter number change). 
+ if (function_info1.param_num != function_info2.param_num) { + return false; + } + var scope_info1 = function_info1.scope_info; + var scope_info2 = function_info2.scope_info; + + if (!scope_info1) { + return !scope_info2; + } + + if (scope_info1.length != scope_info2.length) { + return false; + } + + // Check that outer scope structure is not changed. Otherwise the function + // will not properly work with existing scopes. + return scope_info1.toString() == scope_info2.toString(); +} + +// For array of wrapped shared function infos checks that none of them +// have activations on stack (of any thread). Throws a Failure exception +// if this proves to be false. +Debug.LiveEditChangeScript.CheckStackActivations = function(shared_wrapper_list, + change_log) { + var liveedit = Debug.LiveEditChangeScript; + + var shared_list = new Array(); + for (var i = 0; i < shared_wrapper_list.length; i++) { + shared_list[i] = shared_wrapper_list[i].info; + } + var result = %LiveEditCheckStackActivations(shared_list); + var problems = new Array(); + for (var i = 0; i < shared_list.length; i++) { + if (result[i] == liveedit.FunctionPatchabilityStatus.FUNCTION_BLOCKED_ON_STACK) { + var shared = shared_list[i]; + var description = { + name: shared.function_name, + start_pos: shared.start_position, + end_pos: shared.end_position + }; + problems.push(description); + } + } + if (problems.length > 0) { + change_log.push( { functions_on_stack: problems } ); + throw new liveedit.Failure("Blocked by functions on stack"); + } +} + +// A copy of the FunctionPatchabilityStatus enum from liveedit.h +Debug.LiveEditChangeScript.FunctionPatchabilityStatus = { + FUNCTION_AVAILABLE_FOR_PATCH: 0, + FUNCTION_BLOCKED_ON_STACK: 1 +} + + +// A logical failure in liveedit process. This means that change_log +// is valid and consistent description of what happened. +Debug.LiveEditChangeScript.Failure = function(message) { + this.message = message; +} + +Debug.LiveEditChangeScript.Failure.prototype.toString = function() { + return "LiveEdit Failure: " + this.message; +} + +// A testing entry. +Debug.LiveEditChangeScript.GetPcFromSourcePos = function(func, source_pos) { + return %GetFunctionCodePositionFromSource(func, source_pos); +} diff --git a/deps/v8/src/liveedit.h b/deps/v8/src/liveedit.h index efbcd7404a..2a9cc628a1 100644 --- a/deps/v8/src/liveedit.h +++ b/deps/v8/src/liveedit.h @@ -91,7 +91,7 @@ class LiveEdit : AllStatic { static void PatchFunctionPositions(Handle<JSArray> shared_info_array, Handle<JSArray> position_change_array); - // A copy of this is in liveedit-delay.js. + // A copy of this is in liveedit-debugger.js. enum FunctionPatchabilityStatus { FUNCTION_AVAILABLE_FOR_PATCH = 0, FUNCTION_BLOCKED_ON_STACK = 1 diff --git a/deps/v8/src/macros.py b/deps/v8/src/macros.py index 122b057a93..414b4c0a35 100644 --- a/deps/v8/src/macros.py +++ b/deps/v8/src/macros.py @@ -120,10 +120,6 @@ macro TO_STRING_INLINE(arg) = (IS_STRING(%IS_VAR(arg)) ? arg : NonStringToString # Macros implemented in Python. python macro CHAR_CODE(str) = ord(str[1]); -# Accessors for original global properties that ensure they have been loaded. -const ORIGINAL_REGEXP = (global.RegExp, $RegExp); -const ORIGINAL_DATE = (global.Date, $Date); - # Constants used on an array to implement the properties of the RegExp object. 
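A few hunks below, macros.py gains a MAX_TIME_MS constant. ECMA-262 section 15.9.1.1 limits a time value to 100,000,000 days on either side of the epoch, which works out to exactly 8,640,000,000,000,000 ms. A standalone snippet checking that arithmetic (not part of the patch):

#include <cassert>
#include <cstdint>

int main() {
  const int64_t ms_per_day = 24LL * 60 * 60 * 1000;      // 86,400,000
  const int64_t max_time_ms = 100000000LL * ms_per_day;  // 100 million days
  assert(max_time_ms == 8640000000000000LL);             // matches MAX_TIME_MS
  return 0;
}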
const REGEXP_NUMBER_OF_CAPTURES = 0; const REGEXP_FIRST_CAPTURE = 3; @@ -132,6 +128,9 @@ const REGEXP_FIRST_CAPTURE = 3; # REGEXP_NUMBER_OF_CAPTURES macro NUMBER_OF_CAPTURES(array) = ((array)[0]); +# Limit according to ECMA 262 15.9.1.1 +const MAX_TIME_MS = 8640000000000000; + # Gets the value of a Date object. If arg is not a Date object # a type error is thrown. macro DATE_VALUE(arg) = (%_ClassOf(arg) === 'Date' ? %_ValueOf(arg) : ThrowDateTypeError()); diff --git a/deps/v8/src/messages.js b/deps/v8/src/messages.js index cb392ff90f..b8a1070d59 100644 --- a/deps/v8/src/messages.js +++ b/deps/v8/src/messages.js @@ -27,6 +27,16 @@ // ------------------------------------------------------------------- +// +// Matches Script::Type from objects.h +var TYPE_NATIVE = 0; +var TYPE_EXTENSION = 1; +var TYPE_NORMAL = 2; + +// Matches Script::CompilationType from objects.h +var COMPILATION_TYPE_HOST = 0; +var COMPILATION_TYPE_EVAL = 1; +var COMPILATION_TYPE_JSON = 2; // Lazily initialized. var kVowelSounds = 0; @@ -634,7 +644,7 @@ CallSite.prototype.isToplevel = function () { CallSite.prototype.isEval = function () { var script = %FunctionGetScript(this.fun); - return script && script.compilation_type == 1; + return script && script.compilation_type == COMPILATION_TYPE_EVAL; }; CallSite.prototype.getEvalOrigin = function () { @@ -656,7 +666,7 @@ CallSite.prototype.getFunctionName = function () { } // Maybe this is an evaluation? var script = %FunctionGetScript(this.fun); - if (script && script.compilation_type == 1) + if (script && script.compilation_type == COMPILATION_TYPE_EVAL) return "eval"; return null; }; @@ -712,7 +722,7 @@ CallSite.prototype.getColumnNumber = function () { CallSite.prototype.isNative = function () { var script = %FunctionGetScript(this.fun); - return script ? (script.type == 0) : false; + return script ? (script.type == TYPE_NATIVE) : false; }; CallSite.prototype.getPosition = function () { @@ -736,7 +746,7 @@ function FormatEvalOrigin(script) { var eval_from_script = script.eval_from_script; if (eval_from_script) { - if (eval_from_script.compilation_type == 1) { + if (eval_from_script.compilation_type == COMPILATION_TYPE_EVAL) { // eval script originated from another eval. eval_origin += " (eval at " + FormatEvalOrigin(eval_from_script) + ")"; } else { diff --git a/deps/v8/src/mips/builtins-mips.cc b/deps/v8/src/mips/builtins-mips.cc index 3bd42ed6c7..04bcfeb04c 100644 --- a/deps/v8/src/mips/builtins-mips.cc +++ b/deps/v8/src/mips/builtins-mips.cc @@ -74,7 +74,99 @@ void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) { static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm, bool is_construct) { - UNIMPLEMENTED_MIPS(); + // Called from JSEntryStub::GenerateBody + + // Registers: + // a0: entry_address + // a1: function + // a2: reveiver_pointer + // a3: argc + // s0: argv + // + // Stack: + // arguments slots + // handler frame + // entry frame + // callee saved registers + ra + // 4 args slots + // args + + // Clear the context before we push it when entering the JS frame. + __ li(cp, Operand(0)); + + // Enter an internal frame. + __ EnterInternalFrame(); + + // Set up the context from the function argument. + __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset)); + + // Set up the roots register. + ExternalReference roots_address = ExternalReference::roots_address(); + __ li(s6, Operand(roots_address)); + + // Push the function and the receiver onto the stack. 
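The trampoline code that follows pushes the function and the receiver and then copies the JavaScript arguments onto the stack. Each argv entry is a handle, that is, a pointer to a slot holding an Object*, so the loop reads the handle, dereferences it, and pushes the resulting value. In C-like terms (types simplified, purely illustrative):

struct Object;  // stand-in for a tagged V8 value

// argv holds argc handles; a handle is a pointer to an Object* slot.
void CopyArguments(Object*** argv, int argc, void (*push)(Object*)) {
  Object*** end = argv + argc;   // t2 in the assembly: just past the last arg
  for (Object*** p = argv; p != end; ++p) {
    Object** handle = *p;        // lw t0, MemOperand(s0): read next parameter
    push(*handle);               // lw t0, MemOperand(t0): dereference the handle
  }                              // __ Push(t0): push the value
}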
+ __ MultiPushReversed(a1.bit() | a2.bit()); + + // Copy arguments to the stack in a loop. + // a3: argc + // s0: argv, ie points to first arg + Label loop, entry; + __ sll(t0, a3, kPointerSizeLog2); + __ add(t2, s0, t0); + __ b(&entry); + __ nop(); // Branch delay slot nop. + // t2 points past last arg. + __ bind(&loop); + __ lw(t0, MemOperand(s0)); // Read next parameter. + __ addiu(s0, s0, kPointerSize); + __ lw(t0, MemOperand(t0)); // Dereference handle. + __ Push(t0); // Push parameter. + __ bind(&entry); + __ Branch(ne, &loop, s0, Operand(t2)); + + // Registers: + // a0: entry_address + // a1: function + // a2: reveiver_pointer + // a3: argc + // s0: argv + // s6: roots_address + // + // Stack: + // arguments + // receiver + // function + // arguments slots + // handler frame + // entry frame + // callee saved registers + ra + // 4 args slots + // args + + // Initialize all JavaScript callee-saved registers, since they will be seen + // by the garbage collector as part of handlers. + __ LoadRoot(t4, Heap::kUndefinedValueRootIndex); + __ mov(s1, t4); + __ mov(s2, t4); + __ mov(s3, t4); + __ mov(s4, s4); + __ mov(s5, t4); + // s6 holds the root address. Do not clobber. + // s7 is cp. Do not init. + + // Invoke the code and pass argc as a0. + __ mov(a0, a3); + if (is_construct) { + UNIMPLEMENTED_MIPS(); + __ break_(0x164); + } else { + ParameterCount actual(a0); + __ InvokeFunction(a1, actual, CALL_FUNCTION); + } + + __ LeaveInternalFrame(); + + __ Jump(ra); } @@ -100,6 +192,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) { void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) { UNIMPLEMENTED_MIPS(); + __ break_(0x201); } diff --git a/deps/v8/src/mips/codegen-mips-inl.h b/deps/v8/src/mips/codegen-mips-inl.h index 904dd74e9d..3a511b80f7 100644 --- a/deps/v8/src/mips/codegen-mips-inl.h +++ b/deps/v8/src/mips/codegen-mips-inl.h @@ -36,7 +36,31 @@ namespace internal { // Platform-specific inline functions. -void DeferredCode::Jump() { __ b(&entry_label_); } +void DeferredCode::Jump() { + __ b(&entry_label_); + __ nop(); +} + + +void Reference::GetValueAndSpill() { + GetValue(); +} + + +void CodeGenerator::VisitAndSpill(Statement* statement) { + Visit(statement); +} + + +void CodeGenerator::VisitStatementsAndSpill(ZoneList<Statement*>* statements) { + VisitStatements(statements); +} + + +void CodeGenerator::LoadAndSpill(Expression* expression) { + Load(expression); +} + #undef __ diff --git a/deps/v8/src/mips/codegen-mips.cc b/deps/v8/src/mips/codegen-mips.cc index 7b32180efb..0936a6d7d6 100644 --- a/deps/v8/src/mips/codegen-mips.cc +++ b/deps/v8/src/mips/codegen-mips.cc @@ -30,13 +30,14 @@ #include "bootstrapper.h" #include "codegen-inl.h" +#include "compiler.h" #include "debug.h" #include "ic-inl.h" #include "parser.h" #include "register-allocator-inl.h" #include "runtime.h" #include "scopes.h" -#include "compiler.h" +#include "virtual-frame-inl.h" @@ -47,7 +48,7 @@ namespace internal { -// ------------------------------------------------------------------------- +// ----------------------------------------------------------------------------- // Platform-specific DeferredCode functions. @@ -61,13 +62,41 @@ void DeferredCode::RestoreRegisters() { } -// ------------------------------------------------------------------------- +// ----------------------------------------------------------------------------- +// CodeGenState implementation. 
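The CodeGenState implementation that follows is a scope object: constructing one installs it as the code generator's current state, and the destructor restores the previously active state, so the active state tracks C++ scope nesting. The shape of the pattern in isolation (a generic sketch, not the V8 class itself):

// Generic install-and-restore-on-scope-exit pattern used by CodeGenState.
class ScopedState {
 public:
  explicit ScopedState(ScopedState** current)
      : current_(current), previous_(*current) {
    *current_ = this;        // install this object as the active state
  }
  ~ScopedState() {
    *current_ = previous_;   // restore whatever was active before
  }
 private:
  ScopedState** current_;
  ScopedState* previous_;
};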
+ +CodeGenState::CodeGenState(CodeGenerator* owner) + : owner_(owner), + true_target_(NULL), + false_target_(NULL), + previous_(NULL) { + owner_->set_state(this); +} + + +CodeGenState::CodeGenState(CodeGenerator* owner, + JumpTarget* true_target, + JumpTarget* false_target) + : owner_(owner), + true_target_(true_target), + false_target_(false_target), + previous_(owner->state()) { + owner_->set_state(this); +} + + +CodeGenState::~CodeGenState() { + ASSERT(owner_->state() == this); + owner_->set_state(previous_); +} + + +// ----------------------------------------------------------------------------- // CodeGenerator implementation CodeGenerator::CodeGenerator(MacroAssembler* masm) : deferred_(8), masm_(masm), - scope_(NULL), frame_(NULL), allocator_(NULL), cc_reg_(cc_always), @@ -77,18 +106,362 @@ CodeGenerator::CodeGenerator(MacroAssembler* masm) // Calling conventions: -// s8_fp: caller's frame pointer +// fp: caller's frame pointer // sp: stack pointer // a1: called JS function // cp: callee's context -void CodeGenerator::Generate(CompilationInfo* infomode) { - UNIMPLEMENTED_MIPS(); +void CodeGenerator::Generate(CompilationInfo* info) { + // Record the position for debugging purposes. + CodeForFunctionPosition(info->function()); + + // Initialize state. + info_ = info; + ASSERT(allocator_ == NULL); + RegisterAllocator register_allocator(this); + allocator_ = ®ister_allocator; + ASSERT(frame_ == NULL); + frame_ = new VirtualFrame(); + cc_reg_ = cc_always; + + { + CodeGenState state(this); + + // Registers: + // a1: called JS function + // ra: return address + // fp: caller's frame pointer + // sp: stack pointer + // cp: callee's context + // + // Stack: + // arguments + // receiver + + frame_->Enter(); + + // Allocate space for locals and initialize them. + frame_->AllocateStackSlots(); + + // Initialize the function return target. + function_return_.set_direction(JumpTarget::BIDIRECTIONAL); + function_return_is_shadowed_ = false; + + VirtualFrame::SpilledScope spilled_scope; + if (scope()->num_heap_slots() > 0) { + UNIMPLEMENTED_MIPS(); + } + + { + Comment cmnt2(masm_, "[ copy context parameters into .context"); + + // Note that iteration order is relevant here! If we have the same + // parameter twice (e.g., function (x, y, x)), and that parameter + // needs to be copied into the context, it must be the last argument + // passed to the parameter that needs to be copied. This is a rare + // case so we don't check for it, instead we rely on the copying + // order: such a parameter is copied repeatedly into the same + // context location and thus the last value is what is seen inside + // the function. + for (int i = 0; i < scope()->num_parameters(); i++) { + UNIMPLEMENTED_MIPS(); + } + } + + // Store the arguments object. This must happen after context + // initialization because the arguments object may be stored in the + // context. + if (scope()->arguments() != NULL) { + UNIMPLEMENTED_MIPS(); + } + + // Generate code to 'execute' declarations and initialize functions + // (source elements). In case of an illegal redeclaration we need to + // handle that instead of processing the declarations. + if (scope()->HasIllegalRedeclaration()) { + Comment cmnt(masm_, "[ illegal redeclarations"); + scope()->VisitIllegalRedeclaration(this); + } else { + Comment cmnt(masm_, "[ declarations"); + ProcessDeclarations(scope()->declarations()); + // Bail out if a stack-overflow exception occurred when processing + // declarations. 
+ if (HasStackOverflow()) return; + } + + if (FLAG_trace) { + UNIMPLEMENTED_MIPS(); + } + + // Compile the body of the function in a vanilla state. Don't + // bother compiling all the code if the scope has an illegal + // redeclaration. + if (!scope()->HasIllegalRedeclaration()) { + Comment cmnt(masm_, "[ function body"); +#ifdef DEBUG + bool is_builtin = Bootstrapper::IsActive(); + bool should_trace = + is_builtin ? FLAG_trace_builtin_calls : FLAG_trace_calls; + if (should_trace) { + UNIMPLEMENTED_MIPS(); + } +#endif + VisitStatementsAndSpill(info->function()->body()); + } + } + + if (has_valid_frame() || function_return_.is_linked()) { + if (!function_return_.is_linked()) { + CodeForReturnPosition(info->function()); + } + // Registers: + // v0: result + // sp: stack pointer + // fp: frame pointer + // cp: callee's context + + __ LoadRoot(v0, Heap::kUndefinedValueRootIndex); + + function_return_.Bind(); + if (FLAG_trace) { + UNIMPLEMENTED_MIPS(); + } + + // Add a label for checking the size of the code used for returning. + Label check_exit_codesize; + masm_->bind(&check_exit_codesize); + + masm_->mov(sp, fp); + masm_->lw(fp, MemOperand(sp, 0)); + masm_->lw(ra, MemOperand(sp, 4)); + masm_->addiu(sp, sp, 8); + + // Here we use masm_-> instead of the __ macro to avoid the code coverage + // tool from instrumenting as we rely on the code size here. + // TODO(MIPS): Should we be able to use more than 0x1ffe parameters? + masm_->addiu(sp, sp, (scope()->num_parameters() + 1) * kPointerSize); + masm_->Jump(ra); + // The Jump automatically generates a nop in the branch delay slot. + + // Check that the size of the code used for returning matches what is + // expected by the debugger. + ASSERT_EQ(kJSReturnSequenceLength, + masm_->InstructionsGeneratedSince(&check_exit_codesize)); + } + + // Code generation state must be reset. + ASSERT(!has_cc()); + ASSERT(state_ == NULL); + ASSERT(!function_return_is_shadowed_); + function_return_.Unuse(); + DeleteFrame(); + + // Process any deferred code using the register allocator. + if (!HasStackOverflow()) { + ProcessDeferred(); + } + + allocator_ = NULL; +} + + +void CodeGenerator::LoadReference(Reference* ref) { + VirtualFrame::SpilledScope spilled_scope; + Comment cmnt(masm_, "[ LoadReference"); + Expression* e = ref->expression(); + Property* property = e->AsProperty(); + Variable* var = e->AsVariableProxy()->AsVariable(); + + if (property != NULL) { + UNIMPLEMENTED_MIPS(); + } else if (var != NULL) { + // The expression is a variable proxy that does not rewrite to a + // property. Global variables are treated as named property references. + if (var->is_global()) { + LoadGlobal(); + ref->set_type(Reference::NAMED); + } else { + ASSERT(var->slot() != NULL); + ref->set_type(Reference::SLOT); + } + } else { + UNIMPLEMENTED_MIPS(); + } +} + + +void CodeGenerator::UnloadReference(Reference* ref) { + VirtualFrame::SpilledScope spilled_scope; + // Pop a reference from the stack while preserving TOS. + Comment cmnt(masm_, "[ UnloadReference"); + int size = ref->size(); + if (size > 0) { + frame_->EmitPop(a0); + frame_->Drop(size); + frame_->EmitPush(a0); + } + ref->set_unloaded(); +} + + +MemOperand CodeGenerator::SlotOperand(Slot* slot, Register tmp) { + // Currently, this assertion will fail if we try to assign to + // a constant variable that is constant because it is read-only + // (such as the variable referring to a named function expression). + // We need to implement assignments to read-only variables. 
+ // Ideally, we should do this during AST generation (by converting + // such assignments into expression statements); however, in general + // we may not be able to make the decision until past AST generation, + // that is when the entire program is known. + ASSERT(slot != NULL); + int index = slot->index(); + switch (slot->type()) { + case Slot::PARAMETER: + UNIMPLEMENTED_MIPS(); + return MemOperand(no_reg, 0); + + case Slot::LOCAL: + return frame_->LocalAt(index); + + case Slot::CONTEXT: { + UNIMPLEMENTED_MIPS(); + return MemOperand(no_reg, 0); + } + + default: + UNREACHABLE(); + return MemOperand(no_reg, 0); + } +} + + +// Loads a value on TOS. If it is a boolean value, the result may have been +// (partially) translated into branches, or it may have set the condition +// code register. If force_cc is set, the value is forced to set the +// condition code register and no value is pushed. If the condition code +// register was set, has_cc() is true and cc_reg_ contains the condition to +// test for 'true'. +void CodeGenerator::LoadCondition(Expression* x, + JumpTarget* true_target, + JumpTarget* false_target, + bool force_cc) { + ASSERT(!has_cc()); + int original_height = frame_->height(); + + { CodeGenState new_state(this, true_target, false_target); + Visit(x); + + // If we hit a stack overflow, we may not have actually visited + // the expression. In that case, we ensure that we have a + // valid-looking frame state because we will continue to generate + // code as we unwind the C++ stack. + // + // It's possible to have both a stack overflow and a valid frame + // state (eg, a subexpression overflowed, visiting it returned + // with a dummied frame state, and visiting this expression + // returned with a normal-looking state). + if (HasStackOverflow() && + has_valid_frame() && + !has_cc() && + frame_->height() == original_height) { + true_target->Jump(); + } + } + if (force_cc && frame_ != NULL && !has_cc()) { + // Convert the TOS value to a boolean in the condition code register. + UNIMPLEMENTED_MIPS(); + } + ASSERT(!force_cc || !has_valid_frame() || has_cc()); + ASSERT(!has_valid_frame() || + (has_cc() && frame_->height() == original_height) || + (!has_cc() && frame_->height() == original_height + 1)); +} + + +void CodeGenerator::Load(Expression* x) { +#ifdef DEBUG + int original_height = frame_->height(); +#endif + JumpTarget true_target; + JumpTarget false_target; + LoadCondition(x, &true_target, &false_target, false); + + if (has_cc()) { + UNIMPLEMENTED_MIPS(); + } + + if (true_target.is_linked() || false_target.is_linked()) { + UNIMPLEMENTED_MIPS(); + } + ASSERT(has_valid_frame()); + ASSERT(!has_cc()); + ASSERT(frame_->height() == original_height + 1); +} + + +void CodeGenerator::LoadGlobal() { + VirtualFrame::SpilledScope spilled_scope; + __ lw(a0, GlobalObject()); + frame_->EmitPush(a0); +} + + +void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) { + VirtualFrame::SpilledScope spilled_scope; + if (slot->type() == Slot::LOOKUP) { + UNIMPLEMENTED_MIPS(); + } else { + __ lw(a0, SlotOperand(slot, a2)); + frame_->EmitPush(a0); + if (slot->var()->mode() == Variable::CONST) { + UNIMPLEMENTED_MIPS(); + } + } +} + + +void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) { + ASSERT(slot != NULL); + if (slot->type() == Slot::LOOKUP) { + UNIMPLEMENTED_MIPS(); + } else { + ASSERT(!slot->var()->is_dynamic()); + + JumpTarget exit; + if (init_state == CONST_INIT) { + UNIMPLEMENTED_MIPS(); + } + + // We must execute the store. 
Storing a variable must keep the + // (new) value on the stack. This is necessary for compiling + // assignment expressions. + // + // Note: We will reach here even with slot->var()->mode() == + // Variable::CONST because of const declarations which will + // initialize consts to 'the hole' value and by doing so, end up + // calling this code. a2 may be loaded with context; used below in + // RecordWrite. + frame_->EmitPop(a0); + __ sw(a0, SlotOperand(slot, a2)); + frame_->EmitPush(a0); + if (slot->type() == Slot::CONTEXT) { + UNIMPLEMENTED_MIPS(); + } + // If we definitely did not jump over the assignment, we do not need + // to bind the exit label. Doing so can defeat peephole + // optimization. + if (init_state == CONST_INIT || slot->type() == Slot::CONTEXT) { + exit.Bind(); + } + } } void CodeGenerator::VisitStatements(ZoneList<Statement*>* statements) { - UNIMPLEMENTED_MIPS(); + VirtualFrame::SpilledScope spilled_scope; + for (int i = 0; frame_ != NULL && i < statements->length(); i++) { + VisitAndSpill(statements->at(i)); + } } @@ -98,7 +471,14 @@ void CodeGenerator::VisitBlock(Block* node) { void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) { - UNIMPLEMENTED_MIPS(); + VirtualFrame::SpilledScope spilled_scope; + frame_->EmitPush(cp); + __ li(t0, Operand(pairs)); + frame_->EmitPush(t0); + __ li(t0, Operand(Smi::FromInt(is_eval() ? 1 : 0))); + frame_->EmitPush(t0); + frame_->CallRuntime(Runtime::kDeclareGlobals, 3); + // The result is discarded. } @@ -108,7 +488,17 @@ void CodeGenerator::VisitDeclaration(Declaration* node) { void CodeGenerator::VisitExpressionStatement(ExpressionStatement* node) { - UNIMPLEMENTED_MIPS(); +#ifdef DEBUG + int original_height = frame_->height(); +#endif + VirtualFrame::SpilledScope spilled_scope; + Comment cmnt(masm_, "[ ExpressionStatement"); + CodeForStatementPosition(node); + Expression* expression = node->expression(); + expression->MarkAsStatement(); + LoadAndSpill(expression); + frame_->Drop(); + ASSERT(frame_->height() == original_height); } @@ -133,7 +523,22 @@ void CodeGenerator::VisitBreakStatement(BreakStatement* node) { void CodeGenerator::VisitReturnStatement(ReturnStatement* node) { - UNIMPLEMENTED_MIPS(); + VirtualFrame::SpilledScope spilled_scope; + Comment cmnt(masm_, "[ ReturnStatement"); + + CodeForStatementPosition(node); + LoadAndSpill(node->expression()); + if (function_return_is_shadowed_) { + frame_->EmitPop(v0); + function_return_.Jump(); + } else { + // Pop the result from the frame and prepare the frame for + // returning thus making it easier to merge. 
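+ // The result travels in v0, the MIPS return-value register, which is what the shared return sequence expects.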
+ frame_->EmitPop(v0); + frame_->PrepareForReturn(); + + function_return_.Jump(); + } } @@ -192,8 +597,8 @@ void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) { } -void CodeGenerator::VisitFunctionBoilerplateLiteral( - FunctionBoilerplateLiteral* node) { +void CodeGenerator::VisitSharedFunctionInfoLiteral( + SharedFunctionInfoLiteral* node) { UNIMPLEMENTED_MIPS(); } @@ -204,17 +609,45 @@ void CodeGenerator::VisitConditional(Conditional* node) { void CodeGenerator::VisitSlot(Slot* node) { - UNIMPLEMENTED_MIPS(); +#ifdef DEBUG + int original_height = frame_->height(); +#endif + VirtualFrame::SpilledScope spilled_scope; + Comment cmnt(masm_, "[ Slot"); + LoadFromSlot(node, typeof_state()); + ASSERT(frame_->height() == original_height + 1); } void CodeGenerator::VisitVariableProxy(VariableProxy* node) { - UNIMPLEMENTED_MIPS(); +#ifdef DEBUG + int original_height = frame_->height(); +#endif + VirtualFrame::SpilledScope spilled_scope; + Comment cmnt(masm_, "[ VariableProxy"); + + Variable* var = node->var(); + Expression* expr = var->rewrite(); + if (expr != NULL) { + Visit(expr); + } else { + ASSERT(var->is_global()); + Reference ref(this, node); + ref.GetValueAndSpill(); + } + ASSERT(frame_->height() == original_height + 1); } void CodeGenerator::VisitLiteral(Literal* node) { - UNIMPLEMENTED_MIPS(); +#ifdef DEBUG + int original_height = frame_->height(); +#endif + VirtualFrame::SpilledScope spilled_scope; + Comment cmnt(masm_, "[ Literal"); + __ li(t0, Operand(node->handle())); + frame_->EmitPush(t0); + ASSERT(frame_->height() == original_height + 1); } @@ -239,7 +672,47 @@ void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* node) { void CodeGenerator::VisitAssignment(Assignment* node) { - UNIMPLEMENTED_MIPS(); +#ifdef DEBUG + int original_height = frame_->height(); +#endif + VirtualFrame::SpilledScope spilled_scope; + Comment cmnt(masm_, "[ Assignment"); + + { Reference target(this, node->target()); + if (target.is_illegal()) { + // Fool the virtual frame into thinking that we left the assignment's + // value on the frame. + frame_->EmitPush(zero_reg); + ASSERT(frame_->height() == original_height + 1); + return; + } + + if (node->op() == Token::ASSIGN || + node->op() == Token::INIT_VAR || + node->op() == Token::INIT_CONST) { + LoadAndSpill(node->value()); + } else { + UNIMPLEMENTED_MIPS(); + } + + Variable* var = node->target()->AsVariableProxy()->AsVariable(); + if (var != NULL && + (var->mode() == Variable::CONST) && + node->op() != Token::INIT_VAR && node->op() != Token::INIT_CONST) { + // Assignment ignored - leave the value on the stack. + } else { + CodeForSourcePosition(node->position()); + if (node->op() == Token::INIT_CONST) { + // Dynamic constant initializations must use the function context + // and initialize the actual constant declared. Dynamic variable + // initializations are simply assignments and use SetValue. + target.SetValue(CONST_INIT); + } else { + target.SetValue(NOT_CONST_INIT); + } + } + } + ASSERT(frame_->height() == original_height + 1); } @@ -254,7 +727,73 @@ void CodeGenerator::VisitProperty(Property* node) { void CodeGenerator::VisitCall(Call* node) { - UNIMPLEMENTED_MIPS(); +#ifdef DEBUG + int original_height = frame_->height(); +#endif + VirtualFrame::SpilledScope spilled_scope; + Comment cmnt(masm_, "[ Call"); + + Expression* function = node->expression(); + ZoneList<Expression*>* args = node->arguments(); + + // Standard function call. + // Check if the function is a variable or a property. 
+ Variable* var = function->AsVariableProxy()->AsVariable(); + Property* property = function->AsProperty(); + + // ------------------------------------------------------------------------ + // Fast-case: Use inline caching. + // --- + // According to ECMA-262, section 11.2.3, page 44, the function to call + // must be resolved after the arguments have been evaluated. The IC code + // automatically handles this by loading the arguments before the function + // is resolved in cache misses (this also holds for megamorphic calls). + // ------------------------------------------------------------------------ + + if (var != NULL && var->is_possibly_eval()) { + UNIMPLEMENTED_MIPS(); + } else if (var != NULL && !var->is_this() && var->is_global()) { + // ---------------------------------- + // JavaScript example: 'foo(1, 2, 3)' // foo is global + // ---------------------------------- + + int arg_count = args->length(); + + // We need sp to be 8 bytes aligned when calling the stub. + __ SetupAlignedCall(t0, arg_count); + + // Pass the global object as the receiver and let the IC stub + // patch the stack to use the global proxy as 'this' in the + // invoked function. + LoadGlobal(); + + // Load the arguments. + for (int i = 0; i < arg_count; i++) { + LoadAndSpill(args->at(i)); + } + + // Setup the receiver register and call the IC initialization code. + __ li(a2, Operand(var->name())); + InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP; + Handle<Code> stub = ComputeCallInitialize(arg_count, in_loop); + CodeForSourcePosition(node->position()); + frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET_CONTEXT, + arg_count + 1); + __ ReturnFromAlignedCall(); + __ lw(cp, frame_->Context()); + // Remove the function from the stack. + frame_->EmitPush(v0); + + } else if (var != NULL && var->slot() != NULL && + var->slot()->type() == Slot::LOOKUP) { + UNIMPLEMENTED_MIPS(); + } else if (property != NULL) { + UNIMPLEMENTED_MIPS(); + } else { + UNIMPLEMENTED_MIPS(); + } + + ASSERT(frame_->height() == original_height + 1); } @@ -439,13 +978,108 @@ bool CodeGenerator::HasValidEntryRegisters() { return true; } #undef __ #define __ ACCESS_MASM(masm) +// ----------------------------------------------------------------------------- +// Reference support -Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) { - return Handle<Code>::null(); +Reference::Reference(CodeGenerator* cgen, + Expression* expression, + bool persist_after_get) + : cgen_(cgen), + expression_(expression), + type_(ILLEGAL), + persist_after_get_(persist_after_get) { + cgen->LoadReference(this); +} + + +Reference::~Reference() { + ASSERT(is_unloaded() || is_illegal()); +} + + +Handle<String> Reference::GetName() { + ASSERT(type_ == NAMED); + Property* property = expression_->AsProperty(); + if (property == NULL) { + // Global variable reference treated as a named property reference. 
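+ // In that case the variable's name doubles as the property name.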
+ VariableProxy* proxy = expression_->AsVariableProxy(); + ASSERT(proxy->AsVariable() != NULL); + ASSERT(proxy->AsVariable()->is_global()); + return proxy->name(); + } else { + Literal* raw_name = property->key()->AsLiteral(); + ASSERT(raw_name != NULL); + return Handle<String>(String::cast(*raw_name->handle())); + } +} + + +void Reference::GetValue() { + ASSERT(cgen_->HasValidEntryRegisters()); + ASSERT(!is_illegal()); + ASSERT(!cgen_->has_cc()); + Property* property = expression_->AsProperty(); + if (property != NULL) { + cgen_->CodeForSourcePosition(property->position()); + } + + switch (type_) { + case SLOT: { + UNIMPLEMENTED_MIPS(); + break; + } + + case NAMED: { + UNIMPLEMENTED_MIPS(); + break; + } + + case KEYED: { + UNIMPLEMENTED_MIPS(); + break; + } + + default: + UNREACHABLE(); + } } -// On entry a0 and a1 are the things to be compared. On exit v0 is 0, +void Reference::SetValue(InitState init_state) { + ASSERT(!is_illegal()); + ASSERT(!cgen_->has_cc()); + MacroAssembler* masm = cgen_->masm(); + Property* property = expression_->AsProperty(); + if (property != NULL) { + cgen_->CodeForSourcePosition(property->position()); + } + + switch (type_) { + case SLOT: { + Comment cmnt(masm, "[ Store to Slot"); + Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot(); + cgen_->StoreToSlot(slot, init_state); + cgen_->UnloadReference(this); + break; + } + + case NAMED: { + UNIMPLEMENTED_MIPS(); + break; + } + + case KEYED: { + UNIMPLEMENTED_MIPS(); + break; + } + + default: + UNREACHABLE(); + } +} + + +// On entry a0 and a1 are the things to be compared. On exit v0 is 0, // positive or negative to indicate the result of the comparison. void CompareStub::Generate(MacroAssembler* masm) { UNIMPLEMENTED_MIPS(); @@ -453,6 +1087,12 @@ void CompareStub::Generate(MacroAssembler* masm) { } +Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) { + UNIMPLEMENTED_MIPS(); + return Handle<Code>::null(); +} + + void StackCheckStub::Generate(MacroAssembler* masm) { UNIMPLEMENTED_MIPS(); __ break_(0x790); @@ -477,55 +1117,274 @@ void CEntryStub::GenerateCore(MacroAssembler* masm, Label* throw_out_of_memory_exception, bool do_gc, bool always_allocate) { - UNIMPLEMENTED_MIPS(); - __ break_(0x826); + // s0: number of arguments including receiver (C callee-saved) + // s1: pointer to the first argument (C callee-saved) + // s2: pointer to builtin function (C callee-saved) + + if (do_gc) { + UNIMPLEMENTED_MIPS(); + } + + ExternalReference scope_depth = + ExternalReference::heap_always_allocate_scope_depth(); + if (always_allocate) { + UNIMPLEMENTED_MIPS(); + } + + // Call C built-in. + // a0 = argc, a1 = argv + __ mov(a0, s0); + __ mov(a1, s1); + + __ CallBuiltin(s2); + + if (always_allocate) { + UNIMPLEMENTED_MIPS(); + } + + // Check for failure result. + Label failure_returned; + ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0); + __ addiu(a2, v0, 1); + __ andi(t0, a2, kFailureTagMask); + __ Branch(eq, &failure_returned, t0, Operand(zero_reg)); + + // Exit C frame and return. + // v0:v1: result + // sp: stack pointer + // fp: frame pointer + __ LeaveExitFrame(mode_); + + // Check if we should retry or throw exception. + Label retry; + __ bind(&failure_returned); + ASSERT(Failure::RETRY_AFTER_GC == 0); + __ andi(t0, v0, ((1 << kFailureTypeTagSize) - 1) << kFailureTagSize); + __ Branch(eq, &retry, t0, Operand(zero_reg)); + + // Special handling of out of memory exceptions. 
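+ // Out of memory is signalled with a fixed failure sentinel, so v0 can be compared against it directly.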
+ Failure* out_of_memory = Failure::OutOfMemoryException(); + __ Branch(eq, throw_out_of_memory_exception, + v0, Operand(reinterpret_cast<int32_t>(out_of_memory))); + + // Retrieve the pending exception and clear the variable. + __ LoadExternalReference(t0, ExternalReference::the_hole_value_location()); + __ lw(a3, MemOperand(t0)); + __ LoadExternalReference(t0, + ExternalReference(Top::k_pending_exception_address)); + __ lw(v0, MemOperand(t0)); + __ sw(a3, MemOperand(t0)); + + // Special handling of termination exceptions which are uncatchable + // by javascript code. + __ Branch(eq, throw_termination_exception, + v0, Operand(Factory::termination_exception())); + + // Handle normal exception. + __ b(throw_normal_exception); + __ nop(); // Branch delay slot nop. + + __ bind(&retry); // pass last failure (r0) as parameter (r0) when retrying } void CEntryStub::Generate(MacroAssembler* masm) { - UNIMPLEMENTED_MIPS(); - __ break_(0x831); + // Called from JavaScript; parameters are on stack as if calling JS function + // a0: number of arguments including receiver + // a1: pointer to builtin function + // fp: frame pointer (restored after C call) + // sp: stack pointer (restored as callee's sp after C call) + // cp: current context (C callee-saved) + + // NOTE: Invocations of builtins may return failure objects + // instead of a proper result. The builtin entry handles + // this by performing a garbage collection and retrying the + // builtin once. + + // Enter the exit frame that transitions from JavaScript to C++. + __ EnterExitFrame(mode_, s0, s1, s2); + + // s0: number of arguments (C callee-saved) + // s1: pointer to first argument (C callee-saved) + // s2: pointer to builtin function (C callee-saved) + + Label throw_normal_exception; + Label throw_termination_exception; + Label throw_out_of_memory_exception; + + // Call into the runtime system. + GenerateCore(masm, + &throw_normal_exception, + &throw_termination_exception, + &throw_out_of_memory_exception, + false, + false); + + // Do space-specific GC and retry runtime call. + GenerateCore(masm, + &throw_normal_exception, + &throw_termination_exception, + &throw_out_of_memory_exception, + true, + false); + + // Do full GC and retry runtime call one final time. + Failure* failure = Failure::InternalError(); + __ li(v0, Operand(reinterpret_cast<int32_t>(failure))); + GenerateCore(masm, + &throw_normal_exception, + &throw_termination_exception, + &throw_out_of_memory_exception, + true, + true); + + __ bind(&throw_out_of_memory_exception); + GenerateThrowUncatchable(masm, OUT_OF_MEMORY); + + __ bind(&throw_termination_exception); + GenerateThrowUncatchable(masm, TERMINATION); + + __ bind(&throw_normal_exception); + GenerateThrowTOS(masm); } void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { - UNIMPLEMENTED_MIPS(); + Label invoke, exit; + + // Registers: + // a0: entry address + // a1: function + // a2: reveiver + // a3: argc + // + // Stack: + // 4 args slots + // args // Save callee saved registers on the stack. - __ MultiPush(kCalleeSaved | ra.bit()); + __ MultiPush((kCalleeSaved | ra.bit()) & ~sp.bit()); - // ********** State ********** - // - // * Registers: + // We build an EntryFrame. + __ li(t3, Operand(-1)); // Push a bad frame pointer to fail if it is used. + int marker = is_construct ? 
StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY; + __ li(t2, Operand(Smi::FromInt(marker))); + __ li(t1, Operand(Smi::FromInt(marker))); + __ LoadExternalReference(t0, ExternalReference(Top::k_c_entry_fp_address)); + __ lw(t0, MemOperand(t0)); + __ MultiPush(t0.bit() | t1.bit() | t2.bit() | t3.bit()); + + // Setup frame pointer for the frame to be pushed. + __ addiu(fp, sp, -EntryFrameConstants::kCallerFPOffset); + + // Load argv in s0 register. + __ lw(s0, MemOperand(sp, (kNumCalleeSaved + 1) * kPointerSize + + StandardFrameConstants::kCArgsSlotsSize)); + + // Registers: // a0: entry_address // a1: function // a2: reveiver_pointer // a3: argc + // s0: argv // - // * Stack: - // --------------------------- - // args - // --------------------------- - // 4 args slots - // --------------------------- + // Stack: + // caller fp | + // function slot | entry frame + // context slot | + // bad fp (0xff...f) | // callee saved registers + ra - // --------------------------- + // 4 args slots + // args + + // Call a faked try-block that does the invoke. + __ bal(&invoke); + __ nop(); // Branch delay slot nop. + + // Caught exception: Store result (exception) in the pending + // exception field in the JSEnv and return a failure sentinel. + // Coming in here the fp will be invalid because the PushTryHandler below + // sets it to 0 to signal the existence of the JSEntry frame. + __ LoadExternalReference(t0, + ExternalReference(Top::k_pending_exception_address)); + __ sw(v0, MemOperand(t0)); // We come back from 'invoke'. result is in v0. + __ li(v0, Operand(reinterpret_cast<int32_t>(Failure::Exception()))); + __ b(&exit); + __ nop(); // Branch delay slot nop. + + // Invoke: Link this frame into the handler chain. + __ bind(&invoke); + __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER); + // If an exception not caught by another handler occurs, this handler + // returns control to the code after the bal(&invoke) above, which + // restores all kCalleeSaved registers (including cp and fp) to their + // saved values before returning a failure to C. + + // Clear any pending exceptions. + __ LoadExternalReference(t0, ExternalReference::the_hole_value_location()); + __ lw(t1, MemOperand(t0)); + __ LoadExternalReference(t0, + ExternalReference(Top::k_pending_exception_address)); + __ sw(t1, MemOperand(t0)); + + // Invoke the function by calling through JS entry trampoline builtin. + // Notice that we cannot store a reference to the trampoline code directly in + // this stub, because runtime stubs are not traversed when doing GC. + + // Registers: + // a0: entry_address + // a1: function + // a2: reveiver_pointer + // a3: argc + // s0: argv // - // *************************** + // Stack: + // handler frame + // entry frame + // callee saved registers + ra + // 4 args slots + // args - __ break_(0x1234); + if (is_construct) { + ExternalReference construct_entry(Builtins::JSConstructEntryTrampoline); + __ LoadExternalReference(t0, construct_entry); + } else { + ExternalReference entry(Builtins::JSEntryTrampoline); + __ LoadExternalReference(t0, entry); + } + __ lw(t9, MemOperand(t0)); // deref address + + // Call JSEntryTrampoline. + __ addiu(t9, t9, Code::kHeaderSize - kHeapObjectTag); + __ CallBuiltin(t9); + + // Unlink this frame from the handler chain. When reading the + // address of the next handler, there is no need to use the address + // displacement since the current stack pointer (sp) points directly + // to the stack handler. 
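+ // (StackHandlerConstants::kNextOffset is 0, as asserted in PushTryHandler.)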
+ __ lw(t1, MemOperand(sp, StackHandlerConstants::kNextOffset)); + __ LoadExternalReference(t0, ExternalReference(Top::k_handler_address)); + __ sw(t1, MemOperand(t0)); + + // This restores sp to its position before PushTryHandler. + __ addiu(sp, sp, StackHandlerConstants::kSize); + + __ bind(&exit); // v0 holds result + // Restore the top frame descriptors from the stack. + __ Pop(t1); + __ LoadExternalReference(t0, ExternalReference(Top::k_c_entry_fp_address)); + __ sw(t1, MemOperand(t0)); + + // Reset the stack to the callee saved registers. + __ addiu(sp, sp, -EntryFrameConstants::kCallerFPOffset); // Restore callee saved registers from the stack. - __ MultiPop(kCalleeSaved | ra.bit()); - - // Load a result. - __ li(v0, Operand(0x1234)); - __ jr(ra); - // Return - __ nop(); + __ MultiPop((kCalleeSaved | ra.bit()) & ~sp.bit()); + // Return. + __ Jump(ra); } // This stub performs an instanceof, calling the builtin function if -// necessary. Uses a1 for the object, a0 for the function that it may +// necessary. Uses a1 for the object, a0 for the function that it may // be an instance of (these are fetched from the stack). void InstanceofStub::Generate(MacroAssembler* masm) { UNIMPLEMENTED_MIPS(); diff --git a/deps/v8/src/mips/codegen-mips.h b/deps/v8/src/mips/codegen-mips.h index 3f78fcd5f4..44a4a62f29 100644 --- a/deps/v8/src/mips/codegen-mips.h +++ b/deps/v8/src/mips/codegen-mips.h @@ -42,7 +42,77 @@ enum InitState { CONST_INIT, NOT_CONST_INIT }; enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF }; -// ------------------------------------------------------------------------- +// ----------------------------------------------------------------------------- +// Reference support + +// A reference is a C++ stack-allocated object that keeps an ECMA +// reference on the execution stack while in scope. For variables +// the reference is empty, indicating that it isn't necessary to +// store state on the stack for keeping track of references to those. +// For properties, we keep either one (named) or two (indexed) values +// on the execution stack to represent the reference. +class Reference BASE_EMBEDDED { + public: + // The values of the types is important, see size(). + enum Type { UNLOADED = -2, ILLEGAL = -1, SLOT = 0, NAMED = 1, KEYED = 2 }; + Reference(CodeGenerator* cgen, + Expression* expression, + bool persist_after_get = false); + ~Reference(); + + Expression* expression() const { return expression_; } + Type type() const { return type_; } + void set_type(Type value) { + ASSERT_EQ(ILLEGAL, type_); + type_ = value; + } + + void set_unloaded() { + ASSERT_NE(ILLEGAL, type_); + ASSERT_NE(UNLOADED, type_); + type_ = UNLOADED; + } + // The size the reference takes up on the stack. + int size() const { + return (type_ < SLOT) ? 0 : type_; + } + + bool is_illegal() const { return type_ == ILLEGAL; } + bool is_slot() const { return type_ == SLOT; } + bool is_property() const { return type_ == NAMED || type_ == KEYED; } + bool is_unloaded() const { return type_ == UNLOADED; } + + // Return the name. Only valid for named property references. + Handle<String> GetName(); + + // Generate code to push the value of the reference on top of the + // expression stack. The reference is expected to be already on top of + // the expression stack, and it is consumed by the call unless the + // reference is for a compound assignment. + // If the reference is not consumed, it is left in place under its value. 
+ void GetValue(); + + // Generate code to pop a reference, push the value of the reference, + // and then spill the stack frame. + inline void GetValueAndSpill(); + + // Generate code to store the value on top of the expression stack in the + // reference. The reference is expected to be immediately below the value + // on the expression stack. The value is stored in the location specified + // by the reference, and is left on top of the stack, after the reference + // is popped from beneath it (unloaded). + void SetValue(InitState init_state); + + private: + CodeGenerator* cgen_; + Expression* expression_; + Type type_; + // Keep the reference on the stack after get, so it can be used by set later. + bool persist_after_get_; +}; + + +// ----------------------------------------------------------------------------- // Code generation state // The state is passed down the AST by the code generator (and back up, in @@ -89,7 +159,7 @@ class CodeGenState BASE_EMBEDDED { -// ------------------------------------------------------------------------- +// ----------------------------------------------------------------------------- // CodeGenerator class CodeGenerator: public AstVisitor { @@ -152,7 +222,7 @@ class CodeGenerator: public AstVisitor { // Number of instructions used for the JS return sequence. The constant is // used by the debugger to patch the JS return sequence. - static const int kJSReturnSequenceLength = 6; + static const int kJSReturnSequenceLength = 7; // If the name is an inline runtime function call return the number of // expected arguments. Otherwise return -1. @@ -186,9 +256,51 @@ class CodeGenerator: public AstVisitor { AST_NODE_LIST(DEF_VISIT) #undef DEF_VISIT + // Visit a statement and then spill the virtual frame if control flow can + // reach the end of the statement (ie, it does not exit via break, + // continue, return, or throw). This function is used temporarily while + // the code generator is being transformed. + inline void VisitAndSpill(Statement* statement); + + // Visit a list of statements and then spill the virtual frame if control + // flow can reach the end of the list. + inline void VisitStatementsAndSpill(ZoneList<Statement*>* statements); + // Main code generation function void Generate(CompilationInfo* info); + // The following are used by class Reference. + void LoadReference(Reference* ref); + void UnloadReference(Reference* ref); + + MemOperand ContextOperand(Register context, int index) const { + return MemOperand(context, Context::SlotOffset(index)); + } + + MemOperand SlotOperand(Slot* slot, Register tmp); + + // Expressions + MemOperand GlobalObject() const { + return ContextOperand(cp, Context::GLOBAL_INDEX); + } + + void LoadCondition(Expression* x, + JumpTarget* true_target, + JumpTarget* false_target, + bool force_cc); + void Load(Expression* x); + void LoadGlobal(); + + // Generate code to push the value of an expression on top of the frame + // and then spill the frame fully to memory. This function is used + // temporarily while the code generator is being transformed. + inline void LoadAndSpill(Expression* expression); + + // Read a value from a slot and leave it on top of the expression stack. + void LoadFromSlot(Slot* slot, TypeofState typeof_state); + // Store the value on top of the stack to a slot. 
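+ // The stored value is left on top of the stack, as assignment expressions need it.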
+ void StoreToSlot(Slot* slot, InitState init_state); + struct InlineRuntimeLUT { void (CodeGenerator::*method)(ZoneList<Expression*>*); const char* name; @@ -290,7 +402,6 @@ class CodeGenerator: public AstVisitor { CompilationInfo* info_; // Code generation state - Scope* scope_; VirtualFrame* frame_; RegisterAllocator* allocator_; Condition cc_reg_; diff --git a/deps/v8/src/mips/frames-mips.cc b/deps/v8/src/mips/frames-mips.cc index d2c717ce39..cdc880dcd3 100644 --- a/deps/v8/src/mips/frames-mips.cc +++ b/deps/v8/src/mips/frames-mips.cc @@ -91,8 +91,7 @@ Address ArgumentsAdaptorFrame::GetCallerStackPointer() const { Address InternalFrame::GetCallerStackPointer() const { - UNIMPLEMENTED_MIPS(); - return static_cast<Address>(NULL); // UNIMPLEMENTED RETURN + return fp() + StandardFrameConstants::kCallerSPOffset; } diff --git a/deps/v8/src/mips/frames-mips.h b/deps/v8/src/mips/frames-mips.h index ec1949d66e..06e9979c26 100644 --- a/deps/v8/src/mips/frames-mips.h +++ b/deps/v8/src/mips/frames-mips.h @@ -104,7 +104,7 @@ class ExitFrameConstants : public AllStatic { static const int kCallerPCOffset = +1 * kPointerSize; // FP-relative displacement of the caller's SP. - static const int kCallerSPDisplacement = +4 * kPointerSize; + static const int kCallerSPDisplacement = +3 * kPointerSize; }; diff --git a/deps/v8/src/mips/ic-mips.cc b/deps/v8/src/mips/ic-mips.cc index 605616626a..8c9092124c 100644 --- a/deps/v8/src/mips/ic-mips.cc +++ b/deps/v8/src/mips/ic-mips.cc @@ -74,6 +74,47 @@ void CallIC::GenerateNormal(MacroAssembler* masm, int argc) { void CallIC::GenerateMiss(MacroAssembler* masm, int argc) { UNIMPLEMENTED_MIPS(); + // Registers: + // a2: name + // ra: return address + + // Get the receiver of the function from the stack. + __ lw(a3, MemOperand(sp, argc*kPointerSize)); + + __ EnterInternalFrame(); + + // Push the receiver and the name of the function. + __ MultiPush(a2.bit() | a3.bit()); + + // Call the entry. + __ li(a0, Operand(2)); + __ li(a1, Operand(ExternalReference(IC_Utility(kCallIC_Miss)))); + + CEntryStub stub(1); + __ CallStub(&stub); + + // Move result to r1 and leave the internal frame. + __ mov(a1, v0); + __ LeaveInternalFrame(); + + // Check if the receiver is a global object of some sort. + Label invoke, global; + __ lw(a2, MemOperand(sp, argc * kPointerSize)); + __ andi(t0, a2, kSmiTagMask); + __ Branch(eq, &invoke, t0, Operand(zero_reg)); + __ GetObjectType(a2, a3, a3); + __ Branch(eq, &global, a3, Operand(JS_GLOBAL_OBJECT_TYPE)); + __ Branch(ne, &invoke, a3, Operand(JS_BUILTINS_OBJECT_TYPE)); + + // Patch the receiver on the stack. + __ bind(&global); + __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalReceiverOffset)); + __ sw(a2, MemOperand(sp, argc * kPointerSize)); + + // Invoke the function. + ParameterCount actual(argc); + __ bind(&invoke); + __ InvokeFunction(a1, actual, JUMP_FUNCTION); } // Defined in ic.cc. diff --git a/deps/v8/src/mips/jump-target-mips.cc b/deps/v8/src/mips/jump-target-mips.cc index e8398a849c..4bd91028a7 100644 --- a/deps/v8/src/mips/jump-target-mips.cc +++ b/deps/v8/src/mips/jump-target-mips.cc @@ -42,7 +42,37 @@ namespace internal { #define __ ACCESS_MASM(cgen()->masm()) void JumpTarget::DoJump() { - UNIMPLEMENTED_MIPS(); + ASSERT(cgen()->has_valid_frame()); + // Live non-frame registers are not allowed at unconditional jumps + // because we have no way of invalidating the corresponding results + // which are still live in the C++ code. + ASSERT(cgen()->HasValidEntryRegisters()); + + if (is_bound()) { + // Backward jump. 
There is already a frame expectation at the target.
+ ASSERT(direction_ == BIDIRECTIONAL);
+ cgen()->frame()->MergeTo(entry_frame_);
+ cgen()->DeleteFrame();
+ } else {
+ // Use the current frame as the expected one at the target if necessary.
+ if (entry_frame_ == NULL) {
+ entry_frame_ = cgen()->frame();
+ RegisterFile empty;
+ cgen()->SetFrame(NULL, &empty);
+ } else {
+ cgen()->frame()->MergeTo(entry_frame_);
+ cgen()->DeleteFrame();
+ }
+
+ // The predicate is_linked() should be made true. Its implementation
+ // detects the presence of a frame pointer in the reaching_frames_ list.
+ if (!is_linked()) {
+ reaching_frames_.Add(NULL);
+ ASSERT(is_linked());
+ }
+ }
+ __ b(&entry_label_);
+ __ nop(); // Branch delay slot nop.
}


@@ -57,12 +87,47 @@ void JumpTarget::Call() {


void JumpTarget::DoBind() {
- UNIMPLEMENTED_MIPS();
+ ASSERT(!is_bound());
+
+ // Live non-frame registers are not allowed at the start of a basic
+ // block.
+ ASSERT(!cgen()->has_valid_frame() || cgen()->HasValidEntryRegisters());
+
+ if (cgen()->has_valid_frame()) {
+ // If there is a current frame we can use it on the fall through.
+ if (entry_frame_ == NULL) {
+ entry_frame_ = new VirtualFrame(cgen()->frame());
+ } else {
+ ASSERT(cgen()->frame()->Equals(entry_frame_));
+ }
+ } else {
+ // If there is no current frame we must have an entry frame which we can
+ // copy.
+ ASSERT(entry_frame_ != NULL);
+ RegisterFile empty;
+ cgen()->SetFrame(new VirtualFrame(entry_frame_), &empty);
+ }
+
+ // The predicate is_linked() should be made false. Its implementation
+ // detects the presence (or absence) of frame pointers in the
+ // reaching_frames_ list. If we inserted a bogus frame to make
+ // is_linked() true, remove it now.
+ if (is_linked()) {
+ reaching_frames_.Clear();
+ }
+
+ __ bind(&entry_label_);
}


void BreakTarget::Jump() {
- UNIMPLEMENTED_MIPS();
+ // On ARM we do not currently emit merge code for jumps, so we need to do
+ // it explicitly here. The only merging necessary is to drop extra
+ // statement state from the stack.
+ ASSERT(cgen()->has_valid_frame());
+ int count = cgen()->frame()->height() - expected_height_;
+ cgen()->frame()->Drop(count);
+ DoJump();
}


@@ -72,7 +137,26 @@ void BreakTarget::Jump(Result* arg) {


void BreakTarget::Bind() {
- UNIMPLEMENTED_MIPS();
+#ifdef DEBUG
+ // All the forward-reaching frames should have been adjusted at the
+ // jumps to this target.
+ for (int i = 0; i < reaching_frames_.length(); i++) {
+ ASSERT(reaching_frames_[i] == NULL ||
+ reaching_frames_[i]->height() == expected_height_);
+ }
+#endif
+ // Drop leftover statement state from the frame before merging, even
+ // on the fall through. This is so we can bind the return target
+ // with state on the frame.
+ if (cgen()->has_valid_frame()) {
+ int count = cgen()->frame()->height() - expected_height_;
+ // On ARM we do not currently emit merge code at binding sites, so we need
+ // to do it explicitly here. The only merging necessary is to drop extra
+ // statement state from the stack.
+ cgen()->frame()->Drop(count); + } + + DoBind(); } diff --git a/deps/v8/src/mips/macro-assembler-mips.cc b/deps/v8/src/mips/macro-assembler-mips.cc index e49858b1d8..c276af5106 100644 --- a/deps/v8/src/mips/macro-assembler-mips.cc +++ b/deps/v8/src/mips/macro-assembler-mips.cc @@ -55,7 +55,7 @@ void MacroAssembler::Jump(Register target, Condition cond, void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond, Register r1, const Operand& r2) { - Jump(Operand(target), cond, r1, r2); + Jump(Operand(target, rmode), cond, r1, r2); } @@ -81,7 +81,7 @@ void MacroAssembler::Call(Register target, void MacroAssembler::Call(intptr_t target, RelocInfo::Mode rmode, Condition cond, Register r1, const Operand& r2) { - Call(Operand(target), cond, r1, r2); + Call(Operand(target, rmode), cond, r1, r2); } @@ -106,7 +106,7 @@ void MacroAssembler::Ret(Condition cond, Register r1, const Operand& r2) { void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index) { - lw(destination, MemOperand(s4, index << kPointerSizeLog2)); + lw(destination, MemOperand(s6, index << kPointerSizeLog2)); } void MacroAssembler::LoadRoot(Register destination, @@ -114,8 +114,7 @@ void MacroAssembler::LoadRoot(Register destination, Condition cond, Register src1, const Operand& src2) { Branch(NegateCondition(cond), 2, src1, src2); - nop(); - lw(destination, MemOperand(s4, index << kPointerSizeLog2)); + lw(destination, MemOperand(s6, index << kPointerSizeLog2)); } @@ -320,7 +319,6 @@ void MacroAssembler::movn(Register rd, Register rt) { } -// load wartd in a register void MacroAssembler::li(Register rd, Operand j, bool gen2instr) { ASSERT(!j.is_reg()); @@ -372,7 +370,7 @@ void MacroAssembler::MultiPush(RegList regs) { int16_t NumToPush = NumberOfBitsSet(regs); addiu(sp, sp, -4 * NumToPush); - for (int16_t i = 0; i < kNumRegisters; i++) { + for (int16_t i = kNumRegisters; i > 0; i--) { if ((regs & (1 << i)) != 0) { sw(ToRegister(i), MemOperand(sp, 4 * (NumToPush - ++NumSaved))); } @@ -385,7 +383,7 @@ void MacroAssembler::MultiPushReversed(RegList regs) { int16_t NumToPush = NumberOfBitsSet(regs); addiu(sp, sp, -4 * NumToPush); - for (int16_t i = kNumRegisters; i > 0; i--) { + for (int16_t i = 0; i < kNumRegisters; i++) { if ((regs & (1 << i)) != 0) { sw(ToRegister(i), MemOperand(sp, 4 * (NumToPush - ++NumSaved))); } @@ -396,7 +394,7 @@ void MacroAssembler::MultiPushReversed(RegList regs) { void MacroAssembler::MultiPop(RegList regs) { int16_t NumSaved = 0; - for (int16_t i = kNumRegisters; i > 0; i--) { + for (int16_t i = 0; i < kNumRegisters; i++) { if ((regs & (1 << i)) != 0) { lw(ToRegister(i), MemOperand(sp, 4 * (NumSaved++))); } @@ -408,7 +406,7 @@ void MacroAssembler::MultiPop(RegList regs) { void MacroAssembler::MultiPopReversed(RegList regs) { int16_t NumSaved = 0; - for (int16_t i = 0; i < kNumRegisters; i++) { + for (int16_t i = kNumRegisters; i > 0; i--) { if ((regs & (1 << i)) != 0) { lw(ToRegister(i), MemOperand(sp, 4 * (NumSaved++))); } @@ -484,6 +482,8 @@ void MacroAssembler::Branch(Condition cond, int16_t offset, Register rs, default: UNREACHABLE(); } + // Emit a nop in the branch delay slot. + nop(); } @@ -550,6 +550,8 @@ void MacroAssembler::Branch(Condition cond, Label* L, Register rs, default: UNREACHABLE(); } + // Emit a nop in the branch delay slot. + nop(); } @@ -629,6 +631,8 @@ void MacroAssembler::BranchAndLink(Condition cond, int16_t offset, Register rs, default: UNREACHABLE(); } + // Emit a nop in the branch delay slot. 
+ nop(); } @@ -704,6 +708,8 @@ void MacroAssembler::BranchAndLink(Condition cond, Label* L, Register rs, default: UNREACHABLE(); } + // Emit a nop in the branch delay slot. + nop(); } @@ -714,7 +720,6 @@ void MacroAssembler::Jump(const Operand& target, jr(target.rm()); } else { Branch(NegateCondition(cond), 2, rs, rt); - nop(); jr(target.rm()); } } else { // !target.is_reg() @@ -723,20 +728,20 @@ void MacroAssembler::Jump(const Operand& target, j(target.imm32_); } else { Branch(NegateCondition(cond), 2, rs, rt); - nop(); - j(target.imm32_); // will generate only one instruction. + j(target.imm32_); // Will generate only one instruction. } } else { // MustUseAt(target) - li(at, rt); + li(at, target); if (cond == cc_always) { jr(at); } else { Branch(NegateCondition(cond), 2, rs, rt); - nop(); - jr(at); // will generate only one instruction. + jr(at); // Will generate only one instruction. } } } + // Emit a nop in the branch delay slot. + nop(); } @@ -747,7 +752,6 @@ void MacroAssembler::Call(const Operand& target, jalr(target.rm()); } else { Branch(NegateCondition(cond), 2, rs, rt); - nop(); jalr(target.rm()); } } else { // !target.is_reg() @@ -756,20 +760,20 @@ void MacroAssembler::Call(const Operand& target, jal(target.imm32_); } else { Branch(NegateCondition(cond), 2, rs, rt); - nop(); - jal(target.imm32_); // will generate only one instruction. + jal(target.imm32_); // Will generate only one instruction. } } else { // MustUseAt(target) - li(at, rt); + li(at, target); if (cond == cc_always) { jalr(at); } else { Branch(NegateCondition(cond), 2, rs, rt); - nop(); - jalr(at); // will generate only one instruction. + jalr(at); // Will generate only one instruction. } } } + // Emit a nop in the branch delay slot. + nop(); } void MacroAssembler::StackLimitCheck(Label* on_stack_overflow) { @@ -802,7 +806,58 @@ void MacroAssembler::Call(Label* target) { void MacroAssembler::PushTryHandler(CodeLocation try_location, HandlerType type) { - UNIMPLEMENTED_MIPS(); + // Adjust this code if not the case. + ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize); + // The return address is passed in register ra. + if (try_location == IN_JAVASCRIPT) { + if (type == TRY_CATCH_HANDLER) { + li(t0, Operand(StackHandler::TRY_CATCH)); + } else { + li(t0, Operand(StackHandler::TRY_FINALLY)); + } + ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize + && StackHandlerConstants::kFPOffset == 2 * kPointerSize + && StackHandlerConstants::kPCOffset == 3 * kPointerSize + && StackHandlerConstants::kNextOffset == 0 * kPointerSize); + // Save the current handler as the next handler. + LoadExternalReference(t2, ExternalReference(Top::k_handler_address)); + lw(t1, MemOperand(t2)); + + addiu(sp, sp, -StackHandlerConstants::kSize); + sw(ra, MemOperand(sp, 12)); + sw(fp, MemOperand(sp, 8)); + sw(t0, MemOperand(sp, 4)); + sw(t1, MemOperand(sp, 0)); + + // Link this handler as the new current one. + sw(sp, MemOperand(t2)); + + } else { + // Must preserve a0-a3, and s0 (argv). + ASSERT(try_location == IN_JS_ENTRY); + ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize + && StackHandlerConstants::kFPOffset == 2 * kPointerSize + && StackHandlerConstants::kPCOffset == 3 * kPointerSize + && StackHandlerConstants::kNextOffset == 0 * kPointerSize); + + // The frame pointer does not point to a JS frame so we save NULL + // for fp. We expect the code throwing an exception to check fp + // before dereferencing it to restore the context. 
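+ // A JS entry frame therefore gets the ENTRY handler state rather than TRY_CATCH or TRY_FINALLY.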
+ li(t0, Operand(StackHandler::ENTRY)); + + // Save the current handler as the next handler. + LoadExternalReference(t2, ExternalReference(Top::k_handler_address)); + lw(t1, MemOperand(t2)); + + addiu(sp, sp, -StackHandlerConstants::kSize); + sw(ra, MemOperand(sp, 12)); + sw(zero_reg, MemOperand(sp, 8)); + sw(t0, MemOperand(sp, 4)); + sw(t1, MemOperand(sp, 0)); + + // Link this handler as the new current one. + sw(sp, MemOperand(t2)); + } } @@ -812,12 +867,233 @@ void MacroAssembler::PopTryHandler() { -// --------------------------------------------------------------------------- +// ----------------------------------------------------------------------------- // Activation frames +void MacroAssembler::SetupAlignedCall(Register scratch, int arg_count) { + Label extra_push, end; + + andi(scratch, sp, 7); + + // We check for args and receiver size on the stack, all of them word sized. + // We add one for sp, that we also want to store on the stack. + if (((arg_count + 1) % kPointerSizeLog2) == 0) { + Branch(ne, &extra_push, at, Operand(zero_reg)); + } else { // ((arg_count + 1) % 2) == 1 + Branch(eq, &extra_push, at, Operand(zero_reg)); + } + + // Save sp on the stack. + mov(scratch, sp); + Push(scratch); + b(&end); + + // Align before saving sp on the stack. + bind(&extra_push); + mov(scratch, sp); + addiu(sp, sp, -8); + sw(scratch, MemOperand(sp)); + + // The stack is aligned and sp is stored on the top. + bind(&end); +} + + +void MacroAssembler::ReturnFromAlignedCall() { + lw(sp, MemOperand(sp)); +} + + +// ----------------------------------------------------------------------------- +// JavaScript invokes + +void MacroAssembler::InvokePrologue(const ParameterCount& expected, + const ParameterCount& actual, + Handle<Code> code_constant, + Register code_reg, + Label* done, + InvokeFlag flag) { + bool definitely_matches = false; + Label regular_invoke; + + // Check whether the expected and actual arguments count match. If not, + // setup registers according to contract with ArgumentsAdaptorTrampoline: + // a0: actual arguments count + // a1: function (passed through to callee) + // a2: expected arguments count + // a3: callee code entry + + // The code below is made a lot easier because the calling code already sets + // up actual and expected registers according to the contract if values are + // passed in registers. + ASSERT(actual.is_immediate() || actual.reg().is(a0)); + ASSERT(expected.is_immediate() || expected.reg().is(a2)); + ASSERT((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(a3)); + + if (expected.is_immediate()) { + ASSERT(actual.is_immediate()); + if (expected.immediate() == actual.immediate()) { + definitely_matches = true; + } else { + li(a0, Operand(actual.immediate())); + const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel; + if (expected.immediate() == sentinel) { + // Don't worry about adapting arguments for builtins that + // don't want that done. Skip adaption code by making it look + // like we have a match between expected and actual number of + // arguments. 
+ definitely_matches = true;
+ } else {
+ li(a2, Operand(expected.immediate()));
+ }
+ }
+ } else if (actual.is_immediate()) {
+ Branch(eq, &regular_invoke, expected.reg(), Operand(actual.immediate()));
+ li(a0, Operand(actual.immediate()));
+ } else {
+ Branch(eq, &regular_invoke, expected.reg(), Operand(actual.reg()));
+ }
+
+ if (!definitely_matches) {
+ if (!code_constant.is_null()) {
+ li(a3, Operand(code_constant));
+ addiu(a3, a3, Code::kHeaderSize - kHeapObjectTag);
+ }
+
+ ExternalReference adaptor(Builtins::ArgumentsAdaptorTrampoline);
+ if (flag == CALL_FUNCTION) {
+ CallBuiltin(adaptor);
+ b(done);
+ nop();
+ } else {
+ JumpToBuiltin(adaptor);
+ }
+ bind(&regular_invoke);
+ }
+}
+
+void MacroAssembler::InvokeCode(Register code,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag) {
+ Label done;
+
+ InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag);
+ if (flag == CALL_FUNCTION) {
+ Call(code);
+ } else {
+ ASSERT(flag == JUMP_FUNCTION);
+ Jump(code);
+ }
+ // Continue here if InvokePrologue does handle the invocation due to
+ // mismatched parameter counts.
+ bind(&done);
+}
+
+
+void MacroAssembler::InvokeCode(Handle<Code> code,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ RelocInfo::Mode rmode,
+ InvokeFlag flag) {
+ Label done;
+
+ InvokePrologue(expected, actual, code, no_reg, &done, flag);
+ if (flag == CALL_FUNCTION) {
+ Call(code, rmode);
+ } else {
+ Jump(code, rmode);
+ }
+ // Continue here if InvokePrologue does handle the invocation due to
+ // mismatched parameter counts.
+ bind(&done);
+}
+
+
+void MacroAssembler::InvokeFunction(Register function,
+ const ParameterCount& actual,
+ InvokeFlag flag) {
+ // Contract with called JS functions requires that function is passed in a1.
+ ASSERT(function.is(a1));
+ Register expected_reg = a2;
+ Register code_reg = a3;
+
+ lw(code_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+ lw(expected_reg,
+ FieldMemOperand(code_reg,
+ SharedFunctionInfo::kFormalParameterCountOffset));
+ lw(code_reg,
+ MemOperand(code_reg, SharedFunctionInfo::kCodeOffset - kHeapObjectTag));
+ addiu(code_reg, code_reg, Code::kHeaderSize - kHeapObjectTag);
+
+ ParameterCount expected(expected_reg);
+ InvokeCode(code_reg, expected, actual, flag);
+}
+
+
+// ---------------------------------------------------------------------------
+// Support functions.
+
+
+ void MacroAssembler::GetObjectType(Register function,
+ Register map,
+ Register type_reg) {
+ lw(map, FieldMemOperand(function, HeapObject::kMapOffset));
+ lbu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ }
+
+
+ void MacroAssembler::CallBuiltin(ExternalReference builtin_entry) {
+ // Load builtin address.
+ LoadExternalReference(t9, builtin_entry);
+ lw(t9, MemOperand(t9)); // Deref address.
+ addiu(t9, t9, Code::kHeaderSize - kHeapObjectTag);
+ // Call and allocate arguments slots.
+ jalr(t9);
+ // Use the branch delay slot to allocate argument slots.
+ addiu(sp, sp, -StandardFrameConstants::kRArgsSlotsSize);
+ addiu(sp, sp, StandardFrameConstants::kRArgsSlotsSize);
+ }
+
+
+ void MacroAssembler::CallBuiltin(Register target) {
+ // Target already holds target address.
+ // Call and allocate arguments slots.
+ jalr(target);
+ // Use the branch delay slot to allocate argument slots.
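+ // The first addiu below sits in the jalr delay slot, so the argument slots exist before the callee runs; the second removes them after the call returns.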
+ addiu(sp, sp, -StandardFrameConstants::kRArgsSlotsSize); + addiu(sp, sp, StandardFrameConstants::kRArgsSlotsSize); + } + + + void MacroAssembler::JumpToBuiltin(ExternalReference builtin_entry) { + // Load builtin address. + LoadExternalReference(t9, builtin_entry); + lw(t9, MemOperand(t9)); // Deref address. + addiu(t9, t9, Code::kHeaderSize - kHeapObjectTag); + // Call and allocate arguments slots. + jr(t9); + // Use the branch delay slot to allocated argument slots. + addiu(sp, sp, -StandardFrameConstants::kRArgsSlotsSize); + } + + + void MacroAssembler::JumpToBuiltin(Register target) { + // t9 already holds target address. + // Call and allocate arguments slots. + jr(t9); + // Use the branch delay slot to allocated argument slots. + addiu(sp, sp, -StandardFrameConstants::kRArgsSlotsSize); + } + + +// ----------------------------------------------------------------------------- +// Runtime calls + void MacroAssembler::CallStub(CodeStub* stub, Condition cond, Register r1, const Operand& r2) { - UNIMPLEMENTED_MIPS(); + ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs. + Call(stub->GetCode(), RelocInfo::CODE_TARGET, cond, r1, r2); } @@ -826,13 +1102,38 @@ void MacroAssembler::StubReturn(int argc) { } +void MacroAssembler::IllegalOperation(int num_arguments) { + if (num_arguments > 0) { + addiu(sp, sp, num_arguments * kPointerSize); + } + LoadRoot(v0, Heap::kUndefinedValueRootIndex); +} + + void MacroAssembler::CallRuntime(Runtime::Function* f, int num_arguments) { - UNIMPLEMENTED_MIPS(); + // All parameters are on the stack. v0 has the return value after call. + + // If the expected number of arguments of the runtime function is + // constant, we check that the actual number of arguments match the + // expectation. + if (f->nargs >= 0 && f->nargs != num_arguments) { + IllegalOperation(num_arguments); + return; + } + + // TODO(1236192): Most runtime routines don't need the number of + // arguments passed in because it is constant. At some point we + // should remove this need and make the runtime routine entry code + // smarter. 
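+ // CEntryStub expects the argument count in a0 and the runtime function's entry point in a1.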
+ li(a0, num_arguments); + LoadExternalReference(a1, ExternalReference(f)); + CEntryStub stub(1); + CallStub(&stub); } void MacroAssembler::CallRuntime(Runtime::FunctionId fid, int num_arguments) { - UNIMPLEMENTED_MIPS(); + CallRuntime(Runtime::FunctionForId(fid), num_arguments); } @@ -891,6 +1192,8 @@ void MacroAssembler::DecrementCounter(StatsCounter* counter, int value, } +// ----------------------------------------------------------------------------- +// Debugging void MacroAssembler::Assert(Condition cc, const char* msg, Register rs, Operand rt) { @@ -908,5 +1211,113 @@ void MacroAssembler::Abort(const char* msg) { UNIMPLEMENTED_MIPS(); } + +void MacroAssembler::EnterFrame(StackFrame::Type type) { + addiu(sp, sp, -5 * kPointerSize); + li(t0, Operand(Smi::FromInt(type))); + li(t1, Operand(CodeObject())); + sw(ra, MemOperand(sp, 4 * kPointerSize)); + sw(fp, MemOperand(sp, 3 * kPointerSize)); + sw(cp, MemOperand(sp, 2 * kPointerSize)); + sw(t0, MemOperand(sp, 1 * kPointerSize)); + sw(t1, MemOperand(sp, 0 * kPointerSize)); + addiu(fp, sp, 3 * kPointerSize); +} + + +void MacroAssembler::LeaveFrame(StackFrame::Type type) { + mov(sp, fp); + lw(fp, MemOperand(sp, 0 * kPointerSize)); + lw(ra, MemOperand(sp, 1 * kPointerSize)); + addiu(sp, sp, 2 * kPointerSize); +} + + +void MacroAssembler::EnterExitFrame(ExitFrame::Mode mode, + Register hold_argc, + Register hold_argv, + Register hold_function) { + // Compute the argv pointer and keep it in a callee-saved register. + // a0 is argc. + sll(t0, a0, kPointerSizeLog2); + add(hold_argv, sp, t0); + addi(hold_argv, hold_argv, -kPointerSize); + + // Compute callee's stack pointer before making changes and save it as + // t1 register so that it is restored as sp register on exit, thereby + // popping the args. + // t1 = sp + kPointerSize * #args + add(t1, sp, t0); + + // Align the stack at this point. + AlignStack(0); + + // Save registers. + addiu(sp, sp, -12); + sw(t1, MemOperand(sp, 8)); + sw(ra, MemOperand(sp, 4)); + sw(fp, MemOperand(sp, 0)); + mov(fp, sp); // Setup new frame pointer. + + // Push debug marker. + if (mode == ExitFrame::MODE_DEBUG) { + Push(zero_reg); + } else { + li(t0, Operand(CodeObject())); + Push(t0); + } + + // Save the frame pointer and the context in top. + LoadExternalReference(t0, ExternalReference(Top::k_c_entry_fp_address)); + sw(fp, MemOperand(t0)); + LoadExternalReference(t0, ExternalReference(Top::k_context_address)); + sw(cp, MemOperand(t0)); + + // Setup argc and the builtin function in callee-saved registers. + mov(hold_argc, a0); + mov(hold_function, a1); +} + + +void MacroAssembler::LeaveExitFrame(ExitFrame::Mode mode) { + // Clear top frame. + LoadExternalReference(t0, ExternalReference(Top::k_c_entry_fp_address)); + sw(zero_reg, MemOperand(t0)); + + // Restore current context from top and clear it in debug mode. + LoadExternalReference(t0, ExternalReference(Top::k_context_address)); + lw(cp, MemOperand(t0)); +#ifdef DEBUG + sw(a3, MemOperand(t0)); +#endif + + // Pop the arguments, restore registers, and return. + mov(sp, fp); // Respect ABI stack constraint. + lw(fp, MemOperand(sp, 0)); + lw(ra, MemOperand(sp, 4)); + lw(sp, MemOperand(sp, 8)); + jr(ra); + nop(); // Branch delay slot nop. +} + + +void MacroAssembler::AlignStack(int offset) { + // On MIPS an offset of 0 aligns to 0 modulo 8 bytes, + // and an offset of 1 aligns to 4 modulo 8 bytes. 
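+ // With 4-byte pointers this means pushing at most one extra word.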
+ int activation_frame_alignment = OS::ActivationFrameAlignment(); + if (activation_frame_alignment != kPointerSize) { + // This code needs to be made more general if this assert doesn't hold. + ASSERT(activation_frame_alignment == 2 * kPointerSize); + if (offset == 0) { + andi(t0, sp, activation_frame_alignment - 1); + Push(zero_reg, eq, t0, zero_reg); + } else { + andi(t0, sp, activation_frame_alignment - 1); + addiu(t0, t0, -4); + Push(zero_reg, eq, t0, zero_reg); + } + } +} + } } // namespace v8::internal diff --git a/deps/v8/src/mips/macro-assembler-mips.h b/deps/v8/src/mips/macro-assembler-mips.h index b34488cef3..0f0365b7c6 100644 --- a/deps/v8/src/mips/macro-assembler-mips.h +++ b/deps/v8/src/mips/macro-assembler-mips.h @@ -41,6 +41,7 @@ class JumpTarget; // unless we know exactly what we do. // Registers aliases +// cp is assumed to be a callee saved register. const Register cp = s7; // JavaScript context pointer const Register fp = s8_fp; // Alias fp @@ -102,10 +103,10 @@ class MacroAssembler: public Assembler { // Jump unconditionally to given label. // We NEED a nop in the branch delay slot, as it used by v8, for example in // CodeGenerator::ProcessDeferred(). + // Currently the branch delay slot is filled by the MacroAssembler. // Use rather b(Label) for code generation. void jmp(Label* L) { Branch(cc_always, L); - nop(); } // Load an object from the root table. @@ -115,6 +116,11 @@ class MacroAssembler: public Assembler { Heap::RootListIndex index, Condition cond, Register src1, const Operand& src2); + // Load an external reference. + void LoadExternalReference(Register reg, ExternalReference ext) { + li(reg, Operand(ext)); + } + // Sets the remembered set bit for [address+offset]. void RecordWrite(Register object, Register offset, Register scratch); @@ -191,7 +197,6 @@ class MacroAssembler: public Assembler { void Push(Register src, Condition cond, Register tst1, Register tst2) { // Since we don't have conditionnal execution we use a Branch. Branch(cond, 3, tst1, Operand(tst2)); - nop(); Addu(sp, sp, Operand(-kPointerSize)); sw(src, MemOperand(sp, 0)); } @@ -209,6 +214,53 @@ class MacroAssembler: public Assembler { } + // --------------------------------------------------------------------------- + // Activation frames + + void EnterInternalFrame() { EnterFrame(StackFrame::INTERNAL); } + void LeaveInternalFrame() { LeaveFrame(StackFrame::INTERNAL); } + + // Enter specific kind of exit frame; either EXIT or + // EXIT_DEBUG. Expects the number of arguments in register a0 and + // the builtin function to call in register a1. + // On output hold_argc, hold_function, and hold_argv are setup. + void EnterExitFrame(ExitFrame::Mode mode, + Register hold_argc, + Register hold_argv, + Register hold_function); + + // Leave the current exit frame. Expects the return value in v0. + void LeaveExitFrame(ExitFrame::Mode mode); + + // Align the stack by optionally pushing a Smi zero. + void AlignStack(int offset); + + void SetupAlignedCall(Register scratch, int arg_count = 0); + void ReturnFromAlignedCall(); + + + // --------------------------------------------------------------------------- + // JavaScript invokes + + // Invoke the JavaScript function code by either calling or jumping. 
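+ // InvokePrologue reconciles the expected and actual argument counts before the transfer.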
+ void InvokeCode(Register code, + const ParameterCount& expected, + const ParameterCount& actual, + InvokeFlag flag); + + void InvokeCode(Handle<Code> code, + const ParameterCount& expected, + const ParameterCount& actual, + RelocInfo::Mode rmode, + InvokeFlag flag); + + // Invoke the JavaScript function in the given register. Changes the + // current context to the context in the function before invoking. + void InvokeFunction(Register function, + const ParameterCount& actual, + InvokeFlag flag); + + #ifdef ENABLE_DEBUGGER_SUPPORT // --------------------------------------------------------------------------- // Debugger Support @@ -227,8 +279,7 @@ class MacroAssembler: public Assembler { // Exception handling // Push a new try handler and link into try handler chain. - // The return address must be passed in register lr. - // On exit, r0 contains TOS (code slot). + // The return address must be passed in register ra. void PushTryHandler(CodeLocation try_location, HandlerType type); // Unlink the stack handler on top of the stack from the try handler chain. @@ -239,6 +290,10 @@ class MacroAssembler: public Assembler { // --------------------------------------------------------------------------- // Support functions. + void GetObjectType(Register function, + Register map, + Register type_reg); + inline void BranchOnSmi(Register value, Label* smi_label, Register scratch = at) { ASSERT_EQ(0, kSmiTag); @@ -254,6 +309,15 @@ class MacroAssembler: public Assembler { Branch(ne, not_smi_label, scratch, Operand(zero_reg)); } + void CallBuiltin(ExternalReference builtin_entry); + void CallBuiltin(Register target); + void JumpToBuiltin(ExternalReference builtin_entry); + void JumpToBuiltin(Register target); + + // Generates code for reporting that an illegal operation has + // occurred. + void IllegalOperation(int num_arguments); + // --------------------------------------------------------------------------- // Runtime calls @@ -342,20 +406,33 @@ class MacroAssembler: public Assembler { bool allow_stub_calls() { return allow_stub_calls_; } private: + List<Unresolved> unresolved_; + bool generating_stub_; + bool allow_stub_calls_; + // This handle will be patched with the code object on installation. + Handle<Object> code_object_; + void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = cc_always, Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg)); void Call(intptr_t target, RelocInfo::Mode rmode, Condition cond = cc_always, Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg)); + // Helper functions for generating invokes. + void InvokePrologue(const ParameterCount& expected, + const ParameterCount& actual, + Handle<Code> code_constant, + Register code_reg, + Label* done, + InvokeFlag flag); + // Get the code for the given builtin. Returns if able to resolve // the function in the 'resolved' flag. Handle<Code> ResolveBuiltin(Builtins::JavaScript id, bool* resolved); - List<Unresolved> unresolved_; - bool generating_stub_; - bool allow_stub_calls_; - // This handle will be patched with the code object on installation. - Handle<Object> code_object_; + // Activation support. + // EnterFrame clobbers t0 and t1. 
+ void EnterFrame(StackFrame::Type type); + void LeaveFrame(StackFrame::Type type); }; diff --git a/deps/v8/src/mips/stub-cache-mips.cc b/deps/v8/src/mips/stub-cache-mips.cc index 669fdaa3ce..0b2d2c3333 100644 --- a/deps/v8/src/mips/stub-cache-mips.cc +++ b/deps/v8/src/mips/stub-cache-mips.cc @@ -160,8 +160,31 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object, Object* StubCompiler::CompileLazyCompile(Code::Flags flags) { - UNIMPLEMENTED_MIPS(); - return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN + // Registers: + // a1: function + // ra: return address + + // Enter an internal frame. + __ EnterInternalFrame(); + // Preserve the function. + __ Push(a1); + // Setup aligned call. + __ SetupAlignedCall(t0, 1); + // Push the function on the stack as the argument to the runtime function. + __ Push(a1); + // Call the runtime function + __ CallRuntime(Runtime::kLazyCompile, 1); + __ ReturnFromAlignedCall(); + // Calculate the entry point. + __ addiu(t9, v0, Code::kHeaderSize - kHeapObjectTag); + // Restore saved function. + __ Pop(a1); + // Tear down temporary frame. + __ LeaveInternalFrame(); + // Do a tail-call of the compiled function. + __ Jump(t9); + + return GetCodeWithFlags(flags, "LazyCompileStub"); } @@ -174,6 +197,26 @@ Object* CallStubCompiler::CompileCallField(JSObject* object, } +Object* CallStubCompiler::CompileArrayPushCall(Object* object, + JSObject* holder, + JSFunction* function, + String* name, + CheckType check) { + UNIMPLEMENTED_MIPS(); + return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN +} + + +Object* CallStubCompiler::CompileArrayPopCall(Object* object, + JSObject* holder, + JSFunction* function, + String* name, + CheckType check) { + UNIMPLEMENTED_MIPS(); + return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN +} + + Object* CallStubCompiler::CompileCallConstant(Object* object, JSObject* holder, JSFunction* function, diff --git a/deps/v8/src/mips/virtual-frame-mips.cc b/deps/v8/src/mips/virtual-frame-mips.cc index e89882f2ae..c2116de77a 100644 --- a/deps/v8/src/mips/virtual-frame-mips.cc +++ b/deps/v8/src/mips/virtual-frame-mips.cc @@ -53,7 +53,12 @@ void VirtualFrame::SyncElementByPushing(int index) { void VirtualFrame::SyncRange(int begin, int end) { - UNIMPLEMENTED_MIPS(); + // All elements are in memory on MIPS (ie, synced). +#ifdef DEBUG + for (int i = begin; i <= end; i++) { + ASSERT(elements_[i].is_synced()); + } +#endif } @@ -63,7 +68,13 @@ void VirtualFrame::MergeTo(VirtualFrame* expected) { void VirtualFrame::Enter() { - UNIMPLEMENTED_MIPS(); + // TODO(MIPS): Implement DEBUG + + // We are about to push four values to the frame. + Adjust(4); + __ MultiPush(ra.bit() | fp.bit() | cp.bit() | a1.bit()); + // Adjust FP to point to saved FP. + __ addiu(fp, sp, 2 * kPointerSize); } @@ -73,7 +84,17 @@ void VirtualFrame::Exit() { void VirtualFrame::AllocateStackSlots() { - UNIMPLEMENTED_MIPS(); + int count = local_count(); + if (count > 0) { + Comment cmnt(masm(), "[ Allocate space for locals"); + Adjust(count); + // Initialize stack slots with 'undefined' value. 
+ __ LoadRoot(t0, Heap::kUndefinedValueRootIndex); + __ addiu(sp, sp, -count * kPointerSize); + for (int i = 0; i < count; i++) { + __ sw(t0, MemOperand(sp, (count-i-1)*kPointerSize)); + } + } } @@ -128,12 +149,16 @@ void VirtualFrame::CallStub(CodeStub* stub, Result* arg0, Result* arg1) { void VirtualFrame::CallRuntime(Runtime::Function* f, int arg_count) { - UNIMPLEMENTED_MIPS(); + PrepareForCall(arg_count, arg_count); + ASSERT(cgen()->HasValidEntryRegisters()); + __ CallRuntime(f, arg_count); } void VirtualFrame::CallRuntime(Runtime::FunctionId id, int arg_count) { - UNIMPLEMENTED_MIPS(); + PrepareForCall(arg_count, arg_count); + ASSERT(cgen()->HasValidEntryRegisters()); + __ CallRuntime(id, arg_count); } @@ -155,16 +180,37 @@ void VirtualFrame::InvokeBuiltin(Builtins::JavaScript id, } -void VirtualFrame::RawCallCodeObject(Handle<Code> code, - RelocInfo::Mode rmode) { - UNIMPLEMENTED_MIPS(); -} - - void VirtualFrame::CallCodeObject(Handle<Code> code, RelocInfo::Mode rmode, int dropped_args) { - UNIMPLEMENTED_MIPS(); + switch (code->kind()) { + case Code::CALL_IC: + break; + case Code::FUNCTION: + UNIMPLEMENTED_MIPS(); + break; + case Code::KEYED_LOAD_IC: + UNIMPLEMENTED_MIPS(); + break; + case Code::LOAD_IC: + UNIMPLEMENTED_MIPS(); + break; + case Code::KEYED_STORE_IC: + UNIMPLEMENTED_MIPS(); + break; + case Code::STORE_IC: + UNIMPLEMENTED_MIPS(); + break; + case Code::BUILTIN: + UNIMPLEMENTED_MIPS(); + break; + default: + UNREACHABLE(); + break; + } + Forget(dropped_args); + ASSERT(cgen()->HasValidEntryRegisters()); + __ Call(code, rmode); } @@ -187,7 +233,24 @@ void VirtualFrame::CallCodeObject(Handle<Code> code, void VirtualFrame::Drop(int count) { - UNIMPLEMENTED_MIPS(); + ASSERT(count >= 0); + ASSERT(height() >= count); + int num_virtual_elements = (element_count() - 1) - stack_pointer_; + + // Emit code to lower the stack pointer if necessary. + if (num_virtual_elements < count) { + int num_dropped = count - num_virtual_elements; + stack_pointer_ -= num_dropped; + __ addiu(sp, sp, num_dropped * kPointerSize); + } + + // Discard elements from the virtual frame and free any registers. 
+ for (int i = 0; i < count; i++) { + FrameElement dropped = elements_.RemoveLast(); + if (dropped.is_register()) { + Unuse(dropped.reg()); + } + } } @@ -199,27 +262,50 @@ void VirtualFrame::DropFromVFrameOnly(int count) { Result VirtualFrame::Pop() { UNIMPLEMENTED_MIPS(); Result res = Result(); - return res; // UNIMPLEMENTED RETUR + return res; // UNIMPLEMENTED RETURN } void VirtualFrame::EmitPop(Register reg) { - UNIMPLEMENTED_MIPS(); + ASSERT(stack_pointer_ == element_count() - 1); + stack_pointer_--; + elements_.RemoveLast(); + __ Pop(reg); } + void VirtualFrame::EmitMultiPop(RegList regs) { - UNIMPLEMENTED_MIPS(); + ASSERT(stack_pointer_ == element_count() - 1); + for (int16_t i = 0; i < kNumRegisters; i++) { + if ((regs & (1 << i)) != 0) { + stack_pointer_--; + elements_.RemoveLast(); + } + } + __ MultiPop(regs); } void VirtualFrame::EmitPush(Register reg) { - UNIMPLEMENTED_MIPS(); + ASSERT(stack_pointer_ == element_count() - 1); + elements_.Add(FrameElement::MemoryElement(NumberInfo::Unknown())); + stack_pointer_++; + __ Push(reg); } + void VirtualFrame::EmitMultiPush(RegList regs) { - UNIMPLEMENTED_MIPS(); + ASSERT(stack_pointer_ == element_count() - 1); + for (int16_t i = kNumRegisters; i > 0; i--) { + if ((regs & (1 << i)) != 0) { + elements_.Add(FrameElement::MemoryElement(NumberInfo::Unknown())); + stack_pointer_++; + } + } + __ MultiPush(regs); } + void VirtualFrame::EmitArgumentSlots(RegList reglist) { UNIMPLEMENTED_MIPS(); } diff --git a/deps/v8/src/mips/virtual-frame-mips.h b/deps/v8/src/mips/virtual-frame-mips.h index 77c795c9f9..b32e2aeedc 100644 --- a/deps/v8/src/mips/virtual-frame-mips.h +++ b/deps/v8/src/mips/virtual-frame-mips.h @@ -39,18 +39,18 @@ namespace internal { // ------------------------------------------------------------------------- // Virtual frames // -// The virtual frame is an abstraction of the physical stack frame. It +// The virtual frame is an abstraction of the physical stack frame. It // encapsulates the parameters, frame-allocated locals, and the expression -// stack. It supports push/pop operations on the expression stack, as well +// stack. It supports push/pop operations on the expression stack, as well // as random access to the expression stack elements, locals, and // parameters. class VirtualFrame : public ZoneObject { public: // A utility class to introduce a scope where the virtual frame is - // expected to remain spilled. The constructor spills the code + // expected to remain spilled. The constructor spills the code // generator's current frame, but no attempt is made to require it - // to stay spilled. It is intended as documentation while the code + // to stay spilled. It is intended as documentation while the code // generator is being transformed. class SpilledScope BASE_EMBEDDED { public: @@ -105,12 +105,12 @@ class VirtualFrame : public ZoneObject { } // Add extra in-memory elements to the top of the frame to match an actual - // frame (eg, the frame after an exception handler is pushed). No code is + // frame (eg, the frame after an exception handler is pushed). No code is // emitted. void Adjust(int count); // Forget elements from the top of the frame to match an actual frame (eg, - // the frame after a runtime call). No code is emitted. + // the frame after a runtime call). No code is emitted. 
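The VirtualFrame::Drop change above keeps the element list and the machine stack pointer in step: elements above stack_pointer_ are purely virtual and cost nothing to drop, while the in-memory remainder needs an actual sp adjustment. A standalone JavaScript sketch of that bookkeeping (simplified, with register freeing stubbed out and hypothetical names, not the V8 code itself):

// Minimal model of the Drop bookkeeping; on MIPS every pushed element is synced.
function makeFrame() {
  return { elements: [], stackPointer: -1 };  // stackPointer indexes the last element in memory
}
function emitPush(frame, name) {
  frame.elements.push(name);
  frame.stackPointer++;               // EmitPush writes to memory, so the element is synced
}
function drop(frame, count) {
  var numVirtual = (frame.elements.length - 1) - frame.stackPointer;
  if (numVirtual < count) {
    var numDropped = count - numVirtual;
    frame.stackPointer -= numDropped; // would emit: addiu(sp, sp, numDropped * kPointerSize)
    console.log('emit sp += ' + numDropped + ' words');
  }
  frame.elements.length -= count;     // forget the dropped elements
}
var f = makeFrame();
emitPush(f, 'a'); emitPush(f, 'b'); emitPush(f, 'c');
drop(f, 2);                           // emits an sp adjustment for the two in-memory elements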
void Forget(int count) { ASSERT(count >= 0); ASSERT(stack_pointer_ == element_count() - 1); @@ -121,7 +121,7 @@ class VirtualFrame : public ZoneObject { } // Forget count elements from the top of the frame and adjust the stack - // pointer downward. This is used, for example, before merging frames at + // pointer downward. This is used, for example, before merging frames at // break, continue, and return targets. void ForgetElements(int count); @@ -133,24 +133,24 @@ class VirtualFrame : public ZoneObject { if (is_used(reg)) SpillElementAt(register_location(reg)); } - // Spill all occurrences of an arbitrary register if possible. Return the + // Spill all occurrences of an arbitrary register if possible. Return the // register spilled or no_reg if it was not possible to free any register // (ie, they all have frame-external references). Register SpillAnyRegister(); // Prepare this virtual frame for merging to an expected frame by // performing some state changes that do not require generating - // code. It is guaranteed that no code will be generated. + // code. It is guaranteed that no code will be generated. void PrepareMergeTo(VirtualFrame* expected); // Make this virtual frame have a state identical to an expected virtual - // frame. As a side effect, code may be emitted to make this frame match + // frame. As a side effect, code may be emitted to make this frame match // the expected one. void MergeTo(VirtualFrame* expected); - // Detach a frame from its code generator, perhaps temporarily. This + // Detach a frame from its code generator, perhaps temporarily. This // tells the register allocator that it is free to use frame-internal - // registers. Used when the code generator's frame is switched from this + // registers. Used when the code generator's frame is switched from this // one to NULL by an unconditional jump. void DetachFromCodeGenerator() { RegisterAllocator* cgen_allocator = cgen()->allocator(); @@ -159,7 +159,7 @@ class VirtualFrame : public ZoneObject { } } - // (Re)attach a frame to its code generator. This informs the register + // (Re)attach a frame to its code generator. This informs the register // allocator that the frame-internal register references are active again. // Used when a code generator's frame is switched from NULL to this one by // binding a label. @@ -170,17 +170,17 @@ class VirtualFrame : public ZoneObject { } } - // Emit code for the physical JS entry and exit frame sequences. After + // Emit code for the physical JS entry and exit frame sequences. After // calling Enter, the virtual frame is ready for use; and after calling - // Exit it should not be used. Note that Enter does not allocate space in + // Exit it should not be used. Note that Enter does not allocate space in // the physical frame for storing frame-allocated locals. void Enter(); void Exit(); // Prepare for returning from the frame by spilling locals and - // dropping all non-locals elements in the virtual frame. This + // dropping all non-locals elements in the virtual frame. This // avoids generating unnecessary merge code when jumping to the - // shared return site. Emits code for spills. + // shared return site. Emits code for spills. void PrepareForReturn(); // Allocate and initialize the frame-allocated locals. @@ -194,11 +194,11 @@ class VirtualFrame : public ZoneObject { return MemOperand(sp, index * kPointerSize); } - // Random-access store to a frame-top relative frame element. The result + // Random-access store to a frame-top relative frame element. 
The result // becomes owned by the frame and is invalidated. void SetElementAt(int index, Result* value); - // Set a frame element to a constant. The index is frame-top relative. + // Set a frame element to a constant. The index is frame-top relative. void SetElementAt(int index, Handle<Object> value) { Result temp(value); SetElementAt(index, &temp); @@ -221,13 +221,13 @@ class VirtualFrame : public ZoneObject { } // Push the value of a local frame slot on top of the frame and invalidate - // the local slot. The slot should be written to before trying to read + // the local slot. The slot should be written to before trying to read // from it again. void TakeLocalAt(int index) { TakeFrameSlotAt(local0_index() + index); } - // Store the top value on the virtual frame into a local frame slot. The + // Store the top value on the virtual frame into a local frame slot. The // value is left in place on top of the frame. void StoreToLocalAt(int index) { StoreToFrameSlotAt(local0_index() + index); @@ -267,7 +267,7 @@ class VirtualFrame : public ZoneObject { } // Push the value of a paramter frame slot on top of the frame and - // invalidate the parameter slot. The slot should be written to before + // invalidate the parameter slot. The slot should be written to before // trying to read from it again. void TakeParameterAt(int index) { TakeFrameSlotAt(param0_index() + index); @@ -292,12 +292,8 @@ class VirtualFrame : public ZoneObject { RawCallStub(stub); } - // Call stub that expects its argument in r0. The argument is given - // as a result which must be the register r0. void CallStub(CodeStub* stub, Result* arg); - // Call stub that expects its arguments in r1 and r0. The arguments - // are given as results which must be the appropriate registers. void CallStub(CodeStub* stub, Result* arg0, Result* arg1); // Call runtime given the number of arguments expected on (and @@ -317,7 +313,7 @@ class VirtualFrame : public ZoneObject { int arg_count); // Call into an IC stub given the number of arguments it removes - // from the stack. Register arguments are passed as results and + // from the stack. Register arguments are passed as results and // consumed by the call. void CallCodeObject(Handle<Code> ic, RelocInfo::Mode rmode, @@ -333,8 +329,8 @@ class VirtualFrame : public ZoneObject { int dropped_args, bool set_auto_args_slots = false); - // Drop a number of elements from the top of the expression stack. May - // emit code to affect the physical frame. Does not clobber any registers + // Drop a number of elements from the top of the expression stack. May + // emit code to affect the physical frame. Does not clobber any registers // excepting possibly the stack pointer. void Drop(int count); // Similar to VirtualFrame::Drop but we don't modify the actual stack. @@ -348,7 +344,7 @@ class VirtualFrame : public ZoneObject { // Duplicate the top element of the frame. void Dup() { PushFrameSlotAt(element_count() - 1); } - // Pop an element from the top of the expression stack. Returns a + // Pop an element from the top of the expression stack. Returns a // Result, which may be a constant or a register. Result Pop(); @@ -356,15 +352,15 @@ class VirtualFrame : public ZoneObject { // emit a corresponding pop instruction. 
void EmitPop(Register reg); // Same but for multiple registers - void EmitMultiPop(RegList regs); // higher indexed registers popped first - void EmitMultiPopReversed(RegList regs); // lower first + void EmitMultiPop(RegList regs); + void EmitMultiPopReversed(RegList regs); // Push an element on top of the expression stack and emit a // corresponding push instruction. void EmitPush(Register reg); // Same but for multiple registers. - void EmitMultiPush(RegList regs); // lower indexed registers are pushed first - void EmitMultiPushReversed(RegList regs); // higher first + void EmitMultiPush(RegList regs); + void EmitMultiPushReversed(RegList regs); // Push an element on the virtual frame. inline void Push(Register reg, NumberInfo info = NumberInfo::Unknown()); @@ -384,7 +380,7 @@ class VirtualFrame : public ZoneObject { // Nip removes zero or more elements from immediately below the top // of the frame, leaving the previous top-of-frame value on top of - // the frame. Nip(k) is equivalent to x = Pop(), Drop(k), Push(x). + // the frame. Nip(k) is equivalent to x = Pop(), Drop(k), Push(x). inline void Nip(int num_dropped); // This pushes 4 arguments slots on the stack and saves asked 'a' registers @@ -392,6 +388,7 @@ class VirtualFrame : public ZoneObject { void EmitArgumentSlots(RegList reglist); inline void SetTypeForLocalAt(int index, NumberInfo info); + inline void SetTypeForParamAt(int index, NumberInfo info); private: static const int kLocal0Offset = JavaScriptFrameConstants::kLocal0Offset; @@ -416,23 +413,23 @@ class VirtualFrame : public ZoneObject { int local_count() { return cgen()->scope()->num_stack_slots(); } // The index of the element that is at the processor's frame pointer - // (the fp register). The parameters, receiver, function, and context + // (the fp register). The parameters, receiver, function, and context // are below the frame pointer. int frame_pointer() { return parameter_count() + 3; } - // The index of the first parameter. The receiver lies below the first + // The index of the first parameter. The receiver lies below the first // parameter. int param0_index() { return 1; } - // The index of the context slot in the frame. It is immediately + // The index of the context slot in the frame. It is immediately // below the frame pointer. int context_index() { return frame_pointer() - 1; } - // The index of the function slot in the frame. It is below the frame + // The index of the function slot in the frame. It is below the frame // pointer and context slot. int function_index() { return frame_pointer() - 2; } - // The index of the first local. Between the frame pointer and the + // The index of the first local. Between the frame pointer and the // locals lies the return address. int local0_index() { return frame_pointer() + 2; } @@ -447,7 +444,7 @@ class VirtualFrame : public ZoneObject { return (frame_pointer() - index) * kPointerSize; } - // Record an occurrence of a register in the virtual frame. This has the + // Record an occurrence of a register in the virtual frame. This has the // effect of incrementing the register's external reference count and // of updating the index of the register's location in the frame. void Use(Register reg, int index) { @@ -456,7 +453,7 @@ class VirtualFrame : public ZoneObject { cgen()->allocator()->Use(reg); } - // Record that a register reference has been dropped from the frame. This + // Record that a register reference has been dropped from the frame. 
This // decrements the register's external reference count and invalidates the // index of the register's location in the frame. void Unuse(Register reg) { @@ -470,7 +467,7 @@ class VirtualFrame : public ZoneObject { // constant. void SpillElementAt(int index); - // Sync the element at a particular index. If it is a register or + // Sync the element at a particular index. If it is a register or // constant that disagrees with the value on the stack, write it to memory. // Keep the element type as register or constant, and clear the dirty bit. void SyncElementAt(int index); @@ -497,7 +494,7 @@ class VirtualFrame : public ZoneObject { void StoreToFrameSlotAt(int index); // Spill all elements in registers. Spill the top spilled_args elements - // on the frame. Sync all other frame elements. + // on the frame. Sync all other frame elements. // Then drop dropped_args elements from the virtual frame, to match // the effect of an upcoming call that will drop them from the stack. void PrepareForCall(int spilled_args, int dropped_args); @@ -518,14 +515,14 @@ class VirtualFrame : public ZoneObject { // Make the memory-to-register and constant-to-register moves // needed to make this frame equal the expected frame. // Called after all register-to-memory and register-to-register - // moves have been made. After this function returns, the frames + // moves have been made. After this function returns, the frames // should be equal. void MergeMoveMemoryToRegisters(VirtualFrame* expected); // Invalidates a frame slot (puts an invalid frame element in it). // Copies on the frame are correctly handled, and if this slot was // the backing store of copies, the index of the new backing store - // is returned. Otherwise, returns kIllegalIndex. + // is returned. Otherwise, returns kIllegalIndex. // Register counts are correctly updated. int InvalidateFrameSlotAt(int index); diff --git a/deps/v8/src/mirror-debugger.js b/deps/v8/src/mirror-debugger.js new file mode 100644 index 0000000000..dfe297b1b7 --- /dev/null +++ b/deps/v8/src/mirror-debugger.js @@ -0,0 +1,2362 @@ +// Copyright 2006-2008 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Handle id counters. +var next_handle_ = 0; +var next_transient_handle_ = -1; + +// Mirror cache. +var mirror_cache_ = []; + + +/** + * Clear the mirror handle cache. + */ +function ClearMirrorCache() { + next_handle_ = 0; + mirror_cache_ = []; +} + + +/** + * Returns the mirror for a specified value or object. + * + * @param {value or Object} value the value or object to retreive the mirror for + * @param {boolean} transient indicate whether this object is transient and + * should not be added to the mirror cache. The default is not transient. + * @returns {Mirror} the mirror reflects the passed value or object + */ +function MakeMirror(value, opt_transient) { + var mirror; + + // Look for non transient mirrors in the mirror cache. + if (!opt_transient) { + for (id in mirror_cache_) { + mirror = mirror_cache_[id]; + if (mirror.value() === value) { + return mirror; + } + // Special check for NaN as NaN == NaN is false. + if (mirror.isNumber() && isNaN(mirror.value()) && + typeof value == 'number' && isNaN(value)) { + return mirror; + } + } + } + + if (IS_UNDEFINED(value)) { + mirror = new UndefinedMirror(); + } else if (IS_NULL(value)) { + mirror = new NullMirror(); + } else if (IS_BOOLEAN(value)) { + mirror = new BooleanMirror(value); + } else if (IS_NUMBER(value)) { + mirror = new NumberMirror(value); + } else if (IS_STRING(value)) { + mirror = new StringMirror(value); + } else if (IS_ARRAY(value)) { + mirror = new ArrayMirror(value); + } else if (IS_DATE(value)) { + mirror = new DateMirror(value); + } else if (IS_FUNCTION(value)) { + mirror = new FunctionMirror(value); + } else if (IS_REGEXP(value)) { + mirror = new RegExpMirror(value); + } else if (IS_ERROR(value)) { + mirror = new ErrorMirror(value); + } else if (IS_SCRIPT(value)) { + mirror = new ScriptMirror(value); + } else { + mirror = new ObjectMirror(value, OBJECT_TYPE, opt_transient); + } + + mirror_cache_[mirror.handle()] = mirror; + return mirror; +} + + +/** + * Returns the mirror for a specified mirror handle. + * + * @param {number} handle the handle to find the mirror for + * @returns {Mirror or undefiend} the mirror with the requested handle or + * undefined if no mirror with the requested handle was found + */ +function LookupMirror(handle) { + return mirror_cache_[handle]; +} + + +/** + * Returns the mirror for the undefined value. + * + * @returns {Mirror} the mirror reflects the undefined value + */ +function GetUndefinedMirror() { + return MakeMirror(void 0); +} + + +/** + * Inherit the prototype methods from one constructor into another. + * + * The Function.prototype.inherits from lang.js rewritten as a standalone + * function (not on Function.prototype). NOTE: If this file is to be loaded + * during bootstrapping this function needs to be revritten using some native + * functions as prototype setup using normal JavaScript does not work as + * expected during bootstrapping (see mirror.js in r114903). 
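MakeMirror above reuses cached mirrors by comparing values with ===, which can never find a cached NaN mirror because NaN === NaN is false; hence the explicit isNaN branch. A small standalone illustration of that lookup (plain JavaScript, not the mirror code itself):

// Why the explicit NaN check is needed: identity comparison alone misses NaN.
const cache = [];                       // handle -> { value }
function lookupOrAdd(value) {
  for (const id in cache) {
    const cached = cache[id];
    if (cached.value === value) return cached;                      // normal case
    if (typeof cached.value === 'number' && isNaN(cached.value) &&
        typeof value === 'number' && isNaN(value)) {
      return cached;                                                // NaN never equals itself
    }
  }
  const entry = { value: value };
  cache.push(entry);
  return entry;
}
console.log(lookupOrAdd(NaN) === lookupOrAdd(NaN));  // true, thanks to the isNaN branch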
+ * + * @param {function} ctor Constructor function which needs to inherit the + * prototype + * @param {function} superCtor Constructor function to inherit prototype from + */ +function inherits(ctor, superCtor) { + var tempCtor = function(){}; + tempCtor.prototype = superCtor.prototype; + ctor.super_ = superCtor.prototype; + ctor.prototype = new tempCtor(); + ctor.prototype.constructor = ctor; +} + + +// Type names of the different mirrors. +const UNDEFINED_TYPE = 'undefined'; +const NULL_TYPE = 'null'; +const BOOLEAN_TYPE = 'boolean'; +const NUMBER_TYPE = 'number'; +const STRING_TYPE = 'string'; +const OBJECT_TYPE = 'object'; +const FUNCTION_TYPE = 'function'; +const REGEXP_TYPE = 'regexp'; +const ERROR_TYPE = 'error'; +const PROPERTY_TYPE = 'property'; +const FRAME_TYPE = 'frame'; +const SCRIPT_TYPE = 'script'; +const CONTEXT_TYPE = 'context'; +const SCOPE_TYPE = 'scope'; + +// Maximum length when sending strings through the JSON protocol. +const kMaxProtocolStringLength = 80; + +// Different kind of properties. +PropertyKind = {}; +PropertyKind.Named = 1; +PropertyKind.Indexed = 2; + + +// A copy of the PropertyType enum from global.h +PropertyType = {}; +PropertyType.Normal = 0; +PropertyType.Field = 1; +PropertyType.ConstantFunction = 2; +PropertyType.Callbacks = 3; +PropertyType.Interceptor = 4; +PropertyType.MapTransition = 5; +PropertyType.ConstantTransition = 6; +PropertyType.NullDescriptor = 7; + + +// Different attributes for a property. +PropertyAttribute = {}; +PropertyAttribute.None = NONE; +PropertyAttribute.ReadOnly = READ_ONLY; +PropertyAttribute.DontEnum = DONT_ENUM; +PropertyAttribute.DontDelete = DONT_DELETE; + + +// A copy of the scope types from runtime.cc. +ScopeType = { Global: 0, + Local: 1, + With: 2, + Closure: 3, + Catch: 4 }; + + +// Mirror hierarchy: +// - Mirror +// - ValueMirror +// - UndefinedMirror +// - NullMirror +// - NumberMirror +// - StringMirror +// - ObjectMirror +// - FunctionMirror +// - UnresolvedFunctionMirror +// - ArrayMirror +// - DateMirror +// - RegExpMirror +// - ErrorMirror +// - PropertyMirror +// - FrameMirror +// - ScriptMirror + + +/** + * Base class for all mirror objects. + * @param {string} type The type of the mirror + * @constructor + */ +function Mirror(type) { + this.type_ = type; +}; + + +Mirror.prototype.type = function() { + return this.type_; +}; + + +/** + * Check whether the mirror reflects a value. + * @returns {boolean} True if the mirror reflects a value. + */ +Mirror.prototype.isValue = function() { + return this instanceof ValueMirror; +} + + +/** + * Check whether the mirror reflects the undefined value. + * @returns {boolean} True if the mirror reflects the undefined value. + */ +Mirror.prototype.isUndefined = function() { + return this instanceof UndefinedMirror; +} + + +/** + * Check whether the mirror reflects the null value. + * @returns {boolean} True if the mirror reflects the null value + */ +Mirror.prototype.isNull = function() { + return this instanceof NullMirror; +} + + +/** + * Check whether the mirror reflects a boolean value. + * @returns {boolean} True if the mirror reflects a boolean value + */ +Mirror.prototype.isBoolean = function() { + return this instanceof BooleanMirror; +} + + +/** + * Check whether the mirror reflects a number value. + * @returns {boolean} True if the mirror reflects a number value + */ +Mirror.prototype.isNumber = function() { + return this instanceof NumberMirror; +} + + +/** + * Check whether the mirror reflects a string value. 
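The inherits() helper above wires up prototype chains the same way the mirror hierarchy uses it (ValueMirror inherits Mirror, ObjectMirror inherits ValueMirror, and so on). A minimal standalone usage sketch with made-up constructor names:

// Standalone copy of the helper plus a tiny usage example (illustrative only).
function inherits(ctor, superCtor) {
  var tempCtor = function() {};
  tempCtor.prototype = superCtor.prototype;
  ctor.super_ = superCtor.prototype;
  ctor.prototype = new tempCtor();
  ctor.prototype.constructor = ctor;
}
function Base(type) { this.type_ = type; }
Base.prototype.type = function() { return this.type_; };
function Derived(value) { Base.call(this, 'derived'); this.value_ = value; }
inherits(Derived, Base);
var d = new Derived(42);
console.log(d instanceof Base, d.type(), d.constructor === Derived);  // true 'derived' true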
+ * @returns {boolean} True if the mirror reflects a string value + */ +Mirror.prototype.isString = function() { + return this instanceof StringMirror; +} + + +/** + * Check whether the mirror reflects an object. + * @returns {boolean} True if the mirror reflects an object + */ +Mirror.prototype.isObject = function() { + return this instanceof ObjectMirror; +} + + +/** + * Check whether the mirror reflects a function. + * @returns {boolean} True if the mirror reflects a function + */ +Mirror.prototype.isFunction = function() { + return this instanceof FunctionMirror; +} + + +/** + * Check whether the mirror reflects an unresolved function. + * @returns {boolean} True if the mirror reflects an unresolved function + */ +Mirror.prototype.isUnresolvedFunction = function() { + return this instanceof UnresolvedFunctionMirror; +} + + +/** + * Check whether the mirror reflects an array. + * @returns {boolean} True if the mirror reflects an array + */ +Mirror.prototype.isArray = function() { + return this instanceof ArrayMirror; +} + + +/** + * Check whether the mirror reflects a date. + * @returns {boolean} True if the mirror reflects a date + */ +Mirror.prototype.isDate = function() { + return this instanceof DateMirror; +} + + +/** + * Check whether the mirror reflects a regular expression. + * @returns {boolean} True if the mirror reflects a regular expression + */ +Mirror.prototype.isRegExp = function() { + return this instanceof RegExpMirror; +} + + +/** + * Check whether the mirror reflects an error. + * @returns {boolean} True if the mirror reflects an error + */ +Mirror.prototype.isError = function() { + return this instanceof ErrorMirror; +} + + +/** + * Check whether the mirror reflects a property. + * @returns {boolean} True if the mirror reflects a property + */ +Mirror.prototype.isProperty = function() { + return this instanceof PropertyMirror; +} + + +/** + * Check whether the mirror reflects a stack frame. + * @returns {boolean} True if the mirror reflects a stack frame + */ +Mirror.prototype.isFrame = function() { + return this instanceof FrameMirror; +} + + +/** + * Check whether the mirror reflects a script. + * @returns {boolean} True if the mirror reflects a script + */ +Mirror.prototype.isScript = function() { + return this instanceof ScriptMirror; +} + + +/** + * Check whether the mirror reflects a context. + * @returns {boolean} True if the mirror reflects a context + */ +Mirror.prototype.isContext = function() { + return this instanceof ContextMirror; +} + + +/** + * Check whether the mirror reflects a scope. + * @returns {boolean} True if the mirror reflects a scope + */ +Mirror.prototype.isScope = function() { + return this instanceof ScopeMirror; +} + + +/** + * Allocate a handle id for this object. + */ +Mirror.prototype.allocateHandle_ = function() { + this.handle_ = next_handle_++; +} + + +/** + * Allocate a transient handle id for this object. Transient handles are + * negative. + */ +Mirror.prototype.allocateTransientHandle_ = function() { + this.handle_ = next_transient_handle_--; +} + + +Mirror.prototype.toText = function() { + // Simpel to text which is used when on specialization in subclass. + return "#<" + builtins.GetInstanceName(this.constructor.name) + ">"; +} + + +/** + * Base class for all value mirror objects. 
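The two handle counters introduced above give ordinary mirrors non-negative ids and transient mirrors negative ids, so a protocol client can tell them apart. A minimal standalone sketch of that scheme (a simplification of allocateHandle_ / allocateTransientHandle_):

// Ordinary handles count up from 0, transient handles count down from -1.
var nextHandle = 0;
var nextTransientHandle = -1;
function allocateHandle(transient) {
  return transient ? nextTransientHandle-- : nextHandle++;
}
console.log(allocateHandle(false), allocateHandle(false));  // 0 1
console.log(allocateHandle(true), allocateHandle(true));    // -1 -2
console.log(allocateHandle(true) < 0);                      // transient handles are negative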
+ * @param {string} type The type of the mirror + * @param {value} value The value reflected by this mirror + * @param {boolean} transient indicate whether this object is transient with a + * transient handle + * @constructor + * @extends Mirror + */ +function ValueMirror(type, value, transient) { + Mirror.call(this, type); + this.value_ = value; + if (!transient) { + this.allocateHandle_(); + } else { + this.allocateTransientHandle_(); + } +} +inherits(ValueMirror, Mirror); + + +Mirror.prototype.handle = function() { + return this.handle_; +}; + + +/** + * Check whether this is a primitive value. + * @return {boolean} True if the mirror reflects a primitive value + */ +ValueMirror.prototype.isPrimitive = function() { + var type = this.type(); + return type === 'undefined' || + type === 'null' || + type === 'boolean' || + type === 'number' || + type === 'string'; +}; + + +/** + * Get the actual value reflected by this mirror. + * @return {value} The value reflected by this mirror + */ +ValueMirror.prototype.value = function() { + return this.value_; +}; + + +/** + * Mirror object for Undefined. + * @constructor + * @extends ValueMirror + */ +function UndefinedMirror() { + ValueMirror.call(this, UNDEFINED_TYPE, void 0); +} +inherits(UndefinedMirror, ValueMirror); + + +UndefinedMirror.prototype.toText = function() { + return 'undefined'; +} + + +/** + * Mirror object for null. + * @constructor + * @extends ValueMirror + */ +function NullMirror() { + ValueMirror.call(this, NULL_TYPE, null); +} +inherits(NullMirror, ValueMirror); + + +NullMirror.prototype.toText = function() { + return 'null'; +} + + +/** + * Mirror object for boolean values. + * @param {boolean} value The boolean value reflected by this mirror + * @constructor + * @extends ValueMirror + */ +function BooleanMirror(value) { + ValueMirror.call(this, BOOLEAN_TYPE, value); +} +inherits(BooleanMirror, ValueMirror); + + +BooleanMirror.prototype.toText = function() { + return this.value_ ? 'true' : 'false'; +} + + +/** + * Mirror object for number values. + * @param {number} value The number value reflected by this mirror + * @constructor + * @extends ValueMirror + */ +function NumberMirror(value) { + ValueMirror.call(this, NUMBER_TYPE, value); +} +inherits(NumberMirror, ValueMirror); + + +NumberMirror.prototype.toText = function() { + return %NumberToString(this.value_); +} + + +/** + * Mirror object for string values. + * @param {string} value The string value reflected by this mirror + * @constructor + * @extends ValueMirror + */ +function StringMirror(value) { + ValueMirror.call(this, STRING_TYPE, value); +} +inherits(StringMirror, ValueMirror); + + +StringMirror.prototype.length = function() { + return this.value_.length; +}; + +StringMirror.prototype.getTruncatedValue = function(maxLength) { + if (maxLength != -1 && this.length() > maxLength) { + return this.value_.substring(0, maxLength) + + '... (length: ' + this.length() + ')'; + } + return this.value_; +} + +StringMirror.prototype.toText = function() { + return this.getTruncatedValue(kMaxProtocolStringLength); +} + + +/** + * Mirror object for objects. 
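StringMirror.getTruncatedValue above caps long strings for the JSON protocol and appends the original length. A quick standalone restatement of that contract (maxLength of -1 means "no limit"):

// Behaves like getTruncatedValue on a plain string value.
function getTruncatedValue(value, maxLength) {
  if (maxLength != -1 && value.length > maxLength) {
    return value.substring(0, maxLength) + '... (length: ' + value.length + ')';
  }
  return value;
}
console.log(getTruncatedValue('abcdef', 3));   // 'abc... (length: 6)'
console.log(getTruncatedValue('abcdef', -1));  // 'abcdef'
console.log(getTruncatedValue('ab', 80));      // 'ab' (shorter than kMaxProtocolStringLength)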
+ * @param {object} value The object reflected by this mirror + * @param {boolean} transient indicate whether this object is transient with a + * transient handle + * @constructor + * @extends ValueMirror + */ +function ObjectMirror(value, type, transient) { + ValueMirror.call(this, type || OBJECT_TYPE, value, transient); +} +inherits(ObjectMirror, ValueMirror); + + +ObjectMirror.prototype.className = function() { + return %_ClassOf(this.value_); +}; + + +ObjectMirror.prototype.constructorFunction = function() { + return MakeMirror(%DebugGetProperty(this.value_, 'constructor')); +}; + + +ObjectMirror.prototype.prototypeObject = function() { + return MakeMirror(%DebugGetProperty(this.value_, 'prototype')); +}; + + +ObjectMirror.prototype.protoObject = function() { + return MakeMirror(%DebugGetPrototype(this.value_)); +}; + + +ObjectMirror.prototype.hasNamedInterceptor = function() { + // Get information on interceptors for this object. + var x = %GetInterceptorInfo(this.value_); + return (x & 2) != 0; +}; + + +ObjectMirror.prototype.hasIndexedInterceptor = function() { + // Get information on interceptors for this object. + var x = %GetInterceptorInfo(this.value_); + return (x & 1) != 0; +}; + + +/** + * Return the property names for this object. + * @param {number} kind Indicate whether named, indexed or both kinds of + * properties are requested + * @param {number} limit Limit the number of names returend to the specified + value + * @return {Array} Property names for this object + */ +ObjectMirror.prototype.propertyNames = function(kind, limit) { + // Find kind and limit and allocate array for the result + kind = kind || PropertyKind.Named | PropertyKind.Indexed; + + var propertyNames; + var elementNames; + var total = 0; + + // Find all the named properties. + if (kind & PropertyKind.Named) { + // Get the local property names. + propertyNames = %GetLocalPropertyNames(this.value_); + total += propertyNames.length; + + // Get names for named interceptor properties if any. + if (this.hasNamedInterceptor() && (kind & PropertyKind.Named)) { + var namedInterceptorNames = + %GetNamedInterceptorPropertyNames(this.value_); + if (namedInterceptorNames) { + propertyNames = propertyNames.concat(namedInterceptorNames); + total += namedInterceptorNames.length; + } + } + } + + // Find all the indexed properties. + if (kind & PropertyKind.Indexed) { + // Get the local element names. + elementNames = %GetLocalElementNames(this.value_); + total += elementNames.length; + + // Get names for indexed interceptor properties. + if (this.hasIndexedInterceptor() && (kind & PropertyKind.Indexed)) { + var indexedInterceptorNames = + %GetIndexedInterceptorElementNames(this.value_); + if (indexedInterceptorNames) { + elementNames = elementNames.concat(indexedInterceptorNames); + total += indexedInterceptorNames.length; + } + } + } + limit = Math.min(limit || total, total); + + var names = new Array(limit); + var index = 0; + + // Copy names for named properties. + if (kind & PropertyKind.Named) { + for (var i = 0; index < limit && i < propertyNames.length; i++) { + names[index++] = propertyNames[i]; + } + } + + // Copy names for indexed properties. + if (kind & PropertyKind.Indexed) { + for (var i = 0; index < limit && i < elementNames.length; i++) { + names[index++] = elementNames[i]; + } + } + + return names; +}; + + +/** + * Return the properties for this object as an array of PropertyMirror objects. 
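propertyNames above treats its kind argument as a bit mask (defaulting to PropertyKind.Named | PropertyKind.Indexed) and clamps the result to limit. A simplified standalone sketch, with plain arrays standing in for the %GetLocalPropertyNames / %GetLocalElementNames natives and interceptor handling omitted:

// Simplified model of the kind mask and limit handling only.
var PropertyKind = { Named: 1, Indexed: 2 };
function propertyNames(named, indexed, kind, limit) {
  kind = kind || (PropertyKind.Named | PropertyKind.Indexed);
  var names = [];
  if (kind & PropertyKind.Named) names = names.concat(named);
  if (kind & PropertyKind.Indexed) names = names.concat(indexed);
  limit = Math.min(limit || names.length, names.length);
  return names.slice(0, limit);
}
console.log(propertyNames(['x', 'y'], ['0', '1'], PropertyKind.Named));  // ['x', 'y']
console.log(propertyNames(['x', 'y'], ['0', '1'], undefined, 3));        // ['x', 'y', '0']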
+ * @param {number} kind Indicate whether named, indexed or both kinds of + * properties are requested + * @param {number} limit Limit the number of properties returend to the + specified value + * @return {Array} Property mirrors for this object + */ +ObjectMirror.prototype.properties = function(kind, limit) { + var names = this.propertyNames(kind, limit); + var properties = new Array(names.length); + for (var i = 0; i < names.length; i++) { + properties[i] = this.property(names[i]); + } + + return properties; +}; + + +ObjectMirror.prototype.property = function(name) { + var details = %DebugGetPropertyDetails(this.value_, %ToString(name)); + if (details) { + return new PropertyMirror(this, name, details); + } + + // Nothing found. + return GetUndefinedMirror(); +}; + + + +/** + * Try to find a property from its value. + * @param {Mirror} value The property value to look for + * @return {PropertyMirror} The property with the specified value. If no + * property was found with the specified value UndefinedMirror is returned + */ +ObjectMirror.prototype.lookupProperty = function(value) { + var properties = this.properties(); + + // Look for property value in properties. + for (var i = 0; i < properties.length; i++) { + + // Skip properties which are defined through assessors. + var property = properties[i]; + if (property.propertyType() != PropertyType.Callbacks) { + if (%_ObjectEquals(property.value_, value.value_)) { + return property; + } + } + } + + // Nothing found. + return GetUndefinedMirror(); +}; + + +/** + * Returns objects which has direct references to this object + * @param {number} opt_max_objects Optional parameter specifying the maximum + * number of referencing objects to return. + * @return {Array} The objects which has direct references to this object. + */ +ObjectMirror.prototype.referencedBy = function(opt_max_objects) { + // Find all objects with direct references to this object. + var result = %DebugReferencedBy(this.value_, + Mirror.prototype, opt_max_objects || 0); + + // Make mirrors for all the references found. + for (var i = 0; i < result.length; i++) { + result[i] = MakeMirror(result[i]); + } + + return result; +}; + + +ObjectMirror.prototype.toText = function() { + var name; + var ctor = this.constructorFunction(); + if (!ctor.isFunction()) { + name = this.className(); + } else { + name = ctor.name(); + if (!name) { + name = this.className(); + } + } + return '#<' + builtins.GetInstanceName(name) + '>'; +}; + + +/** + * Mirror object for functions. + * @param {function} value The function object reflected by this mirror. + * @constructor + * @extends ObjectMirror + */ +function FunctionMirror(value) { + ObjectMirror.call(this, value, FUNCTION_TYPE); + this.resolved_ = true; +} +inherits(FunctionMirror, ObjectMirror); + + +/** + * Returns whether the function is resolved. + * @return {boolean} True if the function is resolved. Unresolved functions can + * only originate as functions from stack frames + */ +FunctionMirror.prototype.resolved = function() { + return this.resolved_; +}; + + +/** + * Returns the name of the function. + * @return {string} Name of the function + */ +FunctionMirror.prototype.name = function() { + return %FunctionGetName(this.value_); +}; + + +/** + * Returns the inferred name of the function. + * @return {string} Name of the function + */ +FunctionMirror.prototype.inferredName = function() { + return %FunctionGetInferredName(this.value_); +}; + + +/** + * Returns the source code for the function. 
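lookupProperty above scans an object's properties for one whose value is the given mirror, skipping accessor (Callbacks) properties; FrameMirror.invocationText later relies on this to name the invoked function. A simplified stand-in using plain objects and property descriptors instead of mirrors and %_ObjectEquals:

// Find the name under which a value is reachable, ignoring accessor properties.
function lookupPropertyName(obj, value) {
  for (var name in obj) {
    var desc = Object.getOwnPropertyDescriptor(obj, name);
    if (desc && desc.get === undefined && desc.set === undefined && obj[name] === value) {
      return name;
    }
  }
  return undefined;  // the real code returns GetUndefinedMirror()
}
var receiver = { greet: function() {} };
console.log(lookupPropertyName(receiver, receiver.greet));  // 'greet'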
+ * @return {string or undefined} The source code for the function. If the + * function is not resolved undefined will be returned. + */ +FunctionMirror.prototype.source = function() { + // Return source if function is resolved. Otherwise just fall through to + // return undefined. + if (this.resolved()) { + return builtins.FunctionSourceString(this.value_); + } +}; + + +/** + * Returns the script object for the function. + * @return {ScriptMirror or undefined} Script object for the function or + * undefined if the function has no script + */ +FunctionMirror.prototype.script = function() { + // Return script if function is resolved. Otherwise just fall through + // to return undefined. + if (this.resolved()) { + var script = %FunctionGetScript(this.value_); + if (script) { + return MakeMirror(script); + } + } +}; + + +/** + * Returns the script source position for the function. Only makes sense + * for functions which has a script defined. + * @return {Number or undefined} in-script position for the function + */ +FunctionMirror.prototype.sourcePosition_ = function() { + // Return script if function is resolved. Otherwise just fall through + // to return undefined. + if (this.resolved()) { + return %FunctionGetScriptSourcePosition(this.value_); + } +}; + + +/** + * Returns the script source location object for the function. Only makes sense + * for functions which has a script defined. + * @return {Location or undefined} in-script location for the function begin + */ +FunctionMirror.prototype.sourceLocation = function() { + if (this.resolved() && this.script()) { + return this.script().locationFromPosition(this.sourcePosition_(), + true); + } +}; + + +/** + * Returns objects constructed by this function. + * @param {number} opt_max_instances Optional parameter specifying the maximum + * number of instances to return. + * @return {Array or undefined} The objects constructed by this function. + */ +FunctionMirror.prototype.constructedBy = function(opt_max_instances) { + if (this.resolved()) { + // Find all objects constructed from this function. + var result = %DebugConstructedBy(this.value_, opt_max_instances || 0); + + // Make mirrors for all the instances found. + for (var i = 0; i < result.length; i++) { + result[i] = MakeMirror(result[i]); + } + + return result; + } else { + return []; + } +}; + + +FunctionMirror.prototype.toText = function() { + return this.source(); +} + + +/** + * Mirror object for unresolved functions. + * @param {string} value The name for the unresolved function reflected by this + * mirror. + * @constructor + * @extends ObjectMirror + */ +function UnresolvedFunctionMirror(value) { + // Construct this using the ValueMirror as an unresolved function is not a + // real object but just a string. 
+ ValueMirror.call(this, FUNCTION_TYPE, value); + this.propertyCount_ = 0; + this.elementCount_ = 0; + this.resolved_ = false; +} +inherits(UnresolvedFunctionMirror, FunctionMirror); + + +UnresolvedFunctionMirror.prototype.className = function() { + return 'Function'; +}; + + +UnresolvedFunctionMirror.prototype.constructorFunction = function() { + return GetUndefinedMirror(); +}; + + +UnresolvedFunctionMirror.prototype.prototypeObject = function() { + return GetUndefinedMirror(); +}; + + +UnresolvedFunctionMirror.prototype.protoObject = function() { + return GetUndefinedMirror(); +}; + + +UnresolvedFunctionMirror.prototype.name = function() { + return this.value_; +}; + + +UnresolvedFunctionMirror.prototype.inferredName = function() { + return undefined; +}; + + +UnresolvedFunctionMirror.prototype.propertyNames = function(kind, limit) { + return []; +} + + +/** + * Mirror object for arrays. + * @param {Array} value The Array object reflected by this mirror + * @constructor + * @extends ObjectMirror + */ +function ArrayMirror(value) { + ObjectMirror.call(this, value); +} +inherits(ArrayMirror, ObjectMirror); + + +ArrayMirror.prototype.length = function() { + return this.value_.length; +}; + + +ArrayMirror.prototype.indexedPropertiesFromRange = function(opt_from_index, opt_to_index) { + var from_index = opt_from_index || 0; + var to_index = opt_to_index || this.length() - 1; + if (from_index > to_index) return new Array(); + var values = new Array(to_index - from_index + 1); + for (var i = from_index; i <= to_index; i++) { + var details = %DebugGetPropertyDetails(this.value_, %ToString(i)); + var value; + if (details) { + value = new PropertyMirror(this, i, details); + } else { + value = GetUndefinedMirror(); + } + values[i - from_index] = value; + } + return values; +} + + +/** + * Mirror object for dates. + * @param {Date} value The Date object reflected by this mirror + * @constructor + * @extends ObjectMirror + */ +function DateMirror(value) { + ObjectMirror.call(this, value); +} +inherits(DateMirror, ObjectMirror); + + +DateMirror.prototype.toText = function() { + var s = JSON.stringify(this.value_); + return s.substring(1, s.length - 1); // cut quotes +} + + +/** + * Mirror object for regular expressions. + * @param {RegExp} value The RegExp object reflected by this mirror + * @constructor + * @extends ObjectMirror + */ +function RegExpMirror(value) { + ObjectMirror.call(this, value, REGEXP_TYPE); +} +inherits(RegExpMirror, ObjectMirror); + + +/** + * Returns the source to the regular expression. + * @return {string or undefined} The source to the regular expression + */ +RegExpMirror.prototype.source = function() { + return this.value_.source; +}; + + +/** + * Returns whether this regular expression has the global (g) flag set. + * @return {boolean} Value of the global flag + */ +RegExpMirror.prototype.global = function() { + return this.value_.global; +}; + + +/** + * Returns whether this regular expression has the ignore case (i) flag set. + * @return {boolean} Value of the ignore case flag + */ +RegExpMirror.prototype.ignoreCase = function() { + return this.value_.ignoreCase; +}; + + +/** + * Returns whether this regular expression has the multiline (m) flag set. + * @return {boolean} Value of the multiline flag + */ +RegExpMirror.prototype.multiline = function() { + return this.value_.multiline; +}; + + +RegExpMirror.prototype.toText = function() { + // Simpel to text which is used when on specialization in subclass. 
+ return "/" + this.source() + "/"; +} + + +/** + * Mirror object for error objects. + * @param {Error} value The error object reflected by this mirror + * @constructor + * @extends ObjectMirror + */ +function ErrorMirror(value) { + ObjectMirror.call(this, value, ERROR_TYPE); +} +inherits(ErrorMirror, ObjectMirror); + + +/** + * Returns the message for this eror object. + * @return {string or undefined} The message for this eror object + */ +ErrorMirror.prototype.message = function() { + return this.value_.message; +}; + + +ErrorMirror.prototype.toText = function() { + // Use the same text representation as in messages.js. + var text; + try { + str = builtins.ToDetailString(this.value_); + } catch (e) { + str = '#<an Error>'; + } + return str; +} + + +/** + * Base mirror object for properties. + * @param {ObjectMirror} mirror The mirror object having this property + * @param {string} name The name of the property + * @param {Array} details Details about the property + * @constructor + * @extends Mirror + */ +function PropertyMirror(mirror, name, details) { + Mirror.call(this, PROPERTY_TYPE); + this.mirror_ = mirror; + this.name_ = name; + this.value_ = details[0]; + this.details_ = details[1]; + if (details.length > 2) { + this.exception_ = details[2] + this.getter_ = details[3]; + this.setter_ = details[4]; + } +} +inherits(PropertyMirror, Mirror); + + +PropertyMirror.prototype.isReadOnly = function() { + return (this.attributes() & PropertyAttribute.ReadOnly) != 0; +} + + +PropertyMirror.prototype.isEnum = function() { + return (this.attributes() & PropertyAttribute.DontEnum) == 0; +} + + +PropertyMirror.prototype.canDelete = function() { + return (this.attributes() & PropertyAttribute.DontDelete) == 0; +} + + +PropertyMirror.prototype.name = function() { + return this.name_; +} + + +PropertyMirror.prototype.isIndexed = function() { + for (var i = 0; i < this.name_.length; i++) { + if (this.name_[i] < '0' || '9' < this.name_[i]) { + return false; + } + } + return true; +} + + +PropertyMirror.prototype.value = function() { + return MakeMirror(this.value_, false); +} + + +/** + * Returns whether this property value is an exception. + * @return {booolean} True if this property value is an exception + */ +PropertyMirror.prototype.isException = function() { + return this.exception_ ? true : false; +} + + +PropertyMirror.prototype.attributes = function() { + return %DebugPropertyAttributesFromDetails(this.details_); +} + + +PropertyMirror.prototype.propertyType = function() { + return %DebugPropertyTypeFromDetails(this.details_); +} + + +PropertyMirror.prototype.insertionIndex = function() { + return %DebugPropertyIndexFromDetails(this.details_); +} + + +/** + * Returns whether this property has a getter defined through __defineGetter__. + * @return {booolean} True if this property has a getter + */ +PropertyMirror.prototype.hasGetter = function() { + return this.getter_ ? true : false; +} + + +/** + * Returns whether this property has a setter defined through __defineSetter__. + * @return {booolean} True if this property has a setter + */ +PropertyMirror.prototype.hasSetter = function() { + return this.setter_ ? true : false; +} + + +/** + * Returns the getter for this property defined through __defineGetter__. 
+ * @return {Mirror} FunctionMirror reflecting the getter function or + * UndefinedMirror if there is no getter for this property + */ +PropertyMirror.prototype.getter = function() { + if (this.hasGetter()) { + return MakeMirror(this.getter_); + } else { + return GetUndefinedMirror(); + } +} + + +/** + * Returns the setter for this property defined through __defineSetter__. + * @return {Mirror} FunctionMirror reflecting the setter function or + * UndefinedMirror if there is no setter for this property + */ +PropertyMirror.prototype.setter = function() { + if (this.hasSetter()) { + return MakeMirror(this.setter_); + } else { + return GetUndefinedMirror(); + } +} + + +/** + * Returns whether this property is natively implemented by the host or a set + * through JavaScript code. + * @return {boolean} True if the property is + * UndefinedMirror if there is no setter for this property + */ +PropertyMirror.prototype.isNative = function() { + return (this.propertyType() == PropertyType.Interceptor) || + ((this.propertyType() == PropertyType.Callbacks) && + !this.hasGetter() && !this.hasSetter()); +} + + +const kFrameDetailsFrameIdIndex = 0; +const kFrameDetailsReceiverIndex = 1; +const kFrameDetailsFunctionIndex = 2; +const kFrameDetailsArgumentCountIndex = 3; +const kFrameDetailsLocalCountIndex = 4; +const kFrameDetailsSourcePositionIndex = 5; +const kFrameDetailsConstructCallIndex = 6; +const kFrameDetailsDebuggerFrameIndex = 7; +const kFrameDetailsFirstDynamicIndex = 8; + +const kFrameDetailsNameIndex = 0; +const kFrameDetailsValueIndex = 1; +const kFrameDetailsNameValueSize = 2; + +/** + * Wrapper for the frame details information retreived from the VM. The frame + * details from the VM is an array with the following content. See runtime.cc + * Runtime_GetFrameDetails. 
+ * 0: Id + * 1: Receiver + * 2: Function + * 3: Argument count + * 4: Local count + * 5: Source position + * 6: Construct call + * Arguments name, value + * Locals name, value + * @param {number} break_id Current break id + * @param {number} index Frame number + * @constructor + */ +function FrameDetails(break_id, index) { + this.break_id_ = break_id; + this.details_ = %GetFrameDetails(break_id, index); +} + + +FrameDetails.prototype.frameId = function() { + %CheckExecutionState(this.break_id_); + return this.details_[kFrameDetailsFrameIdIndex]; +} + + +FrameDetails.prototype.receiver = function() { + %CheckExecutionState(this.break_id_); + return this.details_[kFrameDetailsReceiverIndex]; +} + + +FrameDetails.prototype.func = function() { + %CheckExecutionState(this.break_id_); + return this.details_[kFrameDetailsFunctionIndex]; +} + + +FrameDetails.prototype.isConstructCall = function() { + %CheckExecutionState(this.break_id_); + return this.details_[kFrameDetailsConstructCallIndex]; +} + + +FrameDetails.prototype.isDebuggerFrame = function() { + %CheckExecutionState(this.break_id_); + return this.details_[kFrameDetailsDebuggerFrameIndex]; +} + + +FrameDetails.prototype.argumentCount = function() { + %CheckExecutionState(this.break_id_); + return this.details_[kFrameDetailsArgumentCountIndex]; +} + + +FrameDetails.prototype.argumentName = function(index) { + %CheckExecutionState(this.break_id_); + if (index >= 0 && index < this.argumentCount()) { + return this.details_[kFrameDetailsFirstDynamicIndex + + index * kFrameDetailsNameValueSize + + kFrameDetailsNameIndex] + } +} + + +FrameDetails.prototype.argumentValue = function(index) { + %CheckExecutionState(this.break_id_); + if (index >= 0 && index < this.argumentCount()) { + return this.details_[kFrameDetailsFirstDynamicIndex + + index * kFrameDetailsNameValueSize + + kFrameDetailsValueIndex] + } +} + + +FrameDetails.prototype.localCount = function() { + %CheckExecutionState(this.break_id_); + return this.details_[kFrameDetailsLocalCountIndex]; +} + + +FrameDetails.prototype.sourcePosition = function() { + %CheckExecutionState(this.break_id_); + return this.details_[kFrameDetailsSourcePositionIndex]; +} + + +FrameDetails.prototype.localName = function(index) { + %CheckExecutionState(this.break_id_); + if (index >= 0 && index < this.localCount()) { + var locals_offset = kFrameDetailsFirstDynamicIndex + this.argumentCount() * kFrameDetailsNameValueSize + return this.details_[locals_offset + + index * kFrameDetailsNameValueSize + + kFrameDetailsNameIndex] + } +} + + +FrameDetails.prototype.localValue = function(index) { + %CheckExecutionState(this.break_id_); + if (index >= 0 && index < this.localCount()) { + var locals_offset = kFrameDetailsFirstDynamicIndex + this.argumentCount() * kFrameDetailsNameValueSize + return this.details_[locals_offset + + index * kFrameDetailsNameValueSize + + kFrameDetailsValueIndex] + } +} + + +FrameDetails.prototype.scopeCount = function() { + return %GetScopeCount(this.break_id_, this.frameId()); +} + + +/** + * Mirror object for stack frames. 
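The FrameDetails accessors above all index into one flat array returned by the VM: eight fixed slots, then argumentCount name/value pairs, then localCount name/value pairs. A standalone sketch of that indexing against a hand-built array (the %GetFrameDetails native is only available inside the VM, and the bounds checks from the real code are omitted):

// Layout constants copied from the code above.
var kFirstDynamicIndex = 8;
var kNameIndex = 0, kValueIndex = 1, kNameValueSize = 2;
var kArgumentCountIndex = 3;

function argumentName(details, index) {
  return details[kFirstDynamicIndex + index * kNameValueSize + kNameIndex];
}
function localValue(details, index) {
  var localsOffset = kFirstDynamicIndex + details[kArgumentCountIndex] * kNameValueSize;
  return details[localsOffset + index * kNameValueSize + kValueIndex];
}

// A hand-built details array for a frame with one argument and one local.
var details = [/*id*/ 0, /*receiver*/ null, /*function*/ null,
               /*argument count*/ 1, /*local count*/ 1, /*source position*/ 0,
               /*construct call*/ false, /*debugger frame*/ false,
               'x', 42,        // argument name/value pair
               'sum', 99];     // local name/value pair
console.log(argumentName(details, 0));  // 'x'
console.log(localValue(details, 0));    // 99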
+ * @param {number} break_id The break id in the VM for which this frame is + valid + * @param {number} index The frame index (top frame is index 0) + * @constructor + * @extends Mirror + */ +function FrameMirror(break_id, index) { + Mirror.call(this, FRAME_TYPE); + this.break_id_ = break_id; + this.index_ = index; + this.details_ = new FrameDetails(break_id, index); +} +inherits(FrameMirror, Mirror); + + +FrameMirror.prototype.index = function() { + return this.index_; +}; + + +FrameMirror.prototype.func = function() { + // Get the function for this frame from the VM. + var f = this.details_.func(); + + // Create a function mirror. NOTE: MakeMirror cannot be used here as the + // value returned from the VM might be a string if the function for the + // frame is unresolved. + if (IS_FUNCTION(f)) { + return MakeMirror(f); + } else { + return new UnresolvedFunctionMirror(f); + } +}; + + +FrameMirror.prototype.receiver = function() { + return MakeMirror(this.details_.receiver()); +}; + + +FrameMirror.prototype.isConstructCall = function() { + return this.details_.isConstructCall(); +}; + + +FrameMirror.prototype.isDebuggerFrame = function() { + return this.details_.isDebuggerFrame(); +}; + + +FrameMirror.prototype.argumentCount = function() { + return this.details_.argumentCount(); +}; + + +FrameMirror.prototype.argumentName = function(index) { + return this.details_.argumentName(index); +}; + + +FrameMirror.prototype.argumentValue = function(index) { + return MakeMirror(this.details_.argumentValue(index)); +}; + + +FrameMirror.prototype.localCount = function() { + return this.details_.localCount(); +}; + + +FrameMirror.prototype.localName = function(index) { + return this.details_.localName(index); +}; + + +FrameMirror.prototype.localValue = function(index) { + return MakeMirror(this.details_.localValue(index)); +}; + + +FrameMirror.prototype.sourcePosition = function() { + return this.details_.sourcePosition(); +}; + + +FrameMirror.prototype.sourceLocation = function() { + if (this.func().resolved() && this.func().script()) { + return this.func().script().locationFromPosition(this.sourcePosition(), + true); + } +}; + + +FrameMirror.prototype.sourceLine = function() { + if (this.func().resolved()) { + var location = this.sourceLocation(); + if (location) { + return location.line; + } + } +}; + + +FrameMirror.prototype.sourceColumn = function() { + if (this.func().resolved()) { + var location = this.sourceLocation(); + if (location) { + return location.column; + } + } +}; + + +FrameMirror.prototype.sourceLineText = function() { + if (this.func().resolved()) { + var location = this.sourceLocation(); + if (location) { + return location.sourceText(); + } + } +}; + + +FrameMirror.prototype.scopeCount = function() { + return this.details_.scopeCount(); +}; + + +FrameMirror.prototype.scope = function(index) { + return new ScopeMirror(this, index); +}; + + +FrameMirror.prototype.evaluate = function(source, disable_break) { + var result = %DebugEvaluate(this.break_id_, this.details_.frameId(), + source, Boolean(disable_break)); + return MakeMirror(result); +}; + + +FrameMirror.prototype.invocationText = function() { + // Format frame invoaction (receiver, function and arguments). + var result = ''; + var func = this.func(); + var receiver = this.receiver(); + if (this.isConstructCall()) { + // For constructor frames display new followed by the function name. + result += 'new '; + result += func.name() ? 
func.name() : '[anonymous]'; + } else if (this.isDebuggerFrame()) { + result += '[debugger]'; + } else { + // If the receiver has a className which is 'global' don't display it. + var display_receiver = !receiver.className || receiver.className() != 'global'; + if (display_receiver) { + result += receiver.toText(); + } + // Try to find the function as a property in the receiver. Include the + // prototype chain in the lookup. + var property = GetUndefinedMirror(); + if (!receiver.isUndefined()) { + for (var r = receiver; !r.isNull() && property.isUndefined(); r = r.protoObject()) { + property = r.lookupProperty(func); + } + } + if (!property.isUndefined()) { + // The function invoked was found on the receiver. Use the property name + // for the backtrace. + if (!property.isIndexed()) { + if (display_receiver) { + result += '.'; + } + result += property.name(); + } else { + result += '['; + result += property.name(); + result += ']'; + } + // Also known as - if the name in the function doesn't match the name + // under which it was looked up. + if (func.name() && func.name() != property.name()) { + result += '(aka ' + func.name() + ')'; + } + } else { + // The function invoked was not found on the receiver. Use the function + // name if available for the backtrace. + if (display_receiver) { + result += '.'; + } + result += func.name() ? func.name() : '[anonymous]'; + } + } + + // Render arguments for normal frames. + if (!this.isDebuggerFrame()) { + result += '('; + for (var i = 0; i < this.argumentCount(); i++) { + if (i != 0) result += ', '; + if (this.argumentName(i)) { + result += this.argumentName(i); + result += '='; + } + result += this.argumentValue(i).toText(); + } + result += ')'; + } + + return result; +} + + +FrameMirror.prototype.sourceAndPositionText = function() { + // Format source and position. + var result = ''; + var func = this.func(); + if (func.resolved()) { + if (func.script()) { + if (func.script().name()) { + result += func.script().name(); + } else { + result += '[unnamed]'; + } + if (!this.isDebuggerFrame()) { + var location = this.sourceLocation(); + result += ' line '; + result += !IS_UNDEFINED(location) ? (location.line + 1) : '?'; + result += ' column '; + result += !IS_UNDEFINED(location) ? (location.column + 1) : '?'; + if (!IS_UNDEFINED(this.sourcePosition())) { + result += ' (position ' + (this.sourcePosition() + 1) + ')'; + } + } + } else { + result += '[no source]'; + } + } else { + result += '[unresolved]'; + } + + return result; +} + + +FrameMirror.prototype.localsText = function() { + // Format local variables. + var result = ''; + var locals_count = this.localCount() + if (locals_count > 0) { + for (var i = 0; i < locals_count; ++i) { + result += ' var '; + result += this.localName(i); + result += ' = '; + result += this.localValue(i).toText(); + if (i < locals_count - 1) result += '\n'; + } + } + + return result; +} + + +FrameMirror.prototype.toText = function(opt_locals) { + var result = ''; + result += '#' + (this.index() <= 9 ? 
'0' : '') + this.index(); + result += ' '; + result += this.invocationText(); + result += ' '; + result += this.sourceAndPositionText(); + if (opt_locals) { + result += '\n'; + result += this.localsText(); + } + return result; +} + + +const kScopeDetailsTypeIndex = 0; +const kScopeDetailsObjectIndex = 1; + +function ScopeDetails(frame, index) { + this.break_id_ = frame.break_id_; + this.details_ = %GetScopeDetails(frame.break_id_, + frame.details_.frameId(), + index); +} + + +ScopeDetails.prototype.type = function() { + %CheckExecutionState(this.break_id_); + return this.details_[kScopeDetailsTypeIndex]; +} + + +ScopeDetails.prototype.object = function() { + %CheckExecutionState(this.break_id_); + return this.details_[kScopeDetailsObjectIndex]; +} + + +/** + * Mirror object for scope. + * @param {FrameMirror} frame The frame this scope is a part of + * @param {number} index The scope index in the frame + * @constructor + * @extends Mirror + */ +function ScopeMirror(frame, index) { + Mirror.call(this, SCOPE_TYPE); + this.frame_index_ = frame.index_; + this.scope_index_ = index; + this.details_ = new ScopeDetails(frame, index); +} +inherits(ScopeMirror, Mirror); + + +ScopeMirror.prototype.frameIndex = function() { + return this.frame_index_; +}; + + +ScopeMirror.prototype.scopeIndex = function() { + return this.scope_index_; +}; + + +ScopeMirror.prototype.scopeType = function() { + return this.details_.type(); +}; + + +ScopeMirror.prototype.scopeObject = function() { + // For local and closure scopes create a transient mirror as these objects are + // created on the fly materializing the local or closure scopes and + // therefore will not preserve identity. + var transient = this.scopeType() == ScopeType.Local || + this.scopeType() == ScopeType.Closure; + return MakeMirror(this.details_.object(), transient); +}; + + +/** + * Mirror object for script source. 
+ * @param {Script} script The script object + * @constructor + * @extends Mirror + */ +function ScriptMirror(script) { + Mirror.call(this, SCRIPT_TYPE); + this.script_ = script; + this.context_ = new ContextMirror(script.context_data); + this.allocateHandle_(); +} +inherits(ScriptMirror, Mirror); + + +ScriptMirror.prototype.value = function() { + return this.script_; +}; + + +ScriptMirror.prototype.name = function() { + // If we have name, we trust it more than sourceURL from comments + return this.script_.name || this.sourceUrlFromComment_(); +}; + + +ScriptMirror.prototype.id = function() { + return this.script_.id; +}; + + +ScriptMirror.prototype.source = function() { + return this.script_.source; +}; + + +ScriptMirror.prototype.lineOffset = function() { + return this.script_.line_offset; +}; + + +ScriptMirror.prototype.columnOffset = function() { + return this.script_.column_offset; +}; + + +ScriptMirror.prototype.data = function() { + return this.script_.data; +}; + + +ScriptMirror.prototype.scriptType = function() { + return this.script_.type; +}; + + +ScriptMirror.prototype.compilationType = function() { + return this.script_.compilation_type; +}; + + +ScriptMirror.prototype.lineCount = function() { + return this.script_.lineCount(); +}; + + +ScriptMirror.prototype.locationFromPosition = function( + position, include_resource_offset) { + return this.script_.locationFromPosition(position, include_resource_offset); +} + + +ScriptMirror.prototype.sourceSlice = function (opt_from_line, opt_to_line) { + return this.script_.sourceSlice(opt_from_line, opt_to_line); +} + + +ScriptMirror.prototype.context = function() { + return this.context_; +}; + + +ScriptMirror.prototype.evalFromScript = function() { + return MakeMirror(this.script_.eval_from_script); +}; + + +ScriptMirror.prototype.evalFromFunctionName = function() { + return MakeMirror(this.script_.eval_from_function_name); +}; + + +ScriptMirror.prototype.evalFromLocation = function() { + var eval_from_script = this.evalFromScript(); + if (!eval_from_script.isUndefined()) { + var position = this.script_.eval_from_script_position; + return eval_from_script.locationFromPosition(position, true); + } +}; + + +ScriptMirror.prototype.toText = function() { + var result = ''; + result += this.name(); + result += ' (lines: '; + if (this.lineOffset() > 0) { + result += this.lineOffset(); + result += '-'; + result += this.lineOffset() + this.lineCount() - 1; + } else { + result += this.lineCount(); + } + result += ')'; + return result; +} + + +/** + * Returns a suggested script URL from comments in script code (if found), + * undefined otherwise. Used primarily by debuggers for identifying eval()'ed + * scripts. See + * http://fbug.googlecode.com/svn/branches/firebug1.1/docs/ReleaseNotes_1.1.txt + * for details. + * + * @return {?string} value for //@ sourceURL comment + */ +ScriptMirror.prototype.sourceUrlFromComment_ = function() { + if (!('sourceUrl_' in this) && this.source()) { + // TODO(608): the spaces in a regexp below had to be escaped as \040 + // because this file is being processed by js2c whose handling of spaces + // in regexps is broken. + // We're not using \s here to prevent \n from matching. + var sourceUrlPattern = /\/\/@[\040\t]sourceURL=[\040\t]*(\S+)[\040\t]*$/m; + var match = sourceUrlPattern.exec(this.source()); + this.sourceUrl_ = match ? match[1] : undefined; + } + return this.sourceUrl_; +}; + + +/** + * Mirror object for context. 
+ * @param {Object} data The context data
+ * @constructor
+ * @extends Mirror
+ */
+function ContextMirror(data) {
+  Mirror.call(this, CONTEXT_TYPE);
+  this.data_ = data;
+  this.allocateHandle_();
+}
+inherits(ContextMirror, Mirror);
+
+
+ContextMirror.prototype.data = function() {
+  return this.data_;
+};
+
+
+/**
+ * Returns a mirror serializer.
+ *
+ * @param {boolean} details Set to true to include details
+ * @param {Object} options Options controlling the serialization
+ *     The following options can be set:
+ *       includeSource: include the full source of scripts
+ * @returns {MirrorSerializer} mirror serializer
+ */
+function MakeMirrorSerializer(details, options) {
+  return new JSONProtocolSerializer(details, options);
+}
+
+
+/**
+ * Object for serializing a mirror object and its direct references.
+ * @param {boolean} details Indicates whether to include details for the mirror
+ *     serialized
+ * @constructor
+ */
+function JSONProtocolSerializer(details, options) {
+  this.details_ = details;
+  this.options_ = options;
+  this.mirrors_ = [ ];
+}
+
+
+/**
+ * Returns a serialization of an object reference. The referenced objects are
+ * added to the serialization state.
+ *
+ * @param {Mirror} mirror The mirror to serialize
+ * @returns {String} JSON serialization
+ */
+JSONProtocolSerializer.prototype.serializeReference = function(mirror) {
+  return this.serialize_(mirror, true, true);
+}
+
+
+/**
+ * Returns a serialization of an object value. The referenced objects are
+ * added to the serialization state.
+ *
+ * @param {Mirror} mirror The mirror to serialize
+ * @returns {String} JSON serialization
+ */
+JSONProtocolSerializer.prototype.serializeValue = function(mirror) {
+  var json = this.serialize_(mirror, false, true);
+  return json;
+}
+
+
+/**
+ * Returns a serialization of all the objects referenced.
+ *
+ * @returns {Array.<Object>} Array of the referenced objects converted to
+ *     protocol objects.
+ */
+JSONProtocolSerializer.prototype.serializeReferencedObjects = function() {
+  // Collect the protocol representation of the referenced objects in an array.
+  var content = [];
+
+  // Get the number of referenced objects.
+  var count = this.mirrors_.length;
+
+  for (var i = 0; i < count; i++) {
+    content.push(this.serialize_(this.mirrors_[i], false, false));
+  }
+
+  return content;
+}
+
+
+JSONProtocolSerializer.prototype.includeSource_ = function() {
+  return this.options_ && this.options_.includeSource;
+}
+
+
+JSONProtocolSerializer.prototype.inlineRefs_ = function() {
+  return this.options_ && this.options_.inlineRefs;
+}
+
+
+JSONProtocolSerializer.prototype.maxStringLength_ = function() {
+  if (IS_UNDEFINED(this.options_) ||
+      IS_UNDEFINED(this.options_.maxStringLength)) {
+    return kMaxProtocolStringLength;
+  }
+  return this.options_.maxStringLength;
+}
+
+
+JSONProtocolSerializer.prototype.add_ = function(mirror) {
+  // If this mirror is already in the list just return.
+  for (var i = 0; i < this.mirrors_.length; i++) {
+    if (this.mirrors_[i] === mirror) {
+      return;
+    }
+  }
+
+  // Add the mirror to the list of mirrors to be serialized.
+  this.mirrors_.push(mirror);
+}
+
+
+/**
+ * Formats a mirror object to a protocol reference object with some data that
+ * can be used to display the value in the debugger.
+ * @param {Mirror} mirror Mirror to serialize.
+ * @return {Object} Protocol reference object.
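+ *
+ * Illustrative examples only (a sketch of the shapes produced by the switch
+ * below, not normative protocol output): a number mirror might produce
+ * {ref: 3, type: 'number', value: 42} and an object mirror
+ * {ref: 5, type: 'object', className: 'Array'}.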
+ */ +JSONProtocolSerializer.prototype.serializeReferenceWithDisplayData_ = + function(mirror) { + var o = {}; + o.ref = mirror.handle(); + o.type = mirror.type(); + switch (mirror.type()) { + case UNDEFINED_TYPE: + case NULL_TYPE: + case BOOLEAN_TYPE: + case NUMBER_TYPE: + o.value = mirror.value(); + break; + case STRING_TYPE: + o.value = mirror.getTruncatedValue(this.maxStringLength_()); + break; + case FUNCTION_TYPE: + o.name = mirror.name(); + o.inferredName = mirror.inferredName(); + if (mirror.script()) { + o.scriptId = mirror.script().id(); + } + break; + case ERROR_TYPE: + case REGEXP_TYPE: + o.value = mirror.toText(); + break; + case OBJECT_TYPE: + o.className = mirror.className(); + break; + } + return o; +}; + + +JSONProtocolSerializer.prototype.serialize_ = function(mirror, reference, + details) { + // If serializing a reference to a mirror just return the reference and add + // the mirror to the referenced mirrors. + if (reference && + (mirror.isValue() || mirror.isScript() || mirror.isContext())) { + if (this.inlineRefs_() && mirror.isValue()) { + return this.serializeReferenceWithDisplayData_(mirror); + } else { + this.add_(mirror); + return {'ref' : mirror.handle()}; + } + } + + // Collect the JSON property/value pairs. + var content = {}; + + // Add the mirror handle. + if (mirror.isValue() || mirror.isScript() || mirror.isContext()) { + content.handle = mirror.handle(); + } + + // Always add the type. + content.type = mirror.type(); + + switch (mirror.type()) { + case UNDEFINED_TYPE: + case NULL_TYPE: + // Undefined and null are represented just by their type. + break; + + case BOOLEAN_TYPE: + // Boolean values are simply represented by their value. + content.value = mirror.value(); + break; + + case NUMBER_TYPE: + // Number values are simply represented by their value. + content.value = NumberToJSON_(mirror.value()); + break; + + case STRING_TYPE: + // String values might have their value cropped to keep down size. + if (this.maxStringLength_() != -1 && + mirror.length() > this.maxStringLength_()) { + var str = mirror.getTruncatedValue(this.maxStringLength_()); + content.value = str; + content.fromIndex = 0; + content.toIndex = this.maxStringLength_(); + } else { + content.value = mirror.value(); + } + content.length = mirror.length(); + break; + + case OBJECT_TYPE: + case FUNCTION_TYPE: + case ERROR_TYPE: + case REGEXP_TYPE: + // Add object representation. + this.serializeObject_(mirror, content, details); + break; + + case PROPERTY_TYPE: + throw new Error('PropertyMirror cannot be serialized independeltly') + break; + + case FRAME_TYPE: + // Add object representation. + this.serializeFrame_(mirror, content); + break; + + case SCOPE_TYPE: + // Add object representation. + this.serializeScope_(mirror, content); + break; + + case SCRIPT_TYPE: + // Script is represented by id, name and source attributes. 
+ if (mirror.name()) { + content.name = mirror.name(); + } + content.id = mirror.id(); + content.lineOffset = mirror.lineOffset(); + content.columnOffset = mirror.columnOffset(); + content.lineCount = mirror.lineCount(); + if (mirror.data()) { + content.data = mirror.data(); + } + if (this.includeSource_()) { + content.source = mirror.source(); + } else { + var sourceStart = mirror.source().substring(0, 80); + content.sourceStart = sourceStart; + } + content.sourceLength = mirror.source().length; + content.scriptType = mirror.scriptType(); + content.compilationType = mirror.compilationType(); + // For compilation type eval emit information on the script from which + // eval was called if a script is present. + if (mirror.compilationType() == 1 && + mirror.evalFromScript()) { + content.evalFromScript = + this.serializeReference(mirror.evalFromScript()); + var evalFromLocation = mirror.evalFromLocation() + if (evalFromLocation) { + content.evalFromLocation = { line: evalFromLocation.line, + column: evalFromLocation.column }; + } + if (mirror.evalFromFunctionName()) { + content.evalFromFunctionName = mirror.evalFromFunctionName(); + } + } + if (mirror.context()) { + content.context = this.serializeReference(mirror.context()); + } + break; + + case CONTEXT_TYPE: + content.data = mirror.data(); + break; + } + + // Always add the text representation. + content.text = mirror.toText(); + + // Create and return the JSON string. + return content; +} + + +/** + * Serialize object information to the following JSON format. + * + * {"className":"<class name>", + * "constructorFunction":{"ref":<number>}, + * "protoObject":{"ref":<number>}, + * "prototypeObject":{"ref":<number>}, + * "namedInterceptor":<boolean>, + * "indexedInterceptor":<boolean>, + * "properties":[<properties>]} + */ +JSONProtocolSerializer.prototype.serializeObject_ = function(mirror, content, + details) { + // Add general object properties. + content.className = mirror.className(); + content.constructorFunction = + this.serializeReference(mirror.constructorFunction()); + content.protoObject = this.serializeReference(mirror.protoObject()); + content.prototypeObject = this.serializeReference(mirror.prototypeObject()); + + // Add flags to indicate whether there are interceptors. + if (mirror.hasNamedInterceptor()) { + content.namedInterceptor = true; + } + if (mirror.hasIndexedInterceptor()) { + content.indexedInterceptor = true; + } + + // Add function specific properties. + if (mirror.isFunction()) { + // Add function specific properties. + content.name = mirror.name(); + if (!IS_UNDEFINED(mirror.inferredName())) { + content.inferredName = mirror.inferredName(); + } + content.resolved = mirror.resolved(); + if (mirror.resolved()) { + content.source = mirror.source(); + } + if (mirror.script()) { + content.script = this.serializeReference(mirror.script()); + content.scriptId = mirror.script().id(); + + serializeLocationFields(mirror.sourceLocation(), content); + } + } + + // Add date specific properties. + if (mirror.isDate()) { + // Add date specific properties. + content.value = mirror.value(); + } + + // Add actual properties - named properties followed by indexed properties. 
+ var propertyNames = mirror.propertyNames(PropertyKind.Named); + var propertyIndexes = mirror.propertyNames(PropertyKind.Indexed); + var p = new Array(propertyNames.length + propertyIndexes.length); + for (var i = 0; i < propertyNames.length; i++) { + var propertyMirror = mirror.property(propertyNames[i]); + p[i] = this.serializeProperty_(propertyMirror); + if (details) { + this.add_(propertyMirror.value()); + } + } + for (var i = 0; i < propertyIndexes.length; i++) { + var propertyMirror = mirror.property(propertyIndexes[i]); + p[propertyNames.length + i] = this.serializeProperty_(propertyMirror); + if (details) { + this.add_(propertyMirror.value()); + } + } + content.properties = p; +} + + +/** + * Serialize location information to the following JSON format: + * + * "position":"<position>", + * "line":"<line>", + * "column":"<column>", + * + * @param {SourceLocation} location The location to serialize, may be undefined. + */ +function serializeLocationFields (location, content) { + if (!location) { + return; + } + content.position = location.position; + var line = location.line; + if (!IS_UNDEFINED(line)) { + content.line = line; + } + var column = location.column; + if (!IS_UNDEFINED(column)) { + content.column = column; + } +} + + +/** + * Serialize property information to the following JSON format for building the + * array of properties. + * + * {"name":"<property name>", + * "attributes":<number>, + * "propertyType":<number>, + * "ref":<number>} + * + * If the attribute for the property is PropertyAttribute.None it is not added. + * If the propertyType for the property is PropertyType.Normal it is not added. + * Here are a couple of examples. + * + * {"name":"hello","ref":1} + * {"name":"length","attributes":7,"propertyType":3,"ref":2} + * + * @param {PropertyMirror} propertyMirror The property to serialize. + * @returns {Object} Protocol object representing the property. 
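+ *
+ * When the inlineRefs option is set the property value is embedded instead of
+ * referenced, for example (illustrative only):
+ *   {"name":"hello","value":{"ref":1,"type":"string","value":"world"}}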
+ */
+JSONProtocolSerializer.prototype.serializeProperty_ = function(propertyMirror) {
+  var result = {};
+
+  result.name = propertyMirror.name();
+  var propertyValue = propertyMirror.value();
+  if (this.inlineRefs_() && propertyValue.isValue()) {
+    result.value = this.serializeReferenceWithDisplayData_(propertyValue);
+  } else {
+    if (propertyMirror.attributes() != PropertyAttribute.None) {
+      result.attributes = propertyMirror.attributes();
+    }
+    if (propertyMirror.propertyType() != PropertyType.Normal) {
+      result.propertyType = propertyMirror.propertyType();
+    }
+    result.ref = propertyValue.handle();
+  }
+  return result;
+}
+
+
+JSONProtocolSerializer.prototype.serializeFrame_ = function(mirror, content) {
+  content.index = mirror.index();
+  content.receiver = this.serializeReference(mirror.receiver());
+  var func = mirror.func();
+  content.func = this.serializeReference(func);
+  if (func.script()) {
+    content.script = this.serializeReference(func.script());
+  }
+  content.constructCall = mirror.isConstructCall();
+  content.debuggerFrame = mirror.isDebuggerFrame();
+  var x = new Array(mirror.argumentCount());
+  for (var i = 0; i < mirror.argumentCount(); i++) {
+    var arg = {};
+    var argument_name = mirror.argumentName(i)
+    if (argument_name) {
+      arg.name = argument_name;
+    }
+    arg.value = this.serializeReference(mirror.argumentValue(i));
+    x[i] = arg;
+  }
+  content.arguments = x;
+  var x = new Array(mirror.localCount());
+  for (var i = 0; i < mirror.localCount(); i++) {
+    var local = {};
+    local.name = mirror.localName(i);
+    local.value = this.serializeReference(mirror.localValue(i));
+    x[i] = local;
+  }
+  content.locals = x;
+  serializeLocationFields(mirror.sourceLocation(), content);
+  var source_line_text = mirror.sourceLineText();
+  if (!IS_UNDEFINED(source_line_text)) {
+    content.sourceLineText = source_line_text;
+  }
+
+  content.scopes = [];
+  for (var i = 0; i < mirror.scopeCount(); i++) {
+    var scope = mirror.scope(i);
+    content.scopes.push({
+      type: scope.scopeType(),
+      index: i
+    });
+  }
+}
+
+
+JSONProtocolSerializer.prototype.serializeScope_ = function(mirror, content) {
+  content.index = mirror.scopeIndex();
+  content.frameIndex = mirror.frameIndex();
+  content.type = mirror.scopeType();
+  content.object = this.inlineRefs_() ?
+                   this.serializeValue(mirror.scopeObject()) :
+                   this.serializeReference(mirror.scopeObject());
+}
+
+
+/**
+ * Convert a number to a protocol value. For all finite numbers the number
+ * itself is returned. For the non-finite numbers NaN, Infinity and -Infinity
+ * the string representation "NaN", "Infinity" or "-Infinity" (not including
+ * the quotes) is returned.
+ *
+ * @param {number} value The number value to convert to a protocol value.
+ * @returns {number|string} Protocol value.
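+ *
+ * For example (illustrative): NumberToJSON_(1.5) returns 1.5, while
+ * NumberToJSON_(1/0) returns 'Infinity' and NumberToJSON_(0/0) returns 'NaN'.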
+ */ +function NumberToJSON_(value) { + if (isNaN(value)) { + return 'NaN'; + } + if (!isFinite(value)) { + if (value > 0) { + return 'Infinity'; + } else { + return '-Infinity'; + } + } + return value; +} diff --git a/deps/v8/src/mksnapshot.cc b/deps/v8/src/mksnapshot.cc index 6457ae7422..a30b45079e 100644 --- a/deps/v8/src/mksnapshot.cc +++ b/deps/v8/src/mksnapshot.cc @@ -35,6 +35,7 @@ #include "natives.h" #include "platform.h" #include "serialize.h" +#include "list.h" // use explicit namespace to avoid clashing with types in namespace v8 namespace i = v8::internal; @@ -96,7 +97,9 @@ static CounterMap counter_table_; class CppByteSink : public i::SnapshotByteSink { public: - explicit CppByteSink(const char* snapshot_file) : bytes_written_(0) { + explicit CppByteSink(const char* snapshot_file) + : bytes_written_(0), + partial_sink_(this) { fp_ = i::OS::FOpen(snapshot_file, "wb"); if (fp_ == NULL) { i::PrintF("Unable to write to snapshot file \"%s\"\n", snapshot_file); @@ -111,11 +114,53 @@ class CppByteSink : public i::SnapshotByteSink { } virtual ~CppByteSink() { - if (fp_ != NULL) { - fprintf(fp_, "};\n\n"); - fprintf(fp_, "int Snapshot::size_ = %d;\n\n", bytes_written_); - fprintf(fp_, "} } // namespace v8::internal\n"); - fclose(fp_); + fprintf(fp_, "const int Snapshot::size_ = %d;\n\n", bytes_written_); + fprintf(fp_, "} } // namespace v8::internal\n"); + fclose(fp_); + } + + void WriteSpaceUsed( + int new_space_used, + int pointer_space_used, + int data_space_used, + int code_space_used, + int map_space_used, + int cell_space_used, + int large_space_used) { + fprintf(fp_, "};\n\n"); + fprintf(fp_, "const int Snapshot::new_space_used_ = %d;\n", new_space_used); + fprintf(fp_, + "const int Snapshot::pointer_space_used_ = %d;\n", + pointer_space_used); + fprintf(fp_, + "const int Snapshot::data_space_used_ = %d;\n", + data_space_used); + fprintf(fp_, + "const int Snapshot::code_space_used_ = %d;\n", + code_space_used); + fprintf(fp_, "const int Snapshot::map_space_used_ = %d;\n", map_space_used); + fprintf(fp_, + "const int Snapshot::cell_space_used_ = %d;\n", + cell_space_used); + fprintf(fp_, + "const int Snapshot::large_space_used_ = %d;\n", + large_space_used); + } + + void WritePartialSnapshot() { + int length = partial_sink_.Position(); + fprintf(fp_, "};\n\n"); + fprintf(fp_, "const int Snapshot::context_size_ = %d;\n", length); + fprintf(fp_, "const byte Snapshot::context_data_[] = {\n"); + for (int j = 0; j < length; j++) { + if ((j & 0x1f) == 0x1f) { + fprintf(fp_, "\n"); + } + char byte = partial_sink_.at(j); + if (j != 0) { + fprintf(fp_, ","); + } + fprintf(fp_, "%d", byte); } } @@ -125,7 +170,7 @@ class CppByteSink : public i::SnapshotByteSink { } fprintf(fp_, "%d", byte); bytes_written_++; - if ((bytes_written_ & 0x3f) == 0) { + if ((bytes_written_ & 0x1f) == 0) { fprintf(fp_, "\n"); } } @@ -134,9 +179,28 @@ class CppByteSink : public i::SnapshotByteSink { return bytes_written_; } + i::SnapshotByteSink* partial_sink() { return &partial_sink_; } + + class PartialSnapshotSink : public i::SnapshotByteSink { + public: + explicit PartialSnapshotSink(CppByteSink* parent) + : parent_(parent), + data_() { } + virtual ~PartialSnapshotSink() { data_.Free(); } + virtual void Put(int byte, const char* description) { + data_.Add(byte); + } + virtual int Position() { return data_.length(); } + char at(int i) { return data_[i]; } + private: + CppByteSink* parent_; + i::List<char> data_; + }; + private: FILE* fp_; int bytes_written_; + PartialSnapshotSink partial_sink_; }; @@ -162,12 
+226,31 @@ int main(int argc, char** argv) { i::Bootstrapper::NativesSourceLookup(i); } } + // If we don't do this then we end up with a stray root pointing at the + // context even after we have disposed of the context. + i::Heap::CollectAllGarbage(true); + i::Object* raw_context = *(v8::Utils::OpenHandle(*context)); context.Dispose(); CppByteSink sink(argv[1]); // This results in a somewhat smaller snapshot, probably because it gets rid // of some things that are cached between garbage collections. - i::Heap::CollectAllGarbage(true); i::StartupSerializer ser(&sink); - ser.Serialize(); + ser.SerializeStrongReferences(); + + i::PartialSerializer partial_ser(&ser, sink.partial_sink()); + partial_ser.Serialize(&raw_context); + + ser.SerializeWeakReferences(); + + sink.WritePartialSnapshot(); + + sink.WriteSpaceUsed( + partial_ser.CurrentAllocationAddress(i::NEW_SPACE), + partial_ser.CurrentAllocationAddress(i::OLD_POINTER_SPACE), + partial_ser.CurrentAllocationAddress(i::OLD_DATA_SPACE), + partial_ser.CurrentAllocationAddress(i::CODE_SPACE), + partial_ser.CurrentAllocationAddress(i::MAP_SPACE), + partial_ser.CurrentAllocationAddress(i::CELL_SPACE), + partial_ser.CurrentAllocationAddress(i::LO_SPACE)); return 0; } diff --git a/deps/v8/src/natives.h b/deps/v8/src/natives.h index fdfd213580..639a2d37b0 100644 --- a/deps/v8/src/natives.h +++ b/deps/v8/src/natives.h @@ -44,13 +44,13 @@ class NativesCollection { public: // Number of built-in scripts. static int GetBuiltinsCount(); - // Number of delayed/lazy loading scripts. - static int GetDelayCount(); + // Number of debugger implementation scripts. + static int GetDebuggerCount(); - // These are used to access built-in scripts. - // The delayed script has an index in the interval [0, GetDelayCount()). - // The non-delayed script has an index in the interval - // [GetDelayCount(), GetNativesCount()). + // These are used to access built-in scripts. The debugger implementation + // scripts have an index in the interval [0, GetDebuggerCount()). The + // non-debugger scripts have an index in the interval [GetDebuggerCount(), + // GetNativesCount()). 
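+  // For example (illustrative numbers only): with two debugger scripts out of
+  // ten built-in scripts in total, indices 0 and 1 would be the debugger
+  // scripts and indices 2 through 9 the remaining natives.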
static int GetIndex(const char* name); static Vector<const char> GetScriptSource(int index); static Vector<const char> GetScriptName(int index); diff --git a/deps/v8/src/objects-debug.cc b/deps/v8/src/objects-debug.cc index 44a3b1ae09..a6ad958c40 100644 --- a/deps/v8/src/objects-debug.cc +++ b/deps/v8/src/objects-debug.cc @@ -615,9 +615,6 @@ void Map::MapPrint() { if (is_undetectable()) { PrintF(" - undetectable\n"); } - if (needs_loading()) { - PrintF(" - needs_loading\n"); - } if (has_instance_call_handler()) { PrintF(" - instance_call_handler\n"); } diff --git a/deps/v8/src/objects-inl.h b/deps/v8/src/objects-inl.h index e7daa2d68c..a26da7dd62 100644 --- a/deps/v8/src/objects-inl.h +++ b/deps/v8/src/objects-inl.h @@ -2368,8 +2368,8 @@ ACCESSORS(BreakPointInfo, statement_position, Smi, kStatementPositionIndex) ACCESSORS(BreakPointInfo, break_point_objects, Object, kBreakPointObjectsIndex) #endif -ACCESSORS(SharedFunctionInfo, construct_stub, Code, kConstructStubOffset) ACCESSORS(SharedFunctionInfo, name, Object, kNameOffset) +ACCESSORS(SharedFunctionInfo, construct_stub, Code, kConstructStubOffset) ACCESSORS(SharedFunctionInfo, instance_class_name, Object, kInstanceClassNameOffset) ACCESSORS(SharedFunctionInfo, function_data, Object, kFunctionDataOffset) @@ -2401,6 +2401,7 @@ INT_ACCESSORS(SharedFunctionInfo, formal_parameter_count, kFormalParameterCountOffset) INT_ACCESSORS(SharedFunctionInfo, expected_nof_properties, kExpectedNofPropertiesOffset) +INT_ACCESSORS(SharedFunctionInfo, num_literals, kNumLiteralsOffset) INT_ACCESSORS(SharedFunctionInfo, start_position_and_type, kStartPositionAndTypeOffset) INT_ACCESSORS(SharedFunctionInfo, end_position, kEndPositionOffset) @@ -2489,11 +2490,6 @@ bool JSFunction::IsBuiltin() { } -bool JSObject::IsLoaded() { - return !map()->needs_loading(); -} - - Code* JSFunction::code() { return shared()->code(); } @@ -2573,6 +2569,7 @@ bool JSFunction::is_compiled() { int JSFunction::NumberOfLiterals() { + ASSERT(!IsBoilerplate()); return literals()->length(); } diff --git a/deps/v8/src/objects.cc b/deps/v8/src/objects.cc index 132aa9e693..a1fbc99277 100644 --- a/deps/v8/src/objects.cc +++ b/deps/v8/src/objects.cc @@ -338,55 +338,6 @@ PropertyAttributes JSObject::GetPropertyAttributeWithFailedAccessCheck( } -Object* JSObject::GetLazyProperty(Object* receiver, - LookupResult* result, - String* name, - PropertyAttributes* attributes) { - HandleScope scope; - Handle<Object> this_handle(this); - Handle<Object> receiver_handle(receiver); - Handle<String> name_handle(name); - bool pending_exception; - LoadLazy(Handle<JSObject>(JSObject::cast(result->GetLazyValue())), - &pending_exception); - if (pending_exception) return Failure::Exception(); - return this_handle->GetPropertyWithReceiver(*receiver_handle, - *name_handle, - attributes); -} - - -Object* JSObject::SetLazyProperty(LookupResult* result, - String* name, - Object* value, - PropertyAttributes attributes) { - ASSERT(!IsJSGlobalProxy()); - HandleScope scope; - Handle<JSObject> this_handle(this); - Handle<String> name_handle(name); - Handle<Object> value_handle(value); - bool pending_exception; - LoadLazy(Handle<JSObject>(JSObject::cast(result->GetLazyValue())), - &pending_exception); - if (pending_exception) return Failure::Exception(); - return this_handle->SetProperty(*name_handle, *value_handle, attributes); -} - - -Object* JSObject::DeleteLazyProperty(LookupResult* result, - String* name, - DeleteMode mode) { - HandleScope scope; - Handle<JSObject> this_handle(this); - Handle<String> 
name_handle(name); - bool pending_exception; - LoadLazy(Handle<JSObject>(JSObject::cast(result->GetLazyValue())), - &pending_exception); - if (pending_exception) return Failure::Exception(); - return this_handle->DeleteProperty(*name_handle, mode); -} - - Object* JSObject::GetNormalizedProperty(LookupResult* result) { ASSERT(!HasFastProperties()); Object* value = property_dictionary()->ValueAt(result->GetDictionaryEntry()); @@ -530,12 +481,6 @@ Object* Object::GetProperty(Object* receiver, return Heap::undefined_value(); } *attributes = result->GetAttributes(); - if (!result->IsLoaded()) { - return JSObject::cast(this)->GetLazyProperty(receiver, - result, - name, - attributes); - } Object* value; JSObject* holder = result->holder(); switch (result->type()) { @@ -1786,7 +1731,6 @@ void JSObject::LocalLookupRealNamedProperty(String* name, return; } value = JSGlobalPropertyCell::cast(value)->value(); - ASSERT(result->IsLoaded()); } // Make sure to disallow caching for uninitialized constants // found in the dictionary-mode objects. @@ -1912,9 +1856,6 @@ Object* JSObject::SetProperty(LookupResult* result, // Neither properties nor transitions found. return AddProperty(name, value, attributes); } - if (!result->IsLoaded()) { - return SetLazyProperty(result, name, value, attributes); - } if (result->IsReadOnly() && result->IsProperty()) return value; // This is a real property that is not read-only, or it is a // transition or null descriptor and there are no setters in the prototypes. @@ -1994,9 +1935,7 @@ Object* JSObject::IgnoreAttributesAndSetLocalProperty( // Neither properties nor transitions found. return AddProperty(name, value, attributes); } - if (!result.IsLoaded()) { - return SetLazyProperty(&result, name, value, attributes); - } + PropertyDetails details = PropertyDetails(attributes, NORMAL); // Check of IsReadOnly removed from here in clone. @@ -2514,11 +2453,6 @@ Object* JSObject::DeleteProperty(String* name, DeleteMode mode) { } return DeletePropertyWithInterceptor(name); } - if (!result.IsLoaded()) { - return JSObject::cast(this)->DeleteLazyProperty(&result, - name, - mode); - } // Normalize object if needed. Object* obj = NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0); if (obj->IsFailure()) return obj; diff --git a/deps/v8/src/objects.h b/deps/v8/src/objects.h index d696570201..01977f0929 100644 --- a/deps/v8/src/objects.h +++ b/deps/v8/src/objects.h @@ -1224,12 +1224,6 @@ class JSObject: public HeapObject { // Deletes the named property in a normalized object. Object* DeleteNormalizedProperty(String* name, DeleteMode mode); - // Sets a property that currently has lazy loading. - Object* SetLazyProperty(LookupResult* result, - String* name, - Object* value, - PropertyAttributes attributes); - // Returns the class name ([[Class]] property in the specification). String* class_name(); @@ -1264,13 +1258,6 @@ class JSObject: public HeapObject { Object* GetLocalPropertyPostInterceptor(JSObject* receiver, String* name, PropertyAttributes* attributes); - Object* GetLazyProperty(Object* receiver, - LookupResult* result, - String* name, - PropertyAttributes* attributes); - - // Tells whether this object needs to be loaded. - inline bool IsLoaded(); // Returns true if this is an instance of an api function and has // been modified since it was created. May give false positives. 
@@ -1308,9 +1295,6 @@ class JSObject: public HeapObject { Object* DeleteProperty(String* name, DeleteMode mode); Object* DeleteElement(uint32_t index, DeleteMode mode); - Object* DeleteLazyProperty(LookupResult* result, - String* name, - DeleteMode mode); // Tests for the fast common case for property enumeration. bool IsSimpleEnum(); @@ -2892,20 +2876,6 @@ class Map: public HeapObject { return ((1 << kIsUndetectable) & bit_field()) != 0; } - inline void set_needs_loading(bool value) { - if (value) { - set_bit_field2(bit_field2() | (1 << kNeedsLoading)); - } else { - set_bit_field2(bit_field2() & ~(1 << kNeedsLoading)); - } - } - - // Does this object or function require a lazily loaded script to be - // run before being used? - inline bool needs_loading() { - return ((1 << kNeedsLoading) & bit_field2()) != 0; - } - // Tells whether the instance has a call-as-function handler. inline void set_has_instance_call_handler() { set_bit_field(bit_field() | (1 << kHasInstanceCallHandler)); @@ -3039,8 +3009,7 @@ class Map: public HeapObject { static const int kIsAccessCheckNeeded = 7; // Bit positions for bit field 2 - static const int kNeedsLoading = 0; - static const int kIsExtensible = 1; + static const int kIsExtensible = 0; // Layout of the default cache. It holds alternating name and code objects. static const int kCodeCacheEntrySize = 2; @@ -3204,6 +3173,10 @@ class SharedFunctionInfo: public HeapObject { // [script info]: Script from which the function originates. DECL_ACCESSORS(script, Object) + // [num_literals]: Number of literals used by this function. + inline int num_literals(); + inline void set_num_literals(int value); + // [start_position_and_type]: Field used to store both the source code // position, whether or not the function is a function expression, // and whether or not the function is a toplevel function. The two @@ -3321,8 +3294,9 @@ class SharedFunctionInfo: public HeapObject { static const int kFormalParameterCountOffset = kLengthOffset + kIntSize; static const int kExpectedNofPropertiesOffset = kFormalParameterCountOffset + kIntSize; + static const int kNumLiteralsOffset = kExpectedNofPropertiesOffset + kIntSize; static const int kStartPositionAndTypeOffset = - kExpectedNofPropertiesOffset + kIntSize; + kNumLiteralsOffset + kIntSize; static const int kEndPositionOffset = kStartPositionAndTypeOffset + kIntSize; static const int kFunctionTokenPositionOffset = kEndPositionOffset + kIntSize; static const int kCompilerHintsOffset = @@ -3331,6 +3305,7 @@ class SharedFunctionInfo: public HeapObject { kCompilerHintsOffset + kIntSize; // Total size. static const int kSize = kThisPropertyAssignmentsCountOffset + kIntSize; + static const int kAlignedSize = POINTER_SIZE_ALIGN(kSize); private: // Bit positions in start_position_and_type. diff --git a/deps/v8/src/parser.cc b/deps/v8/src/parser.cc index cff56a3951..b923a7f1c1 100644 --- a/deps/v8/src/parser.cc +++ b/deps/v8/src/parser.cc @@ -1,4 +1,4 @@ -// Copyright 2006-2008 the V8 project authors. All rights reserved. +// Copyright 2010 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -148,6 +148,7 @@ class Parser { ParserLog* log_; bool is_pre_parsing_; ScriptDataImpl* pre_data_; + bool seen_loop_stmt_; // Used for inner loop detection. 
bool inside_with() const { return with_nesting_level_ > 0; } ParserFactory* factory() const { return factory_; } @@ -1205,7 +1206,8 @@ Parser::Parser(Handle<Script> script, factory_(factory), log_(log), is_pre_parsing_(is_pre_parsing == PREPARSE), - pre_data_(pre_data) { + pre_data_(pre_data), + seen_loop_stmt_(false) { } @@ -1962,20 +1964,19 @@ Statement* Parser::ParseNativeDeclaration(bool* ok) { const int literals = fun->NumberOfLiterals(); Handle<Code> code = Handle<Code>(fun->shared()->code()); Handle<Code> construct_stub = Handle<Code>(fun->shared()->construct_stub()); - Handle<JSFunction> boilerplate = - Factory::NewFunctionBoilerplate(name, literals, code); - boilerplate->shared()->set_construct_stub(*construct_stub); + Handle<SharedFunctionInfo> shared = + Factory::NewSharedFunctionInfo(name, literals, code); + shared->set_construct_stub(*construct_stub); // Copy the function data to the boilerplate. - boilerplate->shared()->set_function_data(fun->shared()->function_data()); + shared->set_function_data(fun->shared()->function_data()); int parameters = fun->shared()->formal_parameter_count(); - boilerplate->shared()->set_formal_parameter_count(parameters); + shared->set_formal_parameter_count(parameters); // TODO(1240846): It's weird that native function declarations are // introduced dynamically when we meet their declarations, whereas // other functions are setup when entering the surrounding scope. - FunctionBoilerplateLiteral* lit = - NEW(FunctionBoilerplateLiteral(boilerplate)); + SharedFunctionInfoLiteral* lit = NEW(SharedFunctionInfoLiteral(shared)); VariableProxy* var = Declare(name, Variable::VAR, NULL, true, CHECK_OK); return NEW(ExpressionStatement( new Assignment(Token::INIT_VAR, var, lit, RelocInfo::kNoPosition))); @@ -2644,6 +2645,7 @@ DoWhileStatement* Parser::ParseDoWhileStatement(ZoneStringList* labels, } Expression* cond = ParseExpression(true, CHECK_OK); + if (cond != NULL) cond->set_is_loop_condition(true); Expect(Token::RPAREN, CHECK_OK); // Allow do-statements to be terminated with and without @@ -2653,6 +2655,9 @@ DoWhileStatement* Parser::ParseDoWhileStatement(ZoneStringList* labels, if (peek() == Token::SEMICOLON) Consume(Token::SEMICOLON); if (loop != NULL) loop->Initialize(cond, body); + + seen_loop_stmt_ = true; + return loop; } @@ -2667,10 +2672,14 @@ WhileStatement* Parser::ParseWhileStatement(ZoneStringList* labels, bool* ok) { Expect(Token::WHILE, CHECK_OK); Expect(Token::LPAREN, CHECK_OK); Expression* cond = ParseExpression(true, CHECK_OK); + if (cond != NULL) cond->set_is_loop_condition(true); Expect(Token::RPAREN, CHECK_OK); Statement* body = ParseStatement(NULL, CHECK_OK); if (loop != NULL) loop->Initialize(cond, body); + + seen_loop_stmt_ = true; + return loop; } @@ -2704,6 +2713,9 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) { Block* result = NEW(Block(NULL, 2, false)); result->AddStatement(variable_statement); result->AddStatement(loop); + + seen_loop_stmt_ = true; + // Parsed for-in loop w/ variable/const declaration. return result; } @@ -2733,6 +2745,8 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) { Statement* body = ParseStatement(NULL, CHECK_OK); if (loop) loop->Initialize(expression, enumerable, body); + seen_loop_stmt_ = true; + // Parsed for-in loop. 
return loop; @@ -2752,9 +2766,7 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) { Expression* cond = NULL; if (peek() != Token::SEMICOLON) { cond = ParseExpression(true, CHECK_OK); - if (cond && cond->AsCompareOperation()) { - cond->AsCompareOperation()->set_is_for_loop_condition(); - } + if (cond != NULL) cond->set_is_loop_condition(true); } Expect(Token::SEMICOLON, CHECK_OK); @@ -2765,9 +2777,17 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) { } Expect(Token::RPAREN, CHECK_OK); + seen_loop_stmt_ = false; + Statement* body = ParseStatement(NULL, CHECK_OK); + // Mark this loop if it is an inner loop. + if (loop && !seen_loop_stmt_) loop->set_peel_this_loop(true); + if (loop) loop->Initialize(init, cond, next, body); + + seen_loop_stmt_ = true; + return loop; } @@ -3712,6 +3732,9 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> var_name, // Function :: // '(' FormalParameterList? ')' '{' FunctionBody '}' + // Reset flag used for inner loop detection. + seen_loop_stmt_ = false; + bool is_named = !var_name.is_null(); // The name associated with this function. If it's a function expression, @@ -3822,6 +3845,12 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> var_name, if (!is_pre_parsing_) { function_literal->set_function_token_position(function_token_position); } + + // Set flag for inner loop detection. We treat loops that contain a function + // literal not as inner loops because we avoid duplicating function literals + // when peeling or unrolling such a loop. + seen_loop_stmt_ = true; + return function_literal; } } diff --git a/deps/v8/src/platform-freebsd.cc b/deps/v8/src/platform-freebsd.cc index 3617e8af2b..e5cdd2ee38 100644 --- a/deps/v8/src/platform-freebsd.cc +++ b/deps/v8/src/platform-freebsd.cc @@ -192,7 +192,8 @@ void OS::Abort() { void OS::DebugBreak() { -#if defined(__arm__) || defined(__thumb__) +#if (defined(__arm__) || defined(__thumb__)) && \ + defined(CAN_USE_ARMV5_INSTRUCTIONS) asm("bkpt 0"); #else asm("int $3"); diff --git a/deps/v8/src/platform-linux.cc b/deps/v8/src/platform-linux.cc index f1812ff205..cd7bcb1265 100644 --- a/deps/v8/src/platform-linux.cc +++ b/deps/v8/src/platform-linux.cc @@ -266,7 +266,8 @@ void OS::Abort() { void OS::DebugBreak() { // TODO(lrn): Introduce processor define for runtime system (!= V8_ARCH_x, // which is the architecture of generated code). -#if defined(__arm__) || defined(__thumb__) +#if (defined(__arm__) || defined(__thumb__)) && \ + defined(CAN_USE_ARMV5_INSTRUCTIONS) asm("bkpt 0"); #elif defined(__mips__) asm("break"); diff --git a/deps/v8/src/platform-openbsd.cc b/deps/v8/src/platform-openbsd.cc index 62e6004411..f96e769ca2 100644 --- a/deps/v8/src/platform-openbsd.cc +++ b/deps/v8/src/platform-openbsd.cc @@ -190,7 +190,8 @@ void OS::Abort() { void OS::DebugBreak() { -#if defined(__arm__) || defined(__thumb__) +#if (defined(__arm__) || defined(__thumb__)) && \ + defined(CAN_USE_ARMV5_INSTRUCTIONS) asm("bkpt 0"); #else asm("int $3"); diff --git a/deps/v8/src/platform-win32.cc b/deps/v8/src/platform-win32.cc index 04ffea968c..48f306d4ea 100644 --- a/deps/v8/src/platform-win32.cc +++ b/deps/v8/src/platform-win32.cc @@ -511,7 +511,7 @@ void Time::SetToCurrentTime() { // takes into account whether daylight saving is in effect at the time. // Only times in the 32-bit Unix range may be passed to this function. // Also, adding the time-zone offset to the input must not overflow. -// The function EquivalentTime() in date-delay.js guarantees this. 
+// The function EquivalentTime() in date.js guarantees this. int64_t Time::LocalOffset() { // Initialize timezone information, if needed. TzSet(); diff --git a/deps/v8/src/prettyprinter.cc b/deps/v8/src/prettyprinter.cc index 6e2a60ec8d..75f6fc3cbd 100644 --- a/deps/v8/src/prettyprinter.cc +++ b/deps/v8/src/prettyprinter.cc @@ -227,10 +227,10 @@ void PrettyPrinter::VisitFunctionLiteral(FunctionLiteral* node) { } -void PrettyPrinter::VisitFunctionBoilerplateLiteral( - FunctionBoilerplateLiteral* node) { +void PrettyPrinter::VisitSharedFunctionInfoLiteral( + SharedFunctionInfoLiteral* node) { Print("("); - PrintLiteral(node->boilerplate(), true); + PrintLiteral(node->shared_function_info(), true); Print(")"); } @@ -668,7 +668,8 @@ void AstPrinter::PrintLiteralWithModeIndented(const char* info, Variable* var, Handle<Object> value, StaticType* type, - int num) { + int num, + bool is_primitive) { if (var == NULL) { PrintLiteralIndented(info, value, true); } else { @@ -682,6 +683,8 @@ void AstPrinter::PrintLiteralWithModeIndented(const char* info, if (num != AstNode::kNoNumber) { pos += OS::SNPrintF(buf + pos, ", num = %d", num); } + pos += OS::SNPrintF(buf + pos, + is_primitive ? ", primitive" : ", non-primitive"); OS::SNPrintF(buf + pos, ")"); PrintLiteralIndented(buf.start(), value, true); } @@ -740,7 +743,8 @@ void AstPrinter::PrintParameters(Scope* scope) { PrintLiteralWithModeIndented("VAR", scope->parameter(i), scope->parameter(i)->name(), scope->parameter(i)->type(), - AstNode::kNoNumber); + AstNode::kNoNumber, + false); } } } @@ -786,7 +790,8 @@ void AstPrinter::VisitDeclaration(Declaration* node) { node->proxy()->AsVariable(), node->proxy()->name(), node->proxy()->AsVariable()->type(), - AstNode::kNoNumber); + AstNode::kNoNumber, + node->proxy()->IsPrimitive()); } else { // function declarations PrintIndented("FUNCTION "); @@ -918,10 +923,10 @@ void AstPrinter::VisitFunctionLiteral(FunctionLiteral* node) { } -void AstPrinter::VisitFunctionBoilerplateLiteral( - FunctionBoilerplateLiteral* node) { +void AstPrinter::VisitSharedFunctionInfoLiteral( + SharedFunctionInfoLiteral* node) { IndentedScope indent("FUNC LITERAL"); - PrintLiteralIndented("BOILERPLATE", node->boilerplate(), true); + PrintLiteralIndented("SHARED INFO", node->shared_function_info(), true); } @@ -1022,7 +1027,7 @@ void AstPrinter::VisitSlot(Slot* node) { void AstPrinter::VisitVariableProxy(VariableProxy* node) { PrintLiteralWithModeIndented("VAR PROXY", node->AsVariable(), node->name(), - node->type(), node->num()); + node->type(), node->num(), node->IsPrimitive()); Variable* var = node->var(); if (var != NULL && var->rewrite() != NULL) { IndentedScope indent; @@ -1326,9 +1331,9 @@ void JsonAstBuilder::VisitFunctionLiteral(FunctionLiteral* expr) { } -void JsonAstBuilder::VisitFunctionBoilerplateLiteral( - FunctionBoilerplateLiteral* expr) { - TagScope tag(this, "FunctionBoilerplateLiteral"); +void JsonAstBuilder::VisitSharedFunctionInfoLiteral( + SharedFunctionInfoLiteral* expr) { + TagScope tag(this, "SharedFunctionInfoLiteral"); } diff --git a/deps/v8/src/prettyprinter.h b/deps/v8/src/prettyprinter.h index 8e958c77e2..93ba0d95a8 100644 --- a/deps/v8/src/prettyprinter.h +++ b/deps/v8/src/prettyprinter.h @@ -103,7 +103,8 @@ class AstPrinter: public PrettyPrinter { Variable* var, Handle<Object> value, StaticType* type, - int num); + int num, + bool is_primitive); void PrintLabelsIndented(const char* info, ZoneStringList* labels); void inc_indent() { indent_++; } diff --git a/deps/v8/src/property.h 
b/deps/v8/src/property.h index dc513484be..15a56528d3 100644 --- a/deps/v8/src/property.h +++ b/deps/v8/src/property.h @@ -239,15 +239,6 @@ class LookupResult BASE_EMBEDDED { bool IsCacheable() { return cacheable_; } void DisallowCaching() { cacheable_ = false; } - // Tells whether the value needs to be loaded. - bool IsLoaded() { - if (lookup_type_ == DESCRIPTOR_TYPE || lookup_type_ == DICTIONARY_TYPE) { - Object* target = GetLazyValue(); - return !target->IsJSObject() || JSObject::cast(target)->IsLoaded(); - } - return true; - } - Object* GetLazyValue() { switch (type()) { case FIELD: diff --git a/deps/v8/src/regexp.js b/deps/v8/src/regexp.js new file mode 100644 index 0000000000..dc1b0429f7 --- /dev/null +++ b/deps/v8/src/regexp.js @@ -0,0 +1,528 @@ +// Copyright 2006-2009 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Expect $Object = global.Object; +// Expect $Array = global.Array; + +const $RegExp = global.RegExp; + +// A recursive descent parser for Patterns according to the grammar of +// ECMA-262 15.10.1, with deviations noted below. +function DoConstructRegExp(object, pattern, flags, isConstructorCall) { + // RegExp : Called as constructor; see ECMA-262, section 15.10.4. + if (IS_REGEXP(pattern)) { + if (!IS_UNDEFINED(flags)) { + throw MakeTypeError('regexp_flags', []); + } + flags = (pattern.global ? 'g' : '') + + (pattern.ignoreCase ? 'i' : '') + + (pattern.multiline ? 'm' : ''); + pattern = pattern.source; + } + + pattern = IS_UNDEFINED(pattern) ? '' : ToString(pattern); + flags = IS_UNDEFINED(flags) ? '' : ToString(flags); + + var global = false; + var ignoreCase = false; + var multiline = false; + + for (var i = 0; i < flags.length; i++) { + var c = StringCharAt.call(flags, i); + switch (c) { + case 'g': + // Allow duplicate flags to be consistent with JSC and others. + global = true; + break; + case 'i': + ignoreCase = true; + break; + case 'm': + multiline = true; + break; + default: + // Ignore flags that have no meaning to be consistent with + // JSC. 
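+        // (Illustrative: new $RegExp('a', 'gg') still ends up with global set
+        // to true, and new $RegExp('a', 'x') simply drops the unknown flag
+        // instead of throwing.)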
+        break;
+    }
+  }
+
+  if (isConstructorCall) {
+    // ECMA-262, section 15.10.7.1.
+    %SetProperty(object, 'source', pattern,
+                 DONT_DELETE | READ_ONLY | DONT_ENUM);
+
+    // ECMA-262, section 15.10.7.2.
+    %SetProperty(object, 'global', global, DONT_DELETE | READ_ONLY | DONT_ENUM);
+
+    // ECMA-262, section 15.10.7.3.
+    %SetProperty(object, 'ignoreCase', ignoreCase,
+                 DONT_DELETE | READ_ONLY | DONT_ENUM);
+
+    // ECMA-262, section 15.10.7.4.
+    %SetProperty(object, 'multiline', multiline,
+                 DONT_DELETE | READ_ONLY | DONT_ENUM);
+
+    // ECMA-262, section 15.10.7.5.
+    %SetProperty(object, 'lastIndex', 0, DONT_DELETE | DONT_ENUM);
+  } else { // RegExp is being recompiled via RegExp.prototype.compile.
+    %IgnoreAttributesAndSetProperty(object, 'source', pattern);
+    %IgnoreAttributesAndSetProperty(object, 'global', global);
+    %IgnoreAttributesAndSetProperty(object, 'ignoreCase', ignoreCase);
+    %IgnoreAttributesAndSetProperty(object, 'multiline', multiline);
+    %IgnoreAttributesAndSetProperty(object, 'lastIndex', 0);
+    regExpCache.type = 'none';
+  }
+
+  // Call internal function to compile the pattern.
+  %RegExpCompile(object, pattern, flags);
+}
+
+
+function RegExpConstructor(pattern, flags) {
+  if (%_IsConstructCall()) {
+    DoConstructRegExp(this, pattern, flags, true);
+  } else {
+    // RegExp : Called as function; see ECMA-262, section 15.10.3.1.
+    if (IS_REGEXP(pattern) && IS_UNDEFINED(flags)) {
+      return pattern;
+    }
+    return new $RegExp(pattern, flags);
+  }
+}
+
+
+// Deprecated RegExp.prototype.compile method. We behave as if the constructor
+// had been called again. In SpiderMonkey, this method returns the regexp
+// object. In JSC, it returns undefined. For compatibility with JSC, we match
+// their behavior.
+function CompileRegExp(pattern, flags) {
+  // Both JSC and SpiderMonkey treat a missing pattern argument as the
+  // empty subject string, and an actual undefined value passed as the
+  // pattern as the string 'undefined'. Note that JSC is inconsistent
+  // here, treating undefined values differently in
+  // RegExp.prototype.compile and in the constructor, where they are
+  // the empty string. For compatibility with JSC, we match their
+  // behavior.
+ if (IS_UNDEFINED(pattern) && %_ArgumentsLength() != 0) { + DoConstructRegExp(this, 'undefined', flags, false); + } else { + DoConstructRegExp(this, pattern, flags, false); + } +} + + +function DoRegExpExec(regexp, string, index) { + return %_RegExpExec(regexp, string, index, lastMatchInfo); +} + + +function RegExpCache() { + this.type = 'none'; + this.regExp = 0; + this.subject = 0; + this.replaceString = 0; + this.lastIndex = 0; + this.answer = 0; +} + + +var regExpCache = new RegExpCache(); + + +function CloneRegexpAnswer(array) { + var len = array.length; + var answer = new $Array(len); + for (var i = 0; i < len; i++) { + answer[i] = array[i]; + } + answer.index = array.index; + answer.input = array.input; + return answer; +} + + +function RegExpExec(string) { + if (!IS_REGEXP(this)) { + throw MakeTypeError('incompatible_method_receiver', + ['RegExp.prototype.exec', this]); + } + + var cache = regExpCache; + + if (%_ObjectEquals(cache.type, 'exec') && + %_ObjectEquals(cache.lastIndex, this.lastIndex) && + %_ObjectEquals(cache.regExp, this) && + %_ObjectEquals(cache.subject, string)) { + var last = cache.answer; + if (last == null) { + return last; + } else { + return CloneRegexpAnswer(last); + } + } + + if (%_ArgumentsLength() == 0) { + var regExpInput = LAST_INPUT(lastMatchInfo); + if (IS_UNDEFINED(regExpInput)) { + throw MakeError('no_input_to_regexp', [this]); + } + string = regExpInput; + } + var s; + if (IS_STRING(string)) { + s = string; + } else { + s = ToString(string); + } + var lastIndex = this.lastIndex; + + var i = this.global ? TO_INTEGER(lastIndex) : 0; + + if (i < 0 || i > s.length) { + this.lastIndex = 0; + return null; + } + + %_Log('regexp', 'regexp-exec,%0r,%1S,%2i', [this, s, lastIndex]); + // matchIndices is either null or the lastMatchInfo array. + var matchIndices = %_RegExpExec(this, s, i, lastMatchInfo); + + if (matchIndices == null) { + if (this.global) this.lastIndex = 0; + cache.lastIndex = lastIndex; + cache.regExp = this; + cache.subject = s; + cache.answer = matchIndices; // Null. + cache.type = 'exec'; + return matchIndices; // No match. + } + + var numResults = NUMBER_OF_CAPTURES(lastMatchInfo) >> 1; + var result; + if (numResults === 1) { + var matchStart = lastMatchInfo[CAPTURE(0)]; + var matchEnd = lastMatchInfo[CAPTURE(1)]; + result = [SubString(s, matchStart, matchEnd)]; + } else { + result = new $Array(numResults); + for (var i = 0; i < numResults; i++) { + var matchStart = lastMatchInfo[CAPTURE(i << 1)]; + var matchEnd = lastMatchInfo[CAPTURE((i << 1) + 1)]; + if (matchStart != -1 && matchEnd != -1) { + result[i] = SubString(s, matchStart, matchEnd); + } else { + // Make sure the element is present. Avoid reading the undefined + // property from the global object since this may change. + result[i] = void 0; + } + } + } + + result.index = lastMatchInfo[CAPTURE0]; + result.input = s; + if (this.global) { + this.lastIndex = lastMatchInfo[CAPTURE1]; + return result; + } else { + cache.regExp = this; + cache.subject = s; + cache.lastIndex = lastIndex; + cache.answer = result; + cache.type = 'exec'; + return CloneRegexpAnswer(result); + } +} + + +// Section 15.10.6.3 doesn't actually make sense, but the intention seems to be +// that test is defined in terms of String.prototype.exec. However, it probably +// means the original value of String.prototype.exec, which is what everybody +// else implements. 
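RegExpExec above keeps a single-entry cache keyed on the call type, regexp, subject and lastIndex, and hands back a copy of the cached match array (CloneRegexpAnswer) so a caller that mutates the result cannot poison later hits; RegExpTest below reuses the same cache object with type 'test'. A rough standalone sketch of the idea in plain JavaScript, with hypothetical names and none of the global-flag or lastIndex handling of the real code:

  function cloneAnswer(match) {            // mirrors CloneRegexpAnswer: copy elements plus index/input
    if (match === null) return null;
    var copy = match.slice();
    copy.index = match.index;
    copy.input = match.input;
    return copy;
  }

  function makeCachedExec() {
    var cache = { valid: false, re: null, subject: null, answer: null };
    return function (re, subject) {
      if (cache.valid && cache.re === re && cache.subject === subject) {
        return cloneAnswer(cache.answer);  // hit: no regexp execution at all
      }
      cache.valid = true;
      cache.re = re;
      cache.subject = subject;
      cache.answer = re.exec(subject);
      return cloneAnswer(cache.answer);
    };
  }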
+function RegExpTest(string) { + if (!IS_REGEXP(this)) { + throw MakeTypeError('incompatible_method_receiver', + ['RegExp.prototype.test', this]); + } + if (%_ArgumentsLength() == 0) { + var regExpInput = LAST_INPUT(lastMatchInfo); + if (IS_UNDEFINED(regExpInput)) { + throw MakeError('no_input_to_regexp', [this]); + } + string = regExpInput; + } + var s; + if (IS_STRING(string)) { + s = string; + } else { + s = ToString(string); + } + + var lastIndex = this.lastIndex; + + var cache = regExpCache; + + if (%_ObjectEquals(cache.type, 'test') && + %_ObjectEquals(cache.regExp, this) && + %_ObjectEquals(cache.subject, string) && + %_ObjectEquals(cache.lastIndex, lastIndex)) { + return cache.answer; + } + + var length = s.length; + var i = this.global ? TO_INTEGER(lastIndex) : 0; + + cache.type = 'test'; + cache.regExp = this; + cache.subject = s; + cache.lastIndex = i; + + if (i < 0 || i > s.length) { + this.lastIndex = 0; + cache.answer = false; + return false; + } + + %_Log('regexp', 'regexp-exec,%0r,%1S,%2i', [this, s, lastIndex]); + // matchIndices is either null or the lastMatchInfo array. + var matchIndices = %_RegExpExec(this, s, i, lastMatchInfo); + + if (matchIndices == null) { + if (this.global) this.lastIndex = 0; + cache.answer = false; + return false; + } + + if (this.global) this.lastIndex = lastMatchInfo[CAPTURE1]; + cache.answer = true; + return true; +} + + +function RegExpToString() { + // If this.source is an empty string, output /(?:)/. + // http://bugzilla.mozilla.org/show_bug.cgi?id=225550 + // ecma_2/RegExp/properties-001.js. + var src = this.source ? this.source : '(?:)'; + var result = '/' + src + '/'; + if (this.global) + result += 'g'; + if (this.ignoreCase) + result += 'i'; + if (this.multiline) + result += 'm'; + return result; +} + + +// Getters for the static properties lastMatch, lastParen, leftContext, and +// rightContext of the RegExp constructor. The properties are computed based +// on the captures array of the last successful match and the subject string +// of the last successful match. +function RegExpGetLastMatch() { + if (lastMatchInfoOverride) { return lastMatchInfoOverride[0]; } + var regExpSubject = LAST_SUBJECT(lastMatchInfo); + return SubString(regExpSubject, + lastMatchInfo[CAPTURE0], + lastMatchInfo[CAPTURE1]); +} + + +function RegExpGetLastParen() { + if (lastMatchInfoOverride) { + var override = lastMatchInfoOverride; + if (override.length <= 3) return ''; + return override[override.length - 3]; + } + var length = NUMBER_OF_CAPTURES(lastMatchInfo); + if (length <= 2) return ''; // There were no captures. + // We match the SpiderMonkey behavior: return the substring defined by the + // last pair (after the first pair) of elements of the capture array even if + // it is empty. 
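The getters here, together with leftContext, rightContext and the $1..$9 capture getters further down, are installed on the RegExp constructor itself in SetupRegExp. For illustration, assuming the usual semantics of these non-standard statics:

  /(\d+)-(\d+)/.test('order 12-34');
  RegExp.lastMatch;      // '12-34'   (also reachable as RegExp['$&'])
  RegExp.$1;             // '12'
  RegExp.$2;             // '34'
  RegExp.lastParen;      // '34'      (last capture, also RegExp['$+'])
  RegExp.leftContext;    // 'order '  (also RegExp['$`'])
  RegExp.rightContext;   // ''        (also RegExp["$'"])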
+ var regExpSubject = LAST_SUBJECT(lastMatchInfo); + var start = lastMatchInfo[CAPTURE(length - 2)]; + var end = lastMatchInfo[CAPTURE(length - 1)]; + if (start != -1 && end != -1) { + return SubString(regExpSubject, start, end); + } + return ""; +} + + +function RegExpGetLeftContext() { + var start_index; + var subject; + if (!lastMatchInfoOverride) { + start_index = lastMatchInfo[CAPTURE0]; + subject = LAST_SUBJECT(lastMatchInfo); + } else { + var override = lastMatchInfoOverride; + start_index = override[override.length - 2]; + subject = override[override.length - 1]; + } + return SubString(subject, 0, start_index); +} + + +function RegExpGetRightContext() { + var start_index; + var subject; + if (!lastMatchInfoOverride) { + start_index = lastMatchInfo[CAPTURE1]; + subject = LAST_SUBJECT(lastMatchInfo); + } else { + var override = lastMatchInfoOverride; + subject = override[override.length - 1]; + start_index = override[override.length - 2] + subject.length; + } + return SubString(subject, start_index, subject.length); +} + + +// The properties $1..$9 are the first nine capturing substrings of the last +// successful match, or ''. The function RegExpMakeCaptureGetter will be +// called with indices from 1 to 9. +function RegExpMakeCaptureGetter(n) { + return function() { + if (lastMatchInfoOverride) { + if (n < lastMatchInfoOverride.length - 2) return lastMatchInfoOverride[n]; + return ''; + } + var index = n * 2; + if (index >= NUMBER_OF_CAPTURES(lastMatchInfo)) return ''; + var matchStart = lastMatchInfo[CAPTURE(index)]; + var matchEnd = lastMatchInfo[CAPTURE(index + 1)]; + if (matchStart == -1 || matchEnd == -1) return ''; + return SubString(LAST_SUBJECT(lastMatchInfo), matchStart, matchEnd); + }; +} + + +// Property of the builtins object for recording the result of the last +// regexp match. The property lastMatchInfo includes the matchIndices +// array of the last successful regexp match (an array of start/end index +// pairs for the match and all the captured substrings), the invariant is +// that there are at least two capture indeces. The array also contains +// the subject string for the last successful match. +var lastMatchInfo = [ + 2, // REGEXP_NUMBER_OF_CAPTURES + "", // Last subject. + void 0, // Last input - settable with RegExpSetInput. + 0, // REGEXP_FIRST_CAPTURE + 0 + 0, // REGEXP_FIRST_CAPTURE + 1 +]; + +// Override last match info with an array of actual substrings. +// Used internally by replace regexp with function. +// The array has the format of an "apply" argument for a replacement +// function. +var lastMatchInfoOverride = null; + +// ------------------------------------------------------------------- + +function SetupRegExp() { + %FunctionSetInstanceClassName($RegExp, 'RegExp'); + %FunctionSetPrototype($RegExp, new $Object()); + %SetProperty($RegExp.prototype, 'constructor', $RegExp, DONT_ENUM); + %SetCode($RegExp, RegExpConstructor); + + InstallFunctions($RegExp.prototype, DONT_ENUM, $Array( + "exec", RegExpExec, + "test", RegExpTest, + "toString", RegExpToString, + "compile", CompileRegExp + )); + + // The length of compile is 1 in SpiderMonkey. + %FunctionSetLength($RegExp.prototype.compile, 1); + + // The properties input, $input, and $_ are aliases for each other. When this + // value is set the value it is set to is coerced to a string. + // Getter and setter for the input. + function RegExpGetInput() { + var regExpInput = LAST_INPUT(lastMatchInfo); + return IS_UNDEFINED(regExpInput) ? 
"" : regExpInput; + } + function RegExpSetInput(string) { + regExpCache.type = 'none'; + LAST_INPUT(lastMatchInfo) = ToString(string); + }; + + %DefineAccessor($RegExp, 'input', GETTER, RegExpGetInput, DONT_DELETE); + %DefineAccessor($RegExp, 'input', SETTER, RegExpSetInput, DONT_DELETE); + %DefineAccessor($RegExp, '$_', GETTER, RegExpGetInput, DONT_ENUM | DONT_DELETE); + %DefineAccessor($RegExp, '$_', SETTER, RegExpSetInput, DONT_ENUM | DONT_DELETE); + %DefineAccessor($RegExp, '$input', GETTER, RegExpGetInput, DONT_ENUM | DONT_DELETE); + %DefineAccessor($RegExp, '$input', SETTER, RegExpSetInput, DONT_ENUM | DONT_DELETE); + + // The properties multiline and $* are aliases for each other. When this + // value is set in SpiderMonkey, the value it is set to is coerced to a + // boolean. We mimic that behavior with a slight difference: in SpiderMonkey + // the value of the expression 'RegExp.multiline = null' (for instance) is the + // boolean false (ie, the value after coercion), while in V8 it is the value + // null (ie, the value before coercion). + + // Getter and setter for multiline. + var multiline = false; + function RegExpGetMultiline() { return multiline; }; + function RegExpSetMultiline(flag) { multiline = flag ? true : false; }; + + %DefineAccessor($RegExp, 'multiline', GETTER, RegExpGetMultiline, DONT_DELETE); + %DefineAccessor($RegExp, 'multiline', SETTER, RegExpSetMultiline, DONT_DELETE); + %DefineAccessor($RegExp, '$*', GETTER, RegExpGetMultiline, DONT_ENUM | DONT_DELETE); + %DefineAccessor($RegExp, '$*', SETTER, RegExpSetMultiline, DONT_ENUM | DONT_DELETE); + + + function NoOpSetter(ignored) {} + + + // Static properties set by a successful match. + %DefineAccessor($RegExp, 'lastMatch', GETTER, RegExpGetLastMatch, DONT_DELETE); + %DefineAccessor($RegExp, 'lastMatch', SETTER, NoOpSetter, DONT_DELETE); + %DefineAccessor($RegExp, '$&', GETTER, RegExpGetLastMatch, DONT_ENUM | DONT_DELETE); + %DefineAccessor($RegExp, '$&', SETTER, NoOpSetter, DONT_ENUM | DONT_DELETE); + %DefineAccessor($RegExp, 'lastParen', GETTER, RegExpGetLastParen, DONT_DELETE); + %DefineAccessor($RegExp, 'lastParen', SETTER, NoOpSetter, DONT_DELETE); + %DefineAccessor($RegExp, '$+', GETTER, RegExpGetLastParen, DONT_ENUM | DONT_DELETE); + %DefineAccessor($RegExp, '$+', SETTER, NoOpSetter, DONT_ENUM | DONT_DELETE); + %DefineAccessor($RegExp, 'leftContext', GETTER, RegExpGetLeftContext, DONT_DELETE); + %DefineAccessor($RegExp, 'leftContext', SETTER, NoOpSetter, DONT_DELETE); + %DefineAccessor($RegExp, '$`', GETTER, RegExpGetLeftContext, DONT_ENUM | DONT_DELETE); + %DefineAccessor($RegExp, '$`', SETTER, NoOpSetter, DONT_ENUM | DONT_DELETE); + %DefineAccessor($RegExp, 'rightContext', GETTER, RegExpGetRightContext, DONT_DELETE); + %DefineAccessor($RegExp, 'rightContext', SETTER, NoOpSetter, DONT_DELETE); + %DefineAccessor($RegExp, "$'", GETTER, RegExpGetRightContext, DONT_ENUM | DONT_DELETE); + %DefineAccessor($RegExp, "$'", SETTER, NoOpSetter, DONT_ENUM | DONT_DELETE); + + for (var i = 1; i < 10; ++i) { + %DefineAccessor($RegExp, '$' + i, GETTER, RegExpMakeCaptureGetter(i), DONT_DELETE); + %DefineAccessor($RegExp, '$' + i, SETTER, NoOpSetter, DONT_DELETE); + } +} + + +SetupRegExp(); diff --git a/deps/v8/src/register-allocator-inl.h b/deps/v8/src/register-allocator-inl.h index 8453104ee2..e0ea9e1894 100644 --- a/deps/v8/src/register-allocator-inl.h +++ b/deps/v8/src/register-allocator-inl.h @@ -104,42 +104,36 @@ void RegisterAllocator::Unuse(Register reg) { } -NumberInfo Result::number_info() const { +TypeInfo 
Result::type_info() const { ASSERT(is_valid()); - if (!is_constant()) { - return NumberInfo::FromInt(NumberInfoField::decode(value_)); - } - Handle<Object> value = handle(); - if (value->IsSmi()) return NumberInfo::Smi(); - if (value->IsHeapNumber()) return NumberInfo::HeapNumber(); - return NumberInfo::Unknown(); + return TypeInfo::FromInt(TypeInfoField::decode(value_)); } -void Result::set_number_info(NumberInfo info) { +void Result::set_type_info(TypeInfo info) { ASSERT(is_valid()); - value_ &= ~NumberInfoField::mask(); - value_ |= NumberInfoField::encode(info.ToInt()); + value_ &= ~TypeInfoField::mask(); + value_ |= TypeInfoField::encode(info.ToInt()); } bool Result::is_number() const { - return number_info().IsNumber(); + return type_info().IsNumber(); } bool Result::is_smi() const { - return number_info().IsSmi(); + return type_info().IsSmi(); } bool Result::is_integer32() const { - return number_info().IsInteger32(); + return type_info().IsInteger32(); } -bool Result::is_heap_number() const { - return number_info().IsHeapNumber(); +bool Result::is_double() const { + return type_info().IsDouble(); } } } // namespace v8::internal diff --git a/deps/v8/src/register-allocator.cc b/deps/v8/src/register-allocator.cc index 64e4428792..b9989a5dde 100644 --- a/deps/v8/src/register-allocator.cc +++ b/deps/v8/src/register-allocator.cc @@ -38,11 +38,11 @@ namespace internal { // Result implementation. -Result::Result(Register reg, NumberInfo info) { +Result::Result(Register reg, TypeInfo info) { ASSERT(reg.is_valid() && !RegisterAllocator::IsReserved(reg)); CodeGeneratorScope::Current()->allocator()->Use(reg); value_ = TypeField::encode(REGISTER) - | NumberInfoField::encode(info.ToInt()) + | TypeInfoField::encode(info.ToInt()) | DataField::encode(reg.code_); } diff --git a/deps/v8/src/register-allocator.h b/deps/v8/src/register-allocator.h index 8bc8edeae3..0fbc83b821 100644 --- a/deps/v8/src/register-allocator.h +++ b/deps/v8/src/register-allocator.h @@ -29,7 +29,7 @@ #define V8_REGISTER_ALLOCATOR_H_ #include "macro-assembler.h" -#include "number-info.h" +#include "type-info-inl.h" #if V8_TARGET_ARCH_IA32 #include "ia32/register-allocator-ia32.h" @@ -65,12 +65,13 @@ class Result BASE_EMBEDDED { Result() { invalidate(); } // Construct a register Result. - explicit Result(Register reg, NumberInfo info = NumberInfo::Unknown()); + explicit Result(Register reg, TypeInfo info = TypeInfo::Unknown()); // Construct a Result whose value is a compile-time constant. explicit Result(Handle<Object> value) { + TypeInfo info = TypeInfo::TypeFromValue(value); value_ = TypeField::encode(CONSTANT) - | NumberInfoField::encode(NumberInfo::Uninitialized().ToInt()) + | TypeInfoField::encode(info.ToInt()) | IsUntaggedInt32Field::encode(false) | DataField::encode(ConstantList()->length()); ConstantList()->Add(value); @@ -102,12 +103,12 @@ class Result BASE_EMBEDDED { void invalidate() { value_ = TypeField::encode(INVALID); } - inline NumberInfo number_info() const; - inline void set_number_info(NumberInfo info); + inline TypeInfo type_info() const; + inline void set_type_info(TypeInfo info); inline bool is_number() const; inline bool is_smi() const; inline bool is_integer32() const; - inline bool is_heap_number() const; + inline bool is_double() const; bool is_valid() const { return type() != INVALID; } bool is_register() const { return type() == REGISTER; } @@ -152,10 +153,11 @@ class Result BASE_EMBEDDED { private: uint32_t value_; + // Declare BitFields with template parameters <type, start, size>. 
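Result stores everything in a single 32-bit value_ carved up by the BitField declarations immediately below; widening the type-info field from four bits to six is what pushes the untagged-int32 flag from bit 6 to bit 8 and the data field from bit 7 to bit 9. A quick sketch of the packing arithmetic (plain JavaScript, hypothetical helper names, widths copied from the declarations, 32-bit arithmetic assumed):

  var TYPE_BITS = 2, TYPE_INFO_BITS = 6;            // TypeInfoField was 4 bits before this change
  var TYPE_INFO_SHIFT = TYPE_BITS;                  // 2
  var UNTAGGED_SHIFT = TYPE_BITS + TYPE_INFO_BITS;  // 8 (previously 6)
  var DATA_SHIFT = UNTAGGED_SHIFT + 1;              // 9 (previously 7)

  function encode(type, typeInfo, untagged, data) {
    return type | (typeInfo << TYPE_INFO_SHIFT) |
           ((untagged ? 1 : 0) << UNTAGGED_SHIFT) | (data << DATA_SHIFT);
  }
  function decodeTypeInfo(value) {
    return (value >>> TYPE_INFO_SHIFT) & ((1 << TYPE_INFO_BITS) - 1);
  }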
class TypeField: public BitField<Type, 0, 2> {}; - class NumberInfoField : public BitField<int, 2, 4> {}; - class IsUntaggedInt32Field : public BitField<bool, 6, 1> {}; - class DataField: public BitField<uint32_t, 7, 32 - 7> {}; + class TypeInfoField : public BitField<int, 2, 6> {}; + class IsUntaggedInt32Field : public BitField<bool, 8, 1> {}; + class DataField: public BitField<uint32_t, 9, 32 - 9> {}; inline void CopyTo(Result* destination) const; diff --git a/deps/v8/src/rewriter.cc b/deps/v8/src/rewriter.cc index e87fccec1a..c97408e0d4 100644 --- a/deps/v8/src/rewriter.cc +++ b/deps/v8/src/rewriter.cc @@ -213,8 +213,8 @@ void AstOptimizer::VisitFunctionLiteral(FunctionLiteral* node) { } -void AstOptimizer::VisitFunctionBoilerplateLiteral( - FunctionBoilerplateLiteral* node) { +void AstOptimizer::VisitSharedFunctionInfoLiteral( + SharedFunctionInfoLiteral* node) { USE(node); } @@ -804,8 +804,8 @@ void Processor::VisitFunctionLiteral(FunctionLiteral* node) { } -void Processor::VisitFunctionBoilerplateLiteral( - FunctionBoilerplateLiteral* node) { +void Processor::VisitSharedFunctionInfoLiteral( + SharedFunctionInfoLiteral* node) { USE(node); UNREACHABLE(); } diff --git a/deps/v8/src/runtime.cc b/deps/v8/src/runtime.cc index 0fe24579b5..c77d518371 100644 --- a/deps/v8/src/runtime.cc +++ b/deps/v8/src/runtime.cc @@ -788,9 +788,10 @@ static Object* Runtime_DeclareGlobals(Arguments args) { } } else { // Copy the function and update its context. Use it as value. - Handle<JSFunction> boilerplate = Handle<JSFunction>::cast(value); + Handle<SharedFunctionInfo> shared = + Handle<SharedFunctionInfo>::cast(value); Handle<JSFunction> function = - Factory::NewFunctionFromBoilerplate(boilerplate, context, TENURED); + Factory::NewFunctionFromSharedFunctionInfo(shared, context, TENURED); value = function; } @@ -1239,9 +1240,9 @@ static Object* Runtime_FinishArrayPrototypeSetup(Arguments args) { static void SetCustomCallGenerator(Handle<JSFunction> function, - CustomCallGenerator generator) { + ExternalReference* generator) { if (function->shared()->function_data()->IsUndefined()) { - function->shared()->set_function_data(*FromCData(generator)); + function->shared()->set_function_data(*FromCData(generator->address())); } } @@ -1249,7 +1250,7 @@ static void SetCustomCallGenerator(Handle<JSFunction> function, static Handle<JSFunction> InstallBuiltin(Handle<JSObject> holder, const char* name, Builtins::Name builtin_name, - CustomCallGenerator generator = NULL) { + ExternalReference* generator = NULL) { Handle<String> key = Factory::LookupAsciiSymbol(name); Handle<Code> code(Builtins::builtin(builtin_name)); Handle<JSFunction> optimized = Factory::NewFunction(key, @@ -1266,22 +1267,22 @@ static Handle<JSFunction> InstallBuiltin(Handle<JSObject> holder, } -static Object* CompileArrayPushCall(CallStubCompiler* compiler, - Object* object, - JSObject* holder, - JSFunction* function, - String* name, - StubCompiler::CheckType check) { +Object* CompileArrayPushCall(CallStubCompiler* compiler, + Object* object, + JSObject* holder, + JSFunction* function, + String* name, + StubCompiler::CheckType check) { return compiler->CompileArrayPushCall(object, holder, function, name, check); } -static Object* CompileArrayPopCall(CallStubCompiler* compiler, - Object* object, - JSObject* holder, - JSFunction* function, - String* name, - StubCompiler::CheckType check) { +Object* CompileArrayPopCall(CallStubCompiler* compiler, + Object* object, + JSObject* holder, + JSFunction* function, + String* name, + StubCompiler::CheckType 
check) { return compiler->CompileArrayPopCall(object, holder, function, name, check); } @@ -1291,8 +1292,11 @@ static Object* Runtime_SpecialArrayFunctions(Arguments args) { ASSERT(args.length() == 1); CONVERT_ARG_CHECKED(JSObject, holder, 0); - InstallBuiltin(holder, "pop", Builtins::ArrayPop, CompileArrayPopCall); - InstallBuiltin(holder, "push", Builtins::ArrayPush, CompileArrayPushCall); + ExternalReference pop = ExternalReference::compile_array_pop_call(); + ExternalReference push = ExternalReference::compile_array_push_call(); + + InstallBuiltin(holder, "pop", Builtins::ArrayPop, &pop); + InstallBuiltin(holder, "push", Builtins::ArrayPush, &push); InstallBuiltin(holder, "shift", Builtins::ArrayShift); InstallBuiltin(holder, "unshift", Builtins::ArrayUnshift); InstallBuiltin(holder, "slice", Builtins::ArraySlice); @@ -1563,9 +1567,91 @@ static Object* Runtime_CharFromCode(Arguments args) { return CharFromCode(args[0]); } + +class FixedArrayBuilder { + public: + explicit FixedArrayBuilder(int initial_capacity) + : array_(Factory::NewFixedArrayWithHoles(initial_capacity)), + length_(0) { + // Require a non-zero initial size. Ensures that doubling the size to + // extend the array will work. + ASSERT(initial_capacity > 0); + } + + explicit FixedArrayBuilder(Handle<FixedArray> backing_store) + : array_(backing_store), + length_(0) { + // Require a non-zero initial size. Ensures that doubling the size to + // extend the array will work. + ASSERT(backing_store->length() > 0); + } + + bool HasCapacity(int elements) { + int length = array_->length(); + int required_length = length_ + elements; + return (length >= required_length); + } + + void EnsureCapacity(int elements) { + int length = array_->length(); + int required_length = length_ + elements; + if (length < required_length) { + int new_length = length; + do { + new_length *= 2; + } while (new_length < required_length); + Handle<FixedArray> extended_array = + Factory::NewFixedArrayWithHoles(new_length); + array_->CopyTo(0, *extended_array, 0, length_); + array_ = extended_array; + } + } + + void Add(Object* value) { + ASSERT(length_ < capacity()); + array_->set(length_, value); + length_++; + } + + void Add(Smi* value) { + ASSERT(length_ < capacity()); + array_->set(length_, value); + length_++; + } + + Handle<FixedArray> array() { + return array_; + } + + int length() { + return length_; + } + + int capacity() { + return array_->length(); + } + + Handle<JSArray> ToJSArray() { + Handle<JSArray> result_array = Factory::NewJSArrayWithElements(array_); + result_array->set_length(Smi::FromInt(length_)); + return result_array; + } + + Handle<JSArray> ToJSArray(Handle<JSArray> target_array) { + target_array->set_elements(*array_); + target_array->set_length(Smi::FromInt(length_)); + return target_array; + } + + private: + Handle<FixedArray> array_; + int length_; +}; + + // Forward declarations. 
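FixedArrayBuilder above grows its backing store by doubling whenever EnsureCapacity falls short, and can hand the accumulated elements back either as a fresh JSArray or by installing them into an existing one (the two ToJSArray overloads). The growth strategy, sketched standalone in plain JavaScript with hypothetical names:

  function ArrayBuilder(initialCapacity) {
    this.store = new Array(initialCapacity);   // capacity must be non-zero so doubling can work
    this.length = 0;
  }
  ArrayBuilder.prototype.ensureCapacity = function (elements) {
    var required = this.length + elements;
    if (this.store.length < required) {
      var newLength = this.store.length;
      do { newLength *= 2; } while (newLength < required);
      this.store.length = newLength;           // stands in for allocating a larger FixedArray and copying
    }
  };
  ArrayBuilder.prototype.add = function (value) {
    this.store[this.length++] = value;
  };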
-static const int kStringBuilderConcatHelperLengthBits = 11; -static const int kStringBuilderConcatHelperPositionBits = 19; +const int kStringBuilderConcatHelperLengthBits = 11; +const int kStringBuilderConcatHelperPositionBits = 19; template <typename schar> static inline void StringBuilderConcatHelper(String*, @@ -1573,15 +1659,19 @@ static inline void StringBuilderConcatHelper(String*, FixedArray*, int); -typedef BitField<int, 0, 11> StringBuilderSubstringLength; -typedef BitField<int, 11, 19> StringBuilderSubstringPosition; +typedef BitField<int, 0, kStringBuilderConcatHelperLengthBits> + StringBuilderSubstringLength; +typedef BitField<int, + kStringBuilderConcatHelperLengthBits, + kStringBuilderConcatHelperPositionBits> + StringBuilderSubstringPosition; + class ReplacementStringBuilder { public: ReplacementStringBuilder(Handle<String> subject, int estimated_part_count) - : subject_(subject), - parts_(Factory::NewFixedArray(estimated_part_count)), - part_count_(0), + : array_builder_(estimated_part_count), + subject_(subject), character_count_(0), is_ascii_(subject->IsAsciiRepresentation()) { // Require a non-zero initial size. Ensures that doubling the size to @@ -1589,38 +1679,35 @@ class ReplacementStringBuilder { ASSERT(estimated_part_count > 0); } - void EnsureCapacity(int elements) { - int length = parts_->length(); - int required_length = part_count_ + elements; - if (length < required_length) { - int new_length = length; - do { - new_length *= 2; - } while (new_length < required_length); - Handle<FixedArray> extended_array = - Factory::NewFixedArray(new_length); - parts_->CopyTo(0, *extended_array, 0, part_count_); - parts_ = extended_array; - } - } - - void AddSubjectSlice(int from, int to) { + static inline void AddSubjectSlice(FixedArrayBuilder* builder, + int from, + int to) { ASSERT(from >= 0); int length = to - from; ASSERT(length > 0); - // Can we encode the slice in 11 bits for length and 19 bits for - // start position - as used by StringBuilderConcatHelper? if (StringBuilderSubstringLength::is_valid(length) && StringBuilderSubstringPosition::is_valid(from)) { int encoded_slice = StringBuilderSubstringLength::encode(length) | StringBuilderSubstringPosition::encode(from); - AddElement(Smi::FromInt(encoded_slice)); + builder->Add(Smi::FromInt(encoded_slice)); } else { // Otherwise encode as two smis. - AddElement(Smi::FromInt(-length)); - AddElement(Smi::FromInt(from)); + builder->Add(Smi::FromInt(-length)); + builder->Add(Smi::FromInt(from)); } - IncrementCharacterCount(length); + } + + + void EnsureCapacity(int elements) { + array_builder_.EnsureCapacity(elements); + } + + + void AddSubjectSlice(int from, int to) { + AddSubjectSlice(&array_builder_, from, to); + // Can we encode the slice in 11 bits for length and 19 bits for + // start position - as used by StringBuilderConcatHelper? + IncrementCharacterCount(to - from); } @@ -1636,7 +1723,7 @@ class ReplacementStringBuilder { Handle<String> ToString() { - if (part_count_ == 0) { + if (array_builder_.length() == 0) { return Factory::empty_string(); } @@ -1648,8 +1735,8 @@ class ReplacementStringBuilder { char* char_buffer = seq->GetChars(); StringBuilderConcatHelper(*subject_, char_buffer, - *parts_, - part_count_); + *array_builder_.array(), + array_builder_.length()); } else { // Non-ASCII. 
joined_string = NewRawTwoByteString(character_count_); @@ -1658,8 +1745,8 @@ class ReplacementStringBuilder { uc16* char_buffer = seq->GetChars(); StringBuilderConcatHelper(*subject_, char_buffer, - *parts_, - part_count_); + *array_builder_.array(), + array_builder_.length()); } return joined_string; } @@ -1672,8 +1759,14 @@ class ReplacementStringBuilder { character_count_ += by; } - private: + Handle<JSArray> GetParts() { + Handle<JSArray> result = + Factory::NewJSArrayWithElements(array_builder_.array()); + result->set_length(Smi::FromInt(array_builder_.length())); + return result; + } + private: Handle<String> NewRawAsciiString(int size) { CALL_HEAP_FUNCTION(Heap::AllocateRawAsciiString(size), String); } @@ -1686,14 +1779,12 @@ class ReplacementStringBuilder { void AddElement(Object* element) { ASSERT(element->IsSmi() || element->IsString()); - ASSERT(parts_->length() > part_count_); - parts_->set(part_count_, element); - part_count_++; + ASSERT(array_builder_.capacity() > array_builder_.length()); + array_builder_.Add(element); } + FixedArrayBuilder array_builder_; Handle<String> subject_; - Handle<FixedArray> parts_; - int part_count_; int character_count_; bool is_ascii_; }; @@ -2101,7 +2192,6 @@ static Object* Runtime_StringReplaceRegExpWithString(Arguments args) { } - // Cap on the maximal shift in the Boyer-Moore implementation. By setting a // limit, we can fix the size of tables. static const int kBMMaxShift = 0xff; @@ -2865,6 +2955,468 @@ static Object* Runtime_StringMatch(Arguments args) { } +// Two smis before and after the match, for very long strings. +const int kMaxBuilderEntriesPerRegExpMatch = 5; + + +static void SetLastMatchInfoNoCaptures(Handle<String> subject, + Handle<JSArray> last_match_info, + int match_start, + int match_end) { + // Fill last_match_info with a single capture. + last_match_info->EnsureSize(2 + RegExpImpl::kLastMatchOverhead); + AssertNoAllocation no_gc; + FixedArray* elements = FixedArray::cast(last_match_info->elements()); + RegExpImpl::SetLastCaptureCount(elements, 2); + RegExpImpl::SetLastInput(elements, *subject); + RegExpImpl::SetLastSubject(elements, *subject); + RegExpImpl::SetCapture(elements, 0, match_start); + RegExpImpl::SetCapture(elements, 1, match_end); +} + + +template <typename schar> +static bool SearchCharMultiple(Vector<schar> subject, + String* pattern, + schar pattern_char, + FixedArrayBuilder* builder, + int* match_pos) { + // Position of last match. + int pos = *match_pos; + int subject_length = subject.length(); + while (pos < subject_length) { + int match_end = pos + 1; + if (!builder->HasCapacity(kMaxBuilderEntriesPerRegExpMatch)) { + *match_pos = pos; + return false; + } + int new_pos = SingleCharIndexOf(subject, pattern_char, match_end); + if (new_pos >= 0) { + // Match has been found. + if (new_pos > match_end) { + ReplacementStringBuilder::AddSubjectSlice(builder, match_end, new_pos); + } + pos = new_pos; + builder->Add(pattern); + } else { + break; + } + } + if (pos + 1 < subject_length) { + ReplacementStringBuilder::AddSubjectSlice(builder, pos + 1, subject_length); + } + *match_pos = pos; + return true; +} + + +static bool SearchCharMultiple(Handle<String> subject, + Handle<String> pattern, + Handle<JSArray> last_match_info, + FixedArrayBuilder* builder) { + ASSERT(subject->IsFlat()); + ASSERT_EQ(1, pattern->length()); + uc16 pattern_char = pattern->Get(0); + // Treating position before first as initial "previous match position". + int match_pos = -1; + + for (;;) { // Break when search complete. 
+ builder->EnsureCapacity(kMaxBuilderEntriesPerRegExpMatch); + AssertNoAllocation no_gc; + if (subject->IsAsciiRepresentation()) { + if (pattern_char > String::kMaxAsciiCharCode) { + break; + } + Vector<const char> subject_vector = subject->ToAsciiVector(); + char pattern_ascii_char = static_cast<char>(pattern_char); + bool complete = SearchCharMultiple<const char>(subject_vector, + *pattern, + pattern_ascii_char, + builder, + &match_pos); + if (complete) break; + } else { + Vector<const uc16> subject_vector = subject->ToUC16Vector(); + bool complete = SearchCharMultiple<const uc16>(subject_vector, + *pattern, + pattern_char, + builder, + &match_pos); + if (complete) break; + } + } + + if (match_pos >= 0) { + SetLastMatchInfoNoCaptures(subject, + last_match_info, + match_pos, + match_pos + 1); + return true; + } + return false; // No matches at all. +} + + +template <typename schar, typename pchar> +static bool SearchStringMultiple(Vector<schar> subject, + String* pattern, + Vector<pchar> pattern_string, + FixedArrayBuilder* builder, + int* match_pos) { + int pos = *match_pos; + int subject_length = subject.length(); + int pattern_length = pattern_string.length(); + int max_search_start = subject_length - pattern_length; + bool is_ascii = (sizeof(schar) == 1); + StringSearchStrategy strategy = + InitializeStringSearch(pattern_string, is_ascii); + switch (strategy) { + case SEARCH_FAIL: return false; + case SEARCH_SHORT: + while (pos <= max_search_start) { + if (!builder->HasCapacity(kMaxBuilderEntriesPerRegExpMatch)) { + *match_pos = pos; + return false; + } + // Position of end of previous match. + int match_end = pos + pattern_length; + int new_pos = SimpleIndexOf(subject, pattern_string, match_end); + if (new_pos >= 0) { + // A match. + if (new_pos > match_end) { + ReplacementStringBuilder::AddSubjectSlice(builder, + match_end, + new_pos); + } + pos = new_pos; + builder->Add(pattern); + } else { + break; + } + } + break; + case SEARCH_LONG: + while (pos <= max_search_start) { + if (!builder->HasCapacity(kMaxBuilderEntriesPerRegExpMatch)) { + *match_pos = pos; + return false; + } + int new_pos = ComplexIndexOf(subject, + pattern_string, + pos + pattern_length); + if (new_pos >= 0) { + // A match has been found. + if (new_pos > pos) { + ReplacementStringBuilder::AddSubjectSlice(builder, pos, new_pos); + } + pos = new_pos; + builder->Add(pattern); + } else { + break; + } + } + break; + } + if (pos < max_search_start) { + ReplacementStringBuilder::AddSubjectSlice(builder, + pos + pattern_length, + subject_length); + } + *match_pos = pos; + return true; +} + + +static bool SearchStringMultiple(Handle<String> subject, + Handle<String> pattern, + Handle<JSArray> last_match_info, + FixedArrayBuilder* builder) { + ASSERT(subject->IsFlat()); + ASSERT(pattern->IsFlat()); + ASSERT(pattern->length() > 1); + + // Treating as if a previous match was before first character. + int match_pos = -pattern->length(); + + for (;;) { // Break when search complete. 
+ builder->EnsureCapacity(kMaxBuilderEntriesPerRegExpMatch); + AssertNoAllocation no_gc; + if (subject->IsAsciiRepresentation()) { + Vector<const char> subject_vector = subject->ToAsciiVector(); + if (pattern->IsAsciiRepresentation()) { + if (SearchStringMultiple(subject_vector, + *pattern, + pattern->ToAsciiVector(), + builder, + &match_pos)) break; + } else { + if (SearchStringMultiple(subject_vector, + *pattern, + pattern->ToUC16Vector(), + builder, + &match_pos)) break; + } + } else { + Vector<const uc16> subject_vector = subject->ToUC16Vector(); + if (pattern->IsAsciiRepresentation()) { + if (SearchStringMultiple(subject_vector, + *pattern, + pattern->ToAsciiVector(), + builder, + &match_pos)) break; + } else { + if (SearchStringMultiple(subject_vector, + *pattern, + pattern->ToUC16Vector(), + builder, + &match_pos)) break; + } + } + } + + if (match_pos >= 0) { + SetLastMatchInfoNoCaptures(subject, + last_match_info, + match_pos, + match_pos + pattern->length()); + return true; + } + return false; // No matches at all. +} + + +static RegExpImpl::IrregexpResult SearchRegExpNoCaptureMultiple( + Handle<String> subject, + Handle<JSRegExp> regexp, + Handle<JSArray> last_match_array, + FixedArrayBuilder* builder) { + ASSERT(subject->IsFlat()); + int match_start = -1; + int match_end = 0; + int pos = 0; + int required_registers = RegExpImpl::IrregexpPrepare(regexp, subject); + if (required_registers < 0) return RegExpImpl::RE_EXCEPTION; + + OffsetsVector registers(required_registers); + Vector<int> register_vector(registers.vector(), registers.length()); + int subject_length = subject->length(); + + for (;;) { // Break on failure, return on exception. + RegExpImpl::IrregexpResult result = + RegExpImpl::IrregexpExecOnce(regexp, + subject, + pos, + register_vector); + if (result == RegExpImpl::RE_SUCCESS) { + match_start = register_vector[0]; + builder->EnsureCapacity(kMaxBuilderEntriesPerRegExpMatch); + if (match_end < match_start) { + ReplacementStringBuilder::AddSubjectSlice(builder, + match_end, + match_start); + } + match_end = register_vector[1]; + HandleScope loop_scope; + builder->Add(*Factory::NewSubString(subject, match_start, match_end)); + if (match_start != match_end) { + pos = match_end; + } else { + pos = match_end + 1; + if (pos > subject_length) break; + } + } else if (result == RegExpImpl::RE_FAILURE) { + break; + } else { + ASSERT_EQ(result, RegExpImpl::RE_EXCEPTION); + return result; + } + } + + if (match_start >= 0) { + if (match_end < subject_length) { + ReplacementStringBuilder::AddSubjectSlice(builder, + match_end, + subject_length); + } + SetLastMatchInfoNoCaptures(subject, + last_match_array, + match_start, + match_end); + return RegExpImpl::RE_SUCCESS; + } else { + return RegExpImpl::RE_FAILURE; // No matches at all. + } +} + + +static RegExpImpl::IrregexpResult SearchRegExpMultiple( + Handle<String> subject, + Handle<JSRegExp> regexp, + Handle<JSArray> last_match_array, + FixedArrayBuilder* builder) { + + ASSERT(subject->IsFlat()); + int required_registers = RegExpImpl::IrregexpPrepare(regexp, subject); + if (required_registers < 0) return RegExpImpl::RE_EXCEPTION; + + OffsetsVector registers(required_registers); + Vector<int> register_vector(registers.vector(), registers.length()); + + RegExpImpl::IrregexpResult result = + RegExpImpl::IrregexpExecOnce(regexp, + subject, + 0, + register_vector); + + int capture_count = regexp->CaptureCount(); + int subject_length = subject->length(); + + // Position to search from. + int pos = 0; + // End of previous match. 
Differs from pos if match was empty. + int match_end = 0; + if (result == RegExpImpl::RE_SUCCESS) { + // Need to keep a copy of the previous match for creating last_match_info + // at the end, so we have two vectors that we swap between. + OffsetsVector registers2(required_registers); + Vector<int> prev_register_vector(registers2.vector(), registers2.length()); + + do { + int match_start = register_vector[0]; + builder->EnsureCapacity(kMaxBuilderEntriesPerRegExpMatch); + if (match_end < match_start) { + ReplacementStringBuilder::AddSubjectSlice(builder, + match_end, + match_start); + } + match_end = register_vector[1]; + + { + // Avoid accumulating new handles inside loop. + HandleScope temp_scope; + // Arguments array to replace function is match, captures, index and + // subject, i.e., 3 + capture count in total. + Handle<FixedArray> elements = Factory::NewFixedArray(3 + capture_count); + elements->set(0, *Factory::NewSubString(subject, + match_start, + match_end)); + for (int i = 1; i <= capture_count; i++) { + Handle<String> substring = + Factory::NewSubString(subject, + register_vector[i * 2], + register_vector[i * 2 + 1]); + elements->set(i, *substring); + } + elements->set(capture_count + 1, Smi::FromInt(match_start)); + elements->set(capture_count + 2, *subject); + builder->Add(*Factory::NewJSArrayWithElements(elements)); + } + // Swap register vectors, so the last successful match is in + // prev_register_vector. + Vector<int> tmp = prev_register_vector; + prev_register_vector = register_vector; + register_vector = tmp; + + if (match_end > match_start) { + pos = match_end; + } else { + pos = match_end + 1; + if (pos > subject_length) { + break; + } + } + + result = RegExpImpl::IrregexpExecOnce(regexp, + subject, + pos, + register_vector); + } while (result == RegExpImpl::RE_SUCCESS); + + if (result != RegExpImpl::RE_EXCEPTION) { + // Finished matching, with at least one match. + if (match_end < subject_length) { + ReplacementStringBuilder::AddSubjectSlice(builder, + match_end, + subject_length); + } + + int last_match_capture_count = (capture_count + 1) * 2; + int last_match_array_size = + last_match_capture_count + RegExpImpl::kLastMatchOverhead; + last_match_array->EnsureSize(last_match_array_size); + AssertNoAllocation no_gc; + FixedArray* elements = FixedArray::cast(last_match_array->elements()); + RegExpImpl::SetLastCaptureCount(elements, last_match_capture_count); + RegExpImpl::SetLastSubject(elements, *subject); + RegExpImpl::SetLastInput(elements, *subject); + for (int i = 0; i < last_match_capture_count; i++) { + RegExpImpl::SetCapture(elements, i, prev_register_vector[i]); + } + return RegExpImpl::RE_SUCCESS; + } + } + // No matches at all, return failure or exception result directly. 
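SearchRegExpMultiple packages every match as [match, captures..., index, subject], the same shape as the argument list a replacement function receives, and, like the other multi-match loops here, advances the search position by one whenever a match is empty so the scan always makes progress. The control flow, reduced to plain JavaScript with a hypothetical helper name:

  function collectMatches(re, subject) {       // assumes re has the global flag set
    var results = [];
    var m;
    re.lastIndex = 0;
    while ((m = re.exec(subject)) !== null) {
      results.push([m[0]].concat(m.slice(1), m.index, subject));
      if (m[0] === '') re.lastIndex++;         // empty match: step past it instead of looping forever
    }
    return results;
  }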
+ return result; +} + + +static Object* Runtime_RegExpExecMultiple(Arguments args) { + ASSERT(args.length() == 4); + HandleScope handles; + + CONVERT_ARG_CHECKED(String, subject, 1); + if (!subject->IsFlat()) { FlattenString(subject); } + CONVERT_ARG_CHECKED(JSRegExp, regexp, 0); + CONVERT_ARG_CHECKED(JSArray, last_match_info, 2); + CONVERT_ARG_CHECKED(JSArray, result_array, 3); + + ASSERT(last_match_info->HasFastElements()); + ASSERT(regexp->GetFlags().is_global()); + Handle<FixedArray> result_elements; + if (result_array->HasFastElements()) { + result_elements = + Handle<FixedArray>(FixedArray::cast(result_array->elements())); + } else { + result_elements = Factory::NewFixedArrayWithHoles(16); + } + FixedArrayBuilder builder(result_elements); + + if (regexp->TypeTag() == JSRegExp::ATOM) { + Handle<String> pattern( + String::cast(regexp->DataAt(JSRegExp::kAtomPatternIndex))); + int pattern_length = pattern->length(); + if (pattern_length == 1) { + if (SearchCharMultiple(subject, pattern, last_match_info, &builder)) { + return *builder.ToJSArray(result_array); + } + return Heap::null_value(); + } + + if (!pattern->IsFlat()) FlattenString(pattern); + if (SearchStringMultiple(subject, pattern, last_match_info, &builder)) { + return *builder.ToJSArray(result_array); + } + return Heap::null_value(); + } + + ASSERT_EQ(regexp->TypeTag(), JSRegExp::IRREGEXP); + + RegExpImpl::IrregexpResult result; + if (regexp->CaptureCount() == 0) { + result = SearchRegExpNoCaptureMultiple(subject, + regexp, + last_match_info, + &builder); + } else { + result = SearchRegExpMultiple(subject, regexp, last_match_info, &builder); + } + if (result == RegExpImpl::RE_SUCCESS) return *builder.ToJSArray(result_array); + if (result == RegExpImpl::RE_FAILURE) return Heap::null_value(); + ASSERT_EQ(result, RegExpImpl::RE_EXCEPTION); + return Failure::Exception(); +} + + static Object* Runtime_NumberToRadixString(Arguments args) { NoHandleAllocation ha; ASSERT(args.length() == 2); @@ -3099,7 +3651,7 @@ static Object* Runtime_KeyedGetProperty(Arguments args) { // Lookup cache miss. Perform lookup and update the cache if appropriate. LookupResult result; receiver->LocalLookup(key, &result); - if (result.IsProperty() && result.IsLoaded() && result.type() == FIELD) { + if (result.IsProperty() && result.type() == FIELD) { int offset = result.GetFieldIndex(); KeyedLookupCache::Update(receiver_map, key, offset); return receiver->FastPropertyAt(offset); @@ -5812,13 +6364,13 @@ static Object* Runtime_NewClosure(Arguments args) { HandleScope scope; ASSERT(args.length() == 2); CONVERT_ARG_CHECKED(Context, context, 0); - CONVERT_ARG_CHECKED(JSFunction, boilerplate, 1); + CONVERT_ARG_CHECKED(SharedFunctionInfo, shared, 1); PretenureFlag pretenure = (context->global_context() == *context) ? TENURED // Allocate global closures in old space. : NOT_TENURED; // Allocate local closures in new space. Handle<JSFunction> result = - Factory::NewFunctionFromBoilerplate(boilerplate, context, pretenure); + Factory::NewFunctionFromSharedFunctionInfo(shared, context, pretenure); return *result; } @@ -6503,13 +7055,13 @@ static Object* Runtime_CompileString(Arguments args) { Handle<Context> context(Top::context()->global_context()); Compiler::ValidationState validate = (is_json->IsTrue()) ? 
Compiler::VALIDATE_JSON : Compiler::DONT_VALIDATE_JSON; - Handle<JSFunction> boilerplate = Compiler::CompileEval(source, - context, - true, - validate); - if (boilerplate.is_null()) return Failure::Exception(); + Handle<SharedFunctionInfo> shared = Compiler::CompileEval(source, + context, + true, + validate); + if (shared.is_null()) return Failure::Exception(); Handle<JSFunction> fun = - Factory::NewFunctionFromBoilerplate(boilerplate, context, NOT_TENURED); + Factory::NewFunctionFromSharedFunctionInfo(shared, context, NOT_TENURED); return *fun; } @@ -6582,14 +7134,14 @@ static ObjectPair Runtime_ResolvePossiblyDirectEval(Arguments args) { // Deal with a normal eval call with a string argument. Compile it // and return the compiled function bound in the local context. Handle<String> source = args.at<String>(1); - Handle<JSFunction> boilerplate = Compiler::CompileEval( + Handle<SharedFunctionInfo> shared = Compiler::CompileEval( source, Handle<Context>(Top::context()), Top::context()->IsGlobalContext(), Compiler::DONT_VALIDATE_JSON); - if (boilerplate.is_null()) return MakePair(Failure::Exception(), NULL); - callee = Factory::NewFunctionFromBoilerplate( - boilerplate, + if (shared.is_null()) return MakePair(Failure::Exception(), NULL); + callee = Factory::NewFunctionFromSharedFunctionInfo( + shared, Handle<Context>(Top::context()), NOT_TENURED); return MakePair(*callee, args[2]); @@ -8571,14 +9123,14 @@ static Object* Runtime_DebugEvaluate(Arguments args) { Handle<String> function_source = Factory::NewStringFromAscii(Vector<const char>(source_str, source_str_length)); - Handle<JSFunction> boilerplate = + Handle<SharedFunctionInfo> shared = Compiler::CompileEval(function_source, context, context->IsGlobalContext(), Compiler::DONT_VALIDATE_JSON); - if (boilerplate.is_null()) return Failure::Exception(); + if (shared.is_null()) return Failure::Exception(); Handle<JSFunction> compiled_function = - Factory::NewFunctionFromBoilerplate(boilerplate, context); + Factory::NewFunctionFromSharedFunctionInfo(shared, context); // Invoke the result of the compilation to get the evaluation function. bool has_pending_exception; @@ -8639,15 +9191,15 @@ static Object* Runtime_DebugEvaluateGlobal(Arguments args) { Handle<Context> context = Top::global_context(); // Compile the source to be evaluated. - Handle<JSFunction> boilerplate = - Handle<JSFunction>(Compiler::CompileEval(source, - context, - true, - Compiler::DONT_VALIDATE_JSON)); - if (boilerplate.is_null()) return Failure::Exception(); + Handle<SharedFunctionInfo> shared = + Compiler::CompileEval(source, + context, + true, + Compiler::DONT_VALIDATE_JSON); + if (shared.is_null()) return Failure::Exception(); Handle<JSFunction> compiled_function = - Handle<JSFunction>(Factory::NewFunctionFromBoilerplate(boilerplate, - context)); + Handle<JSFunction>(Factory::NewFunctionFromSharedFunctionInfo(shared, + context)); // Invoke the result of the compilation to get the evaluation function. 
bool has_pending_exception; diff --git a/deps/v8/src/runtime.h b/deps/v8/src/runtime.h index 4175902c45..42af3df88a 100644 --- a/deps/v8/src/runtime.h +++ b/deps/v8/src/runtime.h @@ -153,6 +153,7 @@ namespace internal { /* Regular expressions */ \ F(RegExpCompile, 3, 1) \ F(RegExpExec, 4, 1) \ + F(RegExpExecMultiple, 4, 1) \ \ /* Strings */ \ F(StringCharCodeAt, 2, 1) \ diff --git a/deps/v8/src/serialize.cc b/deps/v8/src/serialize.cc index 0e3815183f..980a1bc449 100644 --- a/deps/v8/src/serialize.cc +++ b/deps/v8/src/serialize.cc @@ -409,36 +409,44 @@ void ExternalReferenceTable::PopulateTable() { UNCLASSIFIED, 19, "compare_doubles"); + Add(ExternalReference::compile_array_pop_call().address(), + UNCLASSIFIED, + 20, + "compile_array_pop"); + Add(ExternalReference::compile_array_push_call().address(), + UNCLASSIFIED, + 21, + "compile_array_push"); #ifdef V8_NATIVE_REGEXP Add(ExternalReference::re_case_insensitive_compare_uc16().address(), UNCLASSIFIED, - 20, + 22, "NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16()"); Add(ExternalReference::re_check_stack_guard_state().address(), UNCLASSIFIED, - 21, + 23, "RegExpMacroAssembler*::CheckStackGuardState()"); Add(ExternalReference::re_grow_stack().address(), UNCLASSIFIED, - 22, + 24, "NativeRegExpMacroAssembler::GrowStack()"); Add(ExternalReference::re_word_character_map().address(), UNCLASSIFIED, - 23, + 25, "NativeRegExpMacroAssembler::word_character_map"); #endif // Keyed lookup cache. Add(ExternalReference::keyed_lookup_cache_keys().address(), UNCLASSIFIED, - 24, + 26, "KeyedLookupCache::keys()"); Add(ExternalReference::keyed_lookup_cache_field_offsets().address(), UNCLASSIFIED, - 25, + 27, "KeyedLookupCache::field_offsets()"); Add(ExternalReference::transcendental_cache_array_address().address(), UNCLASSIFIED, - 26, + 28, "TranscendentalCache::caches()"); } @@ -547,7 +555,7 @@ Address Deserializer::Allocate(int space_index, Space* space, int size) { HeapObject* new_object = HeapObject::cast(new_allocation); // Record all large objects in the same space. address = new_object->address(); - high_water_[LO_SPACE] = address + size; + pages_[LO_SPACE].Add(address); } last_object_address_ = address; return address; @@ -900,7 +908,7 @@ void Serializer::Synchronize(const char* tag) { Serializer::Serializer(SnapshotByteSink* sink) : sink_(sink), current_root_index_(0), - external_reference_encoder_(NULL), + external_reference_encoder_(new ExternalReferenceEncoder), large_object_total_(0) { for (int i = 0; i <= LAST_SPACE; i++) { fullness_[i] = 0; @@ -908,28 +916,28 @@ Serializer::Serializer(SnapshotByteSink* sink) } +Serializer::~Serializer() { + delete external_reference_encoder_; +} + + void StartupSerializer::SerializeStrongReferences() { // No active threads. CHECK_EQ(NULL, ThreadState::FirstInUse()); // No active or weak handles. CHECK(HandleScopeImplementer::instance()->blocks()->is_empty()); CHECK_EQ(0, GlobalHandles::NumberOfWeakHandles()); - CHECK_EQ(NULL, external_reference_encoder_); // We don't support serializing installed extensions. 
for (RegisteredExtension* ext = RegisteredExtension::first_extension(); ext != NULL; ext = ext->next()) { CHECK_NE(v8::INSTALLED, ext->state()); } - external_reference_encoder_ = new ExternalReferenceEncoder(); Heap::IterateStrongRoots(this, VISIT_ONLY_STRONG); - delete external_reference_encoder_; - external_reference_encoder_ = NULL; } void PartialSerializer::Serialize(Object** object) { - external_reference_encoder_ = new ExternalReferenceEncoder(); this->VisitPointer(object); // After we have done the partial serialization the partial snapshot cache @@ -943,9 +951,6 @@ void PartialSerializer::Serialize(Object** object) { startup_serializer_->VisitPointer(&partial_snapshot_cache_[index]); } partial_snapshot_cache_length_ = kPartialSnapshotCacheCapacity; - - delete external_reference_encoder_; - external_reference_encoder_ = NULL; } @@ -997,6 +1002,7 @@ int PartialSerializer::PartialSnapshotCacheIndex(HeapObject* heap_object) { Object* entry = partial_snapshot_cache_[i]; if (entry == heap_object) return i; } + // We didn't find the object in the cache. So we add it to the cache and // then visit the pointer so that it becomes part of the startup snapshot // and we can refer to it from the partial snapshot. diff --git a/deps/v8/src/serialize.h b/deps/v8/src/serialize.h index ce3b0061c2..ab2ae9f296 100644 --- a/deps/v8/src/serialize.h +++ b/deps/v8/src/serialize.h @@ -120,28 +120,9 @@ class SnapshotByteSource { return data_[position_++]; } - void CopyRaw(byte* to, int number_of_bytes) { - memcpy(to, data_ + position_, number_of_bytes); - position_ += number_of_bytes; - } + inline void CopyRaw(byte* to, int number_of_bytes); - int GetInt() { - // A little unwind to catch the really small ints. - int snapshot_byte = Get(); - if ((snapshot_byte & 0x80) == 0) { - return snapshot_byte; - } - int accumulator = (snapshot_byte & 0x7f) << 7; - while (true) { - snapshot_byte = Get(); - if ((snapshot_byte & 0x80) == 0) { - return accumulator | snapshot_byte; - } - accumulator = (accumulator | (snapshot_byte & 0x7f)) << 7; - } - UNREACHABLE(); - return accumulator; - } + inline int GetInt(); bool AtEOF() { return position_ == length_; @@ -235,11 +216,35 @@ class SerializerDeserializer: public ObjectVisitor { } static int partial_snapshot_cache_length_; - static const int kPartialSnapshotCacheCapacity = 1024; + static const int kPartialSnapshotCacheCapacity = 1300; static Object* partial_snapshot_cache_[]; }; +int SnapshotByteSource::GetInt() { + // A little unwind to catch the really small ints. + int snapshot_byte = Get(); + if ((snapshot_byte & 0x80) == 0) { + return snapshot_byte; + } + int accumulator = (snapshot_byte & 0x7f) << 7; + while (true) { + snapshot_byte = Get(); + if ((snapshot_byte & 0x80) == 0) { + return accumulator | snapshot_byte; + } + accumulator = (accumulator | (snapshot_byte & 0x7f)) << 7; + } + UNREACHABLE(); + return accumulator; +} + + +void SnapshotByteSource::CopyRaw(byte* to, int number_of_bytes) { + memcpy(to, data_ + position_, number_of_bytes); + position_ += number_of_bytes; +} + // A Deserializer reads a snapshot and reconstructs the Object graph it defines. class Deserializer: public SerializerDeserializer { @@ -364,6 +369,7 @@ class SerializationAddressMapper { class Serializer : public SerializerDeserializer { public: explicit Serializer(SnapshotByteSink* sink); + ~Serializer(); void VisitPointers(Object** start, Object** end); // You can call this after serialization to find out how much space was used // in each space. 
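SnapshotByteSource::GetInt, now defined inline in serialize.h above, reads a variable-length integer: seven payload bits per byte, the high bit marking a continuation, most significant group first. The same decoder transcribed to plain JavaScript (hypothetical names):

  function getInt(bytes, state) {              // state.pos is the read cursor
    var b = bytes[state.pos++];
    if ((b & 0x80) === 0) return b;            // common case: the value fits in seven bits
    var acc = (b & 0x7f) << 7;
    for (;;) {
      b = bytes[state.pos++];
      if ((b & 0x80) === 0) return acc | b;
      acc = (acc | (b & 0x7f)) << 7;
    }
  }

  getInt([0x05], { pos: 0 });                  // 5
  getInt([0x81, 0x02], { pos: 0 });            // (1 << 7) | 2 = 130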
@@ -492,7 +498,12 @@ class PartialSerializer : public Serializer { virtual int RootIndex(HeapObject* o); virtual int PartialSnapshotCacheIndex(HeapObject* o); virtual bool ShouldBeInThePartialSnapshotCache(HeapObject* o) { - return o->IsString() || o->IsSharedFunctionInfo(); + // Scripts should be referred only through shared function infos. We can't + // allow them to be part of the partial snapshot because they contain a + // unique ID, and deserializing several partial snapshots containing script + // would cause dupes. + ASSERT(!o->IsScript()); + return o->IsString() || o->IsSharedFunctionInfo() || o->IsHeapNumber(); } private: @@ -530,6 +541,7 @@ class StartupSerializer : public Serializer { } }; + } } // namespace v8::internal #endif // V8_SERIALIZE_H_ diff --git a/deps/v8/src/snapshot-common.cc b/deps/v8/src/snapshot-common.cc index 1e81b8ece5..f1106e138d 100644 --- a/deps/v8/src/snapshot-common.cc +++ b/deps/v8/src/snapshot-common.cc @@ -59,4 +59,24 @@ bool Snapshot::Initialize(const char* snapshot_file) { return false; } + +Handle<Context> Snapshot::NewContextFromSnapshot() { + if (context_size_ == 0) { + return Handle<Context>(); + } + Heap::ReserveSpace(new_space_used_, + pointer_space_used_, + data_space_used_, + code_space_used_, + map_space_used_, + cell_space_used_, + large_space_used_); + SnapshotByteSource source(context_data_, context_size_); + Deserializer deserializer(&source); + Object* root; + deserializer.DeserializePartial(&root); + CHECK(root->IsContext()); + return Handle<Context>(Context::cast(root)); +} + } } // namespace v8::internal diff --git a/deps/v8/src/snapshot-empty.cc b/deps/v8/src/snapshot-empty.cc index 60ab1e5647..cb26eb8c52 100644 --- a/deps/v8/src/snapshot-empty.cc +++ b/deps/v8/src/snapshot-empty.cc @@ -35,6 +35,16 @@ namespace v8 { namespace internal { const byte Snapshot::data_[] = { 0 }; -int Snapshot::size_ = 0; +const int Snapshot::size_ = 0; +const byte Snapshot::context_data_[] = { 0 }; +const int Snapshot::context_size_ = 0; + +const int Snapshot::new_space_used_ = 0; +const int Snapshot::pointer_space_used_ = 0; +const int Snapshot::data_space_used_ = 0; +const int Snapshot::code_space_used_ = 0; +const int Snapshot::map_space_used_ = 0; +const int Snapshot::cell_space_used_ = 0; +const int Snapshot::large_space_used_ = 0; } } // namespace v8::internal diff --git a/deps/v8/src/snapshot.h b/deps/v8/src/snapshot.h index 88ba8db30e..9f77c20f52 100644 --- a/deps/v8/src/snapshot.h +++ b/deps/v8/src/snapshot.h @@ -38,6 +38,9 @@ class Snapshot { // could be found. static bool Initialize(const char* snapshot_file = NULL); + // Create a new context using the internal partial snapshot. + static Handle<Context> NewContextFromSnapshot(); + // Returns whether or not the snapshot is enabled. 
static bool IsEnabled() { return size_ != 0; } @@ -47,7 +50,16 @@ class Snapshot { private: static const byte data_[]; - static int size_; + static const byte context_data_[]; + static const int new_space_used_; + static const int pointer_space_used_; + static const int data_space_used_; + static const int code_space_used_; + static const int map_space_used_; + static const int cell_space_used_; + static const int large_space_used_; + static const int size_; + static const int context_size_; static bool Deserialize(const byte* content, int len); diff --git a/deps/v8/src/spaces.cc b/deps/v8/src/spaces.cc index 08399ee8dd..5f191ed3a5 100644 --- a/deps/v8/src/spaces.cc +++ b/deps/v8/src/spaces.cc @@ -2749,6 +2749,9 @@ void LargeObjectSpace::FreeUnmarkedObjects() { bool LargeObjectSpace::Contains(HeapObject* object) { Address address = object->address(); + if (Heap::new_space()->Contains(address)) { + return false; + } Page* page = Page::FromAddress(address); SLOW_ASSERT(!page->IsLargeObjectPage() diff --git a/deps/v8/src/string.js b/deps/v8/src/string.js index 6bb19e9548..f4489efa12 100644 --- a/deps/v8/src/string.js +++ b/deps/v8/src/string.js @@ -164,7 +164,7 @@ function StringLocaleCompare(other) { // ECMA-262 section 15.5.4.10 function StringMatch(regexp) { - if (!IS_REGEXP(regexp)) regexp = new ORIGINAL_REGEXP(regexp); + if (!IS_REGEXP(regexp)) regexp = new $RegExp(regexp); var subject = TO_STRING_INLINE(this); if (!regexp.global) return regexp.exec(subject); @@ -183,7 +183,7 @@ function StringMatch(regexp) { } %_Log('regexp', 'regexp-match,%0S,%1r', [subject, regexp]); - // lastMatchInfo is defined in regexp-delay.js. + // lastMatchInfo is defined in regexp.js. var result = %StringMatch(subject, regexp, lastMatchInfo); cache.type = 'match'; cache.regExp = regexp; @@ -405,97 +405,91 @@ function addCaptureString(builder, matchInfo, index) { builder.addSpecialSlice(start, end); }; +// TODO(lrn): This array will survive indefinitely if replace is never +// called again. However, it will be empty, since the contents are cleared +// in the finally block. +var reusableReplaceArray = $Array(16); // Helper function for replacing regular expressions with the result of a -// function application in String.prototype.replace. The function application -// must be interleaved with the regexp matching (contrary to ECMA-262 -// 15.5.4.11) to mimic SpiderMonkey and KJS behavior when the function uses -// the static properties of the RegExp constructor. Example: -// 'abcd'.replace(/(.)/g, function() { return RegExp.$1; } -// should be 'abcd' and not 'dddd' (or anything else). +// function application in String.prototype.replace. function StringReplaceRegExpWithFunction(subject, regexp, replace) { - var matchInfo = DoRegExpExec(regexp, subject, 0); - if (IS_NULL(matchInfo)) return subject; - - var result = new ReplaceResultBuilder(subject); - // There's at least one match. If the regexp is global, we have to loop - // over all matches. The loop is not in C++ code here like the one in - // RegExp.prototype.exec, because of the interleaved function application. - // Unfortunately, that means this code is nearly duplicated, here and in - // jsregexp.cc. if (regexp.global) { - var previous = 0; - var startOfMatch; - if (NUMBER_OF_CAPTURES(matchInfo) == 2) { - // Both branches contain essentially the same loop except for the call - // to the replace function. 
The branch is put outside of the loop for - // speed - do { - startOfMatch = matchInfo[CAPTURE0]; - result.addSpecialSlice(previous, startOfMatch); - previous = matchInfo[CAPTURE1]; - var match = SubString(subject, startOfMatch, previous); - // Don't call directly to avoid exposing the built-in global object. - result.add(replace.call(null, match, startOfMatch, subject)); - // Can't use matchInfo any more from here, since the function could - // overwrite it. - // Continue with the next match. - // Increment previous if we matched an empty string, as per ECMA-262 - // 15.5.4.10. - if (previous == startOfMatch) { - // Add the skipped character to the output, if any. - if (previous < subject.length) { - result.addSpecialSlice(previous, previous + 1); - } - previous++; - // Per ECMA-262 15.10.6.2, if the previous index is greater than the - // string length, there is no match - if (previous > subject.length) { - return result.generate(); - } - } - matchInfo = DoRegExpExec(regexp, subject, previous); - } while (!IS_NULL(matchInfo)); + var resultArray = reusableReplaceArray; + if (resultArray) { + reusableReplaceArray = null; } else { - do { - startOfMatch = matchInfo[CAPTURE0]; - result.addSpecialSlice(previous, startOfMatch); - previous = matchInfo[CAPTURE1]; - result.add(ApplyReplacementFunction(replace, matchInfo, subject)); - // Can't use matchInfo any more from here, since the function could - // overwrite it. - // Continue with the next match. - // Increment previous if we matched an empty string, as per ECMA-262 - // 15.5.4.10. - if (previous == startOfMatch) { - // Add the skipped character to the output, if any. - if (previous < subject.length) { - result.addSpecialSlice(previous, previous + 1); + // Inside a nested replace (replace called from the replacement function + // of another replace) or we have failed to set the reusable array + // back due to an exception in a replacement function. Create a new + // array to use in the future, or until the original is written back. + resultArray = $Array(16); + } + try { + // Must handle exceptions thrown by the replace functions correctly, + // including unregistering global regexps. + var res = %RegExpExecMultiple(regexp, + subject, + lastMatchInfo, + resultArray); + regexp.lastIndex = 0; + if (IS_NULL(res)) { + // No matches at all. + return subject; + } + var len = res.length; + var i = 0; + if (NUMBER_OF_CAPTURES(lastMatchInfo) == 2) { + var match_start = 0; + while (i < len) { + var elem = res[i]; + if (%_IsSmi(elem)) { + if (elem > 0) { + match_start = (elem >> 11) + (elem & 0x7ff); + } else { + match_start = res[++i] - elem; + } + } else { + var func_result = replace.call(null, elem, match_start, subject); + if (!IS_STRING(func_result)) func_result = TO_STRING(func_result); + res[i] = func_result; + match_start += elem.length; } - previous++; - // Per ECMA-262 15.10.6.2, if the previous index is greater than the - // string length, there is no match - if (previous > subject.length) { - return result.generate(); + i++; + } + } else { + while (i < len) { + var elem = res[i]; + if (!%_IsSmi(elem)) { + // elem must be an Array. + // Use the apply argument as backing for global RegExp properties. 
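+            // (elem appears to hold exactly the replacement function's apply arguments -
+            // the match, any captures, the match position and the subject - which is why
+            // the same array can also back RegExp.$1 etc. through lastMatchInfoOverride.)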
+ lastMatchInfoOverride = elem; + var func_result = replace.apply(null, elem); + if (!IS_STRING(func_result)) func_result = TO_STRING(func_result); + res[i] = func_result; } + i++; } - matchInfo = DoRegExpExec(regexp, subject, previous); - } while (!IS_NULL(matchInfo)); + } + var result = new ReplaceResultBuilder(subject, res); + return result.generate(); + } finally { + lastMatchInfoOverride = null; + resultArray.length = 0; + reusableReplaceArray = resultArray; } - - // Tack on the final right substring after the last match. - result.addSpecialSlice(previous, subject.length); - } else { // Not a global regexp, no need to loop. + var matchInfo = DoRegExpExec(regexp, subject, 0); + if (IS_NULL(matchInfo)) return subject; + + var result = new ReplaceResultBuilder(subject); result.addSpecialSlice(0, matchInfo[CAPTURE0]); var endOfMatch = matchInfo[CAPTURE1]; result.add(ApplyReplacementFunction(replace, matchInfo, subject)); // Can't use matchInfo any more from here, since the function could // overwrite it. result.addSpecialSlice(endOfMatch, subject.length); + return result.generate(); } - - return result.generate(); } @@ -522,17 +516,15 @@ function ApplyReplacementFunction(replace, matchInfo, subject) { // ECMA-262 section 15.5.4.12 -function StringSearch(re) { - var regexp = new ORIGINAL_REGEXP(re); +function StringSearch(re) { + var regexp = new $RegExp(re); var s = TO_STRING_INLINE(this); - var last_idx = regexp.lastIndex; // keep old lastIndex - regexp.lastIndex = 0; // ignore re.global property - var result = regexp.exec(s); - regexp.lastIndex = last_idx; // restore lastIndex - if (result == null) - return -1; - else - return result.index; + var match = DoRegExpExec(regexp, s, 0); + if (match) { + lastMatchInfo = match; + return match[CAPTURE0]; + } + return -1; } @@ -896,7 +888,11 @@ function StringSup() { // ReplaceResultBuilder support. function ReplaceResultBuilder(str) { - this.elements = new $Array(); + if (%_ArgumentsLength() > 1) { + this.elements = %_Arguments(1); + } else { + this.elements = new $Array(); + } this.special_string = str; } diff --git a/deps/v8/src/stub-cache.h b/deps/v8/src/stub-cache.h index 0e986dd8d7..0ca37e7e28 100644 --- a/deps/v8/src/stub-cache.h +++ b/deps/v8/src/stub-cache.h @@ -610,6 +610,22 @@ typedef Object* (*CustomCallGenerator)(CallStubCompiler* compiler, StubCompiler::CheckType check); +Object* CompileArrayPushCall(CallStubCompiler* compiler, + Object* object, + JSObject* holder, + JSFunction* function, + String* name, + StubCompiler::CheckType check); + + +Object* CompileArrayPopCall(CallStubCompiler* compiler, + Object* object, + JSObject* holder, + JSFunction* function, + String* name, + StubCompiler::CheckType check); + + } } // namespace v8::internal #endif // V8_STUB_CACHE_H_ diff --git a/deps/v8/src/top.cc b/deps/v8/src/top.cc index 0fcf458667..2f75c8fc1c 100644 --- a/deps/v8/src/top.cc +++ b/deps/v8/src/top.cc @@ -679,7 +679,7 @@ void Top::PrintCurrentStackTrace(FILE* out) { void Top::ComputeLocation(MessageLocation* target) { - *target = MessageLocation(empty_script(), -1, -1); + *target = MessageLocation(Handle<Script>(Heap::empty_script()), -1, -1); StackTraceFrameIterator it; if (!it.done()) { JavaScriptFrame* frame = it.frame(); diff --git a/deps/v8/src/type-info-inl.h b/deps/v8/src/type-info-inl.h new file mode 100644 index 0000000000..90d3f55f9b --- /dev/null +++ b/deps/v8/src/type-info-inl.h @@ -0,0 +1,55 @@ +// Copyright 2010 the V8 project authors. All rights reserved. 
+// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef V8_TYPE_INFO_INL_H_ +#define V8_TYPE_INFO_INL_H_ + +#include "type-info.h" +#include "objects-inl.h" + +namespace v8 { +namespace internal { + + +TypeInfo TypeInfo::TypeFromValue(Handle<Object> value) { + TypeInfo info; + if (value->IsSmi()) { + info = TypeInfo::Smi(); + } else if (value->IsHeapNumber()) { + info = TypeInfo::IsInt32Double(HeapNumber::cast(*value)->value()) + ? TypeInfo::Integer32() + : TypeInfo::Double(); + } else { + info = TypeInfo::Unknown(); + } + return info; +} + + +} } // namespace v8::internal + +#endif // V8_TYPE_INFO_INL_H_ diff --git a/deps/v8/src/type-info.h b/deps/v8/src/type-info.h new file mode 100644 index 0000000000..15bc128023 --- /dev/null +++ b/deps/v8/src/type-info.h @@ -0,0 +1,239 @@ +// Copyright 2010 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef V8_TYPE_INFO_H_ +#define V8_TYPE_INFO_H_ + +#include "globals.h" + +namespace v8 { +namespace internal { + +// Unknown +// | +// PrimitiveType +// | \--------| +// Number String +// / | | +// Double Integer32 | +// | | / +// | Smi / +// | / / +// Uninitialized. + +class TypeInfo { + public: + TypeInfo() { } + + static inline TypeInfo Unknown(); + // We know it's a primitive type. + static inline TypeInfo Primitive(); + // We know it's a number of some sort. + static inline TypeInfo Number(); + // We know it's signed or unsigned 32 bit integer. + static inline TypeInfo Integer32(); + // We know it's a Smi. + static inline TypeInfo Smi(); + // We know it's a heap number. + static inline TypeInfo Double(); + // We know it's a string. + static inline TypeInfo String(); + // We haven't started collecting info yet. + static inline TypeInfo Uninitialized(); + + // Return compact representation. Very sensitive to enum values below! + // Compacting drops information about primtive types and strings types. + // We use the compact representation when we only care about number types. + int ThreeBitRepresentation() { + ASSERT(type_ != kUninitializedType); + int answer = type_ & 0xf; + answer = answer > 6 ? answer - 2 : answer; + ASSERT(answer >= 0); + ASSERT(answer <= 7); + return answer; + } + + // Decode compact representation. Very sensitive to enum values below! + static TypeInfo ExpandedRepresentation(int three_bit_representation) { + Type t = static_cast<Type>(three_bit_representation >= 6 ? + three_bit_representation + 2 : + three_bit_representation); + t = (t == kUnknownType) ? t : static_cast<Type>(t | kPrimitiveType); + ASSERT(t == kUnknownType || + t == kNumberType || + t == kInteger32Type || + t == kSmiType || + t == kDoubleType); + return TypeInfo(t); + } + + int ToInt() { + return type_; + } + + static TypeInfo FromInt(int bit_representation) { + Type t = static_cast<Type>(bit_representation); + ASSERT(t == kUnknownType || + t == kPrimitiveType || + t == kNumberType || + t == kInteger32Type || + t == kSmiType || + t == kDoubleType || + t == kStringType); + return TypeInfo(t); + } + + // Return the weakest (least precise) common type. + static TypeInfo Combine(TypeInfo a, TypeInfo b) { + return TypeInfo(static_cast<Type>(a.type_ & b.type_)); + } + + + // Integer32 is an integer that can be represented as either a signed + // 32-bit integer or as an unsigned 32-bit integer. It has to be + // in the range [-2^31, 2^32 - 1]. We also have to check for negative 0 + // as it is not an Integer32. 
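+  // For example, by this definition -1.0, 0.0 and 4294967295.0 (2^32 - 1) are
+  // Integer32 values, while -0.0, 0.5 and 4294967296.0 are not.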
+ static inline bool IsInt32Double(double value) { + const DoubleRepresentation minus_zero(-0.0); + DoubleRepresentation rep(value); + if (rep.bits == minus_zero.bits) return false; + if (value >= kMinInt && value <= kMaxUInt32) { + if (value <= kMaxInt && value == static_cast<int32_t>(value)) { + return true; + } + if (value == static_cast<uint32_t>(value)) return true; + } + return false; + } + + static inline TypeInfo TypeFromValue(Handle<Object> value); + + inline bool IsUnknown() { + return type_ == kUnknownType; + } + + inline bool IsNumber() { + ASSERT(type_ != kUninitializedType); + return ((type_ & kNumberType) == kNumberType); + } + + inline bool IsSmi() { + ASSERT(type_ != kUninitializedType); + return ((type_ & kSmiType) == kSmiType); + } + + inline bool IsInteger32() { + ASSERT(type_ != kUninitializedType); + return ((type_ & kInteger32Type) == kInteger32Type); + } + + inline bool IsDouble() { + ASSERT(type_ != kUninitializedType); + return ((type_ & kDoubleType) == kDoubleType); + } + + inline bool IsUninitialized() { + return type_ == kUninitializedType; + } + + const char* ToString() { + switch (type_) { + case kUnknownType: return "UnknownType"; + case kPrimitiveType: return "PrimitiveType"; + case kNumberType: return "NumberType"; + case kInteger32Type: return "Integer32Type"; + case kSmiType: return "SmiType"; + case kDoubleType: return "DoubleType"; + case kStringType: return "StringType"; + case kUninitializedType: + UNREACHABLE(); + return "UninitializedType"; + } + UNREACHABLE(); + return "Unreachable code"; + } + + private: + // We use 6 bits to represent the types. + enum Type { + kUnknownType = 0, // 000000 + kPrimitiveType = 0x10, // 010000 + kNumberType = 0x11, // 010001 + kInteger32Type = 0x13, // 010011 + kSmiType = 0x17, // 010111 + kDoubleType = 0x19, // 011001 + kStringType = 0x30, // 110000 + kUninitializedType = 0x3f // 111111 + }; + explicit inline TypeInfo(Type t) : type_(t) { } + + Type type_; +}; + + +TypeInfo TypeInfo::Unknown() { + return TypeInfo(kUnknownType); +} + + +TypeInfo TypeInfo::Primitive() { + return TypeInfo(kPrimitiveType); +} + + +TypeInfo TypeInfo::Number() { + return TypeInfo(kNumberType); +} + + +TypeInfo TypeInfo::Integer32() { + return TypeInfo(kInteger32Type); +} + + +TypeInfo TypeInfo::Smi() { + return TypeInfo(kSmiType); +} + + +TypeInfo TypeInfo::Double() { + return TypeInfo(kDoubleType); +} + + +TypeInfo TypeInfo::String() { + return TypeInfo(kStringType); +} + + +TypeInfo TypeInfo::Uninitialized() { + return TypeInfo(kUninitializedType); +} + +} } // namespace v8::internal + +#endif // V8_TYPE_INFO_H_ diff --git a/deps/v8/src/v8-counters.h b/deps/v8/src/v8-counters.h index b595cd4922..a5f3594ca7 100644 --- a/deps/v8/src/v8-counters.h +++ b/deps/v8/src/v8-counters.h @@ -97,7 +97,11 @@ namespace internal { /* Amount of source code compiled with the old codegen. */ \ SC(total_old_codegen_source_size, V8.TotalOldCodegenSourceSize) \ /* Amount of source code compiled with the full codegen. */ \ - SC(total_full_codegen_source_size, V8.TotalFullCodegenSourceSize) + SC(total_full_codegen_source_size, V8.TotalFullCodegenSourceSize) \ + /* Number of contexts created from scratch. */ \ + SC(contexts_created_from_scratch, V8.ContextsCreatedFromScratch) \ + /* Number of contexts created by partial snapshot. 
*/ \ + SC(contexts_created_by_snapshot, V8.ContextsCreatedBySnapshot) #define STATS_COUNTER_LIST_2(SC) \ @@ -187,6 +191,7 @@ namespace internal { SC(transcendental_cache_hit, V8.TranscendentalCacheHit) \ SC(transcendental_cache_miss, V8.TranscendentalCacheMiss) + // This file contains all the v8 counters that are in use. class Counters : AllStatic { public: diff --git a/deps/v8/src/v8.cc b/deps/v8/src/v8.cc index 395401d91b..5af200348b 100644 --- a/deps/v8/src/v8.cc +++ b/deps/v8/src/v8.cc @@ -155,6 +155,14 @@ void V8::TearDown() { } +static uint32_t random_seed() { + if (FLAG_random_seed == 0) { + return random(); + } + return FLAG_random_seed; +} + + uint32_t V8::Random() { // Random number generator using George Marsaglia's MWC algorithm. static uint32_t hi = 0; @@ -164,8 +172,8 @@ uint32_t V8::Random() { // should ever become zero again, or if random() returns zero, we // avoid getting stuck with zero bits in hi or lo by re-initializing // them on demand. - if (hi == 0) hi = random(); - if (lo == 0) lo = random(); + if (hi == 0) hi = random_seed(); + if (lo == 0) lo = random_seed(); // Mix the bits. hi = 36969 * (hi & 0xFFFF) + (hi >> 16); diff --git a/deps/v8/src/v8natives.js b/deps/v8/src/v8natives.js index 6a32d7bdd7..4a8dfab876 100644 --- a/deps/v8/src/v8natives.js +++ b/deps/v8/src/v8natives.js @@ -221,7 +221,7 @@ function ObjectHasOwnProperty(V) { // ECMA-262 - 15.2.4.6 function ObjectIsPrototypeOf(V) { - if (!IS_OBJECT(V) && !IS_FUNCTION(V)) return false; + if (!IS_OBJECT(V) && !IS_FUNCTION(V) && !IS_UNDETECTABLE(V)) return false; return %IsInPrototypeChain(this, V); } @@ -236,7 +236,7 @@ function ObjectPropertyIsEnumerable(V) { // Extensions for providing property getters and setters. function ObjectDefineGetter(name, fun) { - if (this == null) { + if (this == null && !IS_UNDETECTABLE(this)) { throw new $TypeError('Object.prototype.__defineGetter__: this is Null'); } if (!IS_FUNCTION(fun)) { @@ -247,7 +247,7 @@ function ObjectDefineGetter(name, fun) { function ObjectLookupGetter(name) { - if (this == null) { + if (this == null && !IS_UNDETECTABLE(this)) { throw new $TypeError('Object.prototype.__lookupGetter__: this is Null'); } return %LookupAccessor(ToObject(this), ToString(name), GETTER); @@ -255,7 +255,7 @@ function ObjectLookupGetter(name) { function ObjectDefineSetter(name, fun) { - if (this == null) { + if (this == null && !IS_UNDETECTABLE(this)) { throw new $TypeError('Object.prototype.__defineSetter__: this is Null'); } if (!IS_FUNCTION(fun)) { @@ -267,7 +267,7 @@ function ObjectDefineSetter(name, fun) { function ObjectLookupSetter(name) { - if (this == null) { + if (this == null && !IS_UNDETECTABLE(this)) { throw new $TypeError('Object.prototype.__lookupSetter__: this is Null'); } return %LookupAccessor(ToObject(this), ToString(name), SETTER); @@ -275,7 +275,8 @@ function ObjectLookupSetter(name) { function ObjectKeys(obj) { - if ((!IS_OBJECT(obj) || IS_NULL_OR_UNDEFINED(obj)) && !IS_FUNCTION(obj)) + if ((!IS_OBJECT(obj) || IS_NULL_OR_UNDEFINED(obj)) && !IS_FUNCTION(obj) && + !IS_UNDETECTABLE(obj)) throw MakeTypeError("obj_ctor_property_non_object", ["keys"]); return %LocalKeys(obj); } @@ -594,7 +595,8 @@ function DefineOwnProperty(obj, p, desc, should_throw) { // ES5 section 15.2.3.2. 
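 // Note: the IS_UNDETECTABLE checks added to these Object.* builtins appear to be there
 // so that undetectable host objects (objects that compare == null, such as document.all
 // in browsers) are still treated as objects rather than rejected as null or undefined.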
function ObjectGetPrototypeOf(obj) { - if ((!IS_OBJECT(obj) || IS_NULL_OR_UNDEFINED(obj)) && !IS_FUNCTION(obj)) + if ((!IS_OBJECT(obj) || IS_NULL_OR_UNDEFINED(obj)) && !IS_FUNCTION(obj) && + !IS_UNDETECTABLE(obj)) throw MakeTypeError("obj_ctor_property_non_object", ["getPrototypeOf"]); return obj.__proto__; } @@ -602,7 +604,8 @@ function ObjectGetPrototypeOf(obj) { // ES5 section 15.2.3.3 function ObjectGetOwnPropertyDescriptor(obj, p) { - if ((!IS_OBJECT(obj) || IS_NULL_OR_UNDEFINED(obj)) && !IS_FUNCTION(obj)) + if ((!IS_OBJECT(obj) || IS_NULL_OR_UNDEFINED(obj)) && !IS_FUNCTION(obj) && + !IS_UNDETECTABLE(obj)) throw MakeTypeError("obj_ctor_property_non_object", ["getOwnPropertyDescriptor"]); var desc = GetOwnProperty(obj, p); return FromPropertyDescriptor(desc); @@ -611,7 +614,8 @@ function ObjectGetOwnPropertyDescriptor(obj, p) { // ES5 section 15.2.3.4. function ObjectGetOwnPropertyNames(obj) { - if ((!IS_OBJECT(obj) || IS_NULL_OR_UNDEFINED(obj)) && !IS_FUNCTION(obj)) + if ((!IS_OBJECT(obj) || IS_NULL_OR_UNDEFINED(obj)) && !IS_FUNCTION(obj) && + !IS_UNDETECTABLE(obj)) throw MakeTypeError("obj_ctor_property_non_object", ["getOwnPropertyNames"]); // Find all the indexed properties. @@ -664,7 +668,8 @@ function ObjectCreate(proto, properties) { // ES5 section 15.2.3.6. function ObjectDefineProperty(obj, p, attributes) { - if ((!IS_OBJECT(obj) || IS_NULL_OR_UNDEFINED(obj)) && !IS_FUNCTION(obj)) + if ((!IS_OBJECT(obj) || IS_NULL_OR_UNDEFINED(obj)) && !IS_FUNCTION(obj) && + !IS_UNDETECTABLE(obj)) throw MakeTypeError("obj_ctor_property_non_object", ["defineProperty"]); var name = ToString(p); var desc = ToPropertyDescriptor(attributes); @@ -675,7 +680,8 @@ function ObjectDefineProperty(obj, p, attributes) { // ES5 section 15.2.3.7. function ObjectDefineProperties(obj, properties) { - if ((!IS_OBJECT(obj) || IS_NULL_OR_UNDEFINED(obj)) && !IS_FUNCTION(obj)) + if ((!IS_OBJECT(obj) || IS_NULL_OR_UNDEFINED(obj)) && !IS_FUNCTION(obj) && + !IS_UNDETECTABLE(obj)) throw MakeTypeError("obj_ctor_property_non_object", ["defineProperties"]); var props = ToObject(properties); var key_values = []; diff --git a/deps/v8/src/variables.h b/deps/v8/src/variables.h index a68aa337f4..618f6ace7d 100644 --- a/deps/v8/src/variables.h +++ b/deps/v8/src/variables.h @@ -70,8 +70,6 @@ class StaticType { private: Kind kind_; - - DISALLOW_COPY_AND_ASSIGN(StaticType); }; diff --git a/deps/v8/src/version.cc b/deps/v8/src/version.cc index 74bef65b60..f7116d1982 100644 --- a/deps/v8/src/version.cc +++ b/deps/v8/src/version.cc @@ -34,8 +34,8 @@ // cannot be changed without changing the SCons build script. #define MAJOR_VERSION 2 #define MINOR_VERSION 1 -#define BUILD_NUMBER 7 -#define PATCH_LEVEL 0 +#define BUILD_NUMBER 9 +#define PATCH_LEVEL 1 #define CANDIDATE_VERSION false // Define SONAME to have the SCons build the put a specific SONAME into the diff --git a/deps/v8/src/virtual-frame-heavy-inl.h b/deps/v8/src/virtual-frame-heavy-inl.h new file mode 100644 index 0000000000..a4a0a9ba1f --- /dev/null +++ b/deps/v8/src/virtual-frame-heavy-inl.h @@ -0,0 +1,136 @@ +// Copyright 2010 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef V8_VIRTUAL_FRAME_HEAVY_INL_H_ +#define V8_VIRTUAL_FRAME_HEAVY_INL_H_ + +#include "type-info.h" +#include "register-allocator.h" +#include "scopes.h" + +namespace v8 { +namespace internal { + +// On entry to a function, the virtual frame already contains the receiver, +// the parameters, and a return address. All frame elements are in memory. +VirtualFrame::VirtualFrame() + : elements_(parameter_count() + local_count() + kPreallocatedElements), + stack_pointer_(parameter_count() + 1) { // 0-based index of TOS. + for (int i = 0; i <= stack_pointer_; i++) { + elements_.Add(FrameElement::MemoryElement(TypeInfo::Unknown())); + } + for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) { + register_locations_[i] = kIllegalIndex; + } +} + + +// When cloned, a frame is a deep copy of the original. +VirtualFrame::VirtualFrame(VirtualFrame* original) + : elements_(original->element_count()), + stack_pointer_(original->stack_pointer_) { + elements_.AddAll(original->elements_); + // Copy register locations from original. 
+ memcpy(®ister_locations_, + original->register_locations_, + sizeof(register_locations_)); +} + + +void VirtualFrame::PushFrameSlotAt(int index) { + elements_.Add(CopyElementAt(index)); +} + + +void VirtualFrame::Push(Register reg, TypeInfo info) { + if (is_used(reg)) { + int index = register_location(reg); + FrameElement element = CopyElementAt(index, info); + elements_.Add(element); + } else { + Use(reg, element_count()); + FrameElement element = + FrameElement::RegisterElement(reg, FrameElement::NOT_SYNCED, info); + elements_.Add(element); + } +} + + +void VirtualFrame::Push(Handle<Object> value) { + FrameElement element = + FrameElement::ConstantElement(value, FrameElement::NOT_SYNCED); + elements_.Add(element); +} + + +bool VirtualFrame::Equals(VirtualFrame* other) { +#ifdef DEBUG + for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) { + if (register_location(i) != other->register_location(i)) { + return false; + } + } + if (element_count() != other->element_count()) return false; +#endif + if (stack_pointer_ != other->stack_pointer_) return false; + for (int i = 0; i < element_count(); i++) { + if (!elements_[i].Equals(other->elements_[i])) return false; + } + + return true; +} + + +void VirtualFrame::SetTypeForLocalAt(int index, TypeInfo info) { + elements_[local0_index() + index].set_type_info(info); +} + + +// Make the type of all elements be MEMORY. +void VirtualFrame::SpillAll() { + for (int i = 0; i < element_count(); i++) { + SpillElementAt(i); + } +} + + +void VirtualFrame::PrepareForReturn() { + // Spill all locals. This is necessary to make sure all locals have + // the right value when breaking at the return site in the debugger. + for (int i = 0; i < expression_base_index(); i++) { + SpillElementAt(i); + } +} + + +void VirtualFrame::SetTypeForParamAt(int index, TypeInfo info) { + elements_[param0_index() + index].set_type_info(info); +} + +} } // namespace v8::internal + +#endif // V8_VIRTUAL_FRAME_HEAVY_INL_H_ diff --git a/deps/v8/src/virtual-frame-heavy.cc b/deps/v8/src/virtual-frame-heavy.cc new file mode 100644 index 0000000000..854ed75cb7 --- /dev/null +++ b/deps/v8/src/virtual-frame-heavy.cc @@ -0,0 +1,298 @@ +// Copyright 2010 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#include "v8.h" + +#include "codegen-inl.h" +#include "register-allocator-inl.h" +#include "virtual-frame-inl.h" + +namespace v8 { +namespace internal { + +void VirtualFrame::SetElementAt(int index, Result* value) { + int frame_index = element_count() - index - 1; + ASSERT(frame_index >= 0); + ASSERT(frame_index < element_count()); + ASSERT(value->is_valid()); + FrameElement original = elements_[frame_index]; + + // Early exit if the element is the same as the one being set. + bool same_register = original.is_register() + && value->is_register() + && original.reg().is(value->reg()); + bool same_constant = original.is_constant() + && value->is_constant() + && original.handle().is_identical_to(value->handle()); + if (same_register || same_constant) { + value->Unuse(); + return; + } + + InvalidateFrameSlotAt(frame_index); + + if (value->is_register()) { + if (is_used(value->reg())) { + // The register already appears on the frame. Either the existing + // register element, or the new element at frame_index, must be made + // a copy. + int i = register_location(value->reg()); + + if (i < frame_index) { + // The register FrameElement is lower in the frame than the new copy. + elements_[frame_index] = CopyElementAt(i); + } else { + // There was an early bailout for the case of setting a + // register element to itself. + ASSERT(i != frame_index); + elements_[frame_index] = elements_[i]; + elements_[i] = CopyElementAt(frame_index); + if (elements_[frame_index].is_synced()) { + elements_[i].set_sync(); + } + elements_[frame_index].clear_sync(); + set_register_location(value->reg(), frame_index); + for (int j = i + 1; j < element_count(); j++) { + if (elements_[j].is_copy() && elements_[j].index() == i) { + elements_[j].set_index(frame_index); + } + } + } + } else { + // The register value->reg() was not already used on the frame. + Use(value->reg(), frame_index); + elements_[frame_index] = + FrameElement::RegisterElement(value->reg(), + FrameElement::NOT_SYNCED, + value->type_info()); + } + } else { + ASSERT(value->is_constant()); + elements_[frame_index] = + FrameElement::ConstantElement(value->handle(), + FrameElement::NOT_SYNCED); + } + value->Unuse(); +} + + +// Create a duplicate of an existing valid frame element. +// We can pass an optional number type information that will override the +// existing information about the backing element. The new information must +// not conflict with the existing type information and must be equally or +// more precise. The default parameter value kUninitialized means that there +// is no additional information. +FrameElement VirtualFrame::CopyElementAt(int index, TypeInfo info) { + ASSERT(index >= 0); + ASSERT(index < element_count()); + + FrameElement target = elements_[index]; + FrameElement result; + + switch (target.type()) { + case FrameElement::CONSTANT: + // We do not copy constants and instead return a fresh unsynced + // constant. 
+ result = FrameElement::ConstantElement(target.handle(), + FrameElement::NOT_SYNCED); + break; + + case FrameElement::COPY: + // We do not allow copies of copies, so we follow one link to + // the actual backing store of a copy before making a copy. + index = target.index(); + ASSERT(elements_[index].is_memory() || elements_[index].is_register()); + // Fall through. + + case FrameElement::MEMORY: // Fall through. + case FrameElement::REGISTER: { + // All copies are backed by memory or register locations. + result.set_type(FrameElement::COPY); + result.clear_copied(); + result.clear_sync(); + result.set_index(index); + elements_[index].set_copied(); + // Update backing element's number information. + TypeInfo existing = elements_[index].type_info(); + ASSERT(!existing.IsUninitialized()); + // Assert that the new type information (a) does not conflict with the + // existing one and (b) is equally or more precise. + ASSERT((info.ToInt() & existing.ToInt()) == existing.ToInt()); + ASSERT((info.ToInt() | existing.ToInt()) == info.ToInt()); + + elements_[index].set_type_info(!info.IsUninitialized() + ? info + : existing); + break; + } + case FrameElement::INVALID: + // We should not try to copy invalid elements. + UNREACHABLE(); + break; + } + return result; +} + + +// Modify the state of the virtual frame to match the actual frame by adding +// extra in-memory elements to the top of the virtual frame. The extra +// elements will be externally materialized on the actual frame (eg, by +// pushing an exception handler). No code is emitted. +void VirtualFrame::Adjust(int count) { + ASSERT(count >= 0); + ASSERT(stack_pointer_ == element_count() - 1); + + for (int i = 0; i < count; i++) { + elements_.Add(FrameElement::MemoryElement(TypeInfo::Unknown())); + } + stack_pointer_ += count; +} + + +void VirtualFrame::ForgetElements(int count) { + ASSERT(count >= 0); + ASSERT(element_count() >= count); + + for (int i = 0; i < count; i++) { + FrameElement last = elements_.RemoveLast(); + if (last.is_register()) { + // A hack to properly count register references for the code + // generator's current frame and also for other frames. The + // same code appears in PrepareMergeTo. + if (cgen()->frame() == this) { + Unuse(last.reg()); + } else { + set_register_location(last.reg(), kIllegalIndex); + } + } + } +} + + +// Make the type of the element at a given index be MEMORY. +void VirtualFrame::SpillElementAt(int index) { + if (!elements_[index].is_valid()) return; + + SyncElementAt(index); + // Number type information is preserved. + // Copies get their number information from their backing element. + TypeInfo info; + if (!elements_[index].is_copy()) { + info = elements_[index].type_info(); + } else { + info = elements_[elements_[index].index()].type_info(); + } + // The element is now in memory. Its copied flag is preserved. + FrameElement new_element = FrameElement::MemoryElement(info); + if (elements_[index].is_copied()) { + new_element.set_copied(); + } + if (elements_[index].is_untagged_int32()) { + new_element.set_untagged_int32(true); + } + if (elements_[index].is_register()) { + Unuse(elements_[index].reg()); + } + elements_[index] = new_element; +} + + +// Clear the dirty bit for the element at a given index. 
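+// (Elements at or below stack_pointer_ already have an actual stack slot to write to;
+// the slot just above the stack pointer can be synced by pushing, and anything higher
+// requires syncing the whole range above the stack pointer first.)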
+void VirtualFrame::SyncElementAt(int index) { + if (index <= stack_pointer_) { + if (!elements_[index].is_synced()) SyncElementBelowStackPointer(index); + } else if (index == stack_pointer_ + 1) { + SyncElementByPushing(index); + } else { + SyncRange(stack_pointer_ + 1, index); + } +} + + +void VirtualFrame::PrepareMergeTo(VirtualFrame* expected) { + // Perform state changes on this frame that will make merge to the + // expected frame simpler or else increase the likelihood that his + // frame will match another. + for (int i = 0; i < element_count(); i++) { + FrameElement source = elements_[i]; + FrameElement target = expected->elements_[i]; + + if (!target.is_valid() || + (target.is_memory() && !source.is_memory() && source.is_synced())) { + // No code needs to be generated to invalidate valid elements. + // No code needs to be generated to move values to memory if + // they are already synced. We perform those moves here, before + // merging. + if (source.is_register()) { + // If the frame is the code generator's current frame, we have + // to decrement both the frame-internal and global register + // counts. + if (cgen()->frame() == this) { + Unuse(source.reg()); + } else { + set_register_location(source.reg(), kIllegalIndex); + } + } + elements_[i] = target; + } else if (target.is_register() && !target.is_synced() && + !source.is_memory()) { + // If an element's target is a register that doesn't need to be + // synced, and the element is not in memory, then the sync state + // of the element is irrelevant. We clear the sync bit. + ASSERT(source.is_valid()); + elements_[i].clear_sync(); + } + } +} + + +void VirtualFrame::PrepareForCall(int spilled_args, int dropped_args) { + ASSERT(height() >= dropped_args); + ASSERT(height() >= spilled_args); + ASSERT(dropped_args <= spilled_args); + + SyncRange(0, element_count() - 1); + // Spill registers. + for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) { + if (is_used(i)) { + SpillElementAt(register_location(i)); + } + } + + // Spill the arguments. + for (int i = element_count() - spilled_args; i < element_count(); i++) { + if (!elements_[i].is_memory()) { + SpillElementAt(i); + } + } + + // Forget the frame elements that will be popped by the call. + Forget(dropped_args); +} + + +} } // namespace v8::internal diff --git a/deps/v8/src/virtual-frame-inl.h b/deps/v8/src/virtual-frame-inl.h index 4050d715b6..e4c6e6e5e8 100644 --- a/deps/v8/src/virtual-frame-inl.h +++ b/deps/v8/src/virtual-frame-inl.h @@ -30,61 +30,15 @@ #include "virtual-frame.h" -namespace v8 { -namespace internal { - - -// On entry to a function, the virtual frame already contains the receiver, -// the parameters, and a return address. All frame elements are in memory. -VirtualFrame::VirtualFrame() - : elements_(parameter_count() + local_count() + kPreallocatedElements), - stack_pointer_(parameter_count() + 1) { // 0-based index of TOS. - for (int i = 0; i <= stack_pointer_; i++) { - elements_.Add(FrameElement::MemoryElement(NumberInfo::Unknown())); - } - for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) { - register_locations_[i] = kIllegalIndex; - } -} - - -// When cloned, a frame is a deep copy of the original. -VirtualFrame::VirtualFrame(VirtualFrame* original) - : elements_(original->element_count()), - stack_pointer_(original->stack_pointer_) { - elements_.AddAll(original->elements_); - // Copy register locations from original. 
- memcpy(®ister_locations_, - original->register_locations_, - sizeof(register_locations_)); -} - - -void VirtualFrame::PushFrameSlotAt(int index) { - elements_.Add(CopyElementAt(index)); -} - - -void VirtualFrame::Push(Register reg, NumberInfo info) { - if (is_used(reg)) { - int index = register_location(reg); - FrameElement element = CopyElementAt(index, info); - elements_.Add(element); - } else { - Use(reg, element_count()); - FrameElement element = - FrameElement::RegisterElement(reg, FrameElement::NOT_SYNCED, info); - elements_.Add(element); - } -} - +#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 +#include "virtual-frame-heavy-inl.h" +#else +#include "virtual-frame-light-inl.h" +#endif -void VirtualFrame::Push(Handle<Object> value) { - FrameElement element = - FrameElement::ConstantElement(value, FrameElement::NOT_SYNCED); - elements_.Add(element); -} +namespace v8 { +namespace internal { void VirtualFrame::Push(Smi* value) { Push(Handle<Object> (value)); @@ -101,35 +55,6 @@ void VirtualFrame::Nip(int num_dropped) { SetElementAt(0, &tos); } - -bool VirtualFrame::Equals(VirtualFrame* other) { -#ifdef DEBUG - for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) { - if (register_location(i) != other->register_location(i)) { - return false; - } - } - if (element_count() != other->element_count()) return false; -#endif - if (stack_pointer_ != other->stack_pointer_) return false; - for (int i = 0; i < element_count(); i++) { - if (!elements_[i].Equals(other->elements_[i])) return false; - } - - return true; -} - - -void VirtualFrame::SetTypeForLocalAt(int index, NumberInfo info) { - elements_[local0_index() + index].set_number_info(info); -} - - -void VirtualFrame::SetTypeForParamAt(int index, NumberInfo info) { - elements_[param0_index() + index].set_number_info(info); -} - - } } // namespace v8::internal #endif // V8_VIRTUAL_FRAME_INL_H_ diff --git a/deps/v8/src/virtual-frame-light-inl.h b/deps/v8/src/virtual-frame-light-inl.h new file mode 100644 index 0000000000..5c823ae5db --- /dev/null +++ b/deps/v8/src/virtual-frame-light-inl.h @@ -0,0 +1,95 @@ +// Copyright 2010 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef V8_VIRTUAL_FRAME_LIGHT_INL_H_ +#define V8_VIRTUAL_FRAME_LIGHT_INL_H_ + +#include "type-info.h" +#include "register-allocator.h" +#include "scopes.h" + +namespace v8 { +namespace internal { + +// On entry to a function, the virtual frame already contains the receiver, +// the parameters, and a return address. All frame elements are in memory. +VirtualFrame::VirtualFrame() + : element_count_(parameter_count() + 2), + stack_pointer_(parameter_count() + 1) { + for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) { + register_locations_[i] = kIllegalIndex; + } +} + + +// When cloned, a frame is a deep copy of the original. +VirtualFrame::VirtualFrame(VirtualFrame* original) + : element_count_(original->element_count()), + stack_pointer_(original->stack_pointer_) { + memcpy(®ister_locations_, + original->register_locations_, + sizeof(register_locations_)); +} + + +void VirtualFrame::Push(Handle<Object> value) { + UNIMPLEMENTED(); +} + + +bool VirtualFrame::Equals(VirtualFrame* other) { +#ifdef DEBUG + for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) { + if (register_location(i) != other->register_location(i)) { + return false; + } + } + if (element_count() != other->element_count()) return false; +#endif + if (stack_pointer_ != other->stack_pointer_) return false; + + return true; +} + + +void VirtualFrame::SetTypeForLocalAt(int index, TypeInfo info) { + UNIMPLEMENTED(); +} + + +// Everything is always spilled anyway. +void VirtualFrame::SpillAll() { +} + + +void VirtualFrame::PrepareForReturn() { +} + + +} } // namespace v8::internal + +#endif // V8_VIRTUAL_FRAME_LIGHT_INL_H_ diff --git a/deps/v8/src/virtual-frame-light.cc b/deps/v8/src/virtual-frame-light.cc new file mode 100644 index 0000000000..4662cf0163 --- /dev/null +++ b/deps/v8/src/virtual-frame-light.cc @@ -0,0 +1,52 @@ +// Copyright 2010 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#include "v8.h" + +#include "codegen-inl.h" +#include "register-allocator-inl.h" +#include "virtual-frame-inl.h" + +namespace v8 { +namespace internal { + +void VirtualFrame::Adjust(int count) { + ASSERT(count >= 0); + ASSERT(stack_pointer_ == element_count() - 1); + + element_count_ += count; + stack_pointer_ += count; +} + + +// Make the type of the element at a given index be MEMORY. +void VirtualFrame::SpillElementAt(int index) { + UNIMPLEMENTED(); +} + + +} } // namespace v8::internal diff --git a/deps/v8/src/virtual-frame.cc b/deps/v8/src/virtual-frame.cc index 070424cf47..d618bc268d 100644 --- a/deps/v8/src/virtual-frame.cc +++ b/deps/v8/src/virtual-frame.cc @@ -37,99 +37,6 @@ namespace internal { // ------------------------------------------------------------------------- // VirtualFrame implementation. -// Create a duplicate of an existing valid frame element. -// We can pass an optional number type information that will override the -// existing information about the backing element. The new information must -// not conflict with the existing type information and must be equally or -// more precise. The default parameter value kUninitialized means that there -// is no additional information. -FrameElement VirtualFrame::CopyElementAt(int index, NumberInfo info) { - ASSERT(index >= 0); - ASSERT(index < element_count()); - - FrameElement target = elements_[index]; - FrameElement result; - - switch (target.type()) { - case FrameElement::CONSTANT: - // We do not copy constants and instead return a fresh unsynced - // constant. - result = FrameElement::ConstantElement(target.handle(), - FrameElement::NOT_SYNCED); - break; - - case FrameElement::COPY: - // We do not allow copies of copies, so we follow one link to - // the actual backing store of a copy before making a copy. - index = target.index(); - ASSERT(elements_[index].is_memory() || elements_[index].is_register()); - // Fall through. - - case FrameElement::MEMORY: // Fall through. - case FrameElement::REGISTER: { - // All copies are backed by memory or register locations. - result.set_type(FrameElement::COPY); - result.clear_copied(); - result.clear_sync(); - result.set_index(index); - elements_[index].set_copied(); - // Update backing element's number information. - NumberInfo existing = elements_[index].number_info(); - ASSERT(!existing.IsUninitialized()); - // Assert that the new type information (a) does not conflict with the - // existing one and (b) is equally or more precise. - ASSERT((info.ToInt() & existing.ToInt()) == existing.ToInt()); - ASSERT((info.ToInt() | existing.ToInt()) == info.ToInt()); - - elements_[index].set_number_info(!info.IsUninitialized() - ? info - : existing); - break; - } - case FrameElement::INVALID: - // We should not try to copy invalid elements. - UNREACHABLE(); - break; - } - return result; -} - - -// Modify the state of the virtual frame to match the actual frame by adding -// extra in-memory elements to the top of the virtual frame. 
The extra -// elements will be externally materialized on the actual frame (eg, by -// pushing an exception handler). No code is emitted. -void VirtualFrame::Adjust(int count) { - ASSERT(count >= 0); - ASSERT(stack_pointer_ == element_count() - 1); - - for (int i = 0; i < count; i++) { - elements_.Add(FrameElement::MemoryElement(NumberInfo::Unknown())); - } - stack_pointer_ += count; -} - - -void VirtualFrame::ForgetElements(int count) { - ASSERT(count >= 0); - ASSERT(element_count() >= count); - - for (int i = 0; i < count; i++) { - FrameElement last = elements_.RemoveLast(); - if (last.is_register()) { - // A hack to properly count register references for the code - // generator's current frame and also for other frames. The - // same code appears in PrepareMergeTo. - if (cgen()->frame() == this) { - Unuse(last.reg()); - } else { - set_register_location(last.reg(), kIllegalIndex); - } - } - } -} - - // If there are any registers referenced only by the frame, spill one. Register VirtualFrame::SpillAnyRegister() { // Find the leftmost (ordered by register number) register whose only @@ -145,191 +52,6 @@ Register VirtualFrame::SpillAnyRegister() { } -// Make the type of the element at a given index be MEMORY. -void VirtualFrame::SpillElementAt(int index) { - if (!elements_[index].is_valid()) return; - - SyncElementAt(index); - // Number type information is preserved. - // Copies get their number information from their backing element. - NumberInfo info; - if (!elements_[index].is_copy()) { - info = elements_[index].number_info(); - } else { - info = elements_[elements_[index].index()].number_info(); - } - // The element is now in memory. Its copied flag is preserved. - FrameElement new_element = FrameElement::MemoryElement(info); - if (elements_[index].is_copied()) { - new_element.set_copied(); - } - if (elements_[index].is_untagged_int32()) { - new_element.set_untagged_int32(true); - } - if (elements_[index].is_register()) { - Unuse(elements_[index].reg()); - } - elements_[index] = new_element; -} - - -// Clear the dirty bit for the element at a given index. -void VirtualFrame::SyncElementAt(int index) { - if (index <= stack_pointer_) { - if (!elements_[index].is_synced()) SyncElementBelowStackPointer(index); - } else if (index == stack_pointer_ + 1) { - SyncElementByPushing(index); - } else { - SyncRange(stack_pointer_ + 1, index); - } -} - - -// Make the type of all elements be MEMORY. -void VirtualFrame::SpillAll() { - for (int i = 0; i < element_count(); i++) { - SpillElementAt(i); - } -} - - -void VirtualFrame::PrepareMergeTo(VirtualFrame* expected) { - // Perform state changes on this frame that will make merge to the - // expected frame simpler or else increase the likelihood that his - // frame will match another. - for (int i = 0; i < element_count(); i++) { - FrameElement source = elements_[i]; - FrameElement target = expected->elements_[i]; - - if (!target.is_valid() || - (target.is_memory() && !source.is_memory() && source.is_synced())) { - // No code needs to be generated to invalidate valid elements. - // No code needs to be generated to move values to memory if - // they are already synced. We perform those moves here, before - // merging. - if (source.is_register()) { - // If the frame is the code generator's current frame, we have - // to decrement both the frame-internal and global register - // counts. 
- if (cgen()->frame() == this) { - Unuse(source.reg()); - } else { - set_register_location(source.reg(), kIllegalIndex); - } - } - elements_[i] = target; - } else if (target.is_register() && !target.is_synced() && - !source.is_memory()) { - // If an element's target is a register that doesn't need to be - // synced, and the element is not in memory, then the sync state - // of the element is irrelevant. We clear the sync bit. - ASSERT(source.is_valid()); - elements_[i].clear_sync(); - } - } -} - - -void VirtualFrame::PrepareForCall(int spilled_args, int dropped_args) { - ASSERT(height() >= dropped_args); - ASSERT(height() >= spilled_args); - ASSERT(dropped_args <= spilled_args); - - SyncRange(0, element_count() - 1); - // Spill registers. - for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) { - if (is_used(i)) { - SpillElementAt(register_location(i)); - } - } - - // Spill the arguments. - for (int i = element_count() - spilled_args; i < element_count(); i++) { - if (!elements_[i].is_memory()) { - SpillElementAt(i); - } - } - - // Forget the frame elements that will be popped by the call. - Forget(dropped_args); -} - - -void VirtualFrame::PrepareForReturn() { - // Spill all locals. This is necessary to make sure all locals have - // the right value when breaking at the return site in the debugger. - for (int i = 0; i < expression_base_index(); i++) { - SpillElementAt(i); - } -} - - -void VirtualFrame::SetElementAt(int index, Result* value) { - int frame_index = element_count() - index - 1; - ASSERT(frame_index >= 0); - ASSERT(frame_index < element_count()); - ASSERT(value->is_valid()); - FrameElement original = elements_[frame_index]; - - // Early exit if the element is the same as the one being set. - bool same_register = original.is_register() - && value->is_register() - && original.reg().is(value->reg()); - bool same_constant = original.is_constant() - && value->is_constant() - && original.handle().is_identical_to(value->handle()); - if (same_register || same_constant) { - value->Unuse(); - return; - } - - InvalidateFrameSlotAt(frame_index); - - if (value->is_register()) { - if (is_used(value->reg())) { - // The register already appears on the frame. Either the existing - // register element, or the new element at frame_index, must be made - // a copy. - int i = register_location(value->reg()); - - if (i < frame_index) { - // The register FrameElement is lower in the frame than the new copy. - elements_[frame_index] = CopyElementAt(i); - } else { - // There was an early bailout for the case of setting a - // register element to itself. - ASSERT(i != frame_index); - elements_[frame_index] = elements_[i]; - elements_[i] = CopyElementAt(frame_index); - if (elements_[frame_index].is_synced()) { - elements_[i].set_sync(); - } - elements_[frame_index].clear_sync(); - set_register_location(value->reg(), frame_index); - for (int j = i + 1; j < element_count(); j++) { - if (elements_[j].is_copy() && elements_[j].index() == i) { - elements_[j].set_index(frame_index); - } - } - } - } else { - // The register value->reg() was not already used on the frame. - Use(value->reg(), frame_index); - elements_[frame_index] = - FrameElement::RegisterElement(value->reg(), - FrameElement::NOT_SYNCED, - value->number_info()); - } - } else { - ASSERT(value->is_constant()); - elements_[frame_index] = - FrameElement::ConstantElement(value->handle(), - FrameElement::NOT_SYNCED); - } - value->Unuse(); -} - - // Specialization of List::ResizeAdd to non-inlined version for FrameElements. 
// The function ResizeAdd becomes a real function, whose implementation is the // inlined ResizeAddInternal. diff --git a/deps/v8/src/x64/codegen-x64.cc b/deps/v8/src/x64/codegen-x64.cc index a6b0ffcef5..108384cf17 100644 --- a/deps/v8/src/x64/codegen-x64.cc +++ b/deps/v8/src/x64/codegen-x64.cc @@ -2229,9 +2229,8 @@ void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) { } -void CodeGenerator::InstantiateBoilerplate(Handle<JSFunction> boilerplate) { - ASSERT(boilerplate->IsBoilerplate()); - +void CodeGenerator::InstantiateFunction( + Handle<SharedFunctionInfo> function_info) { // The inevitable call will sync frame elements to memory anyway, so // we do it eagerly to allow us to push the arguments directly into // place. @@ -2239,16 +2238,16 @@ void CodeGenerator::InstantiateBoilerplate(Handle<JSFunction> boilerplate) { // Use the fast case closure allocation code that allocates in new // space for nested functions that don't need literals cloning. - if (scope()->is_function_scope() && boilerplate->NumberOfLiterals() == 0) { + if (scope()->is_function_scope() && function_info->num_literals() == 0) { FastNewClosureStub stub; - frame_->Push(boilerplate); + frame_->Push(function_info); Result answer = frame_->CallStub(&stub, 1); frame_->Push(&answer); } else { // Call the runtime to instantiate the function boilerplate // object. frame_->EmitPush(rsi); - frame_->EmitPush(boilerplate); + frame_->EmitPush(function_info); Result result = frame_->CallRuntime(Runtime::kNewClosure, 2); frame_->Push(&result); } @@ -2258,19 +2257,19 @@ void CodeGenerator::InstantiateBoilerplate(Handle<JSFunction> boilerplate) { void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) { Comment cmnt(masm_, "[ FunctionLiteral"); - // Build the function boilerplate and instantiate it. - Handle<JSFunction> boilerplate = - Compiler::BuildBoilerplate(node, script(), this); + // Build the function info and instantiate it. + Handle<SharedFunctionInfo> function_info = + Compiler::BuildFunctionInfo(node, script(), this); // Check for stack-overflow exception. if (HasStackOverflow()) return; - InstantiateBoilerplate(boilerplate); + InstantiateFunction(function_info); } -void CodeGenerator::VisitFunctionBoilerplateLiteral( - FunctionBoilerplateLiteral* node) { - Comment cmnt(masm_, "[ FunctionBoilerplateLiteral"); - InstantiateBoilerplate(node->boilerplate()); +void CodeGenerator::VisitSharedFunctionInfoLiteral( + SharedFunctionInfoLiteral* node) { + Comment cmnt(masm_, "[ SharedFunctionInfoLiteral"); + InstantiateFunction(node->shared_function_info()); } @@ -4369,7 +4368,7 @@ void CodeGenerator::ToBoolean(ControlDestination* dest) { if (value.is_number()) { Comment cmnt(masm_, "ONLY_NUMBER"); - // Fast case if NumberInfo indicates only numbers. + // Fast case if TypeInfo indicates only numbers. if (FLAG_debug_code) { __ AbortIfNotNumber(value.reg(), "ToBoolean operand is not a number."); } @@ -5068,9 +5067,9 @@ void CodeGenerator::Comparison(AstNode* node, Condition left_is_smi = masm_->CheckSmi(left_side.reg()); is_smi.Branch(left_is_smi); - bool is_for_loop_compare = (node->AsCompareOperation() != NULL) - && node->AsCompareOperation()->is_for_loop_condition(); - if (!is_for_loop_compare && right_val->IsSmi()) { + bool is_loop_condition = (node->AsExpression() != NULL) && + node->AsExpression()->is_loop_condition(); + if (!is_loop_condition && right_val->IsSmi()) { // Right side is a constant smi and left side has been checked // not to be a smi. 
JumpTarget not_number; @@ -5292,8 +5291,8 @@ void CodeGenerator::GenericBinaryOperation(Token::Value op, } // Get number type of left and right sub-expressions. - NumberInfo operands_type = - NumberInfo::Combine(left.number_info(), right.number_info()); + TypeInfo operands_type = + TypeInfo::Combine(left.type_info(), right.type_info()); Result answer; if (left_is_non_smi_constant || right_is_non_smi_constant) { @@ -5325,13 +5324,13 @@ void CodeGenerator::GenericBinaryOperation(Token::Value op, } } - // Set NumberInfo of result according to the operation performed. + // Set TypeInfo of result according to the operation performed. // We rely on the fact that smis have a 32 bit payload on x64. ASSERT(kSmiValueSize == 32); - NumberInfo result_type = NumberInfo::Unknown(); + TypeInfo result_type = TypeInfo::Unknown(); switch (op) { case Token::COMMA: - result_type = right.number_info(); + result_type = right.type_info(); break; case Token::OR: case Token::AND: @@ -5342,37 +5341,37 @@ void CodeGenerator::GenericBinaryOperation(Token::Value op, case Token::BIT_XOR: case Token::BIT_AND: // Result is always a smi. - result_type = NumberInfo::Smi(); + result_type = TypeInfo::Smi(); break; case Token::SAR: case Token::SHL: // Result is always a smi. - result_type = NumberInfo::Smi(); + result_type = TypeInfo::Smi(); break; case Token::SHR: // Result of x >>> y is always a smi if y >= 1, otherwise a number. result_type = (right.is_constant() && right.handle()->IsSmi() && Smi::cast(*right.handle())->value() >= 1) - ? NumberInfo::Smi() - : NumberInfo::Number(); + ? TypeInfo::Smi() + : TypeInfo::Number(); break; case Token::ADD: // Result could be a string or a number. Check types of inputs. result_type = operands_type.IsNumber() - ? NumberInfo::Number() - : NumberInfo::Unknown(); + ? TypeInfo::Number() + : TypeInfo::Unknown(); break; case Token::SUB: case Token::MUL: case Token::DIV: case Token::MOD: // Result is always a number. - result_type = NumberInfo::Number(); + result_type = TypeInfo::Number(); break; default: UNREACHABLE(); } - answer.set_number_info(result_type); + answer.set_type_info(result_type); frame_->Push(&answer); } @@ -6361,12 +6360,12 @@ void Reference::SetValue(InitState init_state) { void FastNewClosureStub::Generate(MacroAssembler* masm) { - // Clone the boilerplate in new space. Set the context to the - // current context in rsi. + // Create a new closure from the given function info in new + // space. Set the context to the current context in rsi. Label gc; __ AllocateInNewSpace(JSFunction::kSize, rax, rbx, rcx, &gc, TAG_OBJECT); - // Get the boilerplate function from the stack. + // Get the function info from the stack. __ movq(rdx, Operand(rsp, 1 * kPointerSize)); // Compute the function map in the current global context and set that @@ -6376,18 +6375,16 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) { __ movq(rcx, Operand(rcx, Context::SlotOffset(Context::FUNCTION_MAP_INDEX))); __ movq(FieldOperand(rax, JSObject::kMapOffset), rcx); - // Clone the rest of the boilerplate fields. We don't have to update - // the write barrier because the allocated object is in new space. - for (int offset = kPointerSize; - offset < JSFunction::kSize; - offset += kPointerSize) { - if (offset == JSFunction::kContextOffset) { - __ movq(FieldOperand(rax, offset), rsi); - } else { - __ movq(rbx, FieldOperand(rdx, offset)); - __ movq(FieldOperand(rax, offset), rbx); - } - } + // Initialize the rest of the function. 
We don't have to update the + // write barrier because the allocated object is in new space. + __ LoadRoot(rbx, Heap::kEmptyFixedArrayRootIndex); + __ LoadRoot(rcx, Heap::kTheHoleValueRootIndex); + __ movq(FieldOperand(rax, JSObject::kPropertiesOffset), rbx); + __ movq(FieldOperand(rax, JSObject::kElementsOffset), rbx); + __ movq(FieldOperand(rax, JSFunction::kPrototypeOrInitialMapOffset), rcx); + __ movq(FieldOperand(rax, JSFunction::kSharedFunctionInfoOffset), rdx); + __ movq(FieldOperand(rax, JSFunction::kContextOffset), rsi); + __ movq(FieldOperand(rax, JSFunction::kLiteralsOffset), rbx); // Return and remove the on-stack parameter. __ ret(1 * kPointerSize); @@ -9105,52 +9102,58 @@ Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) { int CompareStub::MinorKey() { - // Encode the three parameters in a unique 16 bit value. - ASSERT(static_cast<unsigned>(cc_) < (1 << 14)); - int nnn_value = (never_nan_nan_ ? 2 : 0); - if (cc_ != equal) nnn_value = 0; // Avoid duplicate stubs. - return (static_cast<unsigned>(cc_) << 2) | nnn_value | (strict_ ? 1 : 0); + // Encode the three parameters in a unique 16 bit value. To avoid duplicate + // stubs the never NaN NaN condition is only taken into account if the + // condition is equals. + ASSERT(static_cast<unsigned>(cc_) < (1 << 13)); + return ConditionField::encode(static_cast<unsigned>(cc_)) + | StrictField::encode(strict_) + | NeverNanNanField::encode(cc_ == equal ? never_nan_nan_ : false) + | IncludeNumberCompareField::encode(include_number_compare_); } +// Unfortunately you have to run without snapshots to see most of these +// names in the profile since most compare stubs end up in the snapshot. const char* CompareStub::GetName() { + if (name_ != NULL) return name_; + const int kMaxNameLength = 100; + name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength); + if (name_ == NULL) return "OOM"; + + const char* cc_name; switch (cc_) { - case less: return "CompareStub_LT"; - case greater: return "CompareStub_GT"; - case less_equal: return "CompareStub_LE"; - case greater_equal: return "CompareStub_GE"; - case not_equal: { - if (strict_) { - if (never_nan_nan_) { - return "CompareStub_NE_STRICT_NO_NAN"; - } else { - return "CompareStub_NE_STRICT"; - } - } else { - if (never_nan_nan_) { - return "CompareStub_NE_NO_NAN"; - } else { - return "CompareStub_NE"; - } - } - } - case equal: { - if (strict_) { - if (never_nan_nan_) { - return "CompareStub_EQ_STRICT_NO_NAN"; - } else { - return "CompareStub_EQ_STRICT"; - } - } else { - if (never_nan_nan_) { - return "CompareStub_EQ_NO_NAN"; - } else { - return "CompareStub_EQ"; - } - } - } - default: return "CompareStub"; + case less: cc_name = "LT"; break; + case greater: cc_name = "GT"; break; + case less_equal: cc_name = "LE"; break; + case greater_equal: cc_name = "GE"; break; + case equal: cc_name = "EQ"; break; + case not_equal: cc_name = "NE"; break; + default: cc_name = "UnknownCondition"; break; + } + + const char* strict_name = ""; + if (strict_ && (cc_ == equal || cc_ == not_equal)) { + strict_name = "_STRICT"; + } + + const char* never_nan_nan_name = ""; + if (never_nan_nan_ && (cc_ == equal || cc_ == not_equal)) { + never_nan_nan_name = "_NO_NAN"; } + + const char* include_number_compare_name = ""; + if (!include_number_compare_) { + include_number_compare_name = "_NO_NUMBER"; + } + + OS::SNPrintF(Vector<char>(name_, kMaxNameLength), + "CompareStub_%s%s%s%s", + cc_name, + strict_name, + never_nan_nan_name, + include_number_compare_name); + return name_; } diff --git 
a/deps/v8/src/x64/codegen-x64.h b/deps/v8/src/x64/codegen-x64.h index 42b4993128..3f339181b8 100644 --- a/deps/v8/src/x64/codegen-x64.h +++ b/deps/v8/src/x64/codegen-x64.h @@ -527,8 +527,8 @@ class CodeGenerator: public AstVisitor { // name/value pairs. void DeclareGlobals(Handle<FixedArray> pairs); - // Instantiate the function boilerplate. - void InstantiateBoilerplate(Handle<JSFunction> boilerplate); + // Instantiate the function based on the shared function info. + void InstantiateFunction(Handle<SharedFunctionInfo> function_info); // Support for type checks. void GenerateIsSmi(ZoneList<Expression*>* args); @@ -667,7 +667,7 @@ class GenericBinaryOpStub: public CodeStub { GenericBinaryOpStub(Token::Value op, OverwriteMode mode, GenericBinaryFlags flags, - NumberInfo operands_type = NumberInfo::Unknown()) + TypeInfo operands_type = TypeInfo::Unknown()) : op_(op), mode_(mode), flags_(flags), @@ -687,7 +687,7 @@ class GenericBinaryOpStub: public CodeStub { args_in_registers_(ArgsInRegistersBits::decode(key)), args_reversed_(ArgsReversedBits::decode(key)), use_sse3_(SSE3Bits::decode(key)), - static_operands_type_(NumberInfo::ExpandedRepresentation( + static_operands_type_(TypeInfo::ExpandedRepresentation( StaticTypeInfoBits::decode(key))), runtime_operands_type_(type_info), name_(NULL) { @@ -714,7 +714,7 @@ class GenericBinaryOpStub: public CodeStub { bool use_sse3_; // Number type information of operands, determined by code generator. - NumberInfo static_operands_type_; + TypeInfo static_operands_type_; // Operand type information determined at runtime. BinaryOpIC::TypeInfo runtime_operands_type_; diff --git a/deps/v8/src/x64/full-codegen-x64.cc b/deps/v8/src/x64/full-codegen-x64.cc index 65f99a3fe2..ab737063c7 100644 --- a/deps/v8/src/x64/full-codegen-x64.cc +++ b/deps/v8/src/x64/full-codegen-x64.cc @@ -781,15 +781,13 @@ void FullCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) { Comment cmnt(masm_, "[ FunctionLiteral"); // Build the function boilerplate and instantiate it. - Handle<JSFunction> boilerplate = - Compiler::BuildBoilerplate(expr, script(), this); + Handle<SharedFunctionInfo> function_info = + Compiler::BuildFunctionInfo(expr, script(), this); if (HasStackOverflow()) return; - ASSERT(boilerplate->IsBoilerplate()); - // Create a new closure. __ push(rsi); - __ Push(boilerplate); + __ Push(function_info); __ CallRuntime(Runtime::kNewClosure, 2); Apply(context_, rax); } diff --git a/deps/v8/src/x64/ic-x64.cc b/deps/v8/src/x64/ic-x64.cc index 1d28a1f488..77043ce559 100644 --- a/deps/v8/src/x64/ic-x64.cc +++ b/deps/v8/src/x64/ic-x64.cc @@ -253,22 +253,6 @@ static void GenerateNumberDictionaryLoad(MacroAssembler* masm, } -// Helper function used to check that a value is either not an object -// or is loaded if it is an object. -static void GenerateCheckNonObjectOrLoaded(MacroAssembler* masm, Label* miss, - Register value) { - Label done; - // Check if the value is a Smi. - __ JumpIfSmi(value, &done); - // Check if the object has been loaded. - __ movq(kScratchRegister, FieldOperand(value, JSFunction::kMapOffset)); - __ testb(FieldOperand(kScratchRegister, Map::kBitField2Offset), - Immediate(1 << Map::kNeedsLoading)); - __ j(not_zero, miss); - __ bind(&done); -} - - // One byte opcode for test eax,0xXXXXXXXX. 
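The switch above from boilerplate JSFunctions to SharedFunctionInfo means a closure is now built by pairing one shared info object with the current context instead of copying a template function field by field. A minimal sketch of the initialization the FastNewClosureStub hunk performs, using simplified placeholder types (the real heap layout, tagging, and write barriers are omitted):

struct SharedFunctionInfoSketch { /* compiled code, literal count, ... */ };

struct JSFunctionSketch {
  void* properties;                  // empty fixed array
  void* elements;                    // empty fixed array
  void* prototype_or_initial_map;    // the hole until first instantiation
  SharedFunctionInfoSketch* shared;  // shared by every closure of the literal
  void* context;                     // the closure's own context (rsi above)
  void* literals;                    // empty fixed array when num_literals == 0
};

// Mirrors the field-by-field stores emitted by FastNewClosureStub::Generate.
JSFunctionSketch MakeClosureSketch(SharedFunctionInfoSketch* info,
                                   void* context,
                                   void* empty_fixed_array,
                                   void* the_hole) {
  JSFunctionSketch fn;
  fn.properties = empty_fixed_array;
  fn.elements = empty_fixed_array;
  fn.prototype_or_initial_map = the_hole;
  fn.shared = info;
  fn.context = context;
  fn.literals = empty_fixed_array;
  return fn;
}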
static const byte kTestEaxByte = 0xA9; @@ -522,7 +506,6 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { rdx, rax, DICTIONARY_CHECK_DONE); - GenerateCheckNonObjectOrLoaded(masm, &slow, rcx); __ movq(rax, rcx); __ IncrementCounter(&Counters::keyed_load_generic_symbol, 1); __ ret(0); @@ -1231,10 +1214,6 @@ static void GenerateNormalHelper(MacroAssembler* masm, // Check that the value is a JavaScript function. __ CmpObjectType(rdx, JS_FUNCTION_TYPE, rdx); __ j(not_equal, miss); - // Check that the function has been loaded. - __ testb(FieldOperand(rdx, Map::kBitField2Offset), - Immediate(1 << Map::kNeedsLoading)); - __ j(not_zero, miss); // Patch the receiver with the global proxy if necessary. if (is_global_object) { @@ -1431,7 +1410,6 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) { // Search the dictionary placing the result in rax. __ bind(&probe); GenerateDictionaryLoad(masm, &miss, rdx, rax, rbx, rcx, CHECK_DICTIONARY); - GenerateCheckNonObjectOrLoaded(masm, &miss, rax); __ ret(0); // Global object access: Check access rights. diff --git a/deps/v8/src/x64/register-allocator-x64.cc b/deps/v8/src/x64/register-allocator-x64.cc index 9dc97b845e..cf295935b9 100644 --- a/deps/v8/src/x64/register-allocator-x64.cc +++ b/deps/v8/src/x64/register-allocator-x64.cc @@ -44,7 +44,7 @@ void Result::ToRegister() { ASSERT(fresh.is_valid()); CodeGeneratorScope::Current()->masm()->Move(fresh.reg(), handle()); // This result becomes a copy of the fresh one. - fresh.set_number_info(number_info()); + fresh.set_type_info(type_info()); *this = fresh; } ASSERT(is_register()); @@ -62,7 +62,7 @@ void Result::ToRegister(Register target) { ASSERT(is_constant()); CodeGeneratorScope::Current()->masm()->Move(fresh.reg(), handle()); } - fresh.set_number_info(number_info()); + fresh.set_type_info(type_info()); *this = fresh; } else if (is_register() && reg().is(target)) { ASSERT(CodeGeneratorScope::Current()->has_valid_frame()); diff --git a/deps/v8/src/x64/virtual-frame-x64.cc b/deps/v8/src/x64/virtual-frame-x64.cc index 79be20b1bd..fa04b632f7 100644 --- a/deps/v8/src/x64/virtual-frame-x64.cc +++ b/deps/v8/src/x64/virtual-frame-x64.cc @@ -177,7 +177,7 @@ void VirtualFrame::EmitPop(const Operand& operand) { } -void VirtualFrame::EmitPush(Register reg, NumberInfo info) { +void VirtualFrame::EmitPush(Register reg, TypeInfo info) { ASSERT(stack_pointer_ == element_count() - 1); elements_.Add(FrameElement::MemoryElement(info)); stack_pointer_++; @@ -185,7 +185,7 @@ void VirtualFrame::EmitPush(Register reg, NumberInfo info) { } -void VirtualFrame::EmitPush(const Operand& operand, NumberInfo info) { +void VirtualFrame::EmitPush(const Operand& operand, TypeInfo info) { ASSERT(stack_pointer_ == element_count() - 1); elements_.Add(FrameElement::MemoryElement(info)); stack_pointer_++; @@ -193,7 +193,7 @@ void VirtualFrame::EmitPush(const Operand& operand, NumberInfo info) { } -void VirtualFrame::EmitPush(Immediate immediate, NumberInfo info) { +void VirtualFrame::EmitPush(Immediate immediate, TypeInfo info) { ASSERT(stack_pointer_ == element_count() - 1); elements_.Add(FrameElement::MemoryElement(info)); stack_pointer_++; @@ -203,7 +203,7 @@ void VirtualFrame::EmitPush(Immediate immediate, NumberInfo info) { void VirtualFrame::EmitPush(Smi* smi_value) { ASSERT(stack_pointer_ == element_count() - 1); - elements_.Add(FrameElement::MemoryElement(NumberInfo::Smi())); + elements_.Add(FrameElement::MemoryElement(TypeInfo::Smi())); stack_pointer_++; __ Push(smi_value); } @@ -211,19 +211,14 @@ void 
VirtualFrame::EmitPush(Smi* smi_value) { void VirtualFrame::EmitPush(Handle<Object> value) { ASSERT(stack_pointer_ == element_count() - 1); - NumberInfo info = NumberInfo::Unknown(); - if (value->IsSmi()) { - info = NumberInfo::Smi(); - } else if (value->IsHeapNumber()) { - info = NumberInfo::HeapNumber(); - } + TypeInfo info = TypeInfo::TypeFromValue(value); elements_.Add(FrameElement::MemoryElement(info)); stack_pointer_++; __ Push(value); } -void VirtualFrame::EmitPush(Heap::RootListIndex index, NumberInfo info) { +void VirtualFrame::EmitPush(Heap::RootListIndex index, TypeInfo info) { ASSERT(stack_pointer_ == element_count() - 1); elements_.Add(FrameElement::MemoryElement(info)); stack_pointer_++; @@ -297,12 +292,12 @@ int VirtualFrame::InvalidateFrameSlotAt(int index) { elements_[new_backing_index] = FrameElement::RegisterElement(backing_reg, FrameElement::SYNCED, - original.number_info()); + original.type_info()); } else { elements_[new_backing_index] = FrameElement::RegisterElement(backing_reg, FrameElement::NOT_SYNCED, - original.number_info()); + original.type_info()); } // Update the other copies. for (int i = new_backing_index + 1; i < element_count(); i++) { @@ -334,7 +329,7 @@ void VirtualFrame::TakeFrameSlotAt(int index) { FrameElement new_element = FrameElement::RegisterElement(fresh.reg(), FrameElement::NOT_SYNCED, - original.number_info()); + original.type_info()); Use(fresh.reg(), element_count()); elements_.Add(new_element); __ movq(fresh.reg(), Operand(rbp, fp_relative(index))); @@ -480,7 +475,7 @@ void VirtualFrame::MakeMergable() { if (element.is_constant() || element.is_copy()) { if (element.is_synced()) { // Just spill. - elements_[i] = FrameElement::MemoryElement(NumberInfo::Unknown()); + elements_[i] = FrameElement::MemoryElement(TypeInfo::Unknown()); } else { // Allocate to a register. FrameElement backing_element; // Invalid if not a copy. @@ -492,7 +487,7 @@ void VirtualFrame::MakeMergable() { elements_[i] = FrameElement::RegisterElement(fresh.reg(), FrameElement::NOT_SYNCED, - NumberInfo::Unknown()); + TypeInfo::Unknown()); Use(fresh.reg(), i); // Emit a move. @@ -521,7 +516,7 @@ void VirtualFrame::MakeMergable() { // The copy flag is not relied on before the end of this loop, // including when registers are spilled. elements_[i].clear_copied(); - elements_[i].set_number_info(NumberInfo::Unknown()); + elements_[i].set_type_info(TypeInfo::Unknown()); } } } @@ -728,11 +723,11 @@ Result VirtualFrame::Pop() { ASSERT(element.is_valid()); // Get number type information of the result. - NumberInfo info; + TypeInfo info; if (!element.is_copy()) { - info = element.number_info(); + info = element.type_info(); } else { - info = elements_[element.index()].number_info(); + info = elements_[element.index()].type_info(); } bool pop_needed = (stack_pointer_ == index); @@ -742,7 +737,7 @@ Result VirtualFrame::Pop() { Result temp = cgen()->allocator()->Allocate(); ASSERT(temp.is_valid()); __ pop(temp.reg()); - temp.set_number_info(info); + temp.set_type_info(info); return temp; } @@ -772,7 +767,7 @@ Result VirtualFrame::Pop() { FrameElement new_element = FrameElement::RegisterElement(temp.reg(), FrameElement::SYNCED, - element.number_info()); + element.type_info()); // Preserve the copy flag on the element. 
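The NumberInfo to TypeInfo rename above is mechanical, but the bookkeeping it names is worth spelling out: every virtual-frame element carries a static type estimate, copy elements defer to their backing element, and Pop() hands the estimate on to the popped Result. A toy sketch of that bookkeeping under made-up names (the real FrameElement/Result plumbing is far richer):

#include <vector>

enum class TypeSketch { kUnknown, kNumber, kSmi, kHeapNumber };

struct ElementSketch {
  TypeSketch type;
  int copy_of;  // index of the backing element, or -1 if not a copy
};

class FrameSketch {
 public:
  void Push(TypeSketch type) { elements_.push_back({type, -1}); }
  void PushCopyOf(int index) {
    elements_.push_back({TypeSketch::kUnknown, index});
  }
  TypeSketch Pop() {
    ElementSketch e = elements_.back();
    elements_.pop_back();
    // Copies carry no type of their own; consult the backing element,
    // just as VirtualFrame::Pop() does in the hunk above.
    return e.copy_of >= 0 ? elements_[e.copy_of].type : e.type;
  }
 private:
  std::vector<ElementSketch> elements_;
};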
if (element.is_copied()) new_element.set_copied(); elements_[index] = new_element; diff --git a/deps/v8/src/x64/virtual-frame-x64.h b/deps/v8/src/x64/virtual-frame-x64.h index 3e9f6dd651..15653edb81 100644 --- a/deps/v8/src/x64/virtual-frame-x64.h +++ b/deps/v8/src/x64/virtual-frame-x64.h @@ -28,7 +28,7 @@ #ifndef V8_X64_VIRTUAL_FRAME_X64_H_ #define V8_X64_VIRTUAL_FRAME_X64_H_ -#include "number-info.h" +#include "type-info.h" #include "register-allocator.h" #include "scopes.h" @@ -83,7 +83,7 @@ class VirtualFrame : public ZoneObject { // Create a duplicate of an existing valid frame element. FrameElement CopyElementAt(int index, - NumberInfo info = NumberInfo::Uninitialized()); + TypeInfo info = TypeInfo::Uninitialized()); // The number of elements on the virtual frame. int element_count() { return elements_.length(); } @@ -139,7 +139,7 @@ class VirtualFrame : public ZoneObject { void ForgetElements(int count); // Spill all values from the frame to memory. - void SpillAll(); + inline void SpillAll(); // Spill all occurrences of a specific register from the frame. void Spill(Register reg) { @@ -200,7 +200,7 @@ class VirtualFrame : public ZoneObject { // Prepare for returning from the frame by spilling locals. This // avoids generating unnecessary merge code when jumping to the // shared return site. Emits code for spills. - void PrepareForReturn(); + inline void PrepareForReturn(); // Number of local variables after when we use a loop for allocating. static const int kLocalVarBound = 7; @@ -383,19 +383,19 @@ class VirtualFrame : public ZoneObject { // Push an element on top of the expression stack and emit a // corresponding push instruction. void EmitPush(Register reg, - NumberInfo info = NumberInfo::Unknown()); + TypeInfo info = TypeInfo::Unknown()); void EmitPush(const Operand& operand, - NumberInfo info = NumberInfo::Unknown()); + TypeInfo info = TypeInfo::Unknown()); void EmitPush(Heap::RootListIndex index, - NumberInfo info = NumberInfo::Unknown()); + TypeInfo info = TypeInfo::Unknown()); void EmitPush(Immediate immediate, - NumberInfo info = NumberInfo::Unknown()); + TypeInfo info = TypeInfo::Unknown()); void EmitPush(Smi* value); // Uses kScratchRegister, emits appropriate relocation info. void EmitPush(Handle<Object> value); // Push an element on the virtual frame. - inline void Push(Register reg, NumberInfo info = NumberInfo::Unknown()); + inline void Push(Register reg, TypeInfo info = TypeInfo::Unknown()); inline void Push(Handle<Object> value); inline void Push(Smi* value); @@ -403,7 +403,7 @@ class VirtualFrame : public ZoneObject { // frame). void Push(Result* result) { if (result->is_register()) { - Push(result->reg(), result->number_info()); + Push(result->reg(), result->type_info()); } else { ASSERT(result->is_constant()); Push(result->handle()); @@ -416,8 +416,8 @@ class VirtualFrame : public ZoneObject { // the frame. Nip(k) is equivalent to x = Pop(), Drop(k), Push(x). 
inline void Nip(int num_dropped); - inline void SetTypeForLocalAt(int index, NumberInfo info); - inline void SetTypeForParamAt(int index, NumberInfo info); + inline void SetTypeForLocalAt(int index, TypeInfo info); + inline void SetTypeForParamAt(int index, TypeInfo info); private: static const int kLocal0Offset = JavaScriptFrameConstants::kLocal0Offset; diff --git a/deps/v8/test/cctest/SConscript b/deps/v8/test/cctest/SConscript index 0d59b214e8..9c19c2bc25 100644 --- a/deps/v8/test/cctest/SConscript +++ b/deps/v8/test/cctest/SConscript @@ -83,7 +83,7 @@ SOURCES = { 'arch:x64': ['test-assembler-x64.cc', 'test-macro-assembler-x64.cc', 'test-log-stack-tracer.cc'], - 'arch:mips': ['test-assembler-mips.cc'], + 'arch:mips': ['test-assembler-mips.cc', 'test-mips.cc'], 'os:linux': ['test-platform-linux.cc'], 'os:macos': ['test-platform-macos.cc'], 'os:nullos': ['test-platform-nullos.cc'], diff --git a/deps/v8/test/cctest/cctest.status b/deps/v8/test/cctest/cctest.status index eefe71ca9e..7689371a08 100644 --- a/deps/v8/test/cctest/cctest.status +++ b/deps/v8/test/cctest/cctest.status @@ -40,7 +40,6 @@ test-api/ApplyInterruption: PASS || TIMEOUT test-serialize/TestThatAlwaysFails: FAIL test-serialize/DependentTestThatAlwaysFails: FAIL - [ $arch == arm ] # BUG(240): Test seems flaky on ARM. @@ -60,6 +59,7 @@ test-accessors: SKIP test-alloc: SKIP test-api: SKIP test-compiler: SKIP +test-cpu-profiler: SKIP test-debug: SKIP test-decls: SKIP test-func-name-inference: SKIP diff --git a/deps/v8/test/cctest/test-api.cc b/deps/v8/test/cctest/test-api.cc index e996a07303..e57914370d 100644 --- a/deps/v8/test/cctest/test-api.cc +++ b/deps/v8/test/cctest/test-api.cc @@ -38,7 +38,7 @@ #include "utils.h" #include "cctest.h" -static const bool kLogThreading = false; +static const bool kLogThreading = true; static bool IsNaN(double x) { #ifdef WIN32 @@ -4465,7 +4465,7 @@ TEST(ContextDetachGlobal) { // Enter env2 env2->Enter(); - // Create a function in env1 + // Create a function in env2 and add a reference to it in env1. Local<v8::Object> global2 = env2->Global(); global2->Set(v8_str("prop"), v8::Integer::New(1)); CompileRun("function getProp() {return prop;}"); @@ -4473,7 +4473,7 @@ TEST(ContextDetachGlobal) { env1->Global()->Set(v8_str("getProp"), global2->Get(v8_str("getProp"))); - // Detach env1's global, and reuse the global object of env1 + // Detach env2's global, and reuse the global object of env2 env2->Exit(); env2->DetachGlobal(); // env2 has a new global object. @@ -4513,6 +4513,85 @@ TEST(ContextDetachGlobal) { } +TEST(DetachAndReattachGlobal) { + v8::HandleScope scope; + LocalContext env1; + + // Create second environment. + v8::Persistent<Context> env2 = Context::New(); + + Local<Value> foo = v8_str("foo"); + + // Set same security token for env1 and env2. + env1->SetSecurityToken(foo); + env2->SetSecurityToken(foo); + + // Create a property on the global object in env2. + { + v8::Context::Scope scope(env2); + env2->Global()->Set(v8_str("p"), v8::Integer::New(42)); + } + + // Create a reference to env2 global from env1 global. + env1->Global()->Set(v8_str("other"), env2->Global()); + + // Check that we have access to other.p in env2 from env1. + Local<Value> result = CompileRun("other.p"); + CHECK(result->IsInt32()); + CHECK_EQ(42, result->Int32Value()); + + // Hold on to global from env2 and detach global from env2. + Local<v8::Object> global2 = env2->Global(); + env2->DetachGlobal(); + + // Check that the global has been detached. No other.p property can + // be found. 
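For readers who only want the embedder-facing shape of the reattach support exercised by the DetachAndReattachGlobal test being added here, this is the call sequence distilled from that test; HandleScope management and error checking are trimmed, and every call shown is one the test itself makes against the 2010-era API:

#include <v8.h>

static void DetachAndReattachSketch() {
  v8::HandleScope scope;
  v8::Persistent<v8::Context> env2 = v8::Context::New();

  v8::Local<v8::Object> global2 = env2->Global();  // keep the global object
  env2->DetachGlobal();                            // env2 gets a fresh global

  // The detached global can back a brand-new context...
  v8::Persistent<v8::Context> env3 =
      v8::Context::New(0, v8::Handle<v8::ObjectTemplate>(), global2);

  // ...and can later be handed back to env2 via the new ReattachGlobal call.
  env3->DetachGlobal();
  env2->ReattachGlobal(global2);

  env2.Dispose();
  env3.Dispose();
}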
+ result = CompileRun("other.p"); + CHECK(result->IsUndefined()); + + // Reuse global2 for env3. + v8::Persistent<Context> env3 = + Context::New(0, v8::Handle<v8::ObjectTemplate>(), global2); + CHECK_EQ(global2, env3->Global()); + + // Start by using the same security token for env3 as for env1 and env2. + env3->SetSecurityToken(foo); + + // Create a property on the global object in env3. + { + v8::Context::Scope scope(env3); + env3->Global()->Set(v8_str("p"), v8::Integer::New(24)); + } + + // Check that other.p is now the property in env3 and that we have access. + result = CompileRun("other.p"); + CHECK(result->IsInt32()); + CHECK_EQ(24, result->Int32Value()); + + // Change security token for env3 to something different from env1 and env2. + env3->SetSecurityToken(v8_str("bar")); + + // Check that we do not have access to other.p in env1. |other| is now + // the global object for env3 which has a different security token, + // so access should be blocked. + result = CompileRun("other.p"); + CHECK(result->IsUndefined()); + + // Detach the global for env3 and reattach it to env2. + env3->DetachGlobal(); + env2->ReattachGlobal(global2); + + // Check that we have access to other.p again in env1. |other| is now + // the global object for env2 which has the same security token as env1. + result = CompileRun("other.p"); + CHECK(result->IsInt32()); + CHECK_EQ(42, result->Int32Value()); + + env2.Dispose(); + env3.Dispose(); +} + + static bool NamedAccessBlocker(Local<v8::Object> global, Local<Value> name, v8::AccessType type, @@ -4605,6 +4684,7 @@ THREADED_TEST(AccessControl) { value = v8_compile("other.accessible_prop = 3")->Run(); CHECK(value->IsNumber()); CHECK_EQ(3, value->Int32Value()); + CHECK_EQ(3, g_echo_value); value = v8_compile("other.accessible_prop")->Run(); CHECK(value->IsNumber()); @@ -9955,3 +10035,57 @@ TEST(Bug618) { CHECK_EQ(23, c1->Get(v8_str("y"))->Int32Value()); } } + +int prologue_call_count = 0; +int epilogue_call_count = 0; +int prologue_call_count_second = 0; +int epilogue_call_count_second = 0; + +void PrologueCallback(v8::GCType, v8::GCCallbackFlags) { + ++prologue_call_count; +} + +void EpilogueCallback(v8::GCType, v8::GCCallbackFlags) { + ++epilogue_call_count; +} + +void PrologueCallbackSecond(v8::GCType, v8::GCCallbackFlags) { + ++prologue_call_count_second; +} + +void EpilogueCallbackSecond(v8::GCType, v8::GCCallbackFlags) { + ++epilogue_call_count_second; +} + +TEST(GCCallbacks) { + LocalContext context; + + v8::V8::AddGCPrologueCallback(PrologueCallback); + v8::V8::AddGCEpilogueCallback(EpilogueCallback); + CHECK_EQ(0, prologue_call_count); + CHECK_EQ(0, epilogue_call_count); + i::Heap::CollectAllGarbage(false); + CHECK_EQ(1, prologue_call_count); + CHECK_EQ(1, epilogue_call_count); + v8::V8::AddGCPrologueCallback(PrologueCallbackSecond); + v8::V8::AddGCEpilogueCallback(EpilogueCallbackSecond); + i::Heap::CollectAllGarbage(false); + CHECK_EQ(2, prologue_call_count); + CHECK_EQ(2, epilogue_call_count); + CHECK_EQ(1, prologue_call_count_second); + CHECK_EQ(1, epilogue_call_count_second); + v8::V8::RemoveGCPrologueCallback(PrologueCallback); + v8::V8::RemoveGCEpilogueCallback(EpilogueCallback); + i::Heap::CollectAllGarbage(false); + CHECK_EQ(2, prologue_call_count); + CHECK_EQ(2, epilogue_call_count); + CHECK_EQ(2, prologue_call_count_second); + CHECK_EQ(2, epilogue_call_count_second); + v8::V8::RemoveGCPrologueCallback(PrologueCallbackSecond); + v8::V8::RemoveGCEpilogueCallback(EpilogueCallbackSecond); + i::Heap::CollectAllGarbage(false); + CHECK_EQ(2, 
prologue_call_count); + CHECK_EQ(2, epilogue_call_count); + CHECK_EQ(2, prologue_call_count_second); + CHECK_EQ(2, epilogue_call_count_second); +} diff --git a/deps/v8/test/cctest/test-assembler-arm.cc b/deps/v8/test/cctest/test-assembler-arm.cc index 459b8624c0..7f3404cf20 100644 --- a/deps/v8/test/cctest/test-assembler-arm.cc +++ b/deps/v8/test/cctest/test-assembler-arm.cc @@ -47,9 +47,6 @@ static v8::Persistent<v8::Context> env; // The test framework does not accept flags on the command line, so we set them static void InitializeVM() { - // disable compilation of natives by specifying an empty natives file - FLAG_natives_file = ""; - // enable generation of comments FLAG_debug_code = true; diff --git a/deps/v8/test/cctest/test-assembler-mips.cc b/deps/v8/test/cctest/test-assembler-mips.cc index ab011a73ed..0a2310e3e8 100644 --- a/deps/v8/test/cctest/test-assembler-mips.cc +++ b/deps/v8/test/cctest/test-assembler-mips.cc @@ -49,8 +49,8 @@ static v8::Persistent<v8::Context> env; // The test framework does not accept flags on the command line, so we set them. static void InitializeVM() { - // Disable compilation of natives by specifying an empty natives file. - FLAG_natives_file = ""; + // Disable compilation of natives. + FLAG_disable_native_files = true; // Enable generation of comments. FLAG_debug_code = true; diff --git a/deps/v8/test/cctest/test-circular-queue.cc b/deps/v8/test/cctest/test-circular-queue.cc index bb69c1bc02..3fa49bfaf3 100644 --- a/deps/v8/test/cctest/test-circular-queue.cc +++ b/deps/v8/test/cctest/test-circular-queue.cc @@ -61,8 +61,6 @@ TEST(SamplingCircularQueue) { SamplingCircularQueue scq(sizeof(Record), kRecordsPerChunk * sizeof(Record), 3); - scq.SetUpProducer(); - scq.SetUpConsumer(); // Check that we are using non-reserved values. CHECK_NE(SamplingCircularQueue::kClear, 1); @@ -121,7 +119,103 @@ TEST(SamplingCircularQueue) { // Consumption must still be possible as the first cell of the // last chunk is not clean. CHECK_NE(NULL, scq.StartDequeue()); +} + + +namespace { + +class ProducerThread: public i::Thread { + public: + typedef SamplingCircularQueue::Cell Record; + + ProducerThread(SamplingCircularQueue* scq, + int records_per_chunk, + Record value, + i::Semaphore* finished) + : scq_(scq), + records_per_chunk_(records_per_chunk), + value_(value), + finished_(finished) { } + + virtual void Run() { + for (Record i = value_; i < value_ + records_per_chunk_; ++i) { + Record* rec = reinterpret_cast<Record*>(scq_->Enqueue()); + CHECK_NE(NULL, rec); + *rec = i; + } + + finished_->Signal(); + } + + private: + SamplingCircularQueue* scq_; + const int records_per_chunk_; + Record value_; + i::Semaphore* finished_; +}; + +} // namespace + +TEST(SamplingCircularQueueMultithreading) { + // Emulate multiple VM threads working 'one thread at a time.' + // This test enqueues data from different threads. This corresponds + // to the case of profiling under Linux, where signal handler that + // does sampling is called in the context of different VM threads. + + typedef ProducerThread::Record Record; + const int kRecordsPerChunk = 4; + SamplingCircularQueue scq(sizeof(Record), + kRecordsPerChunk * sizeof(Record), + 3); + i::Semaphore* semaphore = i::OS::CreateSemaphore(0); + // Don't poll ahead, making possible to check data in the buffer + // immediately after enqueuing. + scq.FlushResidualRecords(); + + // Check that we are using non-reserved values. 
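The GCCallbacks test above drives the new prologue/epilogue hooks purely through counters. A condensed sketch of how an embedder would register and later remove the same hooks, where the counter stands in for whatever per-collection bookkeeping the embedder actually wants:

#include <v8.h>

static int g_collections_in_progress = 0;

static void OnGCStart(v8::GCType, v8::GCCallbackFlags) {
  ++g_collections_in_progress;
}

static void OnGCEnd(v8::GCType, v8::GCCallbackFlags) {
  --g_collections_in_progress;
}

static void InstallGCHooksSketch() {
  v8::V8::AddGCPrologueCallback(OnGCStart);
  v8::V8::AddGCEpilogueCallback(OnGCEnd);
}

static void RemoveGCHooksSketch() {
  v8::V8::RemoveGCPrologueCallback(OnGCStart);
  v8::V8::RemoveGCEpilogueCallback(OnGCEnd);
}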
+ CHECK_NE(SamplingCircularQueue::kClear, 1); + CHECK_NE(SamplingCircularQueue::kEnd, 1); + ProducerThread producer1(&scq, kRecordsPerChunk, 1, semaphore); + ProducerThread producer2(&scq, kRecordsPerChunk, 10, semaphore); + ProducerThread producer3(&scq, kRecordsPerChunk, 20, semaphore); + + CHECK_EQ(NULL, scq.StartDequeue()); + producer1.Start(); + semaphore->Wait(); + for (Record i = 1; i < 1 + kRecordsPerChunk; ++i) { + Record* rec = reinterpret_cast<Record*>(scq.StartDequeue()); + CHECK_NE(NULL, rec); + CHECK_EQ(static_cast<int64_t>(i), static_cast<int64_t>(*rec)); + CHECK_EQ(rec, reinterpret_cast<Record*>(scq.StartDequeue())); + scq.FinishDequeue(); + CHECK_NE(rec, reinterpret_cast<Record*>(scq.StartDequeue())); + } + + CHECK_EQ(NULL, scq.StartDequeue()); + producer2.Start(); + semaphore->Wait(); + for (Record i = 10; i < 10 + kRecordsPerChunk; ++i) { + Record* rec = reinterpret_cast<Record*>(scq.StartDequeue()); + CHECK_NE(NULL, rec); + CHECK_EQ(static_cast<int64_t>(i), static_cast<int64_t>(*rec)); + CHECK_EQ(rec, reinterpret_cast<Record*>(scq.StartDequeue())); + scq.FinishDequeue(); + CHECK_NE(rec, reinterpret_cast<Record*>(scq.StartDequeue())); + } + + CHECK_EQ(NULL, scq.StartDequeue()); + producer3.Start(); + semaphore->Wait(); + for (Record i = 20; i < 20 + kRecordsPerChunk; ++i) { + Record* rec = reinterpret_cast<Record*>(scq.StartDequeue()); + CHECK_NE(NULL, rec); + CHECK_EQ(static_cast<int64_t>(i), static_cast<int64_t>(*rec)); + CHECK_EQ(rec, reinterpret_cast<Record*>(scq.StartDequeue())); + scq.FinishDequeue(); + CHECK_NE(rec, reinterpret_cast<Record*>(scq.StartDequeue())); + } + + CHECK_EQ(NULL, scq.StartDequeue()); - scq.TearDownConsumer(); - scq.TearDownProducer(); + delete semaphore; } diff --git a/deps/v8/test/cctest/test-compiler.cc b/deps/v8/test/cctest/test-compiler.cc index d61a2a108c..96549a37e7 100644 --- a/deps/v8/test/cctest/test-compiler.cc +++ b/deps/v8/test/cctest/test-compiler.cc @@ -114,7 +114,7 @@ static void SetGlobalProperty(const char* name, Object* value) { static Handle<JSFunction> Compile(const char* source) { Handle<String> source_code(Factory::NewStringFromUtf8(CStrVector(source))); - Handle<JSFunction> boilerplate = + Handle<SharedFunctionInfo> shared_function = Compiler::Compile(source_code, Handle<String>(), 0, @@ -123,8 +123,8 @@ static Handle<JSFunction> Compile(const char* source) { NULL, Handle<String>::null(), NOT_NATIVES_CODE); - return Factory::NewFunctionFromBoilerplate(boilerplate, - Top::global_context()); + return Factory::NewFunctionFromSharedFunctionInfo(shared_function, + Top::global_context()); } diff --git a/deps/v8/test/cctest/test-cpu-profiler.cc b/deps/v8/test/cctest/test-cpu-profiler.cc index bd966fa21b..2fff4fae02 100644 --- a/deps/v8/test/cctest/test-cpu-profiler.cc +++ b/deps/v8/test/cctest/test-cpu-profiler.cc @@ -64,7 +64,6 @@ TEST(CodeEvents) { ProfileGenerator generator(&profiles); ProfilerEventsProcessor processor(&generator); processor.Start(); - processor.SetUpSamplesProducer(); while (!processor.running()) { i::Thread::YieldCPU(); } @@ -117,8 +116,6 @@ TEST(CodeEvents) { CodeEntry* entry5 = generator.code_map()->FindEntry(ToAddress(0x1700)); CHECK_NE(NULL, entry5); CHECK_EQ(aaa_str, entry5->name()); - - processor.TearDownSamplesProducer(); } @@ -133,7 +130,6 @@ TEST(TickEvents) { ProfileGenerator generator(&profiles); ProfilerEventsProcessor processor(&generator); processor.Start(); - processor.SetUpSamplesProducer(); while (!processor.running()) { i::Thread::YieldCPU(); } @@ -197,6 +193,4 @@ TEST(TickEvents) { 
bottom_up_ddd_children.last()->GetChildren(&bottom_up_ddd_stub_children); CHECK_EQ(1, bottom_up_ddd_stub_children.length()); CHECK_EQ("bbb", bottom_up_ddd_stub_children.last()->entry()->name()); - - processor.TearDownSamplesProducer(); } diff --git a/deps/v8/test/cctest/test-debug.cc b/deps/v8/test/cctest/test-debug.cc index d0726b9bdc..720ab58f19 100644 --- a/deps/v8/test/cctest/test-debug.cc +++ b/deps/v8/test/cctest/test-debug.cc @@ -4655,11 +4655,9 @@ TEST(CallFunctionInDebugger) { v8::Script::Compile( v8::String::New(debugger_call_with_closure_source))->Run()); - // Calling a function through the debugger returns undefined if there are no - // JavaScript frames. - CHECK(v8::Debug::Call(frame_count)->IsUndefined()); - CHECK(v8::Debug::Call(frame_source_line)->IsUndefined()); - CHECK(v8::Debug::Call(debugger_call_with_data)->IsUndefined()); + // Calling a function through the debugger returns 0 frames if there are + // no JavaScript frames. + CHECK_EQ(v8::Integer::New(0), v8::Debug::Call(frame_count)); // Test that the number of frames can be retrieved. v8::Script::Compile(v8::String::New("CheckFrameCount(1)"))->Run(); diff --git a/deps/v8/test/cctest/test-func-name-inference.cc b/deps/v8/test/cctest/test-func-name-inference.cc index 28e8649f9f..67791fb732 100644 --- a/deps/v8/test/cctest/test-func-name-inference.cc +++ b/deps/v8/test/cctest/test-func-name-inference.cc @@ -62,8 +62,16 @@ static void CheckFunctionName(v8::Handle<v8::Script> script, const char* func_pos_src, const char* ref_inferred_name) { // Get script source. - Handle<JSFunction> fun = v8::Utils::OpenHandle(*script); - Handle<Script> i_script(Script::cast(fun->shared()->script())); + Handle<Object> obj = v8::Utils::OpenHandle(*script); + Handle<SharedFunctionInfo> shared_function; + if (obj->IsSharedFunctionInfo()) { + shared_function = + Handle<SharedFunctionInfo>(SharedFunctionInfo::cast(*obj)); + } else { + shared_function = + Handle<SharedFunctionInfo>(JSFunction::cast(*obj)->shared()); + } + Handle<Script> i_script(Script::cast(shared_function->script())); CHECK(i_script->source()->IsString()); Handle<String> script_src(String::cast(i_script->source())); diff --git a/deps/v8/test/cctest/test-heap.cc b/deps/v8/test/cctest/test-heap.cc index 45c516038d..2b88f0f94e 100644 --- a/deps/v8/test/cctest/test-heap.cc +++ b/deps/v8/test/cctest/test-heap.cc @@ -809,3 +809,46 @@ TEST(Iteration) { CHECK_EQ(objs_count, next_objs_index); CHECK_EQ(objs_count, ObjectsFoundInHeap(objs, objs_count)); } + + +TEST(LargeObjectSpaceContains) { + InitializeVM(); + + int free_bytes = Heap::MaxObjectSizeInPagedSpace(); + CHECK(Heap::CollectGarbage(free_bytes, NEW_SPACE)); + + Address current_top = Heap::new_space()->top(); + Page* page = Page::FromAddress(current_top); + Address current_page = page->address(); + Address next_page = current_page + Page::kPageSize; + int bytes_to_page = next_page - current_top; + if (bytes_to_page <= FixedArray::kHeaderSize) { + // Alas, need to cross another page to be able to + // put desired value. 
+ next_page += Page::kPageSize; + bytes_to_page = next_page - current_top; + } + CHECK(bytes_to_page > FixedArray::kHeaderSize); + + int* is_normal_page_ptr = &Page::FromAddress(next_page)->is_normal_page; + Address is_normal_page_addr = reinterpret_cast<Address>(is_normal_page_ptr); + + int bytes_to_allocate = (is_normal_page_addr - current_top) + kPointerSize; + + int n_elements = (bytes_to_allocate - FixedArray::kHeaderSize) / + kPointerSize; + CHECK_EQ(bytes_to_allocate, FixedArray::SizeFor(n_elements)); + FixedArray* array = FixedArray::cast( + Heap::AllocateFixedArray(n_elements)); + + int index = n_elements - 1; + CHECK_EQ(is_normal_page_ptr, + HeapObject::RawField(array, FixedArray::OffsetOfElementAt(index))); + array->set(index, Smi::FromInt(0)); + // This chould have turned next page into LargeObjectPage: + // CHECK(Page::FromAddress(next_page)->IsLargeObjectPage()); + + HeapObject* addr = HeapObject::FromAddress(next_page + 2 * kPointerSize); + CHECK(Heap::new_space()->Contains(addr)); + CHECK(!Heap::lo_space()->Contains(addr)); +} diff --git a/deps/v8/test/cctest/test-log-stack-tracer.cc b/deps/v8/test/cctest/test-log-stack-tracer.cc index aa912a377f..532311bfbc 100644 --- a/deps/v8/test/cctest/test-log-stack-tracer.cc +++ b/deps/v8/test/cctest/test-log-stack-tracer.cc @@ -191,7 +191,9 @@ static void InitializeVM() { static Handle<JSFunction> CompileFunction(const char* source) { - return v8::Utils::OpenHandle(*Script::Compile(String::New(source))); + Handle<JSFunction> result(JSFunction::cast( + *v8::Utils::OpenHandle(*Script::Compile(String::New(source))))); + return result; } @@ -201,16 +203,16 @@ static Local<Value> GetGlobalProperty(const char* name) { static Handle<JSFunction> GetGlobalJSFunction(const char* name) { - Handle<JSFunction> js_func(JSFunction::cast( - *(v8::Utils::OpenHandle( - *GetGlobalProperty(name))))); - return js_func; + Handle<JSFunction> result(JSFunction::cast( + *v8::Utils::OpenHandle(*GetGlobalProperty(name)))); + return result; } static void CheckRetAddrIsInJSFunction(const char* func_name, - Address ret_addr) { - CheckRetAddrIsInJSFunction(func_name, ret_addr, + Address ret_addr) { + CheckRetAddrIsInJSFunction(func_name, + ret_addr, GetGlobalJSFunction(func_name)); } @@ -278,23 +280,39 @@ static void CreateTraceCallerFunction(const char* func_name, #endif SetGlobalProperty(func_name, v8::ToApi<Value>(func)); + CHECK_EQ(*func, *GetGlobalJSFunction(func_name)); } +// This test verifies that stack tracing works when called during +// execution of a native function called from JS code. In this case, +// StackTracer uses Top::c_entry_fp as a starting point for stack +// walking. TEST(CFromJSStackTrace) { TickSample sample; InitTraceEnv(&sample); InitializeVM(); v8::HandleScope scope; + // Create global function JSFuncDoTrace which calls + // extension function trace() with the current frame pointer value. CreateTraceCallerFunction("JSFuncDoTrace", "trace"); - CompileRun( + Local<Value> result = CompileRun( "function JSTrace() {" " JSFuncDoTrace();" "};\n" - "JSTrace();"); + "JSTrace();\n" + "true;"); + CHECK(!result.IsEmpty()); + // When stack tracer is invoked, the stack should look as follows: + // script [JS] + // JSTrace() [JS] + // JSFuncDoTrace() [JS] [captures EBP value and encodes it as Smi] + // trace(EBP encoded as Smi) [native (extension)] + // DoTrace(EBP) [native] + // StackTracer::Trace CHECK_GT(sample.frames_count, 1); - // Stack sampling will start from the first JS function, i.e. 
"JSFuncDoTrace" + // Stack tracing will start from the first JS function, i.e. "JSFuncDoTrace" CheckRetAddrIsInJSFunction("JSFuncDoTrace", sample.stack[0]); CheckRetAddrIsInJSFunction("JSTrace", @@ -302,22 +320,41 @@ TEST(CFromJSStackTrace) { } +// This test verifies that stack tracing works when called during +// execution of JS code. However, as calling StackTracer requires +// entering native code, we can only emulate pure JS by erasing +// Top::c_entry_fp value. In this case, StackTracer uses passed frame +// pointer value as a starting point for stack walking. TEST(PureJSStackTrace) { TickSample sample; InitTraceEnv(&sample); InitializeVM(); v8::HandleScope scope; + // Create global function JSFuncDoTrace which calls + // extension function js_trace() with the current frame pointer value. CreateTraceCallerFunction("JSFuncDoTrace", "js_trace"); - CompileRun( + Local<Value> result = CompileRun( "function JSTrace() {" " JSFuncDoTrace();" "};\n" "function OuterJSTrace() {" " JSTrace();" "};\n" - "OuterJSTrace();"); - // The last JS function called. + "OuterJSTrace();\n" + "true;"); + CHECK(!result.IsEmpty()); + // When stack tracer is invoked, the stack should look as follows: + // script [JS] + // OuterJSTrace() [JS] + // JSTrace() [JS] + // JSFuncDoTrace() [JS] [captures EBP value and encodes it as Smi] + // js_trace(EBP encoded as Smi) [native (extension)] + // DoTraceHideCEntryFPAddress(EBP) [native] + // StackTracer::Trace + // + // The last JS function called. It is only visible through + // sample.function, as its return address is above captured EBP value. CHECK_EQ(GetGlobalJSFunction("JSFuncDoTrace")->address(), sample.function); CHECK_GT(sample.frames_count, 1); @@ -354,6 +391,9 @@ static int CFunc(int depth) { } +// This test verifies that stack tracing doesn't crash when called on +// pure native code. StackTracer only unrolls JS code, so we can't +// get any meaningful info here. TEST(PureCStackTrace) { TickSample sample; InitTraceEnv(&sample); diff --git a/deps/v8/test/cctest/test-mips.cc b/deps/v8/test/cctest/test-mips.cc new file mode 100644 index 0000000000..efd4cc975d --- /dev/null +++ b/deps/v8/test/cctest/test-mips.cc @@ -0,0 +1,52 @@ +// Copyright 2010 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +#include "v8.h" +#include "execution.h" + +#include "cctest.h" + +using ::v8::Local; +using ::v8::String; +using ::v8::Script; + +namespace i = ::v8::internal; + +TEST(MIPSFunctionCalls) { + // Disable compilation of natives. + i::FLAG_disable_native_files = true; + i::FLAG_full_compiler = false; + + v8::HandleScope scope; + LocalContext env; // from cctest.h + + const char* c_source = "function foo() { return 0x1234; }; foo();"; + Local<String> source = ::v8::String::New(c_source); + Local<Script> script = ::v8::Script::Compile(source); + CHECK_EQ(0x1234, script->Run()->Int32Value()); +} diff --git a/deps/v8/test/cctest/test-serialize.cc b/deps/v8/test/cctest/test-serialize.cc index 4308ff5ef6..54e69a1d54 100644 --- a/deps/v8/test/cctest/test-serialize.cc +++ b/deps/v8/test/cctest/test-serialize.cc @@ -289,65 +289,68 @@ static void SanityCheck() { DEPENDENT_TEST(Deserialize, Serialize) { - v8::HandleScope scope; + // The serialize-deserialize tests only work if the VM is built without + // serialization. That doesn't matter. We don't need to be able to + // serialize a snapshot in a VM that is booted from a snapshot. + if (!Snapshot::IsEnabled()) { + v8::HandleScope scope; - Deserialize(); + Deserialize(); - v8::Persistent<v8::Context> env = v8::Context::New(); - env->Enter(); + v8::Persistent<v8::Context> env = v8::Context::New(); + env->Enter(); - SanityCheck(); + SanityCheck(); + } } DEPENDENT_TEST(DeserializeFromSecondSerialization, SerializeTwice) { - // BUG(632): Disable this test until the partial_snapshots branch is - // merged back. - return; + if (!Snapshot::IsEnabled()) { + v8::HandleScope scope; - v8::HandleScope scope; + Deserialize(); - Deserialize(); + v8::Persistent<v8::Context> env = v8::Context::New(); + env->Enter(); - v8::Persistent<v8::Context> env = v8::Context::New(); - env->Enter(); - - SanityCheck(); + SanityCheck(); + } } DEPENDENT_TEST(DeserializeAndRunScript2, Serialize) { - v8::HandleScope scope; + if (!Snapshot::IsEnabled()) { + v8::HandleScope scope; - Deserialize(); + Deserialize(); - v8::Persistent<v8::Context> env = v8::Context::New(); - env->Enter(); + v8::Persistent<v8::Context> env = v8::Context::New(); + env->Enter(); - const char* c_source = "\"1234\".length"; - v8::Local<v8::String> source = v8::String::New(c_source); - v8::Local<v8::Script> script = v8::Script::Compile(source); - CHECK_EQ(4, script->Run()->Int32Value()); + const char* c_source = "\"1234\".length"; + v8::Local<v8::String> source = v8::String::New(c_source); + v8::Local<v8::Script> script = v8::Script::Compile(source); + CHECK_EQ(4, script->Run()->Int32Value()); + } } DEPENDENT_TEST(DeserializeFromSecondSerializationAndRunScript2, SerializeTwice) { - // BUG(632): Disable this test until the partial_snapshots branch is - // merged back. 
- return; - - v8::HandleScope scope; + if (!Snapshot::IsEnabled()) { + v8::HandleScope scope; - Deserialize(); + Deserialize(); - v8::Persistent<v8::Context> env = v8::Context::New(); - env->Enter(); + v8::Persistent<v8::Context> env = v8::Context::New(); + env->Enter(); - const char* c_source = "\"1234\".length"; - v8::Local<v8::String> source = v8::String::New(c_source); - v8::Local<v8::Script> script = v8::Script::Compile(source); - CHECK_EQ(4, script->Run()->Int32Value()); + const char* c_source = "\"1234\".length"; + v8::Local<v8::String> source = v8::String::New(c_source); + v8::Local<v8::Script> script = v8::Script::Compile(source); + CHECK_EQ(4, script->Run()->Int32Value()); + } } @@ -400,14 +403,8 @@ TEST(PartialSerialization) { } -DEPENDENT_TEST(PartialDeserialization, PartialSerialization) { - int file_name_length = StrLength(FLAG_testing_serialization_file) + 10; - Vector<char> startup_name = Vector<char>::New(file_name_length + 1); - OS::SNPrintF(startup_name, "%s.startup", FLAG_testing_serialization_file); - - CHECK(Snapshot::Initialize(startup_name.start())); - - const char* file_name = FLAG_testing_serialization_file; +static void ReserveSpaceForPartialSnapshot(const char* file_name) { + int file_name_length = StrLength(file_name) + 10; Vector<char> name = Vector<char>::New(file_name_length + 1); OS::SNPrintF(name, "%s.size", file_name); FILE* fp = OS::FOpen(name.start(), "r"); @@ -436,26 +433,122 @@ DEPENDENT_TEST(PartialDeserialization, PartialSerialization) { map_size, cell_size, large_size); - int snapshot_size = 0; - byte* snapshot = ReadBytes(file_name, &snapshot_size); +} - Object* root; - { - SnapshotByteSource source(snapshot, snapshot_size); - Deserializer deserializer(&source); - deserializer.DeserializePartial(&root); - CHECK(root->IsString()); + +DEPENDENT_TEST(PartialDeserialization, PartialSerialization) { + if (!Snapshot::IsEnabled()) { + int file_name_length = StrLength(FLAG_testing_serialization_file) + 10; + Vector<char> startup_name = Vector<char>::New(file_name_length + 1); + OS::SNPrintF(startup_name, "%s.startup", FLAG_testing_serialization_file); + + CHECK(Snapshot::Initialize(startup_name.start())); + + const char* file_name = FLAG_testing_serialization_file; + ReserveSpaceForPartialSnapshot(file_name); + + int snapshot_size = 0; + byte* snapshot = ReadBytes(file_name, &snapshot_size); + + Object* root; + { + SnapshotByteSource source(snapshot, snapshot_size); + Deserializer deserializer(&source); + deserializer.DeserializePartial(&root); + CHECK(root->IsString()); + } + v8::HandleScope handle_scope; + Handle<Object>root_handle(root); + + Object* root2; + { + SnapshotByteSource source(snapshot, snapshot_size); + Deserializer deserializer(&source); + deserializer.DeserializePartial(&root2); + CHECK(root2->IsString()); + CHECK(*root_handle == root2); + } } - v8::HandleScope handle_scope; - Handle<Object>root_handle(root); +} - Object* root2; - { - SnapshotByteSource source(snapshot, snapshot_size); - Deserializer deserializer(&source); - deserializer.DeserializePartial(&root2); - CHECK(root2->IsString()); - CHECK(*root_handle == root2); + +TEST(ContextSerialization) { + Serializer::Enable(); + v8::V8::Initialize(); + + v8::Persistent<v8::Context> env = v8::Context::New(); + ASSERT(!env.IsEmpty()); + env->Enter(); + // Make sure all builtin scripts are cached. 
+ { HandleScope scope; + for (int i = 0; i < Natives::GetBuiltinsCount(); i++) { + Bootstrapper::NativesSourceLookup(i); + } + } + // If we don't do this then we end up with a stray root pointing at the + // context even after we have disposed of env. + Heap::CollectAllGarbage(true); + + int file_name_length = StrLength(FLAG_testing_serialization_file) + 10; + Vector<char> startup_name = Vector<char>::New(file_name_length + 1); + OS::SNPrintF(startup_name, "%s.startup", FLAG_testing_serialization_file); + + env->Exit(); + + Object* raw_context = *(v8::Utils::OpenHandle(*env)); + + env.Dispose(); + + FileByteSink startup_sink(startup_name.start()); + StartupSerializer startup_serializer(&startup_sink); + startup_serializer.SerializeStrongReferences(); + + FileByteSink partial_sink(FLAG_testing_serialization_file); + PartialSerializer p_ser(&startup_serializer, &partial_sink); + p_ser.Serialize(&raw_context); + startup_serializer.SerializeWeakReferences(); + partial_sink.WriteSpaceUsed(p_ser.CurrentAllocationAddress(NEW_SPACE), + p_ser.CurrentAllocationAddress(OLD_POINTER_SPACE), + p_ser.CurrentAllocationAddress(OLD_DATA_SPACE), + p_ser.CurrentAllocationAddress(CODE_SPACE), + p_ser.CurrentAllocationAddress(MAP_SPACE), + p_ser.CurrentAllocationAddress(CELL_SPACE), + p_ser.CurrentAllocationAddress(LO_SPACE)); +} + + +DEPENDENT_TEST(ContextDeserialization, ContextSerialization) { + if (!Snapshot::IsEnabled()) { + int file_name_length = StrLength(FLAG_testing_serialization_file) + 10; + Vector<char> startup_name = Vector<char>::New(file_name_length + 1); + OS::SNPrintF(startup_name, "%s.startup", FLAG_testing_serialization_file); + + CHECK(Snapshot::Initialize(startup_name.start())); + + const char* file_name = FLAG_testing_serialization_file; + ReserveSpaceForPartialSnapshot(file_name); + + int snapshot_size = 0; + byte* snapshot = ReadBytes(file_name, &snapshot_size); + + Object* root; + { + SnapshotByteSource source(snapshot, snapshot_size); + Deserializer deserializer(&source); + deserializer.DeserializePartial(&root); + CHECK(root->IsContext()); + } + v8::HandleScope handle_scope; + Handle<Object>root_handle(root); + + Object* root2; + { + SnapshotByteSource source(snapshot, snapshot_size); + Deserializer deserializer(&source); + deserializer.DeserializePartial(&root2); + CHECK(root2->IsContext()); + CHECK(*root_handle != root2); + } } } @@ -463,6 +556,7 @@ DEPENDENT_TEST(PartialDeserialization, PartialSerialization) { TEST(LinearAllocation) { v8::V8::Initialize(); int new_space_max = 512 * KB; + for (int size = 1000; size < 5 * MB; size += size >> 1) { int new_space_size = (size < new_space_max) ? size : new_space_max; Heap::ReserveSpace( diff --git a/deps/v8/test/mjsunit/codegen-coverage.js b/deps/v8/test/mjsunit/codegen-coverage.js index 42c371ba26..8e7f1891a2 100644 --- a/deps/v8/test/mjsunit/codegen-coverage.js +++ b/deps/v8/test/mjsunit/codegen-coverage.js @@ -33,6 +33,13 @@ function identity(x) { return x; } +function lookup(w, a) { + // This function tests a code path in the generation of a keyed load IC + // where the key and the value are both in the same register. + a = a; + w[a] = a; +} + function cover_codegen_paths() { var x = 1; @@ -131,6 +138,12 @@ function cover_codegen_paths() { assertEquals(1073741824, 1 - di); x = 3; + var w = { }; + lookup(w, x); + lookup(w, x); + lookup(w, x); + + x = 3; // Terminate while loop. 
} } diff --git a/deps/v8/test/mjsunit/date.js b/deps/v8/test/mjsunit/date.js index a592e4c4ae..b264a19c3d 100644 --- a/deps/v8/test/mjsunit/date.js +++ b/deps/v8/test/mjsunit/date.js @@ -46,12 +46,18 @@ assertEquals(date2, date3); var dMax = new Date(8.64e15); assertEquals(8.64e15, dMax.getTime()); +assertEquals(275760, dMax.getFullYear()); +assertEquals(8, dMax.getMonth()); +assertEquals(13, dMax.getUTCDate()); var dOverflow = new Date(8.64e15+1); assertTrue(isNaN(dOverflow.getTime())); var dMin = new Date(-8.64e15); assertEquals(-8.64e15, dMin.getTime()); +assertEquals(-271821, dMin.getFullYear()); +assertEquals(3, dMin.getMonth()); +assertEquals(20, dMin.getUTCDate()); var dUnderflow = new Date(-8.64e15-1); assertTrue(isNaN(dUnderflow.getTime())); diff --git a/deps/v8/test/mjsunit/debug-script.js b/deps/v8/test/mjsunit/debug-script.js index 402f90cf83..643dd8ce42 100644 --- a/deps/v8/test/mjsunit/debug-script.js +++ b/deps/v8/test/mjsunit/debug-script.js @@ -52,7 +52,7 @@ for (i = 0; i < scripts.length; i++) { } // This has to be updated if the number of native scripts change. -assertEquals(13, named_native_count); +assertEquals(14, named_native_count); // If no snapshot is used, only the 'gc' extension is loaded. // If snapshot is used, all extensions are cached in the snapshot. assertTrue(extension_count == 1 || extension_count == 5); diff --git a/deps/v8/test/mjsunit/div-mod.js b/deps/v8/test/mjsunit/div-mod.js index 1d352b5566..3e343de1cc 100644 --- a/deps/v8/test/mjsunit/div-mod.js +++ b/deps/v8/test/mjsunit/div-mod.js @@ -169,3 +169,24 @@ function compute_mod(dividend, divisor) { assertEquals(somenum, somenum % -0x40000000, "%minsmi-32"); assertEquals(somenum, somenum % -0x80000000, "%minsmi-64"); })(); + + +// Side-effect-free expressions containing bit operations use +// an optimized compiler with int32 values. Ensure that modulus +// produces negative zeros correctly. +function negative_zero_modulus_test() { + var x = 4; + var y = -4; + x = x + x - x; + y = y + y - y; + var z = (y | y | y | y) % x; + assertEquals(-1 / 0, 1 / z); + z = (x | x | x | x) % x; + assertEquals(1 / 0, 1 / z); + z = (y | y | y | y) % y; + assertEquals(-1 / 0, 1 / z); + z = (x | x | x | x) % y; + assertEquals(1 / 0, 1 / z); +} + +negative_zero_modulus_test(); diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-39160.js b/deps/v8/test/mjsunit/regress/regress-crbug-39160.js new file mode 100644 index 0000000000..a8a8567903 --- /dev/null +++ b/deps/v8/test/mjsunit/regress/regress-crbug-39160.js @@ -0,0 +1,41 @@ +// Copyright 2010 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. 
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-39160.js b/deps/v8/test/mjsunit/regress/regress-crbug-39160.js
new file mode 100644
index 0000000000..a8a8567903
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-39160.js
@@ -0,0 +1,41 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// See http://crbug.com/39160
+
+// To reproduce the bug we need an inlined comparison (i.e. in a loop) where
+// the left hand side is known to be a smi (max smi value is 1073741823). This
+// test crashes with the bug.
+function f(a) {
+  for (var i = 1073741820; i < 1073741822; i++) {
+    if (a < i) {
+      a += i;
+    }
+  }
+}
+
+f(5)
diff --git a/deps/v8/tools/gyp/v8.gyp b/deps/v8/tools/gyp/v8.gyp
index 958a5922a4..e12946e773 100644
--- a/deps/v8/tools/gyp/v8.gyp
+++ b/deps/v8/tools/gyp/v8.gyp
@@ -309,9 +309,9 @@
         '../../src/ic.h',
         '../../src/interpreter-irregexp.cc',
         '../../src/interpreter-irregexp.h',
+        '../../src/jump-target-inl.h',
         '../../src/jump-target.cc',
         '../../src/jump-target.h',
-        '../../src/jump-target-inl.h',
         '../../src/jsregexp.cc',
         '../../src/jsregexp.h',
         '../../src/list-inl.h',
@@ -330,7 +330,6 @@
         '../../src/messages.cc',
         '../../src/messages.h',
         '../../src/natives.h',
-        '../../src/number-info.h',
         '../../src/objects-debug.cc',
         '../../src/objects-inl.h',
         '../../src/objects.cc',
@@ -387,6 +386,8 @@
         '../../src/token.h',
         '../../src/top.cc',
         '../../src/top.h',
+        '../../src/type-info-inl.h',
+        '../../src/type-info.h',
         '../../src/unicode-inl.h',
         '../../src/unicode.cc',
         '../../src/unicode.h',
@@ -416,6 +417,10 @@
       ],
       'sources': [
         '../../src/fast-codegen.cc',
+        '../../src/jump-target-light-inl.h',
+        '../../src/jump-target-light.cc',
+        '../../src/virtual-frame-light-inl.h',
+        '../../src/virtual-frame-light.cc',
        '../../src/arm/assembler-arm-inl.h',
         '../../src/arm/assembler-arm.cc',
         '../../src/arm/assembler-arm.h',
@@ -457,6 +462,10 @@
         '../../src/ia32',
       ],
       'sources': [
+        '../../src/jump-target-heavy-inl.h',
+        '../../src/jump-target-heavy.cc',
+        '../../src/virtual-frame-heavy-inl.h',
+        '../../src/virtual-frame-heavy.cc',
         '../../src/ia32/assembler-ia32-inl.h',
         '../../src/ia32/assembler-ia32.cc',
         '../../src/ia32/assembler-ia32.h',
@@ -489,6 +498,10 @@
       ],
       'sources': [
         '../../src/fast-codegen.cc',
+        '../../src/jump-target-heavy-inl.h',
+        '../../src/jump-target-heavy.cc',
+        '../../src/virtual-frame-heavy-inl.h',
+        '../../src/virtual-frame-heavy.cc',
         '../../src/x64/assembler-x64-inl.h',
         '../../src/x64/assembler-x64.cc',
         '../../src/x64/assembler-x64.h',
@@ -573,12 +586,12 @@
         '../../src/math.js',
         '../../src/messages.js',
         '../../src/apinatives.js',
-        '../../src/debug-delay.js',
-        '../../src/liveedit-delay.js',
-        '../../src/mirror-delay.js',
-        '../../src/date-delay.js',
-        '../../src/json-delay.js',
-        '../../src/regexp-delay.js',
+        '../../src/debug-debugger.js',
+        '../../src/mirror-debugger.js',
+        '../../src/liveedit-debugger.js',
+        '../../src/date.js',
+        '../../src/json.js',
+        '../../src/regexp.js',
         '../../src/macros.py',
       ],
     },
diff --git a/deps/v8/tools/js2c.py index
b889530d5a..64de7d3156 100755 --- a/deps/v8/tools/js2c.py +++ b/deps/v8/tools/js2c.py @@ -220,8 +220,8 @@ namespace internal { } template <> - int NativesCollection<%(type)s>::GetDelayCount() { - return %(delay_count)i; + int NativesCollection<%(type)s>::GetDebuggerCount() { + return %(debugger_count)i; } template <> @@ -252,23 +252,23 @@ SOURCE_DECLARATION = """\ """ -GET_DELAY_INDEX_CASE = """\ +GET_DEBUGGER_INDEX_CASE = """\ if (strcmp(name, "%(id)s") == 0) return %(i)i; """ -GET_DELAY_SCRIPT_SOURCE_CASE = """\ +GET_DEBUGGER_SCRIPT_SOURCE_CASE = """\ if (index == %(i)i) return Vector<const char>(%(id)s, %(length)i); """ -GET_DELAY_SCRIPT_NAME_CASE = """\ +GET_DEBUGGER_SCRIPT_NAME_CASE = """\ if (index == %(i)i) return Vector<const char>("%(name)s", %(length)i); """ def JS2C(source, target, env): ids = [] - delay_ids = [] + debugger_ids = [] modules = [] # Locate the macros file name. consts = {} @@ -287,7 +287,7 @@ def JS2C(source, target, env): source_lines_empty = [] for module in modules: filename = str(module) - delay = filename.endswith('-delay.js') + debugger = filename.endswith('-debugger.js') lines = ReadFile(filename) lines = ExpandConstants(lines, consts) lines = ExpandMacros(lines, macros) @@ -295,29 +295,29 @@ def JS2C(source, target, env): lines = minifier.JSMinify(lines) data = ToCArray(lines) id = (os.path.split(filename)[1])[:-3] - if delay: id = id[:-6] - if delay: - delay_ids.append((id, len(lines))) + if debugger: id = id[:-9] + if debugger: + debugger_ids.append((id, len(lines))) else: ids.append((id, len(lines))) source_lines.append(SOURCE_DECLARATION % { 'id': id, 'data': data }) source_lines_empty.append(SOURCE_DECLARATION % { 'id': id, 'data': data }) - # Build delay support functions + # Build debugger support functions get_index_cases = [ ] get_script_source_cases = [ ] get_script_name_cases = [ ] i = 0 - for (id, length) in delay_ids: + for (id, length) in debugger_ids: native_name = "native %s.js" % id - get_index_cases.append(GET_DELAY_INDEX_CASE % { 'id': id, 'i': i }) - get_script_source_cases.append(GET_DELAY_SCRIPT_SOURCE_CASE % { + get_index_cases.append(GET_DEBUGGER_INDEX_CASE % { 'id': id, 'i': i }) + get_script_source_cases.append(GET_DEBUGGER_SCRIPT_SOURCE_CASE % { 'id': id, 'length': length, 'i': i }) - get_script_name_cases.append(GET_DELAY_SCRIPT_NAME_CASE % { + get_script_name_cases.append(GET_DEBUGGER_SCRIPT_NAME_CASE % { 'name': native_name, 'length': len(native_name), 'i': i @@ -326,13 +326,13 @@ def JS2C(source, target, env): for (id, length) in ids: native_name = "native %s.js" % id - get_index_cases.append(GET_DELAY_INDEX_CASE % { 'id': id, 'i': i }) - get_script_source_cases.append(GET_DELAY_SCRIPT_SOURCE_CASE % { + get_index_cases.append(GET_DEBUGGER_INDEX_CASE % { 'id': id, 'i': i }) + get_script_source_cases.append(GET_DEBUGGER_SCRIPT_SOURCE_CASE % { 'id': id, 'length': length, 'i': i }) - get_script_name_cases.append(GET_DELAY_SCRIPT_NAME_CASE % { + get_script_name_cases.append(GET_DEBUGGER_SCRIPT_NAME_CASE % { 'name': native_name, 'length': len(native_name), 'i': i @@ -342,8 +342,8 @@ def JS2C(source, target, env): # Emit result output = open(str(target[0]), "w") output.write(HEADER_TEMPLATE % { - 'builtin_count': len(ids) + len(delay_ids), - 'delay_count': len(delay_ids), + 'builtin_count': len(ids) + len(debugger_ids), + 'debugger_count': len(debugger_ids), 'source_lines': "\n".join(source_lines), 'get_index_cases': "".join(get_index_cases), 'get_script_source_cases': "".join(get_script_source_cases), @@ -355,8 +355,8 @@ def 
JS2C(source, target, env): if len(target) > 1: output = open(str(target[1]), "w") output.write(HEADER_TEMPLATE % { - 'builtin_count': len(ids) + len(delay_ids), - 'delay_count': len(delay_ids), + 'builtin_count': len(ids) + len(debugger_ids), + 'debugger_count': len(debugger_ids), 'source_lines': "\n".join(source_lines_empty), 'get_index_cases': "".join(get_index_cases), 'get_script_source_cases': "".join(get_script_source_cases), diff --git a/deps/v8/tools/v8.xcodeproj/project.pbxproj b/deps/v8/tools/v8.xcodeproj/project.pbxproj index 591ba4bf29..0493da693b 100644 --- a/deps/v8/tools/v8.xcodeproj/project.pbxproj +++ b/deps/v8/tools/v8.xcodeproj/project.pbxproj @@ -27,16 +27,20 @@ /* Begin PBXBuildFile section */ 58950D5E0F55519800F3E8BA /* jump-target.cc in Sources */ = {isa = PBXBuildFile; fileRef = 58950D500F55514900F3E8BA /* jump-target.cc */; }; 58950D5F0F55519D00F3E8BA /* jump-target-ia32.cc in Sources */ = {isa = PBXBuildFile; fileRef = 58950D4F0F55514900F3E8BA /* jump-target-ia32.cc */; }; + 58950D5F0F55519D00F3E8BA /* jump-target-heavy.cc in Sources */ = {isa = PBXBuildFile; fileRef = 58950D4F0F55514900F3E8BA /* jump-target-heavy.cc */; }; 58950D600F5551A300F3E8BA /* jump-target.cc in Sources */ = {isa = PBXBuildFile; fileRef = 58950D500F55514900F3E8BA /* jump-target.cc */; }; 58950D610F5551A400F3E8BA /* jump-target-arm.cc in Sources */ = {isa = PBXBuildFile; fileRef = 58950D4E0F55514900F3E8BA /* jump-target-arm.cc */; }; + 58950D610F5551A400F3E8BA /* jump-target-light.cc in Sources */ = {isa = PBXBuildFile; fileRef = 58950D4E0F55514900F3E8BA /* jump-target-light.cc */; }; 58950D620F5551AF00F3E8BA /* register-allocator-ia32.cc in Sources */ = {isa = PBXBuildFile; fileRef = 58950D530F55514900F3E8BA /* register-allocator-ia32.cc */; }; 58950D630F5551AF00F3E8BA /* register-allocator.cc in Sources */ = {isa = PBXBuildFile; fileRef = 58950D540F55514900F3E8BA /* register-allocator.cc */; }; 58950D640F5551B500F3E8BA /* register-allocator.cc in Sources */ = {isa = PBXBuildFile; fileRef = 58950D540F55514900F3E8BA /* register-allocator.cc */; }; 58950D650F5551B600F3E8BA /* register-allocator-arm.cc in Sources */ = {isa = PBXBuildFile; fileRef = 58950D520F55514900F3E8BA /* register-allocator-arm.cc */; }; 58950D660F5551C200F3E8BA /* virtual-frame.cc in Sources */ = {isa = PBXBuildFile; fileRef = 58950D5A0F55514900F3E8BA /* virtual-frame.cc */; }; 58950D670F5551C400F3E8BA /* virtual-frame-ia32.cc in Sources */ = {isa = PBXBuildFile; fileRef = 58950D580F55514900F3E8BA /* virtual-frame-ia32.cc */; }; + 58950D670F5551C400F3E8BA /* virtual-frame-heavy.cc in Sources */ = {isa = PBXBuildFile; fileRef = 58950D580F55514900F3E8BA /* virtual-frame-heavy.cc */; }; 58950D680F5551CB00F3E8BA /* virtual-frame.cc in Sources */ = {isa = PBXBuildFile; fileRef = 58950D5A0F55514900F3E8BA /* virtual-frame.cc */; }; 58950D690F5551CE00F3E8BA /* virtual-frame-arm.cc in Sources */ = {isa = PBXBuildFile; fileRef = 58950D560F55514900F3E8BA /* virtual-frame-arm.cc */; }; + 58950D690F5551CE00F3E8BA /* virtual-frame-light.cc in Sources */ = {isa = PBXBuildFile; fileRef = 58950D560F55514900F3E8BA /* virtual-frame-light.cc */; }; 8900116C0E71CA2300F91F35 /* libraries.cc in Sources */ = {isa = PBXBuildFile; fileRef = 8900116B0E71CA2300F91F35 /* libraries.cc */; }; 890A13FE0EE9C47F00E49346 /* interpreter-irregexp.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89A15C660EE4665300B48DEB /* interpreter-irregexp.cc */; }; 890A14010EE9C4B000E49346 /* regexp-macro-assembler-arm.cc in Sources */ = {isa = PBXBuildFile; fileRef = 
89A15C700EE466D000B48DEB /* regexp-macro-assembler-arm.cc */; }; @@ -291,7 +295,9 @@ 22A76C900FF259E600FDC694 /* log-inl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "log-inl.h"; sourceTree = "<group>"; }; 58242A1E0FA1F14D00BD6F59 /* json-delay.js */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.javascript; path = "json-delay.js"; sourceTree = "<group>"; }; 58950D4E0F55514900F3E8BA /* jump-target-arm.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "jump-target-arm.cc"; path = "arm/jump-target-arm.cc"; sourceTree = "<group>"; }; + 58950D4E0F55514900F3E8BA /* jump-target-light.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "jump-target-light.cc"; path = "jump-target-light.cc"; sourceTree = "<group>"; }; 58950D4F0F55514900F3E8BA /* jump-target-ia32.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "jump-target-ia32.cc"; path = "ia32/jump-target-ia32.cc"; sourceTree = "<group>"; }; + 58950D4F0F55514900F3E8BA /* jump-target-heavy.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "jump-target-heavy.cc"; path = "jump-target-heavy.cc"; sourceTree = "<group>"; }; 58950D500F55514900F3E8BA /* jump-target.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "jump-target.cc"; sourceTree = "<group>"; }; 58950D510F55514900F3E8BA /* jump-target.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "jump-target.h"; sourceTree = "<group>"; }; 58950D520F55514900F3E8BA /* register-allocator-arm.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "register-allocator-arm.cc"; path = "arm/register-allocator-arm.cc"; sourceTree = "<group>"; }; @@ -299,8 +305,10 @@ 58950D540F55514900F3E8BA /* register-allocator.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "register-allocator.cc"; sourceTree = "<group>"; }; 58950D550F55514900F3E8BA /* register-allocator.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "register-allocator.h"; sourceTree = "<group>"; }; 58950D560F55514900F3E8BA /* virtual-frame-arm.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "virtual-frame-arm.cc"; path = "arm/virtual-frame-arm.cc"; sourceTree = "<group>"; }; + 58950D560F55514900F3E8BA /* virtual-frame-light.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "virtual-frame-light.cc"; path = "virtual-frame-light.cc"; sourceTree = "<group>"; }; 58950D570F55514900F3E8BA /* virtual-frame-arm.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "virtual-frame-arm.h"; path = "arm/virtual-frame-arm.h"; sourceTree = "<group>"; }; 58950D580F55514900F3E8BA /* virtual-frame-ia32.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "virtual-frame-ia32.cc"; path = "ia32/virtual-frame-ia32.cc"; sourceTree = "<group>"; }; + 58950D580F55514900F3E8BA /* virtual-frame-heavy.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "virtual-frame-heavy.cc"; path = "virtual-frame-heavy.cc"; sourceTree = "<group>"; }; 58950D590F55514900F3E8BA /* 
virtual-frame-ia32.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "virtual-frame-ia32.h"; path = "ia32/virtual-frame-ia32.h"; sourceTree = "<group>"; }; 58950D5A0F55514900F3E8BA /* virtual-frame.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "virtual-frame.cc"; sourceTree = "<group>"; }; 58950D5B0F55514900F3E8BA /* virtual-frame.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "virtual-frame.h"; sourceTree = "<group>"; }; @@ -321,6 +329,8 @@ 8956B6CD0F5D86570033B5A2 /* debug-agent.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "debug-agent.cc"; sourceTree = "<group>"; }; 8956B6CE0F5D86570033B5A2 /* debug-agent.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "debug-agent.h"; sourceTree = "<group>"; }; 895FA720107FFB15006F39D4 /* jump-target-inl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "jump-target-inl.h"; sourceTree = "<group>"; }; + 895FA720107FFB15006F39D4 /* jump-target-light-inl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "jump-target-light-inl.h"; sourceTree = "<group>"; }; + 895FA720107FFB15006F39D4 /* jump-target-heavy-inl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "jump-target-heavy-inl.h"; sourceTree = "<group>"; }; 895FA725107FFB57006F39D4 /* codegen-ia32-inl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "codegen-ia32-inl.h"; path = "ia32/codegen-ia32-inl.h"; sourceTree = "<group>"; }; 895FA72A107FFB85006F39D4 /* register-allocator-ia32-inl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "register-allocator-ia32-inl.h"; path = "ia32/register-allocator-ia32-inl.h"; sourceTree = "<group>"; }; 895FA72B107FFB85006F39D4 /* register-allocator-ia32.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "register-allocator-ia32.h"; path = "ia32/register-allocator-ia32.h"; sourceTree = "<group>"; }; @@ -785,8 +795,12 @@ 897FF14E0E719B8F00D62E90 /* jsregexp.cc */, 897FF14F0E719B8F00D62E90 /* jsregexp.h */, 895FA720107FFB15006F39D4 /* jump-target-inl.h */, + 895FA720107FFB15006F39D4 /* jump-target-heavy-inl.h */, + 895FA720107FFB15006F39D4 /* jump-target-light-inl.h */, 58950D4E0F55514900F3E8BA /* jump-target-arm.cc */, + 58950D4E0F55514900F3E8BA /* jump-target-light.cc */, 58950D4F0F55514900F3E8BA /* jump-target-ia32.cc */, + 58950D4F0F55514900F3E8BA /* jump-target-heavy.cc */, 58950D500F55514900F3E8BA /* jump-target.cc */, 58950D510F55514900F3E8BA /* jump-target.h */, 897FF1500E719B8F00D62E90 /* list-inl.h */, @@ -905,8 +919,10 @@ 897FF32F0FAA0ED200136CF6 /* version.cc */, 897FF3300FAA0ED200136CF6 /* version.h */, 58950D560F55514900F3E8BA /* virtual-frame-arm.cc */, + 58950D560F55514900F3E8BA /* virtual-frame-light.cc */, 58950D570F55514900F3E8BA /* virtual-frame-arm.h */, 58950D580F55514900F3E8BA /* virtual-frame-ia32.cc */, + 58950D580F55514900F3E8BA /* virtual-frame-heavy.cc */, 58950D590F55514900F3E8BA /* virtual-frame-ia32.h */, 58950D5A0F55514900F3E8BA /* virtual-frame.cc */, 58950D5B0F55514900F3E8BA /* virtual-frame.h */, @@ -1213,6 +1229,7 @@ 89A88E0E0E71A66F0043BA31 /* jsregexp.cc in Sources */, 58950D5E0F55519800F3E8BA /* jump-target.cc in Sources */, 
58950D5F0F55519D00F3E8BA /* jump-target-ia32.cc in Sources */, + 58950D5F0F55519D00F3E8BA /* jump-target-heavy.cc in Sources */, 8900116C0E71CA2300F91F35 /* libraries.cc in Sources */, 89A88E0F0E71A6740043BA31 /* log.cc in Sources */, 89A88E100E71A6770043BA31 /* macro-assembler-ia32.cc in Sources */, @@ -1256,6 +1273,7 @@ 89A88E2D0E71A6D50043BA31 /* variables.cc in Sources */, 89B933AF0FAA0F9600201304 /* version.cc in Sources */, 58950D660F5551C200F3E8BA /* virtual-frame.cc in Sources */, + 58950D660F5551C200F3E8BA /* virtual-frame-heavy.cc in Sources */, 58950D670F5551C400F3E8BA /* virtual-frame-ia32.cc in Sources */, 89A88E2E0E71A6D60043BA31 /* zone.cc in Sources */, 9F4B7B890FCC877A00DC4117 /* log-utils.cc in Sources */, @@ -1324,6 +1342,7 @@ 89F23C610E78D5B2006B2466 /* jsregexp.cc in Sources */, 58950D600F5551A300F3E8BA /* jump-target.cc in Sources */, 58950D610F5551A400F3E8BA /* jump-target-arm.cc in Sources */, + 58950D610F5551A400F3E8BA /* jump-target-light.cc in Sources */, 89F23C620E78D5B2006B2466 /* libraries.cc in Sources */, 89F23C630E78D5B2006B2466 /* log.cc in Sources */, 89F23C9E0E78D5FD006B2466 /* macro-assembler-arm.cc in Sources */, @@ -1368,6 +1387,7 @@ 89F23C810E78D5B2006B2466 /* variables.cc in Sources */, 89B933B00FAA0F9D00201304 /* version.cc in Sources */, 58950D680F5551CB00F3E8BA /* virtual-frame.cc in Sources */, + 58950D680F5551CB00F3E8BA /* virtual-frame-light.cc in Sources */, 58950D690F5551CE00F3E8BA /* virtual-frame-arm.cc in Sources */, 89F23C820E78D5B2006B2466 /* zone.cc in Sources */, 9F4B7B8A0FCC877A00DC4117 /* log-utils.cc in Sources */, diff --git a/deps/v8/tools/visual_studio/js2c.cmd b/deps/v8/tools/visual_studio/js2c.cmd index 54b1bfb552..82722ffdb9 100644 --- a/deps/v8/tools/visual_studio/js2c.cmd +++ b/deps/v8/tools/visual_studio/js2c.cmd @@ -3,4 +3,4 @@ set SOURCE_DIR=%1 set TARGET_DIR=%2 set PYTHON="..\..\..\third_party\python_24\python.exe" if not exist %PYTHON% set PYTHON=python.exe -%PYTHON% ..\js2c.py %TARGET_DIR%\natives.cc %TARGET_DIR%\natives-empty.cc CORE %SOURCE_DIR%\macros.py %SOURCE_DIR%\runtime.js %SOURCE_DIR%\v8natives.js %SOURCE_DIR%\array.js %SOURCE_DIR%\string.js %SOURCE_DIR%\uri.js %SOURCE_DIR%\math.js %SOURCE_DIR%\messages.js %SOURCE_DIR%\apinatives.js %SOURCE_DIR%\debug-delay.js %SOURCE_DIR%\liveedit-delay.js %SOURCE_DIR%\mirror-delay.js %SOURCE_DIR%\date-delay.js %SOURCE_DIR%\regexp-delay.js %SOURCE_DIR%\json-delay.js +%PYTHON% ..\js2c.py %TARGET_DIR%\natives.cc %TARGET_DIR%\natives-empty.cc CORE %SOURCE_DIR%\macros.py %SOURCE_DIR%\runtime.js %SOURCE_DIR%\v8natives.js %SOURCE_DIR%\array.js %SOURCE_DIR%\string.js %SOURCE_DIR%\uri.js %SOURCE_DIR%\math.js %SOURCE_DIR%\messages.js %SOURCE_DIR%\apinatives.js %SOURCE_DIR%\debug-debugger.js %SOURCE_DIR%\liveedit-debugger.js %SOURCE_DIR%\mirror-debugger.js %SOURCE_DIR%\date.js %SOURCE_DIR%\regexp.js %SOURCE_DIR%\json.js diff --git a/deps/v8/tools/visual_studio/v8.vcproj b/deps/v8/tools/visual_studio/v8.vcproj index 3122c6d598..30b488f44f 100644 --- a/deps/v8/tools/visual_studio/v8.vcproj +++ b/deps/v8/tools/visual_studio/v8.vcproj @@ -135,15 +135,15 @@ > </File> <File - RelativePath="..\..\src\date-delay.js" + RelativePath="..\..\src\date.js" > </File> <File - RelativePath="..\..\src\debug-delay.js" + RelativePath="..\..\src\debug-debugger.js" > </File> <File - RelativePath="..\..\src\liveedit-delay.js" + RelativePath="..\..\src\liveedit-debugger.js" > </File> <File @@ -159,15 +159,15 @@ > </File> <File - RelativePath="..\..\src\mirror-delay.js" + 
RelativePath="..\..\src\mirror-debugger.js" > </File> <File - RelativePath="..\..\src\regexp-delay.js" + RelativePath="..\..\src\regexp.js" > </File> <File - RelativePath="..\..\src\json-delay.js" + RelativePath="..\..\src\json.js" > </File> <File @@ -192,7 +192,7 @@ Name="VCCustomBuildTool" Description="Processing js files..." CommandLine=".\js2c.cmd ..\..\src "$(IntDir)\DerivedSources"" - AdditionalDependencies="..\..\src\macros.py;..\..\src\runtime.js;..\..\src\v8natives.js;..\..\src\array.js;..\..\src\string.js;..\..\src\uri.js;..\..\src\math.js;..\..\src\messages.js;..\..\src\apinatives.js;..\..\src\debug-delay.js;..\..\src\mirror-delay.js;..\..\src\date-delay.js;..\..\src\regexp-delay.js;..\..\src\json-delay.js" + AdditionalDependencies="..\..\src\macros.py;..\..\src\runtime.js;..\..\src\v8natives.js;..\..\src\array.js;..\..\src\string.js;..\..\src\uri.js;..\..\src\math.js;..\..\src\messages.js;..\..\src\apinatives.js;..\..\src\debug-debugger.js;..\..\src\mirror-debugger.js;..\..\src\liveedit-debugger.js;..\..\src\date.js;..\..\src\regexp.js;..\..\src\json.js" Outputs="$(IntDir)\DerivedSources\natives.cc;$(IntDir)\DerivedSources\natives-empty.cc" /> </FileConfiguration> @@ -203,7 +203,7 @@ Name="VCCustomBuildTool" Description="Processing js files..." CommandLine=".\js2c.cmd ..\..\src "$(IntDir)\DerivedSources"" - AdditionalDependencies="..\..\src\macros.py;..\..\src\runtime.js;..\..\src\v8natives.js;..\..\src\array.js;..\..\src\string.js;..\..\src\uri.js;..\..\src\math.js;..\..\src\messages.js;..\..\src\apinatives.js;..\..\src\debug-delay.js;..\..\src\mirror-delay.js;..\..\src\date-delay.js;..\..\src\regexp-delay.js;..\..\src\json-delay.js" + AdditionalDependencies="..\..\src\macros.py;..\..\src\runtime.js;..\..\src\v8natives.js;..\..\src\array.js;..\..\src\string.js;..\..\src\uri.js;..\..\src\math.js;..\..\src\messages.js;..\..\src\apinatives.js;..\..\src\debug-debugger.js;..\..\src\mirror-debugger.js;..\..\src\liveedit-debugger.js;..\..\src\date.js;..\..\src\regexp.js;..\..\src\json.js" Outputs="$(IntDir)\DerivedSources\natives.cc;$(IntDir)\DerivedSources\natives-empty.cc" /> </FileConfiguration> diff --git a/deps/v8/tools/visual_studio/v8_arm.vcproj b/deps/v8/tools/visual_studio/v8_arm.vcproj index cb7519bdc7..cdba58e3c3 100644 --- a/deps/v8/tools/visual_studio/v8_arm.vcproj +++ b/deps/v8/tools/visual_studio/v8_arm.vcproj @@ -135,15 +135,15 @@ >
</File>
<File
- RelativePath="..\..\src\date-delay.js"
+ RelativePath="..\..\src\date.js"
>
</File>
<File
- RelativePath="..\..\src\debug-delay.js"
+ RelativePath="..\..\src\debug-debugger.js"
>
</File>
<File
- RelativePath="..\..\src\liveedit-delay.js"
+ RelativePath="..\..\src\liveedit-debugger.js"
>
</File>
<File
@@ -159,15 +159,15 @@ >
</File>
<File
- RelativePath="..\..\src\mirror-delay.js"
+ RelativePath="..\..\src\mirror-debugger.js"
>
</File>
<File
- RelativePath="..\..\src\regexp-delay.js"
+ RelativePath="..\..\src\regexp.js"
>
</File>
<File
- RelativePath="..\..\src\json-delay.js"
+ RelativePath="..\..\src\json.js"
>
</File>
<File
@@ -192,7 +192,7 @@ Name="VCCustomBuildTool"
Description="Processing js files..."
CommandLine=".\js2c.cmd ..\..\src "$(IntDir)\DerivedSources""
- AdditionalDependencies="..\..\src\macros.py;..\..\src\runtime.js;..\..\src\v8natives.js;..\..\src\array.js;..\..\src\string.js;..\..\src\uri.js;..\..\src\math.js;..\..\src\messages.js;..\..\src\apinatives.js;..\..\src\debug-delay.js;..\..\src\mirror-delay.js;..\..\src\date-delay.js;..\..\src\regexp-delay.js;..\..\src\json-delay.js"
+ AdditionalDependencies="..\..\src\macros.py;..\..\src\runtime.js;..\..\src\v8natives.js;..\..\src\array.js;..\..\src\string.js;..\..\src\uri.js;..\..\src\math.js;..\..\src\messages.js;..\..\src\apinatives.js;..\..\src\debug-debugger.js;..\..\src\mirror-debugger.js;..\..\src\liveedit-debugger.js;..\..\src\date.js;..\..\src\regexp.js;..\..\src\json.js"
Outputs="$(IntDir)\DerivedSources\natives.cc;$(IntDir)\DerivedSources\natives-empty.cc"
/>
</FileConfiguration>
@@ -203,7 +203,7 @@ Name="VCCustomBuildTool"
Description="Processing js files..."
CommandLine=".\js2c.cmd ..\..\src "$(IntDir)\DerivedSources""
- AdditionalDependencies="..\..\src\macros.py;..\..\src\runtime.js;..\..\src\v8natives.js;..\..\src\array.js;..\..\src\string.js;..\..\src\uri.js;..\..\src\math.js;..\..\src\messages.js;..\..\src\apinatives.js;..\..\src\debug-delay.js;..\..\src\mirror-delay.js;..\..\src\date-delay.js;..\..\src\regexp-delay.js;..\..\src\json-delay.js"
+ AdditionalDependencies="..\..\src\macros.py;..\..\src\runtime.js;..\..\src\v8natives.js;..\..\src\array.js;..\..\src\string.js;..\..\src\uri.js;..\..\src\math.js;..\..\src\messages.js;..\..\src\apinatives.js;..\..\src\debug-debugger.js;..\..\src\mirror-debugger.js;..\..\src\liveedit-debugger.js;..\..\src\date.js;..\..\src\regexp.js;..\..\src\json.js"
Outputs="$(IntDir)\DerivedSources\natives.cc;$(IntDir)\DerivedSources\natives-empty.cc"
/>
</FileConfiguration>
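The project-file churn above is mechanical: every build description lists the natives scripts, and the former *-delay.js files are now either plain library sources (date.js, json.js, regexp.js) or *-debugger.js sources that js2c.py counts separately. A sketch of the id derivation js2c.py performs, written in JavaScript for consistency with the other examples (the authoritative logic is the Python in tools/js2c.py above):

    // js2c.py strips ".js", then a trailing "-debugger" (formerly "-delay").
    // Files with the suffix are tallied as debugger scripts, the rest as plain natives.
    function nativeId(filename) {
      var id = filename.replace(/\.js$/, "");
      var isDebuggerScript = /-debugger$/.test(id);
      if (isDebuggerScript) id = id.slice(0, -"-debugger".length);
      return { id: id, debuggerScript: isDebuggerScript };
    }

    // nativeId("debug-debugger.js") -> { id: "debug", debuggerScript: true }
    // nativeId("date.js")           -> { id: "date",  debuggerScript: false }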
diff --git a/deps/v8/tools/visual_studio/v8_base.vcproj b/deps/v8/tools/visual_studio/v8_base.vcproj index 1d6d605325..fe4d863665 100644 --- a/deps/v8/tools/visual_studio/v8_base.vcproj +++ b/deps/v8/tools/visual_studio/v8_base.vcproj @@ -237,6 +237,10 @@ > </File> <File + RelativePath="..\..\src\cached-powers.h" + > + </File> + <File RelativePath="..\..\src\char-predicates-inl.h" > </File> @@ -405,6 +409,18 @@ > </File> <File + RelativePath="..\..\src\diy-fp.cc" + > + </File> + <File + RelativePath="..\..\src\diy-fp.h" + > + </File> + <File + RelativePath="..\..\src\double.h" + > + </File> + <File RelativePath="..\..\src\execution.cc" > </File> @@ -433,6 +449,14 @@ > </File> <File + RelativePath="..\..\src\fast-dtoa.cc" + > + </File> + <File + RelativePath="..\..\src\fast-dtoa.h" + > + </File> + <File RelativePath="..\..\src\flags.cc" > </File> @@ -577,6 +601,10 @@ > </File> <File + RelativePath="..\..\src\jump-target-heavy-inl.h" + > + </File> + <File RelativePath="..\..\src\jump-target.cc" > </File> @@ -585,6 +613,10 @@ > </File> <File + RelativePath="..\..\src\jump-target-heavy.cc" + > + </File> + <File RelativePath="..\..\src\jsregexp.cc" > </File> @@ -665,10 +697,6 @@ > </File> <File - RelativePath="..\..\src\number-info.h" - > - </File> - <File RelativePath="..\..\src\objects-debug.cc" > <FileConfiguration @@ -909,6 +937,14 @@ > </File> <File + RelativePath="..\..\src\type-info-inl.h" + > + </File> + <File + RelativePath="..\..\src\type-info.h" + > + </File> + <File RelativePath="..\..\src\unicode-inl.h" > </File> @@ -969,6 +1005,10 @@ > </File> <File + RelativePath="..\..\src\virtual-frame-heavy-inl.h" + > + </File> + <File RelativePath="..\..\src\virtual-frame.h" > </File> @@ -985,6 +1025,10 @@ > </File> <File + RelativePath="..\..\src\virtual-frame-heavy.cc" + > + </File> + <File RelativePath="..\..\src\zone-inl.h" > </File> diff --git a/deps/v8/tools/visual_studio/v8_base_arm.vcproj b/deps/v8/tools/visual_studio/v8_base_arm.vcproj index 9043d58acc..2fc45133a7 100644 --- a/deps/v8/tools/visual_studio/v8_base_arm.vcproj +++ b/deps/v8/tools/visual_studio/v8_base_arm.vcproj @@ -581,6 +581,14 @@ > </File> <File + RelativePath="..\..\src\jump-target-inl.h" + > + </File> + <File + RelativePath="..\..\src\jump-target-light-inl.h" + > + </File> + <File RelativePath="..\..\src\jump-target.cc" > </File> @@ -589,6 +597,10 @@ > </File> <File + RelativePath="..\..\src\jump-target-light.cc" + > + </File> + <File RelativePath="..\..\src\jsregexp.cc" > </File> @@ -669,10 +681,6 @@ > </File> <File - RelativePath="..\..\src\number-info.h" - > - </File> - <File RelativePath="..\..\src\objects-debug.cc" > <FileConfiguration @@ -921,6 +929,14 @@ > </File> <File + RelativePath="..\..\src\type-info-inl.h" + > + </File> + <File + RelativePath="..\..\src\type-info.h" + > + </File> + <File RelativePath="..\..\src\unicode-inl.h" > </File> @@ -981,6 +997,10 @@ > </File> <File + RelativePath="..\..\src\virtual-frame-light-inl.h" + > + </File> + <File RelativePath="..\..\src\virtual-frame.h" > </File> @@ -997,6 +1017,10 @@ > </File> <File + RelativePath="..\..\src\virtual-frame-light.cc" + > + </File> + <File RelativePath="..\..\src\zone-inl.h" > </File> diff --git a/deps/v8/tools/visual_studio/v8_base_x64.vcproj b/deps/v8/tools/visual_studio/v8_base_x64.vcproj index 30437530a9..0adf24a90c 100644 --- a/deps/v8/tools/visual_studio/v8_base_x64.vcproj +++ b/deps/v8/tools/visual_studio/v8_base_x64.vcproj @@ -578,6 +578,10 @@ > </File> <File + RelativePath="..\..\src\jump-target-heavy-inl.h" + > + </File> + <File 
RelativePath="..\..\src\jump-target.cc" > </File> @@ -586,6 +590,10 @@ > </File> <File + RelativePath="..\..\src\jump-target-heavy.cc" + > + </File> + <File RelativePath="..\..\src\jsregexp.cc" > </File> @@ -666,10 +674,6 @@ > </File> <File - RelativePath="..\..\src\number-info.h" - > - </File> - <File RelativePath="..\..\src\objects-debug.cc" > <FileConfiguration @@ -910,6 +914,14 @@ > </File> <File + RelativePath="..\..\src\type-info-inl.h" + > + </File> + <File + RelativePath="..\..\src\type-info.h" + > + </File> + <File RelativePath="..\..\src\unicode-inl.h" > </File> @@ -970,6 +982,10 @@ > </File> <File + RelativePath="..\..\src\virtual-frame-heavy-inl.h" + > + </File> + <File RelativePath="..\..\src\virtual-frame.h" > </File> @@ -986,6 +1002,10 @@ > </File> <File + RelativePath="..\..\src\virtual-frame-heavy.cc" + > + </File> + <File RelativePath="..\..\src\zone-inl.h" > </File> diff --git a/deps/v8/tools/visual_studio/v8_x64.vcproj b/deps/v8/tools/visual_studio/v8_x64.vcproj index a476d7dca7..5ffd291480 100644 --- a/deps/v8/tools/visual_studio/v8_x64.vcproj +++ b/deps/v8/tools/visual_studio/v8_x64.vcproj @@ -135,15 +135,15 @@ > </File> <File - RelativePath="..\..\src\date-delay.js" + RelativePath="..\..\src\date.js" > </File> <File - RelativePath="..\..\src\debug-delay.js" + RelativePath="..\..\src\debug-debugger.js" > </File> <File - RelativePath="..\..\src\liveedit-delay.js" + RelativePath="..\..\src\liveedit-debugger.js" > </File> <File @@ -159,15 +159,15 @@ > </File> <File - RelativePath="..\..\src\mirror-delay.js" + RelativePath="..\..\src\mirror-debugger.js" > </File> <File - RelativePath="..\..\src\regexp-delay.js" + RelativePath="..\..\src\regexp.js" > </File> <File - RelativePath="..\..\src\json-delay.js" + RelativePath="..\..\src\json.js" > </File> <File @@ -192,7 +192,7 @@ Name="VCCustomBuildTool" Description="Processing js files..." CommandLine=".\js2c.cmd ..\..\src "$(IntDir)\DerivedSources"" - AdditionalDependencies="..\..\src\macros.py;..\..\src\runtime.js;..\..\src\v8natives.js;..\..\src\array.js;..\..\src\string.js;..\..\src\uri.js;..\..\src\math.js;..\..\src\messages.js;..\..\src\apinatives.js;..\..\src\debug-delay.js;..\..\src\mirror-delay.js;..\..\src\date-delay.js;..\..\src\regexp-delay.js;..\..\src\json-delay.js" + AdditionalDependencies="..\..\src\macros.py;..\..\src\runtime.js;..\..\src\v8natives.js;..\..\src\array.js;..\..\src\string.js;..\..\src\uri.js;..\..\src\math.js;..\..\src\messages.js;..\..\src\apinatives.js;..\..\src\debug-debugger.js;..\..\src\mirror-debugger.js;..\..\src\liveedit-debugger.js;..\..\src\date.js;..\..\src\regexp.js;..\..\src\json.js" Outputs="$(IntDir)\DerivedSources\natives.cc;$(IntDir)\DerivedSources\natives-empty.cc" /> </FileConfiguration> @@ -203,7 +203,7 @@ Name="VCCustomBuildTool" Description="Processing js files..." 
CommandLine=".\js2c.cmd ..\..\src "$(IntDir)\DerivedSources"" - AdditionalDependencies="..\..\src\macros.py;..\..\src\runtime.js;..\..\src\v8natives.js;..\..\src\array.js;..\..\src\string.js;..\..\src\uri.js;..\..\src\math.js;..\..\src\messages.js;..\..\src\apinatives.js;..\..\src\debug-delay.js;..\..\src\mirror-delay.js;..\..\src\date-delay.js;..\..\src\regexp-delay.js;..\..\src\json-delay.js" + AdditionalDependencies="..\..\src\macros.py;..\..\src\runtime.js;..\..\src\v8natives.js;..\..\src\array.js;..\..\src\string.js;..\..\src\uri.js;..\..\src\math.js;..\..\src\messages.js;..\..\src\apinatives.js;..\..\src\debug-debugger.js;..\..\src\mirror-debugger.js;..\..\src\liveedit-debugger.js;..\..\src\date.js;..\..\src\regexp.js;..\..\src\json.js" Outputs="$(IntDir)\DerivedSources\natives.cc;$(IntDir)\DerivedSources\natives-empty.cc" /> </FileConfiguration> |