124 files changed, 7951 insertions, 2392 deletions
diff --git a/deps/v8/ChangeLog b/deps/v8/ChangeLog index 83ebc02530..a932e0bc30 100644 --- a/deps/v8/ChangeLog +++ b/deps/v8/ChangeLog @@ -1,3 +1,50 @@ +2009-07-30: Version 1.3.1 + + Speed improvements to accessors and interceptors. + + Added support for capturing stack information on custom errors. + + Added support for morphing an object into a pixel array where its + indexed properties are stored in an external byte array. Values written + are always clamped to the 0..255 interval. + + Profiler on x64 now handles C/C++ functions from shared libraries. + + Changed the debugger to avoid stepping into function.call/apply if the + function is a built-in. + + Initial implementation of constructor heap profile for JS objects. + + More fine grained control of profiling aspects through the API. + + Optimized the called as constructor check for API calls. + + +2009-07-27: Version 1.3.0 + + Allowed RegExp objects to be called as functions (issue 132). + + Fixed issue where global property cells would escape after + detaching the global object; see http://crbug.com/16276. + + Added support for stepping into setters and getters in the + debugger. + + Changed the debugger to avoid stopping in its own JavaScript code + and in the code of built-in functions. + + Fixed issue 345 by avoiding duplicate escaping labels. + + Fixed ARM code generator crash in short-circuited boolean + expressions and added regression tests. + + Added an external allocation limit to avoid issues where small V8 + objects would hold on to large amounts of external memory without + causing garbage collections. + + Finished more of the inline caching stubs for x64 targets. + + 2009-07-13: Version 1.2.14 Added separate paged heap space for global property cells and diff --git a/deps/v8/SConstruct b/deps/v8/SConstruct index 78b050d735..dbcd616862 100644 --- a/deps/v8/SConstruct +++ b/deps/v8/SConstruct @@ -149,31 +149,22 @@ LIBRARY_FLAGS = { '-Wstrict-aliasing=2'], 'CPPPATH': ANDROID_INCLUDES, }, - 'wordsize:32': { - 'arch:x64': { - 'CCFLAGS': ['-m64'], - 'LINKFLAGS': ['-m64'] - } - }, - 'wordsize:64': { - 'arch:ia32': { - 'CCFLAGS': ['-m32'], - 'LINKFLAGS': ['-m32'] - }, - 'arch:arm': { - 'CCFLAGS': ['-m32'], - 'LINKFLAGS': ['-m32'] - } - }, 'arch:ia32': { - 'CPPDEFINES': ['V8_TARGET_ARCH_IA32'] + 'CPPDEFINES': ['V8_TARGET_ARCH_IA32'], + 'CCFLAGS': ['-m32'], + 'LINKFLAGS': ['-m32'] }, 'arch:arm': { 'CPPDEFINES': ['V8_TARGET_ARCH_ARM'] }, + 'simulator:arm': { + 'CCFLAGS': ['-m32'], + 'LINKFLAGS': ['-m32'] + }, 'arch:x64': { - 'CCFLAGS': ['-fno-strict-aliasing'], - 'CPPDEFINES': ['V8_TARGET_ARCH_X64'] + 'CPPDEFINES': ['V8_TARGET_ARCH_X64'], + 'CCFLAGS': ['-fno-strict-aliasing', '-m64'], + 'LINKFLAGS': ['-m64'], }, 'prof:oprofile': { 'CPPDEFINES': ['ENABLE_OPROFILE_AGENT'] @@ -341,22 +332,6 @@ CCTEST_EXTRA_FLAGS = { 'CPPDEFINES': ['SK_RELEASE', 'NDEBUG'] } }, - 'wordsize:32': { - 'arch:x64': { - 'CCFLAGS': ['-m64'], - 'LINKFLAGS': ['-m64'] - } - }, - 'wordsize:64': { - 'arch:ia32': { - 'CCFLAGS': ['-m32'], - 'LINKFLAGS': ['-m32'] - }, - 'arch:arm': { - 'CCFLAGS': ['-m32'], - 'LINKFLAGS': ['-m32'] - } - } }, 'msvc': { 'all': { @@ -408,21 +383,17 @@ SAMPLE_FLAGS = { 'CPPDEFINES': ['SK_RELEASE', 'NDEBUG'] } }, - 'wordsize:32': { - 'arch:x64': { - 'CCFLAGS': ['-m64'], - 'LINKFLAGS': ['-m64'] - } + 'arch:ia32': { + 'CCFLAGS': ['-m32'], + 'LINKFLAGS': ['-m32'] }, - 'wordsize:64': { - 'arch:ia32': { - 'CCFLAGS': ['-m32'], - 'LINKFLAGS': ['-m32'] - }, - 'arch:arm': { - 'CCFLAGS': ['-m32'], - 'LINKFLAGS': ['-m32'] - } + 'arch:x64': { + 
'CCFLAGS': ['-m64'], + 'LINKFLAGS': ['-m64'] + }, + 'simulator:arm': { + 'CCFLAGS': ['-m32'], + 'LINKFLAGS': ['-m32'] }, 'mode:release': { 'CCFLAGS': ['-O2'] @@ -533,7 +504,6 @@ def GuessToolchain(os): OS_GUESS = utils.GuessOS() TOOLCHAIN_GUESS = GuessToolchain(OS_GUESS) ARCH_GUESS = utils.GuessArchitecture() -WORDSIZE_GUESS = utils.GuessWordsize() SIMPLE_OPTIONS = { @@ -587,11 +557,6 @@ SIMPLE_OPTIONS = { 'default': 'on', 'help': 'use Microsoft Visual C++ link-time code generation' }, - 'wordsize': { - 'values': ['64', '32'], - 'default': WORDSIZE_GUESS, - 'help': 'the word size' - }, 'simulator': { 'values': ['arm', 'none'], 'default': 'none', diff --git a/deps/v8/benchmarks/run.html b/deps/v8/benchmarks/run.html index 050764e013..ef2c186412 100755..100644 --- a/deps/v8/benchmarks/run.html +++ b/deps/v8/benchmarks/run.html @@ -55,9 +55,35 @@ function Run() { NotifyScore: AddScore }); } +function ShowWarningIfObsolete() { + // If anything goes wrong we will just catch the exception and no + // warning is shown, i.e., no harm is done. + try { + var xmlhttp; + var next_version = parseInt(BenchmarkSuite.version) + 1; + var next_version_url = "../v" + next_version + "/run.html"; + if (window.XMLHttpRequest) { + xmlhttp = new window.XMLHttpRequest(); + } else if (window.ActiveXObject) { + xmlhttp = new window.ActiveXObject("Microsoft.XMLHTTP"); + } + xmlhttp.open('GET', next_version_url, true); + xmlhttp.onreadystatechange = function() { + if (xmlhttp.readyState == 4 && xmlhttp.status == 200) { + document.getElementById('obsolete').style.display="block"; + } + }; + xmlhttp.send(null); + } catch(e) { + // Ignore exception if check for next version fails. + // Hence no warning is displayed. + } +} + function Load() { var version = BenchmarkSuite.version; document.getElementById("version").innerHTML = version; + ShowWarningIfObsolete(); setTimeout(Run, 200); } </script> @@ -65,6 +91,12 @@ function Load() { <body onload="Load()"> <div> <div class="title"><h1>V8 Benchmark Suite - version <span id="version">?</span></h1></div> + <div class="warning" id="obsolete"> +Warning! This is not the latest version of the V8 benchmark +suite. Consider running the +<a href="http://v8.googlecode.com/svn/data/benchmarks/current/run.html"> +latest version</a>. + </div> <table> <tr> <td class="contents"> diff --git a/deps/v8/benchmarks/style.css b/deps/v8/benchmarks/style.css index 46320c1ebe..d9f4dbfc0c 100755..100644 --- a/deps/v8/benchmarks/style.css +++ b/deps/v8/benchmarks/style.css @@ -55,6 +55,15 @@ div.run { border: 1px solid rgb(51, 102, 204); } +div.warning { + background: #ffffd9; + border: 1px solid #d2d26a; + display: none; + margin: 1em 0 2em; + padding: 8px; + text-align: center; +} + #status { text-align: center; margin-top: 50px; diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h index 8f22c81b65..5e3dbffb68 100644 --- a/deps/v8/include/v8.h +++ b/deps/v8/include/v8.h @@ -180,7 +180,7 @@ template <class T> class V8EXPORT_INLINE Handle { /** * Creates an empty handle. */ - Handle(); + inline Handle(); /** * Creates a new handle for the specified value. 
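The v8.h hunks above and below change the Handle, Local, and Persistent members from out-of-line declarations to explicit inline definitions. As a reference point, a minimal sketch of how an embedder exercises these three handle types against the pre-isolate API of this era (illustrative snippet, not part of the patch; error handling omitted):

#include <v8.h>

void HandleBasics() {
  v8::HandleScope scope;  // Owns every Local created below.
  v8::Persistent<v8::Context> context = v8::Context::New();
  v8::Context::Scope context_scope(context);

  v8::Local<v8::String> source = v8::String::New("6 * 7");
  v8::Local<v8::Value> result = v8::Script::Compile(source)->Run();

  // Persistent handles outlive the enclosing HandleScope and must be
  // disposed explicitly to release their storage cells.
  v8::Persistent<v8::Value> kept = v8::Persistent<v8::Value>::New(result);
  kept.Dispose();
  context.Dispose();
}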
@@ -264,7 +264,7 @@ template <class T> class V8EXPORT_INLINE Handle { */ template <class T> class V8EXPORT_INLINE Local : public Handle<T> { public: - Local(); + inline Local(); template <class S> inline Local(Local<S> that) : Handle<T>(reinterpret_cast<T*>(*that)) { /** @@ -284,7 +284,7 @@ template <class T> class V8EXPORT_INLINE Local : public Handle<T> { * The referee is kept alive by the local handle even when * the original handle is destroyed/disposed. */ - static Local<T> New(Handle<T> that); + inline static Local<T> New(Handle<T> that); }; @@ -312,7 +312,7 @@ template <class T> class V8EXPORT_INLINE Persistent : public Handle<T> { * Creates an empty persistent handle that doesn't point to any * storage cell. */ - Persistent(); + inline Persistent(); /** * Creates a persistent handle for the same storage cell as the @@ -353,7 +353,7 @@ template <class T> class V8EXPORT_INLINE Persistent : public Handle<T> { * Creates a new persistent handle for an existing local or * persistent handle. */ - static Persistent<T> New(Handle<T> that); + inline static Persistent<T> New(Handle<T> that); /** * Releases the storage cell referenced by this persistent handle. @@ -361,7 +361,7 @@ template <class T> class V8EXPORT_INLINE Persistent : public Handle<T> { * This handle's reference, and any any other references to the storage * cell remain and IsEmpty will still return false. */ - void Dispose(); + inline void Dispose(); /** * Make the reference to this object weak. When only weak handles @@ -369,20 +369,20 @@ template <class T> class V8EXPORT_INLINE Persistent : public Handle<T> { * callback to the given V8::WeakReferenceCallback function, passing * it the object reference and the given parameters. */ - void MakeWeak(void* parameters, WeakReferenceCallback callback); + inline void MakeWeak(void* parameters, WeakReferenceCallback callback); /** Clears the weak reference to this object.*/ - void ClearWeak(); + inline void ClearWeak(); /** *Checks if the handle holds the only reference to an object. */ - bool IsNearDeath() const; + inline bool IsNearDeath() const; /** * Returns true if the handle's reference is weak. */ - bool IsWeak() const; + inline bool IsWeak() const; private: friend class ImplementationUtilities; @@ -1113,6 +1113,13 @@ class V8EXPORT Object : public Value { /** Sets the value in an internal field. */ void SetInternalField(int index, Handle<Value> value); + // The two functions below do not perform index bounds checks and + // they do not check that the VM is still running. Use with caution. + /** Gets a native pointer from an internal field. */ + void* GetPointerFromInternalField(int index); + /** Sets a native pointer in an internal field. */ + void SetPointerInInternalField(int index, void* value); + // Testers for local properties. bool HasRealNamedProperty(Handle<String> key); bool HasRealIndexedProperty(uint32_t index); @@ -1162,6 +1169,15 @@ class V8EXPORT Object : public Value { */ Local<Object> Clone(); + /** + * Set the backing store of the indexed properties to be managed by the + * embedding layer. Access to the indexed properties will follow the rules + * spelled out in CanvasPixelArray. + * Note: The embedding program still owns the data and needs to ensure that + * the backing store is preserved while V8 has a reference. + */ + void SetIndexedPropertiesToPixelData(uint8_t* data, int length); + static Local<Object> New(); static Object* Cast(Value* obj); private: @@ -1951,6 +1967,20 @@ typedef Persistent<Context> (*ContextGenerator)(); /** + * Profiler modules. 
+ * + * In V8, profiler consists of several modules: CPU profiler, and different + * kinds of heap profiling. Each can be turned on / off independently. + */ +enum ProfilerModules { + PROFILER_MODULE_NONE = 0, + PROFILER_MODULE_CPU = 1, + PROFILER_MODULE_HEAP_STATS = 1 << 1, + PROFILER_MODULE_JS_CONSTRUCTORS = 1 << 2 +}; + + +/** * Container class for static utility functions. */ class V8EXPORT V8 { @@ -2104,6 +2134,32 @@ class V8EXPORT V8 { static bool IsProfilerPaused(); /** + * Resumes specified profiler modules. + * "ResumeProfiler" is equivalent to "ResumeProfilerEx(PROFILER_MODULE_CPU)". + * See ProfilerModules enum. + * + * \param flags Flags specifying profiler modules. + */ + static void ResumeProfilerEx(int flags); + + /** + * Pauses specified profiler modules. + * "PauseProfiler" is equivalent to "PauseProfilerEx(PROFILER_MODULE_CPU)". + * See ProfilerModules enum. + * + * \param flags Flags specifying profiler modules. + */ + static void PauseProfilerEx(int flags); + + /** + * Returns active (resumed) profiler modules. + * See ProfilerModules enum. + * + * \returns active profiler modules. + */ + static int GetActiveProfilerModules(); + + /** * If logging is performed into a memory buffer (via --logfile=*), allows to * retrieve previously written messages. This can be used for retrieving * profiler log data in the application. This function is thread-safe. diff --git a/deps/v8/src/SConscript b/deps/v8/src/SConscript index f1ca8753c2..f9f9634081 100755 --- a/deps/v8/src/SConscript +++ b/deps/v8/src/SConscript @@ -40,7 +40,7 @@ SOURCES = { 'codegen.cc', 'compilation-cache.cc', 'compiler.cc', 'contexts.cc', 'conversions.cc', 'counters.cc', 'dateparser.cc', 'debug.cc', 'debug-agent.cc', 'disassembler.cc', 'execution.cc', 'factory.cc', - 'flags.cc', 'frames.cc', 'func-name-inferrer.cc', + 'flags.cc', 'frame-element.cc', 'frames.cc', 'func-name-inferrer.cc', 'global-handles.cc', 'handles.cc', 'hashmap.cc', 'heap.cc', 'ic.cc', 'interpreter-irregexp.cc', 'jsregexp.cc', 'jump-target.cc', 'log.cc', 'log-utils.cc', 'mark-compact.cc', 'messages.cc', diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc index b9e0cec8b6..08281012bd 100644 --- a/deps/v8/src/api.cc +++ b/deps/v8/src/api.cc @@ -1085,8 +1085,9 @@ Local<Script> Script::Compile(v8::Handle<String> source, // handle it if it turns out not to be in release mode. 
ASSERT(pre_data == NULL || pre_data->SanityCheck()); // If the pre-data isn't sane we simply ignore it - if (pre_data != NULL && !pre_data->SanityCheck()) + if (pre_data != NULL && !pre_data->SanityCheck()) { pre_data = NULL; + } i::Handle<i::JSFunction> boilerplate = i::Compiler::Compile(str, name_obj, line_offset, @@ -2193,6 +2194,25 @@ bool v8::Object::DeleteHiddenValue(v8::Handle<v8::String> key) { } +void v8::Object::SetIndexedPropertiesToPixelData(uint8_t* data, int length) { + ON_BAILOUT("v8::SetElementsToPixelData()", return); + ENTER_V8; + if (!ApiCheck(i::Smi::IsValid(length), + "v8::Object::SetIndexedPropertiesToPixelData()", + "length exceeds max acceptable value")) { + return; + } + i::Handle<i::JSObject> self = Utils::OpenHandle(this); + if (!ApiCheck(!self->IsJSArray(), + "v8::Object::SetIndexedPropertiesToPixelData()", + "JSArray is not supported")) { + return; + } + i::Handle<i::PixelArray> pixels = i::Factory::NewPixelArray(length, data); + self->set_elements(*pixels); +} + + Local<v8::Object> Function::NewInstance() const { return NewInstance(0, NULL); } @@ -2464,6 +2484,44 @@ void v8::Object::SetInternalField(int index, v8::Handle<Value> value) { } +void* v8::Object::GetPointerFromInternalField(int index) { + i::Handle<i::JSObject> obj = Utils::OpenHandle(this); + i::Object* pointer = obj->GetInternalField(index); + if (pointer->IsSmi()) { + // Fast case, aligned native pointer. + return pointer; + } + + // Read from uninitialized field. + if (!pointer->IsProxy()) { + // Play safe even if it's something unexpected. + ASSERT(pointer->IsUndefined()); + return NULL; + } + + // Unaligned native pointer. + return reinterpret_cast<void*>(i::Proxy::cast(pointer)->proxy()); +} + + +void v8::Object::SetPointerInInternalField(int index, void* value) { + i::Handle<i::JSObject> obj = Utils::OpenHandle(this); + i::Object* as_object = reinterpret_cast<i::Object*>(value); + if (as_object->IsSmi()) { + // Aligned pointer, store as is. + obj->SetInternalField(index, as_object); + } else { + // Currently internal fields are used by DOM wrappers which only + // get garbage collected by the mark-sweep collector, so we + // pretenure the proxy. 
+ HandleScope scope; + i::Handle<i::Proxy> proxy = + i::Factory::NewProxy(reinterpret_cast<i::Address>(value), i::TENURED); + if (!proxy.is_null()) obj->SetInternalField(index, *proxy); + } +} + + // --- E n v i r o n m e n t --- bool v8::V8::Initialize() { @@ -3018,7 +3076,7 @@ Local<Object> Array::CloneElementAt(uint32_t index) { if (!self->HasFastElements()) { return Local<Object>(); } - i::FixedArray* elms = self->elements(); + i::FixedArray* elms = i::FixedArray::cast(self->elements()); i::Object* paragon = elms->get(index); if (!paragon->IsJSObject()) { return Local<Object>(); @@ -3177,6 +3235,46 @@ bool V8::IsProfilerPaused() { } +void V8::ResumeProfilerEx(int flags) { +#ifdef ENABLE_LOGGING_AND_PROFILING + if (flags & PROFILER_MODULE_CPU) { + i::Logger::ResumeProfiler(); + } + if (flags & (PROFILER_MODULE_HEAP_STATS | PROFILER_MODULE_JS_CONSTRUCTORS)) { + i::FLAG_log_gc = true; + } +#endif +} + + +void V8::PauseProfilerEx(int flags) { +#ifdef ENABLE_LOGGING_AND_PROFILING + if (flags & PROFILER_MODULE_CPU) { + i::Logger::PauseProfiler(); + } + if (flags & (PROFILER_MODULE_HEAP_STATS | PROFILER_MODULE_JS_CONSTRUCTORS)) { + i::FLAG_log_gc = false; + } +#endif +} + + +int V8::GetActiveProfilerModules() { +#ifdef ENABLE_LOGGING_AND_PROFILING + int result = PROFILER_MODULE_NONE; + if (!i::Logger::IsProfilerPaused()) { + result |= PROFILER_MODULE_CPU; + } + if (i::FLAG_log_gc) { + result |= PROFILER_MODULE_HEAP_STATS | PROFILER_MODULE_JS_CONSTRUCTORS; + } + return result; +#else + return PROFILER_MODULE_NONE; +#endif +} + + int V8::GetLogLines(int from_pos, char* dest_buf, int max_size) { #ifdef ENABLE_LOGGING_AND_PROFILING return i::Logger::GetLogLines(from_pos, dest_buf, max_size); diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc index 3f7ccf54ac..5f8149e2ea 100644 --- a/deps/v8/src/arm/codegen-arm.cc +++ b/deps/v8/src/arm/codegen-arm.cc @@ -2897,7 +2897,7 @@ void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) { __ ldr(r1, FieldMemOperand(r1, JSObject::kElementsOffset)); // Write to the indexed properties array. - int offset = i * kPointerSize + Array::kHeaderSize; + int offset = i * kPointerSize + FixedArray::kHeaderSize; __ str(r0, FieldMemOperand(r1, offset)); // Update the write barrier for the array address. @@ -3737,7 +3737,8 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) { } frame_->EmitPush(r0); // r0 has result } - ASSERT((has_cc() && frame_->height() == original_height) || + ASSERT(!has_valid_frame() || + (has_cc() && frame_->height() == original_height) || (!has_cc() && frame_->height() == original_height + 1)); } @@ -3871,22 +3872,12 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) { &is_true, false_target(), false); - if (has_cc()) { - Branch(false, false_target()); - - // Evaluate right side expression. - is_true.Bind(); - LoadConditionAndSpill(node->right(), - NOT_INSIDE_TYPEOF, - true_target(), - false_target(), - false); - - } else { + if (has_valid_frame() && !has_cc()) { + // The left-hand side result is on top of the virtual frame. JumpTarget pop_and_continue; JumpTarget exit; - __ ldr(r0, frame_->Top()); // dup the stack top + __ ldr(r0, frame_->Top()); // Duplicate the stack top. frame_->EmitPush(r0); // Avoid popping the result if it converts to 'false' using the // standard ToBoolean() conversion as described in ECMA-262, @@ -3904,6 +3895,22 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) { // Exit (always with a materialized value). 
exit.Bind(); + } else if (has_cc() || is_true.is_linked()) { + // The left-hand side is either (a) partially compiled to + // control flow with a final branch left to emit or (b) fully + // compiled to control flow and possibly true. + if (has_cc()) { + Branch(false, false_target()); + } + is_true.Bind(); + LoadConditionAndSpill(node->right(), + NOT_INSIDE_TYPEOF, + true_target(), + false_target(), + false); + } else { + // Nothing to do. + ASSERT(!has_valid_frame() && !has_cc() && !is_true.is_linked()); } } else if (op == Token::OR) { @@ -3913,18 +3920,8 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) { true_target(), &is_false, false); - if (has_cc()) { - Branch(true, true_target()); - - // Evaluate right side expression. - is_false.Bind(); - LoadConditionAndSpill(node->right(), - NOT_INSIDE_TYPEOF, - true_target(), - false_target(), - false); - - } else { + if (has_valid_frame() && !has_cc()) { + // The left-hand side result is on top of the virtual frame. JumpTarget pop_and_continue; JumpTarget exit; @@ -3946,6 +3943,22 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) { // Exit (always with a materialized value). exit.Bind(); + } else if (has_cc() || is_false.is_linked()) { + // The left-hand side is either (a) partially compiled to + // control flow with a final branch left to emit or (b) fully + // compiled to control flow and possibly false. + if (has_cc()) { + Branch(true, true_target()); + } + is_false.Bind(); + LoadConditionAndSpill(node->right(), + NOT_INSIDE_TYPEOF, + true_target(), + false_target(), + false); + } else { + // Nothing to do. + ASSERT(!has_valid_frame() && !has_cc() && !is_false.is_linked()); } } else { @@ -3989,7 +4002,8 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) { } frame_->EmitPush(r0); } - ASSERT((has_cc() && frame_->height() == original_height) || + ASSERT(!has_valid_frame() || + (has_cc() && frame_->height() == original_height) || (!has_cc() && frame_->height() == original_height + 1)); } diff --git a/deps/v8/src/arm/codegen-arm.h b/deps/v8/src/arm/codegen-arm.h index 6391a8eb9a..42c59081c5 100644 --- a/deps/v8/src/arm/codegen-arm.h +++ b/deps/v8/src/arm/codegen-arm.h @@ -215,7 +215,7 @@ class CodeGenerator: public AstVisitor { #define DEF_VISIT(type) \ void Visit##type(type* node); - NODE_LIST(DEF_VISIT) + AST_NODE_LIST(DEF_VISIT) #undef DEF_VISIT // Visit a statement and then spill the virtual frame if control flow can @@ -374,7 +374,7 @@ class CodeGenerator: public AstVisitor { // information. void CodeForFunctionPosition(FunctionLiteral* fun); void CodeForReturnPosition(FunctionLiteral* fun); - void CodeForStatementPosition(Node* node); + void CodeForStatementPosition(AstNode* node); void CodeForSourcePosition(int pos); #ifdef DEBUG diff --git a/deps/v8/src/arm/ic-arm.cc b/deps/v8/src/arm/ic-arm.cc index 2ca74a9547..8781256643 100644 --- a/deps/v8/src/arm/ic-arm.cc +++ b/deps/v8/src/arm/ic-arm.cc @@ -77,6 +77,13 @@ static void GenerateDictionaryLoad(MacroAssembler* masm, __ cmp(r3, Operand(JS_GLOBAL_PROXY_TYPE)); __ b(eq, miss); + // Possible work-around for http://crbug.com/16276. + // See also: http://codereview.chromium.org/155418. + __ cmp(r3, Operand(JS_GLOBAL_OBJECT_TYPE)); + __ b(eq, miss); + __ cmp(r3, Operand(JS_BUILTINS_OBJECT_TYPE)); + __ b(eq, miss); + // Check that the properties array is a dictionary. 
__ ldr(t0, FieldMemOperand(t1, JSObject::kPropertiesOffset)); __ ldr(r3, FieldMemOperand(t0, HeapObject::kMapOffset)); @@ -84,14 +91,14 @@ static void GenerateDictionaryLoad(MacroAssembler* masm, __ b(ne, miss); // Compute the capacity mask. - const int kCapacityOffset = - Array::kHeaderSize + StringDictionary::kCapacityIndex * kPointerSize; + const int kCapacityOffset = StringDictionary::kHeaderSize + + StringDictionary::kCapacityIndex * kPointerSize; __ ldr(r3, FieldMemOperand(t0, kCapacityOffset)); __ mov(r3, Operand(r3, ASR, kSmiTagSize)); // convert smi to int __ sub(r3, r3, Operand(1)); - const int kElementsStartOffset = - Array::kHeaderSize + StringDictionary::kElementsStartIndex * kPointerSize; + const int kElementsStartOffset = StringDictionary::kHeaderSize + + StringDictionary::kElementsStartIndex * kPointerSize; // Generate an unrolled loop that performs a few probes before // giving up. Measurements done on Gmail indicate that 2 probes @@ -575,8 +582,8 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { __ ldr(r1, FieldMemOperand(r1, JSObject::kElementsOffset)); // Check that the object is in fast mode (not dictionary). __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset)); - __ cmp(r3, Operand(Factory::hash_table_map())); - __ b(eq, &slow); + __ cmp(r3, Operand(Factory::fixed_array_map())); + __ b(ne, &slow); // Check that the key (index) is within bounds. __ ldr(r3, FieldMemOperand(r1, Array::kLengthOffset)); __ cmp(r0, Operand(r3)); @@ -592,7 +599,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { // Fast case: Do the load. __ bind(&fast); - __ add(r3, r1, Operand(Array::kHeaderSize - kHeapObjectTag)); + __ add(r3, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); __ ldr(r0, MemOperand(r3, r0, LSL, kPointerSizeLog2)); __ cmp(r0, Operand(Factory::the_hole_value())); // In case the loaded value is the_hole we have to consult GetProperty @@ -654,14 +661,14 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) { __ ldr(r3, FieldMemOperand(r3, JSObject::kElementsOffset)); // Check that the object is in fast mode (not dictionary). __ ldr(r2, FieldMemOperand(r3, HeapObject::kMapOffset)); - __ cmp(r2, Operand(Factory::hash_table_map())); - __ b(eq, &slow); + __ cmp(r2, Operand(Factory::fixed_array_map())); + __ b(ne, &slow); // Untag the key (for checking against untagged length in the fixed array). __ mov(r1, Operand(r1, ASR, kSmiTagSize)); // Compute address to store into and check array bounds. - __ add(r2, r3, Operand(Array::kHeaderSize - kHeapObjectTag)); + __ add(r2, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); __ add(r2, r2, Operand(r1, LSL, kPointerSizeLog2)); - __ ldr(ip, FieldMemOperand(r3, Array::kLengthOffset)); + __ ldr(ip, FieldMemOperand(r3, FixedArray::kLengthOffset)); __ cmp(r1, Operand(ip)); __ b(lo, &fast); @@ -689,7 +696,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) { __ mov(r3, Operand(r2)); // NOTE: Computing the address to store into must take the fact // that the key has been incremented into account. 
- int displacement = Array::kHeaderSize - kHeapObjectTag - + int displacement = FixedArray::kHeaderSize - kHeapObjectTag - ((1 << kSmiTagSize) * 2); __ add(r2, r2, Operand(displacement)); __ add(r2, r2, Operand(r1, LSL, kPointerSizeLog2 - kSmiTagSize)); @@ -703,8 +710,8 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) { __ bind(&array); __ ldr(r2, FieldMemOperand(r3, JSObject::kElementsOffset)); __ ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset)); - __ cmp(r1, Operand(Factory::hash_table_map())); - __ b(eq, &slow); + __ cmp(r1, Operand(Factory::fixed_array_map())); + __ b(ne, &slow); // Check the key against the length in the array, compute the // address to store into and fall through to fast case. @@ -714,7 +721,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) { __ cmp(r1, Operand(ip)); __ b(hs, &extra); __ mov(r3, Operand(r2)); - __ add(r2, r2, Operand(Array::kHeaderSize - kHeapObjectTag)); + __ add(r2, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); __ add(r2, r2, Operand(r1, LSL, kPointerSizeLog2 - kSmiTagSize)); diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc index 47e2749c1e..875c91e964 100644 --- a/deps/v8/src/arm/macro-assembler-arm.cc +++ b/deps/v8/src/arm/macro-assembler-arm.cc @@ -226,7 +226,7 @@ void MacroAssembler::RecordWrite(Register object, Register offset, // Add the page header (including remembered set), array header, and array // body size to the page address. add(object, object, Operand(Page::kObjectStartOffset - + Array::kHeaderSize)); + + FixedArray::kHeaderSize)); add(object, object, Operand(scratch)); bind(&fast); diff --git a/deps/v8/src/arm/register-allocator-arm-inl.h b/deps/v8/src/arm/register-allocator-arm-inl.h index d98818f0f6..4691f29743 100644 --- a/deps/v8/src/arm/register-allocator-arm-inl.h +++ b/deps/v8/src/arm/register-allocator-arm-inl.h @@ -60,7 +60,7 @@ bool RegisterAllocator::IsReserved(Register reg) { int RegisterAllocator::ToNumber(Register reg) { ASSERT(reg.is_valid() && !IsReserved(reg)); - static int numbers[] = { + const int kNumbers[] = { 0, // r0 1, // r1 2, // r2 @@ -78,15 +78,15 @@ int RegisterAllocator::ToNumber(Register reg) { 11, // lr -1 // pc }; - return numbers[reg.code()]; + return kNumbers[reg.code()]; } Register RegisterAllocator::ToRegister(int num) { ASSERT(num >= 0 && num < kNumRegisters); - static Register registers[] = + const Register kRegisters[] = { r0, r1, r2, r3, r4, r5, r6, r7, r9, r10, ip, lr }; - return registers[num]; + return kRegisters[num]; } diff --git a/deps/v8/src/arm/stub-cache-arm.cc b/deps/v8/src/arm/stub-cache-arm.cc index 6d9ace84cf..393db59e49 100644 --- a/deps/v8/src/arm/stub-cache-arm.cc +++ b/deps/v8/src/arm/stub-cache-arm.cc @@ -164,7 +164,7 @@ void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm, __ ldr(dst, FieldMemOperand(src, offset)); } else { // Calculate the offset into the properties array. - int offset = index * kPointerSize + Array::kHeaderSize; + int offset = index * kPointerSize + FixedArray::kHeaderSize; __ ldr(dst, FieldMemOperand(src, JSObject::kPropertiesOffset)); __ ldr(dst, FieldMemOperand(dst, offset)); } @@ -330,7 +330,7 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm, __ RecordWrite(receiver_reg, name_reg, scratch); } else { // Write to the properties array. 
- int offset = index * kPointerSize + Array::kHeaderSize; + int offset = index * kPointerSize + FixedArray::kHeaderSize; // Get the properties array __ ldr(scratch, FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset)); __ str(r0, FieldMemOperand(scratch, offset)); @@ -467,21 +467,23 @@ void StubCompiler::GenerateLoadCallback(JSObject* object, // Push the arguments on the JS stack of the caller. __ push(receiver); // receiver + __ push(reg); // holder __ mov(ip, Operand(Handle<AccessorInfo>(callback))); // callback data __ push(ip); + __ ldr(reg, FieldMemOperand(ip, AccessorInfo::kDataOffset)); + __ push(reg); __ push(name_reg); // name - __ push(reg); // holder // Do tail-call to the runtime system. ExternalReference load_callback_property = ExternalReference(IC_Utility(IC::kLoadCallbackProperty)); - __ TailCallRuntime(load_callback_property, 4); + __ TailCallRuntime(load_callback_property, 5); } void StubCompiler::GenerateLoadInterceptor(JSObject* object, JSObject* holder, - Smi* lookup_hint, + LookupResult* lookup, Register receiver, Register name_reg, Register scratch1, @@ -500,13 +502,18 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object, __ push(receiver); // receiver __ push(reg); // holder __ push(name_reg); // name - __ mov(scratch1, Operand(lookup_hint)); + + InterceptorInfo* interceptor = holder->GetNamedInterceptor(); + ASSERT(!Heap::InNewSpace(interceptor)); + __ mov(scratch1, Operand(Handle<Object>(interceptor))); __ push(scratch1); + __ ldr(scratch2, FieldMemOperand(scratch1, InterceptorInfo::kDataOffset)); + __ push(scratch2); // Do tail-call to the runtime system. ExternalReference load_ic_property = - ExternalReference(IC_Utility(IC::kLoadInterceptorProperty)); - __ TailCallRuntime(load_ic_property, 4); + ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForLoad)); + __ TailCallRuntime(load_ic_property, 5); } @@ -676,13 +683,13 @@ Object* CallStubCompiler::CompileCallConstant(Object* object, case JSARRAY_HAS_FAST_ELEMENTS_CHECK: CheckPrototypes(JSObject::cast(object), r1, holder, r3, r2, name, &miss); - // Make sure object->elements()->map() != Heap::hash_table_map() + // Make sure object->HasFastElements(). // Get the elements array of the object. __ ldr(r3, FieldMemOperand(r1, JSObject::kElementsOffset)); // Check that the object is in fast mode (not dictionary). __ ldr(r2, FieldMemOperand(r3, HeapObject::kMapOffset)); - __ cmp(r2, Operand(Factory::hash_table_map())); - __ b(eq, &miss); + __ cmp(r2, Operand(Factory::fixed_array_map())); + __ b(ne, &miss); break; default: @@ -744,8 +751,6 @@ Object* CallStubCompiler::CompileCallGlobal(JSObject* object, // ----------------------------------- Label miss; - __ IncrementCounter(&Counters::call_global_inline, 1, r1, r3); - // Get the number of arguments. const int argc = arguments().immediate(); @@ -782,6 +787,7 @@ Object* CallStubCompiler::CompileCallGlobal(JSObject* object, __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset)); // Jump to the cached code (tail call). + __ IncrementCounter(&Counters::call_global_inline, 1, r1, r3); ASSERT(function->is_compiled()); Handle<Code> code(function->code()); ParameterCount expected(function->shared()->formal_parameter_count()); @@ -790,7 +796,6 @@ Object* CallStubCompiler::CompileCallGlobal(JSObject* object, // Handle call cache miss. 
__ bind(&miss); - __ DecrementCounter(&Counters::call_global_inline, 1, r1, r3); __ IncrementCounter(&Counters::call_global_inline_miss, 1, r1, r3); Handle<Code> ic = ComputeCallMiss(arguments().immediate()); __ Jump(ic, RelocInfo::CODE_TARGET); @@ -951,8 +956,6 @@ Object* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object, // ----------------------------------- Label miss; - __ IncrementCounter(&Counters::named_store_global_inline, 1, r1, r3); - // Check that the map of the global has not changed. __ ldr(r1, MemOperand(sp, 0 * kPointerSize)); __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset)); @@ -963,11 +966,11 @@ Object* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object, __ mov(r2, Operand(Handle<JSGlobalPropertyCell>(cell))); __ str(r0, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset)); + __ IncrementCounter(&Counters::named_store_global_inline, 1, r1, r3); __ Ret(); // Handle store cache miss. __ bind(&miss); - __ DecrementCounter(&Counters::named_store_global_inline, 1, r1, r3); __ IncrementCounter(&Counters::named_store_global_inline_miss, 1, r1, r3); Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss)); __ Jump(ic, RelocInfo::CODE_TARGET); @@ -1054,9 +1057,11 @@ Object* LoadStubCompiler::CompileLoadInterceptor(JSObject* object, __ ldr(r0, MemOperand(sp, 0)); + LookupResult lookup; + holder->LocalLookupRealNamedProperty(name, &lookup); GenerateLoadInterceptor(object, holder, - holder->InterceptorPropertyLookupHint(name), + &lookup, r0, r2, r3, @@ -1083,8 +1088,6 @@ Object* LoadStubCompiler::CompileLoadGlobal(JSObject* object, // ----------------------------------- Label miss; - __ IncrementCounter(&Counters::named_load_global_inline, 1, r1, r3); - // Get the receiver from the stack. __ ldr(r1, MemOperand(sp, 0 * kPointerSize)); @@ -1109,10 +1112,10 @@ Object* LoadStubCompiler::CompileLoadGlobal(JSObject* object, __ b(eq, &miss); } + __ IncrementCounter(&Counters::named_load_global_inline, 1, r1, r3); __ Ret(); __ bind(&miss); - __ DecrementCounter(&Counters::named_load_global_inline, 1, r1, r3); __ IncrementCounter(&Counters::named_load_global_inline_miss, 1, r1, r3); GenerateLoadMiss(masm(), Code::LOAD_IC); @@ -1121,8 +1124,6 @@ Object* LoadStubCompiler::CompileLoadGlobal(JSObject* object, } -// TODO(1224671): IC stubs for keyed loads have not been implemented -// for ARM. Object* KeyedLoadStubCompiler::CompileLoadField(String* name, JSObject* receiver, JSObject* holder, @@ -1217,9 +1218,11 @@ Object* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver, __ cmp(r2, Operand(Handle<String>(name))); __ b(ne, &miss); + LookupResult lookup; + holder->LocalLookupRealNamedProperty(name, &lookup); GenerateLoadInterceptor(receiver, holder, - Smi::FromInt(JSObject::kLookupInHolder), + &lookup, r0, r2, r3, diff --git a/deps/v8/src/ast.cc b/deps/v8/src/ast.cc index d8a323267d..2b6074200f 100644 --- a/deps/v8/src/ast.cc +++ b/deps/v8/src/ast.cc @@ -51,7 +51,7 @@ CallEval CallEval::sentinel_(NULL, NULL, 0); if (v->CheckStackOverflow()) return; \ v->Visit##type(this); \ } -NODE_LIST(DECL_ACCEPT) +AST_NODE_LIST(DECL_ACCEPT) #undef DECL_ACCEPT diff --git a/deps/v8/src/ast.h b/deps/v8/src/ast.h index 64d61cca35..406d43d056 100644 --- a/deps/v8/src/ast.h +++ b/deps/v8/src/ast.h @@ -53,9 +53,8 @@ namespace internal { // Nodes of the abstract syntax tree. Only concrete classes are // enumerated here. 
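The ast.h hunk that follows renames Node to AstNode and splits the monolithic NODE_LIST X-macro into STATEMENT_NODE_LIST and EXPRESSION_NODE_LIST. A simplified sketch of the X-macro technique these lists rely on, using demo names rather than the real node list:

// An X-macro list applies a caller-supplied macro V to each element.
#define DEMO_NODE_LIST(V) \
  V(Block)                \
  V(IfStatement)

// Generate forward declarations from the list ...
#define DEF_FORWARD_DECLARATION(type) class type;
DEMO_NODE_LIST(DEF_FORWARD_DECLARATION)
#undef DEF_FORWARD_DECLARATION

// ... and one pure virtual visit method per node type from the same
// list, so the set of node types is maintained in exactly one place.
class DemoAstVisitor {
 public:
#define DEF_VISIT(type) virtual void Visit##type(type* node) = 0;
  DEMO_NODE_LIST(DEF_VISIT)
#undef DEF_VISIT
};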
-#define NODE_LIST(V) \ +#define STATEMENT_NODE_LIST(V) \ V(Block) \ - V(Declaration) \ V(ExpressionStatement) \ V(EmptyStatement) \ V(IfStatement) \ @@ -69,7 +68,9 @@ namespace internal { V(ForInStatement) \ V(TryCatch) \ V(TryFinally) \ - V(DebuggerStatement) \ + V(DebuggerStatement) + +#define EXPRESSION_NODE_LIST(V) \ V(FunctionLiteral) \ V(FunctionBoilerplateLiteral) \ V(Conditional) \ @@ -93,13 +94,17 @@ namespace internal { V(CompareOperation) \ V(ThisFunction) +#define AST_NODE_LIST(V) \ + V(Declaration) \ + STATEMENT_NODE_LIST(V) \ + EXPRESSION_NODE_LIST(V) // Forward declarations class TargetCollector; class MaterializedLiteral; #define DEF_FORWARD_DECLARATION(type) class type; -NODE_LIST(DEF_FORWARD_DECLARATION) +AST_NODE_LIST(DEF_FORWARD_DECLARATION) #undef DEF_FORWARD_DECLARATION @@ -108,10 +113,10 @@ NODE_LIST(DEF_FORWARD_DECLARATION) typedef ZoneList<Handle<String> > ZoneStringList; -class Node: public ZoneObject { +class AstNode: public ZoneObject { public: - Node(): statement_pos_(RelocInfo::kNoPosition) { } - virtual ~Node() { } + AstNode(): statement_pos_(RelocInfo::kNoPosition) { } + virtual ~AstNode() { } virtual void Accept(AstVisitor* v) = 0; // Type testing & conversion. @@ -143,7 +148,7 @@ class Node: public ZoneObject { }; -class Statement: public Node { +class Statement: public AstNode { public: virtual Statement* AsStatement() { return this; } virtual ReturnStatement* AsReturnStatement() { return NULL; } @@ -152,7 +157,7 @@ class Statement: public Node { }; -class Expression: public Node { +class Expression: public AstNode { public: virtual Expression* AsExpression() { return this; } @@ -240,7 +245,7 @@ class Block: public BreakableStatement { }; -class Declaration: public Node { +class Declaration: public AstNode { public: Declaration(VariableProxy* proxy, Variable::Mode mode, FunctionLiteral* fun) : proxy_(proxy), @@ -523,7 +528,7 @@ class IfStatement: public Statement { // NOTE: TargetCollectors are represented as nodes to fit in the target // stack in the compiler; this should probably be reworked. -class TargetCollector: public Node { +class TargetCollector: public AstNode { public: explicit TargetCollector(ZoneList<BreakTarget*>* targets) : targets_(targets) { @@ -1678,7 +1683,7 @@ class AstVisitor BASE_EMBEDDED { virtual ~AstVisitor() { } // Dispatch - void Visit(Node* node) { node->Accept(this); } + void Visit(AstNode* node) { node->Accept(this); } // Iteration virtual void VisitStatements(ZoneList<Statement*>* statements); @@ -1702,7 +1707,7 @@ class AstVisitor BASE_EMBEDDED { // Individual nodes #define DEF_VISIT(type) \ virtual void Visit##type(type* node) = 0; - NODE_LIST(DEF_VISIT) + AST_NODE_LIST(DEF_VISIT) #undef DEF_VISIT private: diff --git a/deps/v8/src/bootstrapper.cc b/deps/v8/src/bootstrapper.cc index ad5396ec62..a2c45626be 100644 --- a/deps/v8/src/bootstrapper.cc +++ b/deps/v8/src/bootstrapper.cc @@ -47,14 +47,10 @@ namespace internal { // generate an index for each native JS file. class SourceCodeCache BASE_EMBEDDED { public: - explicit SourceCodeCache(Script::Type type): type_(type) { } + explicit SourceCodeCache(Script::Type type): type_(type), cache_(NULL) { } void Initialize(bool create_heap_objects) { - if (create_heap_objects) { - cache_ = Heap::empty_fixed_array(); - } else { - cache_ = NULL; - } + cache_ = create_heap_objects ? 
Heap::empty_fixed_array() : NULL; } void Iterate(ObjectVisitor* v) { @@ -1107,12 +1103,6 @@ bool Genesis::InstallNatives() { global_context()->set_empty_script(*script); } -#ifdef V8_HOST_ARCH_64_BIT - // TODO(X64): Remove this when inline caches work. - FLAG_use_ic = false; -#endif // V8_HOST_ARCH_64_BIT - - if (FLAG_natives_file == NULL) { // Without natives file, install default natives. for (int i = Natives::GetDelayCount(); diff --git a/deps/v8/src/builtins.cc b/deps/v8/src/builtins.cc index 0648e54dc2..1ea0245a72 100644 --- a/deps/v8/src/builtins.cc +++ b/deps/v8/src/builtins.cc @@ -87,18 +87,34 @@ static inline Object* __builtin_arg__(int n, int argc, Object** argv) { } -// TODO(1238487): Get rid of this function that determines if the -// builtin is called as a constructor. This may be a somewhat slow -// operation due to the stack frame iteration. static inline bool CalledAsConstructor() { +#ifdef DEBUG + // Calculate the result using a full stack frame iterator and check + // that the state of the stack is as we assume it to be in the + // code below. StackFrameIterator it; ASSERT(it.frame()->is_exit()); it.Advance(); StackFrame* frame = it.frame(); - return frame->is_construct(); + bool reference_result = frame->is_construct(); +#endif + Address fp = Top::c_entry_fp(Top::GetCurrentThread()); + // Because we know fp points to an exit frame we can use the relevant + // part of ExitFrame::ComputeCallerState directly. + const int kCallerOffset = ExitFrameConstants::kCallerFPOffset; + Address caller_fp = Memory::Address_at(fp + kCallerOffset); + // This inlines the part of StackFrame::ComputeType that grabs the + // type of the current frame. Note that StackFrame::ComputeType + // has been specialized for each architecture so if any one of them + // changes this code has to be changed as well. + const int kMarkerOffset = StandardFrameConstants::kMarkerOffset; + const Smi* kConstructMarker = Smi::FromInt(StackFrame::CONSTRUCT); + Object* marker = Memory::Object_at(caller_fp + kMarkerOffset); + bool result = (marker == kConstructMarker); + ASSERT_EQ(result, reference_result); + return result; } - // ---------------------------------------------------------------------------- diff --git a/deps/v8/src/codegen.cc b/deps/v8/src/codegen.cc index b7297d7ce8..7a4bb12ed0 100644 --- a/deps/v8/src/codegen.cc +++ b/deps/v8/src/codegen.cc @@ -496,7 +496,7 @@ void CodeGenerator::CodeForReturnPosition(FunctionLiteral* fun) { } -void CodeGenerator::CodeForStatementPosition(Node* node) { +void CodeGenerator::CodeForStatementPosition(AstNode* node) { if (FLAG_debug_info) { int pos = node->statement_pos(); if (pos != RelocInfo::kNoPosition) { diff --git a/deps/v8/src/compilation-cache.cc b/deps/v8/src/compilation-cache.cc index 0951af1390..ec5b39c2fb 100644 --- a/deps/v8/src/compilation-cache.cc +++ b/deps/v8/src/compilation-cache.cc @@ -37,10 +37,17 @@ namespace internal { static const int kSubCacheCount = 4; // The number of generations for each sub cache. +#if defined(ANDROID) +static const int kScriptGenerations = 1; +static const int kEvalGlobalGenerations = 1; +static const int kEvalContextualGenerations = 1; +static const int kRegExpGenerations = 1; +#else static const int kScriptGenerations = 5; static const int kEvalGlobalGenerations = 2; static const int kEvalContextualGenerations = 2; static const int kRegExpGenerations = 2; +#endif // Initial size of each compilation cache table allocated.
static const int kInitialCacheSize = 64; @@ -56,6 +63,8 @@ class CompilationSubCache { tables_ = NewArray<Object*>(generations); } + ~CompilationSubCache() { DeleteArray(tables_); } + // Get the compilation cache tables for a specific generation. Handle<CompilationCacheTable> GetTable(int generation); diff --git a/deps/v8/src/debug-agent.cc b/deps/v8/src/debug-agent.cc index 62cc251ed1..3dba53a435 100644 --- a/deps/v8/src/debug-agent.cc +++ b/deps/v8/src/debug-agent.cc @@ -254,8 +254,8 @@ SmartPointer<char> DebuggerAgentUtil::ReceiveMessage(const Socket* conn) { // Check that key is Content-Length. if (strcmp(key, kContentLength) == 0) { - // Get the content length value if within a sensible range. - if (strlen(value) > 7) { + // Get the content length value if present and within a sensible range. + if (value == NULL || strlen(value) > 7) { return SmartPointer<char>(); } for (int i = 0; value[i] != '\0'; i++) { diff --git a/deps/v8/src/debug.cc b/deps/v8/src/debug.cc index 52be9301ff..18536f5c7a 100644 --- a/deps/v8/src/debug.cc +++ b/deps/v8/src/debug.cc @@ -334,8 +334,11 @@ void BreakLocationIterator::PrepareStepIn() { rinfo()->set_target_address(stub->entry()); } } else { - // Step in through constructs call requires no changes to the running code. - ASSERT(RelocInfo::IsConstructCall(rmode())); + // Step in through construct call requires no changes to the running code. + // Step in through getters/setters should already be prepared as well + // because caller of this function (Debug::PrepareStep) is expected to + // flood the top frame's function with one shot breakpoints. + ASSERT(RelocInfo::IsConstructCall(rmode()) || code->is_inline_cache_stub()); } } @@ -1087,10 +1090,18 @@ void Debug::PrepareStep(StepAction step_action, int step_count) { // Compute whether or not the target is a call target. bool is_call_target = false; + bool is_load_or_store = false; + bool is_inline_cache_stub = false; if (RelocInfo::IsCodeTarget(it.rinfo()->rmode())) { Address target = it.rinfo()->target_address(); Code* code = Code::GetCodeFromTargetAddress(target); - if (code->is_call_stub()) is_call_target = true; + if (code->is_call_stub()) { + is_call_target = true; + } + if (code->is_inline_cache_stub()) { + is_inline_cache_stub = true; + is_load_or_store = !is_call_target; + } } // If this is the last break code target step out is the only possibility. @@ -1103,8 +1114,8 @@ void Debug::PrepareStep(StepAction step_action, int step_count) { JSFunction* function = JSFunction::cast(frames_it.frame()->function()); FloodWithOneShot(Handle<SharedFunctionInfo>(function->shared())); } - } else if (!(is_call_target || RelocInfo::IsConstructCall(it.rmode())) || - step_action == StepNext || step_action == StepMin) { + } else if (!(is_inline_cache_stub || RelocInfo::IsConstructCall(it.rmode())) + || step_action == StepNext || step_action == StepMin) { // Step next or step min. // Fill the current function with one-shot break points. @@ -1117,9 +1128,20 @@ void Debug::PrepareStep(StepAction step_action, int step_count) { } else { // Fill the current function with one-shot break points even for step in on // a call target as the function called might be a native function for - // which step in will not stop. + // which step in will not stop. It also prepares for stepping in + // getters/setters. FloodWithOneShot(shared); + if (is_load_or_store) { + // Remember source position and frame to handle step in getter/setter. 
If + // there is a custom getter/setter it will be handled in + // Object::Get/SetPropertyWithCallback, otherwise the step action will be + // propagated on the next Debug::Break. + thread_local_.last_statement_position_ = + debug_info->code()->SourceStatementPosition(frame->pc()); + thread_local_.last_fp_ = frame->fp(); + } + // Step in or Step in min it.PrepareStepIn(); ActivateStepIn(frame); @@ -1279,7 +1301,7 @@ void Debug::HandleStepIn(Handle<JSFunction> function, // step into was requested. if (fp == Debug::step_in_fp()) { // Don't allow step into functions in the native context. - if (function->context()->global() != Top::context()->builtins()) { + if (!function->IsBuiltin()) { if (function->shared()->code() == Builtins::builtin(Builtins::FunctionApply) || function->shared()->code() == @@ -1288,7 +1310,8 @@ void Debug::HandleStepIn(Handle<JSFunction> function, // function to be called and not the code for Builtins::FunctionApply or // Builtins::FunctionCall. The receiver of call/apply is the target // function. - if (!holder.is_null() && holder->IsJSFunction()) { + if (!holder.is_null() && holder->IsJSFunction() && + !JSFunction::cast(*holder)->IsBuiltin()) { Handle<SharedFunctionInfo> shared_info( JSFunction::cast(*holder)->shared()); Debug::FloodWithOneShot(shared_info); diff --git a/deps/v8/src/execution.cc b/deps/v8/src/execution.cc index adc18725e4..eb68f46bfd 100644 --- a/deps/v8/src/execution.cc +++ b/deps/v8/src/execution.cc @@ -164,19 +164,16 @@ Handle<Object> Execution::GetFunctionDelegate(Handle<Object> object) { // If you return a function from here, it will be called when an // attempt is made to call the given object as a function. - // The regular expression code here is really meant more as an - // example than anything else. KJS does not support calling regular - // expressions as functions, but SpiderMonkey does. - if (FLAG_call_regexp) { - bool is_regexp = - object->IsHeapObject() && - (HeapObject::cast(*object)->map()->constructor() == - *Top::regexp_function()); - - if (is_regexp) { - Handle<String> exec = Factory::exec_symbol(); - return Handle<Object>(object->GetProperty(*exec)); - } + // Regular expressions can be called as functions in both Firefox + // and Safari so we allow it too. + bool is_regexp = + object->IsHeapObject() && + (HeapObject::cast(*object)->map()->constructor() == + *Top::regexp_function()); + + if (is_regexp) { + Handle<String> exec = Factory::exec_symbol(); + return Handle<Object>(object->GetProperty(*exec)); } // Objects created through the API can have an instance-call handler @@ -590,6 +587,23 @@ Object* Execution::DebugBreakHelper() { return Heap::undefined_value(); } + { + JavaScriptFrameIterator it; + ASSERT(!it.done()); + Object* fun = it.frame()->function(); + if (fun && fun->IsJSFunction()) { + // Don't stop in builtin functions. + if (JSFunction::cast(fun)->IsBuiltin()) { + return Heap::undefined_value(); + } + GlobalObject* global = JSFunction::cast(fun)->context()->global(); + // Don't stop in debugger functions. + if (Debug::IsDebugGlobal(global)) { + return Heap::undefined_value(); + } + } + } + // Collect the break state before clearing the flags. 
bool debug_command_only = StackGuard::IsDebugCommand() && !StackGuard::IsDebugBreak(); diff --git a/deps/v8/src/factory.cc b/deps/v8/src/factory.cc index 1045a4c3c8..36554df732 100644 --- a/deps/v8/src/factory.cc +++ b/deps/v8/src/factory.cc @@ -210,6 +210,16 @@ Handle<ByteArray> Factory::NewByteArray(int length, PretenureFlag pretenure) { } +Handle<PixelArray> Factory::NewPixelArray(int length, + uint8_t* external_pointer, + PretenureFlag pretenure) { + ASSERT(0 <= length); + CALL_HEAP_FUNCTION(Heap::AllocatePixelArray(length, + external_pointer, + pretenure), PixelArray); +} + + Handle<Map> Factory::NewMap(InstanceType type, int instance_size) { CALL_HEAP_FUNCTION(Heap::AllocateMap(type, instance_size), Map); } diff --git a/deps/v8/src/factory.h b/deps/v8/src/factory.h index 0afdd76a49..4db5d4e71b 100644 --- a/deps/v8/src/factory.h +++ b/deps/v8/src/factory.h @@ -154,6 +154,10 @@ class Factory : public AllStatic { static Handle<ByteArray> NewByteArray(int length, PretenureFlag pretenure = NOT_TENURED); + static Handle<PixelArray> NewPixelArray(int length, + uint8_t* external_pointer, + PretenureFlag pretenure = NOT_TENURED); + static Handle<Map> NewMap(InstanceType type, int instance_size); static Handle<JSObject> NewFunctionPrototype(Handle<JSFunction> function); diff --git a/deps/v8/src/flag-definitions.h b/deps/v8/src/flag-definitions.h index 9c696ed661..b0770b0028 100644 --- a/deps/v8/src/flag-definitions.h +++ b/deps/v8/src/flag-definitions.h @@ -144,9 +144,6 @@ DEFINE_bool(debugger_auto_break, false, "automatically set the debug break flag when debugger commands are " "in the queue (experimental)") -// execution.cc -DEFINE_bool(call_regexp, false, "allow calls to RegExp objects") - // frames.cc DEFINE_int(max_stack_trace_source_length, 300, "maximum length of function source code printed in a stack trace.") @@ -158,6 +155,8 @@ DEFINE_bool(gc_global, false, "always perform global GCs") DEFINE_int(gc_interval, -1, "garbage collect after <n> allocations") DEFINE_bool(trace_gc, false, "print one trace line following each garbage collection") +DEFINE_bool(trace_gc_verbose, false, + "print more details following each garbage collection") DEFINE_bool(collect_maps, true, "garbage collect maps from which no objects can be reached") diff --git a/deps/v8/src/frame-element.cc b/deps/v8/src/frame-element.cc new file mode 100644 index 0000000000..e6bc2eafdb --- /dev/null +++ b/deps/v8/src/frame-element.cc @@ -0,0 +1,45 @@ +// Copyright 2009 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#include "v8.h" + +#include "frame-element.h" + +namespace v8 { +namespace internal { + +// ------------------------------------------------------------------------- +// FrameElement implementation. + + +FrameElement::ZoneObjectList* FrameElement::ConstantList() { + static ZoneObjectList list(10); + return &list; +} + + +} } // namespace v8::internal diff --git a/deps/v8/src/frame-element.h b/deps/v8/src/frame-element.h index 666aabb269..ccdecf1d66 100644 --- a/deps/v8/src/frame-element.h +++ b/deps/v8/src/frame-element.h @@ -91,10 +91,7 @@ class FrameElement BASE_EMBEDDED { // this table of handles to the actual constants. typedef ZoneList<Handle<Object> > ZoneObjectList; - static ZoneObjectList* ConstantList() { - static ZoneObjectList list(10); - return &list; - } + static ZoneObjectList* ConstantList(); // Clear the constants indirection table. static void ClearConstantList() { diff --git a/deps/v8/src/globals.h b/deps/v8/src/globals.h index 44bd527574..195a2e2fc3 100644 --- a/deps/v8/src/globals.h +++ b/deps/v8/src/globals.h @@ -207,6 +207,7 @@ class HeapObject; class IC; class InterceptorInfo; class IterationStatement; +class Array; class JSArray; class JSFunction; class JSObject; diff --git a/deps/v8/src/handles.cc b/deps/v8/src/handles.cc index 510ea95b8c..6345d41815 100644 --- a/deps/v8/src/handles.cc +++ b/deps/v8/src/handles.cc @@ -164,8 +164,11 @@ void SetExpectedNofPropertiesFromEstimate(Handle<JSFunction> func, void NormalizeProperties(Handle<JSObject> object, - PropertyNormalizationMode mode) { - CALL_HEAP_FUNCTION_VOID(object->NormalizeProperties(mode)); + PropertyNormalizationMode mode, + int expected_additional_properties) { + CALL_HEAP_FUNCTION_VOID(object->NormalizeProperties( + mode, + expected_additional_properties)); } @@ -341,6 +344,14 @@ Handle<String> SubString(Handle<String> str, int start, int end) { Handle<Object> SetElement(Handle<JSObject> object, uint32_t index, Handle<Object> value) { + if (object->HasPixelElements()) { + if (!value->IsSmi() && !value->IsHeapNumber() && !value->IsUndefined()) { + bool has_exception; + Handle<Object> number = Execution::ToNumber(value, &has_exception); + if (has_exception) return Handle<Object>(); + value = number; + } + } CALL_HEAP_FUNCTION(object->SetElement(index, *value), Object); } @@ -643,13 +654,17 @@ bool CompileLazyInLoop(Handle<JSFunction> function, ClearExceptionFlag flag) { OptimizedObjectForAddingMultipleProperties:: OptimizedObjectForAddingMultipleProperties(Handle<JSObject> object, + int expected_additional_properties, bool condition) { object_ = object; if (condition && object_->HasFastProperties()) { // Normalize the properties of object to avoid n^2 behavior - // when extending the object multiple properties. + // when extending the object multiple properties. Indicate the number of + // properties to be added. 
unused_property_fields_ = object->map()->unused_property_fields(); - NormalizeProperties(object_, KEEP_INOBJECT_PROPERTIES); + NormalizeProperties(object_, + KEEP_INOBJECT_PROPERTIES, + expected_additional_properties); has_been_transformed_ = true; } else { diff --git a/deps/v8/src/handles.h b/deps/v8/src/handles.h index a86dc9632c..ba2694f509 100644 --- a/deps/v8/src/handles.h +++ b/deps/v8/src/handles.h @@ -181,7 +181,8 @@ class HandleScope { // of space or encountering an internal error. void NormalizeProperties(Handle<JSObject> object, - PropertyNormalizationMode mode); + PropertyNormalizationMode mode, + int expected_additional_properties); void NormalizeElements(Handle<JSObject> object); void TransformToFastProperties(Handle<JSObject> object, int unused_property_fields); @@ -336,6 +337,7 @@ class NoHandleAllocation BASE_EMBEDDED { class OptimizedObjectForAddingMultipleProperties BASE_EMBEDDED { public: OptimizedObjectForAddingMultipleProperties(Handle<JSObject> object, + int expected_property_count, bool condition = true); ~OptimizedObjectForAddingMultipleProperties(); private: diff --git a/deps/v8/src/hashmap.cc b/deps/v8/src/hashmap.cc index b7173127ec..3c4e5cdc60 100644 --- a/deps/v8/src/hashmap.cc +++ b/deps/v8/src/hashmap.cc @@ -194,7 +194,10 @@ HashMap::Entry* HashMap::Probe(void* key, uint32_t hash) { void HashMap::Initialize(uint32_t capacity) { ASSERT(IsPowerOf2(capacity)); map_ = reinterpret_cast<Entry*>(allocator_->New(capacity * sizeof(Entry))); - if (map_ == NULL) V8::FatalProcessOutOfMemory("HashMap::Initialize"); + if (map_ == NULL) { + V8::FatalProcessOutOfMemory("HashMap::Initialize"); + return; + } capacity_ = capacity; Clear(); } diff --git a/deps/v8/src/heap-inl.h b/deps/v8/src/heap-inl.h index 36c6f4bfa2..d27f14f1af 100644 --- a/deps/v8/src/heap-inl.h +++ b/deps/v8/src/heap-inl.h @@ -228,6 +228,31 @@ void Heap::ScavengeObject(HeapObject** p, HeapObject* object) { } +int Heap::AdjustAmountOfExternalAllocatedMemory(int change_in_bytes) { + ASSERT(HasBeenSetup()); + int amount = amount_of_external_allocated_memory_ + change_in_bytes; + if (change_in_bytes >= 0) { + // Avoid overflow. + if (amount > amount_of_external_allocated_memory_) { + amount_of_external_allocated_memory_ = amount; + } + int amount_since_last_global_gc = + amount_of_external_allocated_memory_ - + amount_of_external_allocated_memory_at_last_global_gc_; + if (amount_since_last_global_gc > external_allocation_limit_) { + CollectAllGarbage(); + } + } else { + // Avoid underflow. + if (amount >= 0) { + amount_of_external_allocated_memory_ = amount; + } + } + ASSERT(amount_of_external_allocated_memory_ >= 0); + return amount_of_external_allocated_memory_; +} + + void Heap::SetLastScriptId(Object* last_script_id) { roots_[kLastScriptIdRootIndex] = last_script_id; } diff --git a/deps/v8/src/heap.cc b/deps/v8/src/heap.cc index 0af3d90efa..ebd0e1e656 100644 --- a/deps/v8/src/heap.cc +++ b/deps/v8/src/heap.cc @@ -69,7 +69,7 @@ int Heap::amount_of_external_allocated_memory_at_last_global_gc_ = 0; // semispace_size_ should be a power of 2 and old_generation_size_ should be // a multiple of Page::kPageSize. -#if V8_TARGET_ARCH_ARM +#if defined(ANDROID) int Heap::semispace_size_ = 512*KB; int Heap::old_generation_size_ = 128*MB; int Heap::initial_semispace_size_ = 128*KB; @@ -85,8 +85,8 @@ GCCallback Heap::global_gc_epilogue_callback_ = NULL; // Variables set based on semispace_size_ and old_generation_size_ in // ConfigureHeap. int Heap::young_generation_size_ = 0; // Will be 2 * semispace_size_. 
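The heap-inl.h hunk above implements the external allocation limit mentioned in the ChangeLog: external memory registered since the last global GC is tracked, and exceeding external_allocation_limit_ forces a full collection. A sketch of how an embedder might report such memory through the public v8::V8 method of the same name (the wrapper class here is hypothetical):

#include <v8.h>

// Hypothetical embedder type: a small V8-visible wrapper owning a large
// buffer that the garbage collector cannot see on its own.
class ExternalBuffer {
 public:
  explicit ExternalBuffer(int size) : size_(size), data_(new char[size]) {
    v8::V8::AdjustAmountOfExternalAllocatedMemory(size_);
  }
  ~ExternalBuffer() {
    delete[] data_;
    v8::V8::AdjustAmountOfExternalAllocatedMemory(-size_);
  }
 private:
  int size_;
  char* data_;
};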
- int Heap::survived_since_last_expansion_ = 0; +int Heap::external_allocation_limit_ = 0; Heap::HeapState Heap::gc_state_ = NOT_IN_GC; @@ -207,6 +207,27 @@ void Heap::ReportStatisticsBeforeGC() { } +#if defined(ENABLE_LOGGING_AND_PROFILING) +void Heap::PrintShortHeapStatistics() { + if (!FLAG_trace_gc_verbose) return; + PrintF("Memory allocator, used: %8d, available: %8d\n", + MemoryAllocator::Size(), MemoryAllocator::Available()); + PrintF("New space, used: %8d, available: %8d\n", + Heap::new_space_.Size(), new_space_.Available()); + PrintF("Old pointers, used: %8d, available: %8d\n", + old_pointer_space_->Size(), old_pointer_space_->Available()); + PrintF("Old data space, used: %8d, available: %8d\n", + old_data_space_->Size(), old_data_space_->Available()); + PrintF("Code space, used: %8d, available: %8d\n", + code_space_->Size(), code_space_->Available()); + PrintF("Map space, used: %8d, available: %8d\n", + map_space_->Size(), map_space_->Available()); + PrintF("Large object space, used: %8d, available: %8d\n", + lo_space_->Size(), lo_space_->Available()); +} +#endif + + // TODO(1238405): Combine the infrastructure for --heap-stats and // --log-gc to avoid the complicated preprocessor and flag testing. void Heap::ReportStatisticsAfterGC() { @@ -1166,10 +1187,14 @@ bool Heap::CreateInitialMaps() { set_undetectable_long_ascii_string_map(Map::cast(obj)); Map::cast(obj)->set_is_undetectable(); - obj = AllocateMap(BYTE_ARRAY_TYPE, Array::kAlignedSize); + obj = AllocateMap(BYTE_ARRAY_TYPE, ByteArray::kAlignedSize); if (obj->IsFailure()) return false; set_byte_array_map(Map::cast(obj)); + obj = AllocateMap(PIXEL_ARRAY_TYPE, PixelArray::kAlignedSize); + if (obj->IsFailure()) return false; + set_pixel_array_map(Map::cast(obj)); + obj = AllocateMap(CODE_TYPE, Code::kHeaderSize); if (obj->IsFailure()) return false; set_code_map(Map::cast(obj)); @@ -1386,6 +1411,12 @@ bool Heap::CreateInitialObjects() { if (obj->IsFailure()) return false; set_the_hole_value(obj); + obj = CreateOddball( + oddball_map(), "no_interceptor_result_sentinel", Smi::FromInt(-2)); + if (obj->IsFailure()) return false; + set_no_interceptor_result_sentinel(obj); + + // Allocate the empty string. obj = AllocateRawAsciiString(0, TENURED); if (obj->IsFailure()) return false; @@ -1412,13 +1443,15 @@ bool Heap::CreateInitialObjects() { if (obj->IsFailure()) return false; set_prototype_accessors(Proxy::cast(obj)); - // Allocate the code_stubs dictionary. - obj = NumberDictionary::Allocate(4); + // Allocate the code_stubs dictionary. The initial size is set to avoid + // expanding the dictionary during bootstrapping. + obj = NumberDictionary::Allocate(128); if (obj->IsFailure()) return false; set_code_stubs(NumberDictionary::cast(obj)); - // Allocate the non_monomorphic_cache used in stub-cache.cc - obj = NumberDictionary::Allocate(4); + // Allocate the non_monomorphic_cache used in stub-cache.cc. The initial size + // is set to avoid expanding the dictionary during bootstrapping. + obj = NumberDictionary::Allocate(64); if (obj->IsFailure()) return false; set_non_monomorphic_cache(NumberDictionary::cast(obj)); @@ -1555,8 +1588,7 @@ Object* Heap::NumberFromDouble(double value, PretenureFlag pretenure) { Object* Heap::AllocateProxy(Address proxy, PretenureFlag pretenure) { // Statically ensure that it is safe to allocate proxies in paged spaces. STATIC_ASSERT(Proxy::kSize <= Page::kMaxHeapObjectSize); - AllocationSpace space = - (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE; + AllocationSpace space = (pretenure == TENURED) ?
OLD_DATA_SPACE : NEW_SPACE; Object* result = Allocate(proxy_map(), space); if (result->IsFailure()) return result; @@ -1838,6 +1870,23 @@ void Heap::CreateFillerObjectAt(Address addr, int size) { } +Object* Heap::AllocatePixelArray(int length, + uint8_t* external_pointer, + PretenureFlag pretenure) { + AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE; + + Object* result = AllocateRaw(PixelArray::kAlignedSize, space, OLD_DATA_SPACE); + + if (result->IsFailure()) return result; + + reinterpret_cast<PixelArray*>(result)->set_map(pixel_array_map()); + reinterpret_cast<PixelArray*>(result)->set_length(length); + reinterpret_cast<PixelArray*>(result)->set_external_pointer(external_pointer); + + return result; +} + + Object* Heap::CreateCode(const CodeDesc& desc, ZoneScopeInfo* sinfo, Code::Flags flags, @@ -2056,6 +2105,11 @@ Object* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) { // properly initialized. ASSERT(map->instance_type() != JS_FUNCTION_TYPE); + // Both types of global objects should be allocated using + // AllocateGlobalObject to be properly initialized. + ASSERT(map->instance_type() != JS_GLOBAL_OBJECT_TYPE); + ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE); + // Allocate the backing storage for the properties. int prop_size = map->unused_property_fields() - map->inobject_properties(); Object* properties = AllocateFixedArray(prop_size, pretenure); @@ -2096,24 +2150,62 @@ Object* Heap::AllocateJSObject(JSFunction* constructor, Object* Heap::AllocateGlobalObject(JSFunction* constructor) { ASSERT(constructor->has_initial_map()); + Map* map = constructor->initial_map(); + // Make sure no field properties are described in the initial map. // This guarantees us that normalizing the properties does not // require us to change property values to JSGlobalPropertyCells. - ASSERT(constructor->initial_map()->NextFreePropertyIndex() == 0); + ASSERT(map->NextFreePropertyIndex() == 0); // Make sure we don't have a ton of pre-allocated slots in the // global objects. They will be unused once we normalize the object. - ASSERT(constructor->initial_map()->unused_property_fields() == 0); - ASSERT(constructor->initial_map()->inobject_properties() == 0); + ASSERT(map->unused_property_fields() == 0); + ASSERT(map->inobject_properties() == 0); + + // Initial size of the backing store to avoid resize of the storage during + // bootstrapping. The size differs between the JS global object and the + // builtins object. + int initial_size = map->instance_type() == JS_GLOBAL_OBJECT_TYPE ? 64 : 512; + + // Allocate a dictionary object for backing storage. + Object* obj = + StringDictionary::Allocate( + map->NumberOfDescribedProperties() * 2 + initial_size); + if (obj->IsFailure()) return obj; + StringDictionary* dictionary = StringDictionary::cast(obj); + + // The global object might be created from an object template with accessors. + // Fill these accessors into the dictionary. + DescriptorArray* descs = map->instance_descriptors(); + for (int i = 0; i < descs->number_of_descriptors(); i++) { + PropertyDetails details = descs->GetDetails(i); + ASSERT(details.type() == CALLBACKS); // Only accessors are expected.
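AllocatePixelArray() above stores only a length and a pointer; the pixel bytes themselves live outside the V8 heap, and stores into them are clamped to the 0..255 interval (the clamp is emitted by the keyed-store stub later in this patch). A standalone model of that storage contract, with hypothetical names:

  #include <stdint.h>

  struct PixelArrayModel {
    int length;
    uint8_t* external_pointer;  // caller-owned bytes outside the GC heap

    // Stores clamp to 0..255 rather than truncating modulo 256.
    void Set(int index, int value) {
      if (index < 0 || index >= length) return;  // the stub misses to runtime
      if (value < 0) value = 0;
      if (value > 255) value = 255;
      external_pointer[index] = static_cast<uint8_t>(value);
    }
  };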
+ PropertyDetails d = + PropertyDetails(details.attributes(), CALLBACKS, details.index()); + Object* value = descs->GetCallbacksObject(i); + value = Heap::AllocateJSGlobalPropertyCell(value); + if (value->IsFailure()) return value; + + Object* result = dictionary->Add(descs->GetKey(i), value, d); + if (result->IsFailure()) return result; + dictionary = StringDictionary::cast(result); + } - // Allocate the object based on the constructors initial map. - Object* result = AllocateJSObjectFromMap(constructor->initial_map(), TENURED); - if (result->IsFailure()) return result; + // Allocate the global object and initialize it with the backing store. + obj = Allocate(map, OLD_POINTER_SPACE); + if (obj->IsFailure()) return obj; + JSObject* global = JSObject::cast(obj); + InitializeJSObjectFromMap(global, dictionary, map); - // Normalize the result. - JSObject* global = JSObject::cast(result); - result = global->NormalizeProperties(CLEAR_INOBJECT_PROPERTIES); - if (result->IsFailure()) return result; + // Create a new map for the global object. + obj = map->CopyDropDescriptors(); + if (obj->IsFailure()) return obj; + Map* new_map = Map::cast(obj); + + // Setup the global object as a normalized object. + global->set_map(new_map); + global->map()->set_instance_descriptors(Heap::empty_descriptor_array()); + global->set_properties(dictionary); // Make sure result is a global object with properties in dictionary. ASSERT(global->IsGlobalObject()); @@ -2967,6 +3059,7 @@ bool Heap::ConfigureHeap(int semispace_size, int old_gen_size) { semispace_size_ = RoundUpToPowerOf2(semispace_size_); initial_semispace_size_ = Min(initial_semispace_size_, semispace_size_); young_generation_size_ = 2 * semispace_size_; + external_allocation_limit_ = 10 * semispace_size_; // The old generation is paged. old_generation_size_ = RoundUp(old_generation_size_, Page::kPageSize); @@ -3369,6 +3462,100 @@ void HeapIterator::reset() { } +#ifdef ENABLE_LOGGING_AND_PROFILING +namespace { + +// JSConstructorProfile is responsible for gathering and logging +// "constructor profile" of JS objects allocated on the heap. +// It is run during the garbage collection cycle, thus it doesn't need +// to use handles. +class JSConstructorProfile BASE_EMBEDDED { + public: + JSConstructorProfile() : zscope_(DELETE_ON_EXIT) {} + void CollectStats(JSObject* obj); + void PrintStats(); + // Used by ZoneSplayTree::ForEach. + void Call(String* name, const NumberAndSizeInfo& number_and_size); + private: + struct TreeConfig { + typedef String* Key; + typedef NumberAndSizeInfo Value; + static const Key kNoKey; + static const Value kNoValue; + // Strings are unique, so it is sufficient to compare their pointers. + static int Compare(const Key& a, const Key& b) { + return a == b ? 0 : (a < b ? -1 : 1); + } + }; + + typedef ZoneSplayTree<TreeConfig> JSObjectsInfoTree; + static int CalculateJSObjectNetworkSize(JSObject* obj); + + ZoneScope zscope_; + JSObjectsInfoTree js_objects_info_tree_; +}; + +const JSConstructorProfile::TreeConfig::Key + JSConstructorProfile::TreeConfig::kNoKey = NULL; +const JSConstructorProfile::TreeConfig::Value + JSConstructorProfile::TreeConfig::kNoValue; + + +int JSConstructorProfile::CalculateJSObjectNetworkSize(JSObject* obj) { + int size = obj->Size(); + // If 'properties' and 'elements' are non-empty (thus, non-shared), + // take their size into account.
+ if (FixedArray::cast(obj->properties())->length() != 0) { + size += obj->properties()->Size(); + } + if (FixedArray::cast(obj->elements())->length() != 0) { + size += obj->elements()->Size(); + } + return size; +} + + +void JSConstructorProfile::Call(String* name, + const NumberAndSizeInfo& number_and_size) { + SmartPointer<char> s_name; + if (name != NULL) { + s_name = name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL); + } + LOG(HeapSampleJSConstructorEvent(*s_name, + number_and_size.number(), + number_and_size.bytes())); +} + + +void JSConstructorProfile::CollectStats(JSObject* obj) { + String* constructor_func = NULL; + if (obj->map()->constructor()->IsJSFunction()) { + JSFunction* constructor = JSFunction::cast(obj->map()->constructor()); + SharedFunctionInfo* sfi = constructor->shared(); + String* name = String::cast(sfi->name()); + constructor_func = name->length() > 0 ? name : sfi->inferred_name(); + } else if (obj->IsJSFunction()) { + constructor_func = Heap::function_class_symbol(); + } + JSObjectsInfoTree::Locator loc; + if (!js_objects_info_tree_.Find(constructor_func, &loc)) { + js_objects_info_tree_.Insert(constructor_func, &loc); + } + NumberAndSizeInfo number_and_size = loc.value(); + number_and_size.increment_number(1); + number_and_size.increment_bytes(CalculateJSObjectNetworkSize(obj)); + loc.set_value(number_and_size); +} + + +void JSConstructorProfile::PrintStats() { + js_objects_info_tree_.ForEach(this); +} + +} // namespace +#endif + + // // HeapProfiler class implementation. // @@ -3385,15 +3572,22 @@ void HeapProfiler::CollectStats(HeapObject* obj, HistogramInfo* info) { #ifdef ENABLE_LOGGING_AND_PROFILING void HeapProfiler::WriteSample() { LOG(HeapSampleBeginEvent("Heap", "allocated")); + LOG(HeapSampleStats( + "Heap", "allocated", Heap::Capacity(), Heap::SizeOfObjects())); HistogramInfo info[LAST_TYPE+1]; #define DEF_TYPE_NAME(name) info[name].set_name(#name); INSTANCE_TYPE_LIST(DEF_TYPE_NAME) #undef DEF_TYPE_NAME + JSConstructorProfile js_cons_profile; HeapIterator iterator; while (iterator.has_next()) { - CollectStats(iterator.next(), info); + HeapObject* obj = iterator.next(); + CollectStats(obj, info); + if (obj->IsJSObject()) { + js_cons_profile.CollectStats(JSObject::cast(obj)); + } } // Lump all the string types together. 
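The splay tree above reduces to a simple aggregation: for each constructor name, count the instances and sum their sizes. A stand-in using std::map, with made-up sample data:

  #include <cstdio>
  #include <map>
  #include <string>

  struct NumberAndSize { int number; int bytes; };

  int main() {
    std::map<std::string, NumberAndSize> profile;
    // Each entry stands for one JSObject visited by the heap iterator.
    const char* names[] = { "Point", "Point", "(Function)" };
    const int sizes[] = { 24, 24, 64 };
    for (int i = 0; i < 3; ++i) {
      NumberAndSize& entry = profile[names[i]];  // value-initialized to {0, 0}
      entry.number += 1;
      entry.bytes += sizes[i];
    }
    for (std::map<std::string, NumberAndSize>::const_iterator it =
             profile.begin(); it != profile.end(); ++it) {
      std::printf("%s: %d objects, %d bytes\n",
                  it->first.c_str(), it->second.number, it->second.bytes);
    }
    return 0;
  }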
@@ -3415,6 +3609,8 @@ void HeapProfiler::WriteSample() { } } + js_cons_profile.PrintStats(); + LOG(HeapSampleEndEvent("Heap", "allocated")); } @@ -3620,6 +3816,10 @@ GCTracer::~GCTracer() { CollectorString(), start_size_, SizeOfHeapObjects(), static_cast<int>(OS::TimeCurrentMillis() - start_time_)); + +#if defined(ENABLE_LOGGING_AND_PROFILING) + Heap::PrintShortHeapStatistics(); +#endif } diff --git a/deps/v8/src/heap.h b/deps/v8/src/heap.h index f3959882df..69d9ff0013 100644 --- a/deps/v8/src/heap.h +++ b/deps/v8/src/heap.h @@ -94,6 +94,7 @@ namespace internal { UndetectableMediumAsciiStringMap) \ V(Map, undetectable_long_ascii_string_map, UndetectableLongAsciiStringMap) \ V(Map, byte_array_map, ByteArrayMap) \ + V(Map, pixel_array_map, PixelArrayMap) \ V(Map, fixed_array_map, FixedArrayMap) \ V(Map, hash_table_map, HashTableMap) \ V(Map, context_map, ContextMap) \ @@ -109,6 +110,7 @@ namespace internal { V(Map, two_pointer_filler_map, TwoPointerFillerMap) \ V(Object, nan_value, NanValue) \ V(Object, undefined_value, UndefinedValue) \ + V(Object, no_interceptor_result_sentinel, NoInterceptorResultSentinel) \ V(Object, minus_zero_value, MinusZeroValue) \ V(Object, null_value, NullValue) \ V(Object, true_value, TrueValue) \ @@ -418,6 +420,14 @@ class Heap : public AllStatic { // Please note this does not perform a garbage collection. static Object* AllocateByteArray(int length); + // Allocate a pixel array of the specified length. + // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation + // failed. + // Please note this does not perform a garbage collection. + static Object* AllocatePixelArray(int length, + uint8_t* external_pointer, + PretenureFlag pretenure); + // Allocate a tenured JS global property cell. // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation // failed. @@ -733,6 +743,11 @@ class Heap : public AllStatic { static void ZapFromSpace(); #endif +#if defined(ENABLE_LOGGING_AND_PROFILING) + // Print short heap statistics. + static void PrintShortHeapStatistics(); +#endif + // Makes a new symbol object // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation // failed. @@ -741,7 +756,7 @@ class Heap : public AllStatic { static Object* CreateSymbol(String* str); // Write barrier support for address[offset] = o. - inline static void RecordWrite(Address address, int offset); + static inline void RecordWrite(Address address, int offset); // Given an address occupied by a live code object, return that object. static Object* FindCodeObject(Address a); @@ -797,22 +812,7 @@ class Heap : public AllStatic { // Adjusts the amount of registered external memory. // Returns the adjusted value. - static int AdjustAmountOfExternalAllocatedMemory(int change_in_bytes) { - int amount = amount_of_external_allocated_memory_ + change_in_bytes; - if (change_in_bytes >= 0) { - // Avoid overflow. - if (amount > amount_of_external_allocated_memory_) { - amount_of_external_allocated_memory_ = amount; - } - } else { - // Avoid underflow. - if (amount >= 0) { - amount_of_external_allocated_memory_ = amount; - } - } - ASSERT(amount_of_external_allocated_memory_ >= 0); - return amount_of_external_allocated_memory_; - } + static inline int AdjustAmountOfExternalAllocatedMemory(int change_in_bytes); // Allocate uninitialized fixed array (pretenure == NON_TENURE). static Object* AllocateRawFixedArray(int length); @@ -896,6 +896,10 @@ class Heap : public AllStatic { // every allocation in large object space.
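Both of the features declared above are embedder-facing. A hedged sketch of typical embedder usage, assuming the public v8.h entry points of this era, v8::Object::SetIndexedPropertiesToPixelData() and v8::V8::AdjustAmountOfExternalAllocatedMemory(); check the actual header before relying on the exact signatures:

  #include <stdint.h>
  #include <v8.h>

  void AttachPixelBacking(v8::Handle<v8::Object> object,
                          uint8_t* pixels, int length) {
    // Morph the object so its indexed properties read and write the
    // external byte buffer, with stores clamped to 0..255.
    object->SetIndexedPropertiesToPixelData(pixels, length);
    // Register the external allocation so the collector can force a
    // global GC once the external allocation limit is exceeded.
    v8::V8::AdjustAmountOfExternalAllocatedMemory(length);
  }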
static int old_gen_allocation_limit_; + // Limit on the amount of externally allocated memory allowed + // between global GCs. If reached a global GC is forced. + static int external_allocation_limit_; + // The amount of external memory registered through the API kept alive // by global handles static int amount_of_external_allocated_memory_; @@ -1225,7 +1229,7 @@ class KeyedLookupCache { // Clear the cache. static void Clear(); private: - inline static int Hash(Map* map, String* name); + static inline int Hash(Map* map, String* name); static const int kLength = 64; struct Key { Map* map; diff --git a/deps/v8/src/ia32/codegen-ia32.cc b/deps/v8/src/ia32/codegen-ia32.cc index 6d1dc2dee0..664827719b 100644 --- a/deps/v8/src/ia32/codegen-ia32.cc +++ b/deps/v8/src/ia32/codegen-ia32.cc @@ -3857,7 +3857,7 @@ Result CodeGenerator::LoadFromGlobalSlotCheckExtensions( s = s->outer_scope(); } - if (s->is_eval_scope()) { + if (s != NULL && s->is_eval_scope()) { // Loop up the context chain. There is no frame effect so it is // safe to use raw labels here. Label next, fast; @@ -4351,7 +4351,7 @@ void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) { FieldOperand(elements.reg(), JSObject::kElementsOffset)); // Write to the indexed properties array. - int offset = i * kPointerSize + Array::kHeaderSize; + int offset = i * kPointerSize + FixedArray::kHeaderSize; __ mov(FieldOperand(elements.reg(), offset), prop_value.reg()); // Update the write barrier for the array address. @@ -5388,12 +5388,6 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) { } else { Load(node->expression()); switch (op) { - case Token::NOT: - case Token::DELETE: - case Token::TYPEOF: - UNREACHABLE(); // handled above - break; - case Token::SUB: { bool overwrite = (node->AsBinaryOperation() != NULL && @@ -5448,6 +5442,8 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) { } default: + // NOT, DELETE, TYPEOF, and VOID are handled outside the + // switch. UNREACHABLE(); } } @@ -6301,15 +6297,15 @@ void Reference::GetValue(TypeofState typeof_state) { __ mov(elements.reg(), FieldOperand(receiver.reg(), JSObject::kElementsOffset)); __ cmp(FieldOperand(elements.reg(), HeapObject::kMapOffset), - Immediate(Factory::hash_table_map())); - deferred->Branch(equal); + Immediate(Factory::fixed_array_map())); + deferred->Branch(not_equal); // Shift the key to get the actual index value and check that // it is within bounds. __ mov(index.reg(), key.reg()); __ sar(index.reg(), kSmiTagSize); __ cmp(index.reg(), - FieldOperand(elements.reg(), Array::kLengthOffset)); + FieldOperand(elements.reg(), FixedArray::kLengthOffset)); deferred->Branch(above_equal); // Load and check that the result is not the hole. 
We could @@ -6323,7 +6319,7 @@ void Reference::GetValue(TypeofState typeof_state) { __ mov(value.reg(), Operand(elements.reg(), index.reg(), times_4, - Array::kHeaderSize - kHeapObjectTag)); + FixedArray::kHeaderSize - kHeapObjectTag)); elements.Unuse(); index.Unuse(); __ cmp(Operand(value.reg()), Immediate(Factory::the_hole_value())); @@ -6495,7 +6491,7 @@ void Reference::SetValue(InitState init_state) { __ mov(Operand(tmp.reg(), key.reg(), times_2, - Array::kHeaderSize - kHeapObjectTag), + FixedArray::kHeaderSize - kHeapObjectTag), value.reg()); __ IncrementCounter(&Counters::keyed_store_inline, 1); diff --git a/deps/v8/src/ia32/codegen-ia32.h b/deps/v8/src/ia32/codegen-ia32.h index 5cd50b8b8e..4bb006f7ed 100644 --- a/deps/v8/src/ia32/codegen-ia32.h +++ b/deps/v8/src/ia32/codegen-ia32.h @@ -359,7 +359,7 @@ class CodeGenerator: public AstVisitor { #define DEF_VISIT(type) \ void Visit##type(type* node); - NODE_LIST(DEF_VISIT) + AST_NODE_LIST(DEF_VISIT) #undef DEF_VISIT // Visit a statement and then spill the virtual frame if control flow can @@ -558,7 +558,7 @@ class CodeGenerator: public AstVisitor { // information. void CodeForFunctionPosition(FunctionLiteral* fun); void CodeForReturnPosition(FunctionLiteral* fun); - void CodeForStatementPosition(Node* node); + void CodeForStatementPosition(AstNode* node); void CodeForSourcePosition(int pos); #ifdef DEBUG diff --git a/deps/v8/src/ia32/ic-ia32.cc b/deps/v8/src/ia32/ic-ia32.cc index 90e0fd1b40..08ffe2f141 100644 --- a/deps/v8/src/ia32/ic-ia32.cc +++ b/deps/v8/src/ia32/ic-ia32.cc @@ -43,6 +43,10 @@ namespace internal { // Helper function used to load a property from a dictionary backing storage. +// This function may return false negatives, so miss_label +// must always call a backup property load that is complete. +// This function is safe to call if the receiver has fast properties, +// or if name is not a symbol, and will jump to the miss_label in that case. static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss_label, Register r0, Register r1, Register r2, Register name) { @@ -56,7 +60,7 @@ static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss_label, // // r2 - used to hold the capacity of the property dictionary. // - // name - holds the name of the property and is unchanges. + // name - holds the name of the property and is unchanged. Label done; @@ -89,7 +93,8 @@ static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss_label, // Compute the capacity mask. const int kCapacityOffset = - Array::kHeaderSize + StringDictionary::kCapacityIndex * kPointerSize; + StringDictionary::kHeaderSize + + StringDictionary::kCapacityIndex * kPointerSize; __ mov(r2, FieldOperand(r0, kCapacityOffset)); __ shr(r2, kSmiTagSize); // convert smi to int __ dec(r2); @@ -99,7 +104,8 @@ static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss_label, // cover ~93% of loads from dictionaries. static const int kProbes = 4; const int kElementsStartOffset = - Array::kHeaderSize + StringDictionary::kElementsStartIndex * kPointerSize; + StringDictionary::kHeaderSize + + StringDictionary::kElementsStartIndex * kPointerSize; for (int i = 0; i < kProbes; i++) { // Compute the masked index: (hash + i + i * i) & mask. __ mov(r1, FieldOperand(name, String::kLengthOffset)); @@ -153,6 +159,9 @@ static void GenerateCheckNonObjectOrLoaded(MacroAssembler* masm, Label* miss, } +// The offset from the inlined patch site to the start of the +// inlined load instruction. 
It is 7 bytes (test eax, imm) plus +// 6 bytes (jne slow_label). const int LoadIC::kOffsetToLoadInstruction = 13; @@ -225,11 +234,11 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { // -- esp[4] : name // -- esp[8] : receiver // ----------------------------------- - Label slow, fast, check_string, index_int, index_string; + Label slow, check_string, index_int, index_string, check_pixel_array; // Load name and receiver. - __ mov(eax, (Operand(esp, kPointerSize))); - __ mov(ecx, (Operand(esp, 2 * kPointerSize))); + __ mov(eax, Operand(esp, kPointerSize)); + __ mov(ecx, Operand(esp, 2 * kPointerSize)); // Check that the object isn't a smi. __ test(ecx, Immediate(kSmiTagMask)); @@ -260,24 +269,56 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { __ mov(ecx, FieldOperand(ecx, JSObject::kElementsOffset)); // Check that the object is in fast mode (not dictionary). __ cmp(FieldOperand(ecx, HeapObject::kMapOffset), - Immediate(Factory::hash_table_map())); - __ j(equal, &slow, not_taken); + Immediate(Factory::fixed_array_map())); + __ j(not_equal, &check_pixel_array); // Check that the key (index) is within bounds. - __ cmp(eax, FieldOperand(ecx, Array::kLengthOffset)); - __ j(below, &fast, taken); + __ cmp(eax, FieldOperand(ecx, FixedArray::kLengthOffset)); + __ j(above_equal, &slow); + // Fast case: Do the load. + __ mov(eax, + Operand(ecx, eax, times_4, FixedArray::kHeaderSize - kHeapObjectTag)); + __ cmp(Operand(eax), Immediate(Factory::the_hole_value())); + // In case the loaded value is the_hole we have to consult GetProperty + // to ensure the prototype chain is searched. + __ j(equal, &slow); + __ IncrementCounter(&Counters::keyed_load_generic_smi, 1); + __ ret(0); + + // Check whether the elements is a pixel array. + // eax: untagged index + // ecx: elements array + __ bind(&check_pixel_array); + __ cmp(FieldOperand(ecx, HeapObject::kMapOffset), + Immediate(Factory::pixel_array_map())); + __ j(not_equal, &slow); + __ cmp(eax, FieldOperand(ecx, PixelArray::kLengthOffset)); + __ j(above_equal, &slow); + __ mov(ecx, FieldOperand(ecx, PixelArray::kExternalPointerOffset)); + __ movzx_b(eax, Operand(ecx, eax, times_1, 0)); + __ shl(eax, kSmiTagSize); + __ ret(0); + + // Slow case: Load name and receiver from stack and jump to runtime. __ bind(&slow); __ IncrementCounter(&Counters::keyed_load_generic_slow, 1); KeyedLoadIC::Generate(masm, ExternalReference(Runtime::kKeyedGetProperty)); - // Check if the key is a symbol that is not an array index. + __ bind(&check_string); + // The key is not a smi. + // Is it a string? + __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, edx); + __ j(above_equal, &slow); + // Is the string an array index, with cached numeric value? __ mov(ebx, FieldOperand(eax, String::kLengthOffset)); __ test(ebx, Immediate(String::kIsArrayIndexMask)); __ j(not_zero, &index_string, not_taken); - __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset)); - __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset)); + + // If the string is a symbol, do a quick inline probe of the receiver's + // dictionary, if it exists. + __ movzx_b(ebx, FieldOperand(edx, Map::kInstanceTypeOffset)); __ test(ebx, Immediate(kIsSymbolMask)); - __ j(not_zero, &slow, not_taken); + __ j(zero, &slow, not_taken); // Probe the dictionary leaving result in ecx. 
GenerateDictionaryLoad(masm, &slow, ebx, ecx, edx, eax); GenerateCheckNonObjectOrLoaded(masm, &slow, ecx, edx); @@ -299,15 +340,6 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { __ and_(eax, (1 << String::kShortLengthShift) - 1); __ shr(eax, String::kLongLengthShift); __ jmp(&index_int); - // Fast case: Do the load. - __ bind(&fast); - __ mov(eax, Operand(ecx, eax, times_4, Array::kHeaderSize - kHeapObjectTag)); - __ cmp(Operand(eax), Immediate(Factory::the_hole_value())); - // In case the loaded value is the_hole we have to consult GetProperty - // to ensure the prototype chain is searched. - __ j(equal, &slow, not_taken); - __ IncrementCounter(&Counters::keyed_load_generic_smi, 1); - __ ret(0); } @@ -318,7 +350,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) { // -- esp[4] : key // -- esp[8] : receiver // ----------------------------------- - Label slow, fast, array, extra; + Label slow, fast, array, extra, check_pixel_array; // Get the receiver from the stack. __ mov(edx, Operand(esp, 2 * kPointerSize)); // 2 ~ return address, key @@ -353,8 +385,8 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) { __ mov(ecx, FieldOperand(edx, JSObject::kElementsOffset)); // Check that the object is in fast mode (not dictionary). __ cmp(FieldOperand(ecx, HeapObject::kMapOffset), - Immediate(Factory::hash_table_map())); - __ j(equal, &slow, not_taken); + Immediate(Factory::fixed_array_map())); + __ j(not_equal, &check_pixel_array, not_taken); // Untag the key (for checking against untagged length in the fixed array). __ mov(edx, Operand(ebx)); __ sar(edx, kSmiTagSize); // untag the index and use it for the comparison @@ -364,7 +396,6 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) { // ebx: index (as a smi) __ j(below, &fast, taken); - // Slow case: Push extra copies of the arguments (3). __ bind(&slow); __ pop(ecx); @@ -375,6 +406,37 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) { // Do tail-call to runtime routine. __ TailCallRuntime(ExternalReference(Runtime::kSetProperty), 3); + // Check whether the elements is a pixel array. + // eax: value + // ecx: elements array + // ebx: index (as a smi) + __ bind(&check_pixel_array); + __ cmp(FieldOperand(ecx, HeapObject::kMapOffset), + Immediate(Factory::pixel_array_map())); + __ j(not_equal, &slow); + // Check that the value is a smi. If a conversion is needed call into the + // runtime to convert and clamp. + __ test(eax, Immediate(kSmiTagMask)); + __ j(not_zero, &slow); + __ sar(ebx, kSmiTagSize); // Untag the index. + __ cmp(ebx, FieldOperand(ecx, PixelArray::kLengthOffset)); + __ j(above_equal, &slow); + __ sar(eax, kSmiTagSize); // Untag the value. + { // Clamp the value to [0..255]. + Label done, check_255; + __ cmp(eax, 0); + __ j(greater_equal, &check_255); + __ mov(eax, Immediate(0)); + __ jmp(&done); + __ bind(&check_255); + __ cmp(eax, 255); + __ j(less_equal, &done); + __ mov(eax, Immediate(255)); + __ bind(&done); + } + __ mov(ecx, FieldOperand(ecx, PixelArray::kExternalPointerOffset)); + __ mov_b(Operand(ecx, ebx, times_1, 0), eax); + __ ret(0); // Extra capacity case: Check if there is extra capacity to // perform the store and update the length. 
Used for adding one @@ -405,21 +467,21 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) { // ebx: index (as a smi) __ mov(ecx, FieldOperand(edx, JSObject::kElementsOffset)); __ cmp(FieldOperand(ecx, HeapObject::kMapOffset), - Immediate(Factory::hash_table_map())); - __ j(equal, &slow, not_taken); + Immediate(Factory::fixed_array_map())); + __ j(not_equal, &check_pixel_array); // Check the key against the length in the array, compute the // address to store into and fall through to fast case. __ cmp(ebx, FieldOperand(edx, JSArray::kLengthOffset)); __ j(above_equal, &extra, not_taken); - // Fast case: Do the store. __ bind(&fast); // eax: value // ecx: FixedArray // ebx: index (as a smi) - __ mov(Operand(ecx, ebx, times_2, Array::kHeaderSize - kHeapObjectTag), eax); + __ mov(Operand(ecx, ebx, times_2, FixedArray::kHeaderSize - kHeapObjectTag), + eax); // Update write barrier for the elements array address. __ mov(edx, Operand(eax)); __ RecordWrite(ecx, 0, edx, ebx); @@ -731,12 +793,10 @@ void LoadIC::Generate(MacroAssembler* masm, const ExternalReference& f) { // ----------------------------------- __ mov(eax, Operand(esp, kPointerSize)); - - // Move the return address below the arguments. __ pop(ebx); - __ push(eax); - __ push(ecx); - __ push(ebx); + __ push(eax); // receiver + __ push(ecx); // name + __ push(ebx); // return address // Perform tail call to the entry. __ TailCallRuntime(f, 2); @@ -779,7 +839,8 @@ void KeyedStoreIC::RestoreInlinedVersion(Address address) { bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) { // The address of the instruction following the call. - Address test_instruction_address = address + 4; + Address test_instruction_address = + address + Assembler::kTargetAddrToReturnAddrDist; // If the instruction following the call is not a test eax, nothing // was inlined. if (*test_instruction_address != kTestEaxByte) return false; @@ -805,7 +866,8 @@ bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) { static bool PatchInlinedMapCheck(Address address, Object* map) { - Address test_instruction_address = address + 4; // 4 = stub address + Address test_instruction_address = + address + Assembler::kTargetAddrToReturnAddrDist; // The keyed load has a fast inlined case if the IC call instruction // is immediately followed by a test instruction. if (*test_instruction_address != kTestEaxByte) return false; @@ -859,12 +921,10 @@ void KeyedLoadIC::Generate(MacroAssembler* masm, const ExternalReference& f) { __ mov(eax, Operand(esp, kPointerSize)); __ mov(ecx, Operand(esp, 2 * kPointerSize)); - - // Move the return address below the arguments. __ pop(ebx); - __ push(ecx); - __ push(eax); - __ push(ebx); + __ push(ecx); // receiver + __ push(eax); // name + __ push(ebx); // return address // Perform tail call to the entry. __ TailCallRuntime(f, 2); @@ -899,12 +959,12 @@ void StoreIC::GenerateExtendStorage(MacroAssembler* masm) { // -- esp[4] : receiver // ----------------------------------- - // Move the return address below the arguments. __ pop(ebx); - __ push(Operand(esp, 0)); - __ push(ecx); - __ push(eax); - __ push(ebx); + __ push(Operand(esp, 0)); // receiver + __ push(ecx); // transition map + __ push(eax); // value + __ push(ebx); // return address + // Perform tail call to the entry. 
__ TailCallRuntime( ExternalReference(IC_Utility(kSharedStoreIC_ExtendStorage)), 3); diff --git a/deps/v8/src/ia32/macro-assembler-ia32.cc b/deps/v8/src/ia32/macro-assembler-ia32.cc index 479b8ca014..fae15251ec 100644 --- a/deps/v8/src/ia32/macro-assembler-ia32.cc +++ b/deps/v8/src/ia32/macro-assembler-ia32.cc @@ -79,7 +79,7 @@ static void RecordWriteHelper(MacroAssembler* masm, // Add the page header, array header, and array body size to the page // address. masm->add(Operand(object), Immediate(Page::kObjectStartOffset - + Array::kHeaderSize)); + + FixedArray::kHeaderSize)); masm->add(object, Operand(scratch)); @@ -199,9 +199,10 @@ void MacroAssembler::RecordWrite(Register object, int offset, lea(dst, Operand(object, offset)); } else { // array access: calculate the destination address in the same manner as - // KeyedStoreIC::GenerateGeneric - lea(dst, - Operand(object, dst, times_2, Array::kHeaderSize - kHeapObjectTag)); + // KeyedStoreIC::GenerateGeneric. Multiply a smi by 2 to get an offset + // into an array of words. + lea(dst, Operand(object, dst, times_2, + FixedArray::kHeaderSize - kHeapObjectTag)); } // If we are already generating a shared stub, not inlining the // record write code isn't going to save us any memory. diff --git a/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc b/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc index 04a5390a2d..2129fd16b6 100644 --- a/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc +++ b/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc @@ -634,11 +634,9 @@ Handle<Object> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) { __ push(Immediate(0)); // Make room for "input start - 1" constant. // Check if we have space on the stack for registers. - Label retry_stack_check; Label stack_limit_hit; Label stack_ok; - __ bind(&retry_stack_check); ExternalReference stack_guard_limit = ExternalReference::address_of_stack_guard_limit(); __ mov(ecx, esp); @@ -658,10 +656,7 @@ Handle<Object> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) { CallCheckStackGuardState(ebx); __ or_(eax, Operand(eax)); // If returned value is non-zero, we exit with the returned value as result. - // Otherwise it was a preemption and we just check the limit again. - __ j(equal, &retry_stack_check); - // Return value was non-zero. Exit with exception or retry. - __ jmp(&exit_label_); + __ j(not_zero, &exit_label_); __ bind(&stack_ok); @@ -762,19 +757,11 @@ Handle<Object> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) { __ push(backtrack_stackpointer()); __ push(edi); - Label retry; - - __ bind(&retry); CallCheckStackGuardState(ebx); __ or_(eax, Operand(eax)); // If returning non-zero, we should end execution with the given // result as return value. __ j(not_zero, &exit_label_); - // Check if we are still preempted. 
- ExternalReference stack_guard_limit = - ExternalReference::address_of_stack_guard_limit(); - __ cmp(esp, Operand::StaticVariable(stack_guard_limit)); - __ j(below_equal, &retry); __ pop(edi); __ pop(backtrack_stackpointer()); @@ -1073,10 +1060,12 @@ int RegExpMacroAssemblerIA32::CaseInsensitiveCompareUC16(Address byte_offset1, unibrow::uchar c1 = substring1[i]; unibrow::uchar c2 = substring2[i]; if (c1 != c2) { - canonicalize.get(c1, '\0', &c1); - if (c1 != c2) { - canonicalize.get(c2, '\0', &c2); - if (c1 != c2) { + unibrow::uchar s1[1] = { c1 }; + canonicalize.get(c1, '\0', s1); + if (s1[0] != c2) { + unibrow::uchar s2[1] = { c2 }; + canonicalize.get(c2, '\0', s2); + if (s1[0] != s2[0]) { return 0; } } diff --git a/deps/v8/src/ia32/register-allocator-ia32-inl.h b/deps/v8/src/ia32/register-allocator-ia32-inl.h index ddee472d2d..99ae6ebcb7 100644 --- a/deps/v8/src/ia32/register-allocator-ia32-inl.h +++ b/deps/v8/src/ia32/register-allocator-ia32-inl.h @@ -49,7 +49,7 @@ bool RegisterAllocator::IsReserved(Register reg) { int RegisterAllocator::ToNumber(Register reg) { ASSERT(reg.is_valid() && !IsReserved(reg)); - static int numbers[] = { + const int kNumbers[] = { 0, // eax 2, // ecx 3, // edx @@ -59,14 +59,14 @@ int RegisterAllocator::ToNumber(Register reg) { -1, // esi 4 // edi }; - return numbers[reg.code()]; + return kNumbers[reg.code()]; } Register RegisterAllocator::ToRegister(int num) { ASSERT(num >= 0 && num < kNumRegisters); - static Register registers[] = { eax, ebx, ecx, edx, edi }; - return registers[num]; + const Register kRegisters[] = { eax, ebx, ecx, edx, edi }; + return kRegisters[num]; } diff --git a/deps/v8/src/ia32/stub-cache-ia32.cc b/deps/v8/src/ia32/stub-cache-ia32.cc index 0a887d5faf..a626377e4f 100644 --- a/deps/v8/src/ia32/stub-cache-ia32.cc +++ b/deps/v8/src/ia32/stub-cache-ia32.cc @@ -152,6 +152,22 @@ void StubCache::GenerateProbe(MacroAssembler* masm, } +template <typename Pushable> +static void PushInterceptorArguments(MacroAssembler* masm, + Register receiver, + Register holder, + Pushable name, + JSObject* holder_obj) { + __ push(receiver); + __ push(holder); + __ push(name); + InterceptorInfo* interceptor = holder_obj->GetNamedInterceptor(); + __ mov(receiver, Immediate(Handle<Object>(interceptor))); + __ push(receiver); + __ push(FieldOperand(receiver, InterceptorInfo::kDataOffset)); +} + + void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm, int index, Register prototype) { @@ -266,13 +282,327 @@ void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm, __ mov(dst, FieldOperand(src, offset)); } else { // Calculate the offset into the properties array. 
- int offset = index * kPointerSize + Array::kHeaderSize; + int offset = index * kPointerSize + FixedArray::kHeaderSize; __ mov(dst, FieldOperand(src, JSObject::kPropertiesOffset)); __ mov(dst, FieldOperand(dst, offset)); } } +template <class Pushable> +static void CompileCallLoadPropertyWithInterceptor(MacroAssembler* masm, + Register receiver, + Register holder, + Pushable name, + JSObject* holder_obj) { + PushInterceptorArguments(masm, receiver, holder, name, holder_obj); + + ExternalReference ref = + ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorOnly)); + __ mov(eax, Immediate(5)); + __ mov(ebx, Immediate(ref)); + + CEntryStub stub; + __ CallStub(&stub); +} + + +template <class Compiler> +static void CompileLoadInterceptor(Compiler* compiler, + StubCompiler* stub_compiler, + MacroAssembler* masm, + JSObject* object, + JSObject* holder, + String* name, + LookupResult* lookup, + Register receiver, + Register scratch1, + Register scratch2, + Label* miss) { + ASSERT(holder->HasNamedInterceptor()); + ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined()); + + // Check that the receiver isn't a smi. + __ test(receiver, Immediate(kSmiTagMask)); + __ j(zero, miss, not_taken); + + // Check that the maps haven't changed. + Register reg = + stub_compiler->CheckPrototypes(object, receiver, holder, + scratch1, scratch2, name, miss); + + if (lookup->IsValid() && lookup->IsCacheable()) { + compiler->CompileCacheable(masm, + stub_compiler, + receiver, + reg, + scratch1, + scratch2, + holder, + lookup, + name, + miss); + } else { + compiler->CompileRegular(masm, + receiver, + reg, + scratch2, + holder, + miss); + } +} + + +static void LookupPostInterceptor(JSObject* holder, + String* name, + LookupResult* lookup) { + holder->LocalLookupRealNamedProperty(name, lookup); + if (lookup->IsNotFound()) { + Object* proto = holder->GetPrototype(); + if (proto != Heap::null_value()) { + proto->Lookup(name, lookup); + } + } +} + + +class LoadInterceptorCompiler BASE_EMBEDDED { + public: + explicit LoadInterceptorCompiler(Register name) : name_(name) {} + + void CompileCacheable(MacroAssembler* masm, + StubCompiler* stub_compiler, + Register receiver, + Register holder, + Register scratch1, + Register scratch2, + JSObject* holder_obj, + LookupResult* lookup, + String* name, + Label* miss_label) { + AccessorInfo* callback = 0; + bool optimize = false; + // So far the most popular follow ups for interceptor loads are FIELD + // and CALLBACKS, so inline only them, other cases may be added + // later. + if (lookup->type() == FIELD) { + optimize = true; + } else if (lookup->type() == CALLBACKS) { + Object* callback_object = lookup->GetCallbackObject(); + if (callback_object->IsAccessorInfo()) { + callback = AccessorInfo::cast(callback_object); + optimize = callback->getter() != NULL; + } + } + + if (!optimize) { + CompileRegular(masm, receiver, holder, scratch2, holder_obj, miss_label); + return; + } + + // Note: starting a frame here makes GC aware of pointers pushed below. 
+ __ EnterInternalFrame(); + + if (lookup->type() == CALLBACKS) { + __ push(receiver); + } + __ push(holder); + __ push(name_); + + CompileCallLoadPropertyWithInterceptor(masm, + receiver, + holder, + name_, + holder_obj); + + Label interceptor_failed; + __ cmp(eax, Factory::no_interceptor_result_sentinel()); + __ j(equal, &interceptor_failed); + __ LeaveInternalFrame(); + __ ret(0); + + __ bind(&interceptor_failed); + __ pop(name_); + __ pop(holder); + if (lookup->type() == CALLBACKS) { + __ pop(receiver); + } + + __ LeaveInternalFrame(); + + if (lookup->type() == FIELD) { + holder = stub_compiler->CheckPrototypes(holder_obj, holder, + lookup->holder(), scratch1, + scratch2, + name, + miss_label); + stub_compiler->GenerateFastPropertyLoad(masm, eax, + holder, lookup->holder(), + lookup->GetFieldIndex()); + __ ret(0); + } else { + ASSERT(lookup->type() == CALLBACKS); + ASSERT(lookup->GetCallbackObject()->IsAccessorInfo()); + ASSERT(callback != NULL); + ASSERT(callback->getter() != NULL); + + Label cleanup; + __ pop(scratch2); + __ push(receiver); + __ push(scratch2); + + holder = stub_compiler->CheckPrototypes(holder_obj, holder, + lookup->holder(), scratch1, + scratch2, + name, + &cleanup); + + __ pop(scratch2); // save old return address + __ push(holder); + __ mov(holder, Immediate(Handle<AccessorInfo>(callback))); + __ push(holder); + __ push(FieldOperand(holder, AccessorInfo::kDataOffset)); + __ push(name_); + __ push(scratch2); // restore old return address + + ExternalReference ref = + ExternalReference(IC_Utility(IC::kLoadCallbackProperty)); + __ TailCallRuntime(ref, 5); + + __ bind(&cleanup); + __ pop(scratch1); + __ pop(scratch2); + __ push(scratch1); + } + } + + + void CompileRegular(MacroAssembler* masm, + Register receiver, + Register holder, + Register scratch, + JSObject* holder_obj, + Label* miss_label) { + __ pop(scratch); // save old return address + PushInterceptorArguments(masm, receiver, holder, name_, holder_obj); + __ push(scratch); // restore old return address + + ExternalReference ref = ExternalReference( + IC_Utility(IC::kLoadPropertyWithInterceptorForLoad)); + __ TailCallRuntime(ref, 5); + } + + private: + Register name_; +}; + + +class CallInterceptorCompiler BASE_EMBEDDED { + public: + explicit CallInterceptorCompiler(const ParameterCount& arguments) + : arguments_(arguments), argc_(arguments.immediate()) {} + + void CompileCacheable(MacroAssembler* masm, + StubCompiler* stub_compiler, + Register receiver, + Register holder, + Register scratch1, + Register scratch2, + JSObject* holder_obj, + LookupResult* lookup, + String* name, + Label* miss_label) { + JSFunction* function = 0; + bool optimize = false; + // So far the most popular case for failed interceptor is + // CONSTANT_FUNCTION sitting below. + if (lookup->type() == CONSTANT_FUNCTION) { + function = lookup->GetConstantFunction(); + // JSArray holder is a special case for call constant function + // (see the corresponding code). + if (function->is_compiled() && !holder_obj->IsJSArray()) { + optimize = true; + } + } + + if (!optimize) { + CompileRegular(masm, receiver, holder, scratch2, holder_obj, miss_label); + return; + } + + __ EnterInternalFrame(); + __ push(holder); // save the holder + + CompileCallLoadPropertyWithInterceptor( + masm, + receiver, + holder, + // Under EnterInternalFrame this refers to name. 
+ Operand(ebp, (argc_ + 3) * kPointerSize), + holder_obj); + + __ pop(receiver); // restore holder + __ LeaveInternalFrame(); + + __ cmp(eax, Factory::no_interceptor_result_sentinel()); + Label invoke; + __ j(not_equal, &invoke); + + stub_compiler->CheckPrototypes(holder_obj, receiver, + lookup->holder(), scratch1, + scratch2, + name, + miss_label); + if (lookup->holder()->IsGlobalObject()) { + __ mov(edx, Operand(esp, (argc_ + 1) * kPointerSize)); + __ mov(edx, FieldOperand(edx, GlobalObject::kGlobalReceiverOffset)); + __ mov(Operand(esp, (argc_ + 1) * kPointerSize), edx); + } + + ASSERT(function->is_compiled()); + // Get the function and setup the context. + __ mov(edi, Immediate(Handle<JSFunction>(function))); + __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset)); + + // Jump to the cached code (tail call). + ASSERT(function->is_compiled()); + Handle<Code> code(function->code()); + ParameterCount expected(function->shared()->formal_parameter_count()); + __ InvokeCode(code, expected, arguments_, + RelocInfo::CODE_TARGET, JUMP_FUNCTION); + + __ bind(&invoke); + } + + void CompileRegular(MacroAssembler* masm, + Register receiver, + Register holder, + Register scratch, + JSObject* holder_obj, + Label* miss_label) { + __ EnterInternalFrame(); + + PushInterceptorArguments(masm, + receiver, + holder, + Operand(ebp, (argc_ + 3) * kPointerSize), + holder_obj); + + ExternalReference ref = ExternalReference( + IC_Utility(IC::kLoadPropertyWithInterceptorForCall)); + __ mov(eax, Immediate(5)); + __ mov(ebx, Immediate(ref)); + + CEntryStub stub; + __ CallStub(&stub); + + __ LeaveInternalFrame(); + } + + private: + const ParameterCount& arguments_; + int argc_; +}; void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) { @@ -349,7 +679,7 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm, __ RecordWrite(receiver_reg, offset, name_reg, scratch); } else { // Write to the properties array. - int offset = index * kPointerSize + Array::kHeaderSize; + int offset = index * kPointerSize + FixedArray::kHeaderSize; // Get the properties array (optimistically). __ mov(scratch, FieldOperand(receiver_reg, JSObject::kPropertiesOffset)); __ mov(FieldOperand(scratch, offset), eax); @@ -449,15 +779,17 @@ void StubCompiler::GenerateLoadCallback(JSObject* object, // Push the arguments on the JS stack of the caller. __ pop(scratch2); // remove return address __ push(receiver); // receiver - __ push(Immediate(Handle<AccessorInfo>(callback))); // callback data - __ push(name_reg); // name __ push(reg); // holder + __ mov(reg, Immediate(Handle<AccessorInfo>(callback))); // callback data + __ push(reg); + __ push(FieldOperand(reg, AccessorInfo::kDataOffset)); + __ push(name_reg); // name __ push(scratch2); // restore return address // Do tail-call to the runtime system. ExternalReference load_callback_property = ExternalReference(IC_Utility(IC::kLoadCallbackProperty)); - __ TailCallRuntime(load_callback_property, 4); + __ TailCallRuntime(load_callback_property, 5); } @@ -486,36 +818,25 @@ void StubCompiler::GenerateLoadConstant(JSObject* object, void StubCompiler::GenerateLoadInterceptor(JSObject* object, JSObject* holder, - Smi* lookup_hint, + LookupResult* lookup, Register receiver, Register name_reg, Register scratch1, Register scratch2, String* name, Label* miss) { - // Check that the receiver isn't a smi. - __ test(receiver, Immediate(kSmiTagMask)); - __ j(zero, miss, not_taken); - - // Check that the maps haven't changed. 
- Register reg = - CheckPrototypes(object, receiver, holder, - scratch1, scratch2, name, miss); - - // Push the arguments on the JS stack of the caller. - __ pop(scratch2); // remove return address - __ push(receiver); // receiver - __ push(reg); // holder - __ push(name_reg); // name - // TODO(367): Maybe don't push lookup_hint for LOOKUP_IN_HOLDER and/or - // LOOKUP_IN_PROTOTYPE, but use a special version of lookup method? - __ push(Immediate(lookup_hint)); - __ push(scratch2); // restore return address - - // Do tail-call to the runtime system. - ExternalReference load_ic_property = - ExternalReference(IC_Utility(IC::kLoadInterceptorProperty)); - __ TailCallRuntime(load_ic_property, 4); + LoadInterceptorCompiler compiler(name_reg); + CompileLoadInterceptor(&compiler, + this, + masm(), + object, + holder, + name, + lookup, + receiver, + scratch1, + scratch2, + miss); } @@ -680,13 +1001,13 @@ Object* CallStubCompiler::CompileCallConstant(Object* object, case JSARRAY_HAS_FAST_ELEMENTS_CHECK: CheckPrototypes(JSObject::cast(object), edx, holder, ebx, ecx, name, &miss); - // Make sure object->elements()->map() != Heap::dictionary_array_map() + // Make sure object->HasFastElements(). // Get the elements array of the object. __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset)); // Check that the object is in fast mode (not dictionary). __ cmp(FieldOperand(ebx, HeapObject::kMapOffset), - Immediate(Factory::hash_table_map())); - __ j(equal, &miss, not_taken); + Immediate(Factory::fixed_array_map())); + __ j(not_equal, &miss, not_taken); break; default: @@ -728,47 +1049,32 @@ Object* CallStubCompiler::CompileCallInterceptor(Object* object, // Get the number of arguments. const int argc = arguments().immediate(); + LookupResult lookup; + LookupPostInterceptor(holder, name, &lookup); + // Get the receiver from the stack. __ mov(edx, Operand(esp, (argc + 1) * kPointerSize)); - // Check that the receiver isn't a smi. - __ test(edx, Immediate(kSmiTagMask)); - __ j(zero, &miss, not_taken); - - // Check that maps have not changed and compute the holder register. - Register reg = - CheckPrototypes(JSObject::cast(object), edx, holder, - ebx, ecx, name, &miss); - - // Enter an internal frame. - __ EnterInternalFrame(); - - // Push arguments on the expression stack. - __ push(edx); // receiver - __ push(reg); // holder - __ push(Operand(ebp, (argc + 3) * kPointerSize)); // name - __ push(Immediate(holder->InterceptorPropertyLookupHint(name))); - - // Perform call. - ExternalReference load_interceptor = - ExternalReference(IC_Utility(IC::kLoadInterceptorProperty)); - __ mov(eax, Immediate(4)); - __ mov(ebx, Immediate(load_interceptor)); - - CEntryStub stub; - __ CallStub(&stub); - - // Move result to edi and restore receiver. - __ mov(edi, eax); - __ mov(edx, Operand(ebp, (argc + 2) * kPointerSize)); // receiver - - // Exit frame. - __ LeaveInternalFrame(); + CallInterceptorCompiler compiler(arguments()); + CompileLoadInterceptor(&compiler, + this, + masm(), + JSObject::cast(object), + holder, + name, + &lookup, + edx, + ebx, + ecx, + &miss); + + // Restore receiver. + __ mov(edx, Operand(esp, (argc + 1) * kPointerSize)); // Check that the function really is a function. 
- __ test(edi, Immediate(kSmiTagMask)); + __ test(eax, Immediate(kSmiTagMask)); __ j(zero, &miss, not_taken); - __ CmpObjectType(edi, JS_FUNCTION_TYPE, ebx); + __ CmpObjectType(eax, JS_FUNCTION_TYPE, ebx); __ j(not_equal, &miss, not_taken); // Patch the receiver on the stack with the global proxy if @@ -779,6 +1085,7 @@ Object* CallStubCompiler::CompileCallInterceptor(Object* object, } // Invoke the function. + __ mov(edi, eax); __ InvokeFunction(edi, arguments(), JUMP_FUNCTION); // Handle load cache miss. @@ -800,8 +1107,6 @@ Object* CallStubCompiler::CompileCallGlobal(JSObject* object, // ----------------------------------- Label miss; - __ IncrementCounter(&Counters::call_global_inline, 1); - // Get the number of arguments. const int argc = arguments().immediate(); @@ -837,6 +1142,7 @@ Object* CallStubCompiler::CompileCallGlobal(JSObject* object, __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset)); // Jump to the cached code (tail call). + __ IncrementCounter(&Counters::call_global_inline, 1); ASSERT(function->is_compiled()); Handle<Code> code(function->code()); ParameterCount expected(function->shared()->formal_parameter_count()); @@ -845,7 +1151,6 @@ Object* CallStubCompiler::CompileCallGlobal(JSObject* object, // Handle call cache miss. __ bind(&miss); - __ DecrementCounter(&Counters::call_global_inline, 1); __ IncrementCounter(&Counters::call_global_inline_miss, 1); Handle<Code> ic = ComputeCallMiss(arguments().immediate()); __ jmp(ic, RelocInfo::CODE_TARGET); @@ -1009,10 +1314,8 @@ Object* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object, // ----------------------------------- Label miss; - __ IncrementCounter(&Counters::named_store_global_inline, 1); - // Check that the map of the global has not changed. - __ mov(ebx, (Operand(esp, kPointerSize))); + __ mov(ebx, Operand(esp, kPointerSize)); __ cmp(FieldOperand(ebx, HeapObject::kMapOffset), Immediate(Handle<Map>(object->map()))); __ j(not_equal, &miss, not_taken); @@ -1022,11 +1325,11 @@ Object* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object, __ mov(FieldOperand(ecx, JSGlobalPropertyCell::kValueOffset), eax); // Return the value (register eax). + __ IncrementCounter(&Counters::named_store_global_inline, 1); __ ret(0); // Handle store cache miss. 
__ bind(&miss); - __ DecrementCounter(&Counters::named_store_global_inline, 1); __ IncrementCounter(&Counters::named_store_global_inline_miss, 1); Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss)); __ jmp(ic, RelocInfo::CODE_TARGET); @@ -1091,7 +1394,7 @@ Object* LoadStubCompiler::CompileLoadField(JSObject* object, // ----------------------------------- Label miss; - __ mov(eax, (Operand(esp, kPointerSize))); + __ mov(eax, Operand(esp, kPointerSize)); GenerateLoadField(object, holder, eax, ebx, edx, index, name, &miss); __ bind(&miss); GenerateLoadMiss(masm(), Code::LOAD_IC); @@ -1112,7 +1415,7 @@ Object* LoadStubCompiler::CompileLoadCallback(JSObject* object, // ----------------------------------- Label miss; - __ mov(eax, (Operand(esp, kPointerSize))); + __ mov(eax, Operand(esp, kPointerSize)); GenerateLoadCallback(object, holder, eax, ecx, ebx, edx, callback, name, &miss); __ bind(&miss); @@ -1134,7 +1437,7 @@ Object* LoadStubCompiler::CompileLoadConstant(JSObject* object, // ----------------------------------- Label miss; - __ mov(eax, (Operand(esp, kPointerSize))); + __ mov(eax, Operand(esp, kPointerSize)); GenerateLoadConstant(object, holder, eax, ebx, edx, value, name, &miss); __ bind(&miss); GenerateLoadMiss(masm(), Code::LOAD_IC); @@ -1154,12 +1457,15 @@ Object* LoadStubCompiler::CompileLoadInterceptor(JSObject* receiver, // ----------------------------------- Label miss; - __ mov(eax, (Operand(esp, kPointerSize))); + LookupResult lookup; + LookupPostInterceptor(holder, name, &lookup); + + __ mov(eax, Operand(esp, kPointerSize)); // TODO(368): Compile in the whole chain: all the interceptors in // prototypes and ultimate answer. GenerateLoadInterceptor(receiver, holder, - holder->InterceptorPropertyLookupHint(name), + &lookup, eax, ecx, edx, @@ -1187,10 +1493,8 @@ Object* LoadStubCompiler::CompileLoadGlobal(JSObject* object, // ----------------------------------- Label miss; - __ IncrementCounter(&Counters::named_load_global_inline, 1); - // Get the receiver from the stack. - __ mov(eax, (Operand(esp, kPointerSize))); + __ mov(eax, Operand(esp, kPointerSize)); // If the object is the holder then we know that it's a global // object which can only happen for contextual loads. In this case, @@ -1216,10 +1520,10 @@ Object* LoadStubCompiler::CompileLoadGlobal(JSObject* object, __ Check(not_equal, "DontDelete cells can't contain the hole"); } + __ IncrementCounter(&Counters::named_load_global_inline, 1); __ ret(0); __ bind(&miss); - __ DecrementCounter(&Counters::named_load_global_inline, 1); __ IncrementCounter(&Counters::named_load_global_inline_miss, 1); GenerateLoadMiss(masm(), Code::LOAD_IC); @@ -1239,8 +1543,8 @@ Object* KeyedLoadStubCompiler::CompileLoadField(String* name, // ----------------------------------- Label miss; - __ mov(eax, (Operand(esp, kPointerSize))); - __ mov(ecx, (Operand(esp, 2 * kPointerSize))); + __ mov(eax, Operand(esp, kPointerSize)); + __ mov(ecx, Operand(esp, 2 * kPointerSize)); __ IncrementCounter(&Counters::keyed_load_field, 1); // Check that the name has not changed. @@ -1269,8 +1573,8 @@ Object* KeyedLoadStubCompiler::CompileLoadCallback(String* name, // ----------------------------------- Label miss; - __ mov(eax, (Operand(esp, kPointerSize))); - __ mov(ecx, (Operand(esp, 2 * kPointerSize))); + __ mov(eax, Operand(esp, kPointerSize)); + __ mov(ecx, Operand(esp, 2 * kPointerSize)); __ IncrementCounter(&Counters::keyed_load_callback, 1); // Check that the name has not changed. 
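What makes the inlined global stores and loads above sound is the cell indirection: after a single map identity check, the stub goes straight to a JSGlobalPropertyCell that was located when the stub was compiled, with no dictionary probe at runtime. A standalone model with illustrative names:

  struct PropertyCellModel { void* value; };

  struct GlobalObjectModel {
    const void* map;               // identity compared by the stub
    PropertyCellModel* name_cell;  // found once, at stub compile time
  };

  // Fast path: one compare plus one indirect store.
  bool TryFastGlobalStore(GlobalObjectModel* global,
                          const void* expected_map,
                          void* new_value) {
    if (global->map != expected_map) return false;  // miss: go to runtime
    global->name_cell->value = new_value;
    return true;
  }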
@@ -1299,8 +1603,8 @@ Object* KeyedLoadStubCompiler::CompileLoadConstant(String* name, // ----------------------------------- Label miss; - __ mov(eax, (Operand(esp, kPointerSize))); - __ mov(ecx, (Operand(esp, 2 * kPointerSize))); + __ mov(eax, Operand(esp, kPointerSize)); + __ mov(ecx, Operand(esp, 2 * kPointerSize)); __ IncrementCounter(&Counters::keyed_load_constant_function, 1); // Check that the name has not changed. @@ -1328,17 +1632,19 @@ Object* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver, // ----------------------------------- Label miss; - __ mov(eax, (Operand(esp, kPointerSize))); - __ mov(ecx, (Operand(esp, 2 * kPointerSize))); + __ mov(eax, Operand(esp, kPointerSize)); + __ mov(ecx, Operand(esp, 2 * kPointerSize)); __ IncrementCounter(&Counters::keyed_load_interceptor, 1); // Check that the name has not changed. __ cmp(Operand(eax), Immediate(Handle<String>(name))); __ j(not_equal, &miss, not_taken); + LookupResult lookup; + LookupPostInterceptor(holder, name, &lookup); GenerateLoadInterceptor(receiver, holder, - Smi::FromInt(JSObject::kLookupInHolder), + &lookup, ecx, eax, edx, @@ -1364,8 +1670,8 @@ Object* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) { // ----------------------------------- Label miss; - __ mov(eax, (Operand(esp, kPointerSize))); - __ mov(ecx, (Operand(esp, 2 * kPointerSize))); + __ mov(eax, Operand(esp, kPointerSize)); + __ mov(ecx, Operand(esp, 2 * kPointerSize)); __ IncrementCounter(&Counters::keyed_load_array_length, 1); // Check that the name has not changed. @@ -1390,8 +1696,8 @@ Object* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) { // ----------------------------------- Label miss; - __ mov(eax, (Operand(esp, kPointerSize))); - __ mov(ecx, (Operand(esp, 2 * kPointerSize))); + __ mov(eax, Operand(esp, kPointerSize)); + __ mov(ecx, Operand(esp, 2 * kPointerSize)); __ IncrementCounter(&Counters::keyed_load_string_length, 1); // Check that the name has not changed. @@ -1416,8 +1722,8 @@ Object* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) { // ----------------------------------- Label miss; - __ mov(eax, (Operand(esp, kPointerSize))); - __ mov(ecx, (Operand(esp, 2 * kPointerSize))); + __ mov(eax, Operand(esp, kPointerSize)); + __ mov(ecx, Operand(esp, 2 * kPointerSize)); __ IncrementCounter(&Counters::keyed_load_function_prototype, 1); // Check that the name has not changed. diff --git a/deps/v8/src/ic.cc b/deps/v8/src/ic.cc index 7e82295c89..f4d74c9354 100644 --- a/deps/v8/src/ic.cc +++ b/deps/v8/src/ic.cc @@ -273,28 +273,39 @@ static bool HasInterceptorGetter(JSObject* object) { static void LookupForRead(Object* object, String* name, LookupResult* lookup) { - object->Lookup(name, lookup); - if (lookup->IsNotFound() || lookup->type() != INTERCEPTOR) { - return; - } + AssertNoAllocation no_gc; // pointers must stay valid + + // Skip all the objects with named interceptors, but + // without an actual getter. + while (true) { + object->Lookup(name, lookup); + // Besides normal conditions (property not found or it's not + // an interceptor), bail out if lookup is not cacheable: we won't + // be able to IC it anyway and regular lookup should work fine.
+ if (lookup->IsNotFound() || lookup->type() != INTERCEPTOR || + !lookup->IsCacheable()) { + return; + } - JSObject* holder = lookup->holder(); - if (HasInterceptorGetter(holder)) { - return; - } + JSObject* holder = lookup->holder(); + if (HasInterceptorGetter(holder)) { + return; + } - // There is no getter, just skip it and lookup down the proto chain - holder->LocalLookupRealNamedProperty(name, lookup); - if (lookup->IsValid()) { - return; - } + holder->LocalLookupRealNamedProperty(name, lookup); + if (lookup->IsValid()) { + ASSERT(lookup->type() != INTERCEPTOR); + return; + } - Object* proto = holder->GetPrototype(); - if (proto == Heap::null_value()) { - return; - } + Object* proto = holder->GetPrototype(); + if (proto->IsNull()) { + lookup->NotFound(); + return; + } - LookupForRead(proto, name, lookup); + object = proto; + } } @@ -736,7 +747,7 @@ Object* KeyedLoadIC::Load(State state, set_target(Code::cast(code)); #ifdef DEBUG TraceIC("KeyedLoadIC", name, state, target()); -#endif +#endif // DEBUG return Smi::FromInt(string->length()); } @@ -748,7 +759,7 @@ Object* KeyedLoadIC::Load(State state, set_target(Code::cast(code)); #ifdef DEBUG TraceIC("KeyedLoadIC", name, state, target()); -#endif +#endif // DEBUG return JSArray::cast(*object)->length(); } @@ -761,7 +772,7 @@ Object* KeyedLoadIC::Load(State state, set_target(Code::cast(code)); #ifdef DEBUG TraceIC("KeyedLoadIC", name, state, target()); -#endif +#endif // DEBUG return Accessors::FunctionGetPrototype(*object, 0); } } @@ -787,7 +798,6 @@ Object* KeyedLoadIC::Load(State state, } } - // Update the inline cache. if (FLAG_use_ic && lookup.IsLoaded()) { UpdateCaches(&lookup, state, object, name); } @@ -1221,11 +1231,6 @@ void CallIC::GenerateInitialize(MacroAssembler* masm, int argc) { } -void CallIC::GeneratePreMonomorphic(MacroAssembler* masm, int argc) { - Generate(masm, argc, ExternalReference(IC_Utility(kCallIC_Miss))); -} - - void CallIC::GenerateMiss(MacroAssembler* masm, int argc) { Generate(masm, argc, ExternalReference(IC_Utility(kCallIC_Miss))); } diff --git a/deps/v8/src/ic.h b/deps/v8/src/ic.h index 7d033778de..860b7e60d6 100644 --- a/deps/v8/src/ic.h +++ b/deps/v8/src/ic.h @@ -35,21 +35,24 @@ namespace internal { // IC_UTIL_LIST defines all utility functions called from generated // inline caching code. The argument for the macro, ICU, is the function name. -#define IC_UTIL_LIST(ICU) \ - ICU(LoadIC_Miss) \ - ICU(KeyedLoadIC_Miss) \ - ICU(CallIC_Miss) \ - ICU(StoreIC_Miss) \ - ICU(SharedStoreIC_ExtendStorage) \ - ICU(KeyedStoreIC_Miss) \ - /* Utilities for IC stubs. */ \ - ICU(LoadCallbackProperty) \ - ICU(StoreCallbackProperty) \ - ICU(LoadInterceptorProperty) \ +#define IC_UTIL_LIST(ICU) \ + ICU(LoadIC_Miss) \ + ICU(KeyedLoadIC_Miss) \ + ICU(CallIC_Miss) \ + ICU(StoreIC_Miss) \ + ICU(SharedStoreIC_ExtendStorage) \ + ICU(KeyedStoreIC_Miss) \ + /* Utilities for IC stubs. */ \ + ICU(LoadCallbackProperty) \ + ICU(StoreCallbackProperty) \ + ICU(LoadPropertyWithInterceptorOnly) \ + ICU(LoadPropertyWithInterceptorForLoad) \ + ICU(LoadPropertyWithInterceptorForCall) \ ICU(StoreInterceptorProperty) // -// IC is the base class for LoadIC, StoreIC and CallIC. +// IC is the base class for LoadIC, StoreIC, CallIC, KeyedLoadIC, +// and KeyedStoreIC. // class IC { public: @@ -173,7 +176,6 @@ class CallIC: public IC { // Code generator routines. 
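Returning to the LookupForRead rewrite completed in the ic.cc hunk above: the recursive walk is now a single loop that skips every holder whose named interceptor has no actual getter, looks behind the interceptor in the same holder, and otherwise descends the prototype chain, reporting NotFound at a null prototype. A self-contained sketch of that control flow (all types here are toy stand-ins, not V8's JSObject/LookupResult):

```cpp
#include <cassert>
#include <string>

// Toy stand-ins; only the control flow mirrors the LookupForRead loop above.
struct Obj {
  bool has_interceptor;
  bool interceptor_has_getter;
  bool has_real_property;
  Obj* prototype;
};

enum class Result { kNotFound, kInterceptor, kRealProperty };

// Walk the chain, skipping interceptors without an actual getter, until we
// find a real property, a usable interceptor, or run off the chain.
Result LookupForRead(Obj* object, const std::string& /*name*/) {
  while (object != nullptr) {
    if (object->has_interceptor) {
      if (object->interceptor_has_getter) return Result::kInterceptor;
      // No getter: look behind the interceptor in the same holder...
      if (object->has_real_property) return Result::kRealProperty;
      // ...and otherwise continue down the prototype chain.
      object = object->prototype;
      continue;
    }
    if (object->has_real_property) return Result::kRealProperty;
    object = object->prototype;  // ordinary miss: keep walking
  }
  return Result::kNotFound;      // hit a null prototype: report not-found
}

int main() {
  Obj proto{false, false, true, nullptr};    // real property lives here
  Obj receiver{true, false, false, &proto};  // interceptor, but no getter
  assert(LookupForRead(&receiver, "x") == Result::kRealProperty);
}
```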
static void GenerateInitialize(MacroAssembler* masm, int argc); - static void GeneratePreMonomorphic(MacroAssembler* masm, int argc); static void GenerateMiss(MacroAssembler* masm, int argc); static void GenerateMegamorphic(MacroAssembler* masm, int argc); static void GenerateNormal(MacroAssembler* masm, int argc); @@ -219,8 +221,8 @@ class LoadIC: public IC { static void GenerateFunctionPrototype(MacroAssembler* masm); // The offset from the inlined patch site to the start of the - // inlined load instruction. It is 7 bytes (test eax, imm) plus - // 6 bytes (jne slow_label). + // inlined load instruction. It is architecture-dependent, and not + // used on ARM. static const int kOffsetToLoadInstruction; private: @@ -387,6 +389,10 @@ class KeyedStoreIC: public IC { // Support for patching the map that is checked in an inlined // version of keyed store. + // The address is the patch point for the IC call + // (Assembler::kTargetAddrToReturnAddrDist before the end of + // the call/return address). + // The map is the new map that the inlined code should check against. static bool PatchInlinedStore(Address address, Object* map); friend class IC; diff --git a/deps/v8/src/interpreter-irregexp.cc b/deps/v8/src/interpreter-irregexp.cc index 0a8ae8ccea..ae914d39df 100644 --- a/deps/v8/src/interpreter-irregexp.cc +++ b/deps/v8/src/interpreter-irregexp.cc @@ -51,9 +51,11 @@ static bool BackRefMatchesNoCase(int from, unibrow::uchar old_char = subject[from++]; unibrow::uchar new_char = subject[current++]; if (old_char == new_char) continue; - interp_canonicalize.get(old_char, '\0', &old_char); - interp_canonicalize.get(new_char, '\0', &new_char); - if (old_char != new_char) { + unibrow::uchar old_string[1] = { old_char }; + unibrow::uchar new_string[1] = { new_char }; + interp_canonicalize.get(old_char, '\0', old_string); + interp_canonicalize.get(new_char, '\0', new_string); + if (old_string[0] != new_string[0]) { return false; } } diff --git a/deps/v8/src/jsregexp-inl.h b/deps/v8/src/jsregexp-inl.h deleted file mode 100644 index cc90bd172b..0000000000 --- a/deps/v8/src/jsregexp-inl.h +++ /dev/null @@ -1,260 +0,0 @@ -// Copyright 2008 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#ifndef V8_JSREGEXP_INL_H_ -#define V8_JSREGEXP_INL_H_ - - -#include "jsregexp.h" -#include "regexp-macro-assembler.h" - - -namespace v8 { -namespace internal { - - -template <typename C> -bool ZoneSplayTree<C>::Insert(const Key& key, Locator* locator) { - if (is_empty()) { - // If the tree is empty, insert the new node. - root_ = new Node(key, C::kNoValue); - } else { - // Splay on the key to move the last node on the search path - // for the key to the root of the tree. - Splay(key); - // Ignore repeated insertions with the same key. - int cmp = C::Compare(key, root_->key_); - if (cmp == 0) { - locator->bind(root_); - return false; - } - // Insert the new node. - Node* node = new Node(key, C::kNoValue); - if (cmp > 0) { - node->left_ = root_; - node->right_ = root_->right_; - root_->right_ = NULL; - } else { - node->right_ = root_; - node->left_ = root_->left_; - root_->left_ = NULL; - } - root_ = node; - } - locator->bind(root_); - return true; -} - - -template <typename C> -bool ZoneSplayTree<C>::Find(const Key& key, Locator* locator) { - if (is_empty()) - return false; - Splay(key); - if (C::Compare(key, root_->key_) == 0) { - locator->bind(root_); - return true; - } else { - return false; - } -} - - -template <typename C> -bool ZoneSplayTree<C>::FindGreatestLessThan(const Key& key, - Locator* locator) { - if (is_empty()) - return false; - // Splay on the key to move the node with the given key or the last - // node on the search path to the top of the tree. - Splay(key); - // Now the result is either the root node or the greatest node in - // the left subtree. - int cmp = C::Compare(root_->key_, key); - if (cmp <= 0) { - locator->bind(root_); - return true; - } else { - Node* temp = root_; - root_ = root_->left_; - bool result = FindGreatest(locator); - root_ = temp; - return result; - } -} - - -template <typename C> -bool ZoneSplayTree<C>::FindLeastGreaterThan(const Key& key, - Locator* locator) { - if (is_empty()) - return false; - // Splay on the key to move the node with the given key or the last - // node on the search path to the top of the tree. - Splay(key); - // Now the result is either the root node or the least node in - // the right subtree. 
- int cmp = C::Compare(root_->key_, key); - if (cmp >= 0) { - locator->bind(root_); - return true; - } else { - Node* temp = root_; - root_ = root_->right_; - bool result = FindLeast(locator); - root_ = temp; - return result; - } -} - - -template <typename C> -bool ZoneSplayTree<C>::FindGreatest(Locator* locator) { - if (is_empty()) - return false; - Node* current = root_; - while (current->right_ != NULL) - current = current->right_; - locator->bind(current); - return true; -} - - -template <typename C> -bool ZoneSplayTree<C>::FindLeast(Locator* locator) { - if (is_empty()) - return false; - Node* current = root_; - while (current->left_ != NULL) - current = current->left_; - locator->bind(current); - return true; -} - - -template <typename C> -bool ZoneSplayTree<C>::Remove(const Key& key) { - // Bail if the tree is empty - if (is_empty()) - return false; - // Splay on the key to move the node with the given key to the top. - Splay(key); - // Bail if the key is not in the tree - if (C::Compare(key, root_->key_) != 0) - return false; - if (root_->left_ == NULL) { - // No left child, so the new tree is just the right child. - root_ = root_->right_; - } else { - // Left child exists. - Node* right = root_->right_; - // Make the original left child the new root. - root_ = root_->left_; - // Splay to make sure that the new root has an empty right child. - Splay(key); - // Insert the original right child as the right child of the new - // root. - root_->right_ = right; - } - return true; -} - - -template <typename C> -void ZoneSplayTree<C>::Splay(const Key& key) { - if (is_empty()) - return; - Node dummy_node(C::kNoKey, C::kNoValue); - // Create a dummy node. The use of the dummy node is a bit - // counter-intuitive: The right child of the dummy node will hold - // the L tree of the algorithm. The left child of the dummy node - // will hold the R tree of the algorithm. Using a dummy node, left - // and right will always be nodes and we avoid special cases. - Node* dummy = &dummy_node; - Node* left = dummy; - Node* right = dummy; - Node* current = root_; - while (true) { - int cmp = C::Compare(key, current->key_); - if (cmp < 0) { - if (current->left_ == NULL) - break; - if (C::Compare(key, current->left_->key_) < 0) { - // Rotate right. - Node* temp = current->left_; - current->left_ = temp->right_; - temp->right_ = current; - current = temp; - if (current->left_ == NULL) - break; - } - // Link right. - right->left_ = current; - right = current; - current = current->left_; - } else if (cmp > 0) { - if (current->right_ == NULL) - break; - if (C::Compare(key, current->right_->key_) > 0) { - // Rotate left. - Node* temp = current->right_; - current->right_ = temp->left_; - temp->left_ = current; - current = temp; - if (current->right_ == NULL) - break; - } - // Link left. - left->right_ = current; - left = current; - current = current->right_; - } else { - break; - } - } - // Assemble. 
- left->right_ = current->left_; - right->left_ = current->right_; - current->left_ = dummy->right_; - current->right_ = dummy->left_; - root_ = current; -} - - -template <typename Node, class Callback> -static void DoForEach(Node* node, Callback* callback) { - if (node == NULL) return; - DoForEach<Node, Callback>(node->left(), callback); - callback->Call(node->key(), node->value()); - DoForEach<Node, Callback>(node->right(), callback); -} - - -}} // namespace v8::internal - - -#endif // V8_JSREGEXP_INL_H_ diff --git a/deps/v8/src/jsregexp.cc b/deps/v8/src/jsregexp.cc index 852d431be0..bd511024bb 100644 --- a/deps/v8/src/jsregexp.cc +++ b/deps/v8/src/jsregexp.cc @@ -31,7 +31,7 @@ #include "compiler.h" #include "execution.h" #include "factory.h" -#include "jsregexp-inl.h" +#include "jsregexp.h" #include "platform.h" #include "runtime.h" #include "top.h" @@ -254,7 +254,7 @@ Handle<Object> RegExpImpl::AtomExec(Handle<JSRegExp> re, { NoHandleAllocation no_handles; - FixedArray* array = last_match_info->elements(); + FixedArray* array = FixedArray::cast(last_match_info->elements()); SetAtomLastCapture(array, *subject, value, value + needle->length()); } return last_match_info; @@ -442,7 +442,7 @@ Handle<Object> RegExpImpl::IrregexpExec(Handle<JSRegExp> jsregexp, if (res != RegExpMacroAssemblerIA32::SUCCESS) return Factory::null_value(); - array = Handle<FixedArray>(last_match_info->elements()); + array = Handle<FixedArray>(FixedArray::cast(last_match_info->elements())); ASSERT(array->length() >= number_of_capture_registers + kLastMatchOverhead); // The captures come in (start, end+1) pairs. for (int i = 0; i < number_of_capture_registers; i += 2) { @@ -475,7 +475,7 @@ Handle<Object> RegExpImpl::IrregexpExec(Handle<JSRegExp> jsregexp, return Factory::null_value(); } - array = Handle<FixedArray>(last_match_info->elements()); + array = Handle<FixedArray>(FixedArray::cast(last_match_info->elements())); ASSERT(array->length() >= number_of_capture_registers + kLastMatchOverhead); // The captures come in (start, end+1) pairs. for (int i = 0; i < number_of_capture_registers; i += 2) { diff --git a/deps/v8/src/jsregexp.h b/deps/v8/src/jsregexp.h index 0e7965c2a2..3bc30b6a8b 100644 --- a/deps/v8/src/jsregexp.h +++ b/deps/v8/src/jsregexp.h @@ -214,108 +214,6 @@ class CharacterRange { }; -template <typename Node, class Callback> -static void DoForEach(Node* node, Callback* callback); - - -// A zone splay tree. The config type parameter encapsulates the -// different configurations of a concrete splay tree: -// -// typedef Key: the key type -// typedef Value: the value type -// static const kNoKey: the dummy key used when no key is set -// static const kNoValue: the dummy value used to initialize nodes -// int (Compare)(Key& a, Key& b) -> {-1, 0, 1}: comparison function -// -template <typename Config> -class ZoneSplayTree : public ZoneObject { - public: - typedef typename Config::Key Key; - typedef typename Config::Value Value; - - class Locator; - - ZoneSplayTree() : root_(NULL) { } - - // Inserts the given key in this tree with the given value. Returns - // true if a node was inserted, otherwise false. If found the locator - // is enabled and provides access to the mapping for the key. - bool Insert(const Key& key, Locator* locator); - - // Looks up the key in this tree and returns true if it was found, - // otherwise false. If the node is found the locator is enabled and - // provides access to the mapping for the key. 
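The ZoneSplayTree implementation deleted above (its declaration is removed from jsregexp.h below) used top-down splaying with the dummy-node trick its comments describe: dummy->right accumulates the "L" tree and dummy->left the "R" tree, so left and right are always real nodes and no special cases arise. A distilled, int-keyed version of that core routine, independent of V8's zone allocation:

```cpp
#include <cassert>

// Minimal int-keyed node; the deleted template used Config::Key/Value.
struct Node {
  int key;
  Node* left;
  Node* right;
};

// Top-down splay following the dummy-node scheme from the deleted comments.
Node* Splay(Node* root, int key) {
  if (root == nullptr) return nullptr;
  Node dummy{0, nullptr, nullptr};
  Node* left = &dummy;   // tail of the L tree (keys < key)
  Node* right = &dummy;  // tail of the R tree (keys > key)
  Node* current = root;
  for (;;) {
    if (key < current->key) {
      if (current->left == nullptr) break;
      if (key < current->left->key) {      // rotate right
        Node* temp = current->left;
        current->left = temp->right;
        temp->right = current;
        current = temp;
        if (current->left == nullptr) break;
      }
      right->left = current;               // link right
      right = current;
      current = current->left;
    } else if (key > current->key) {
      if (current->right == nullptr) break;
      if (key > current->right->key) {     // rotate left
        Node* temp = current->right;
        current->right = temp->left;
        temp->left = current;
        current = temp;
        if (current->right == nullptr) break;
      }
      left->right = current;               // link left
      left = current;
      current = current->right;
    } else {
      break;                               // found the key
    }
  }
  left->right = current->left;             // assemble
  right->left = current->right;
  current->left = dummy.right;
  current->right = dummy.left;
  return current;
}

int main() {
  Node a{1, nullptr, nullptr}, b{2, nullptr, nullptr}, c{3, nullptr, nullptr};
  b.left = &a; b.right = &c;               // balanced: 2 -> (1, 3)
  Node* root = Splay(&b, 3);
  assert(root->key == 3 && root->left->key == 2);
}
```

After the call, the node with the key (or the last node on the search path) is the root, which is what makes repeated lookups of hot keys cheap.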
- bool Find(const Key& key, Locator* locator); - - // Finds the mapping with the greatest key less than or equal to the - // given key. - bool FindGreatestLessThan(const Key& key, Locator* locator); - - // Find the mapping with the greatest key in this tree. - bool FindGreatest(Locator* locator); - - // Finds the mapping with the least key greater than or equal to the - // given key. - bool FindLeastGreaterThan(const Key& key, Locator* locator); - - // Find the mapping with the least key in this tree. - bool FindLeast(Locator* locator); - - // Remove the node with the given key from the tree. - bool Remove(const Key& key); - - bool is_empty() { return root_ == NULL; } - - // Perform the splay operation for the given key. Moves the node with - // the given key to the top of the tree. If no node has the given - // key, the last node on the search path is moved to the top of the - // tree. - void Splay(const Key& key); - - class Node : public ZoneObject { - public: - Node(const Key& key, const Value& value) - : key_(key), - value_(value), - left_(NULL), - right_(NULL) { } - Key key() { return key_; } - Value value() { return value_; } - Node* left() { return left_; } - Node* right() { return right_; } - private: - friend class ZoneSplayTree; - friend class Locator; - Key key_; - Value value_; - Node* left_; - Node* right_; - }; - - // A locator provides access to a node in the tree without actually - // exposing the node. - class Locator { - public: - explicit Locator(Node* node) : node_(node) { } - Locator() : node_(NULL) { } - const Key& key() { return node_->key_; } - Value& value() { return node_->value_; } - void set_value(const Value& value) { node_->value_ = value; } - inline void bind(Node* node) { node_ = node; } - private: - Node* node_; - }; - - template <class Callback> - void ForEach(Callback* c) { - DoForEach<typename ZoneSplayTree<Config>::Node, Callback>(root_, c); - } - - private: - Node* root_; -}; - - // A set of unsigned integers that behaves especially well on small // integers (< 32). May do zone-allocation. class OutSet: public ZoneObject { diff --git a/deps/v8/src/log.cc b/deps/v8/src/log.cc index 2ca89dd468..0c1b76d7f4 100644 --- a/deps/v8/src/log.cc +++ b/deps/v8/src/log.cc @@ -843,7 +843,22 @@ void Logger::HeapSampleBeginEvent(const char* space, const char* kind) { #ifdef ENABLE_LOGGING_AND_PROFILING if (!Log::IsEnabled() || !FLAG_log_gc) return; LogMessageBuilder msg; - msg.Append("heap-sample-begin,\"%s\",\"%s\"\n", space, kind); + // Using non-relative system time in order to be able to synchronize with + // external memory profiling events (e.g. DOM memory size). + msg.Append("heap-sample-begin,\"%s\",\"%s\",%.0f\n", + space, kind, OS::TimeCurrentMillis()); + msg.WriteToLogFile(); +#endif +} + + +void Logger::HeapSampleStats(const char* space, const char* kind, + int capacity, int used) { +#ifdef ENABLE_LOGGING_AND_PROFILING + if (!Log::IsEnabled() || !FLAG_log_gc) return; + LogMessageBuilder msg; + msg.Append("heap-sample-stats,\"%s\",\"%s\",%d,%d\n", + space, kind, capacity, used); msg.WriteToLogFile(); #endif } @@ -869,6 +884,21 @@ void Logger::HeapSampleItemEvent(const char* type, int number, int bytes) { } +void Logger::HeapSampleJSConstructorEvent(const char* constructor, + int number, int bytes) { +#ifdef ENABLE_LOGGING_AND_PROFILING + if (!Log::IsEnabled() || !FLAG_log_gc) return; + LogMessageBuilder msg; + msg.Append("heap-js-cons-item,%s,%d,%d\n", + constructor != NULL ? + (constructor[0] != '\0' ? 
constructor : "(anonymous)") : + "(no_constructor)", + number, bytes); + msg.WriteToLogFile(); +#endif +} + + void Logger::DebugTag(const char* call_site_tag) { #ifdef ENABLE_LOGGING_AND_PROFILING if (!Log::IsEnabled() || !FLAG_log) return; diff --git a/deps/v8/src/log.h b/deps/v8/src/log.h index f68234f1e6..1692e77d21 100644 --- a/deps/v8/src/log.h +++ b/deps/v8/src/log.h @@ -219,6 +219,10 @@ class Logger { static void HeapSampleBeginEvent(const char* space, const char* kind); static void HeapSampleEndEvent(const char* space, const char* kind); static void HeapSampleItemEvent(const char* type, int number, int bytes); + static void HeapSampleJSConstructorEvent(const char* constructor, + int number, int bytes); + static void HeapSampleStats(const char* space, const char* kind, + int capacity, int used); static void SharedLibraryEvent(const char* library_path, uintptr_t start, diff --git a/deps/v8/src/messages.js b/deps/v8/src/messages.js index 870c969f98..fd505ff67f 100644 --- a/deps/v8/src/messages.js +++ b/deps/v8/src/messages.js @@ -561,20 +561,24 @@ function GetStackTraceLine(recv, fun, pos, isGlobal) { var kAddMessageAccessorsMarker = { }; // Defines accessors for a property that is calculated the first time -// the property is read and then replaces the accessor with the value. -// Also, setting the property causes the accessors to be deleted. +// the property is read. function DefineOneShotAccessor(obj, name, fun) { // Note that the accessors consistently operate on 'obj', not 'this'. // Since the object may occur in someone else's prototype chain we // can't rely on 'this' being the same as 'obj'. + var hasBeenSet = false; + var value; obj.__defineGetter__(name, function () { - var value = fun(obj); - obj[name] = value; + if (hasBeenSet) { + return value; + } + hasBeenSet = true; + value = fun(obj); return value; }); obj.__defineSetter__(name, function (v) { - delete obj[name]; - obj[name] = v; + hasBeenSet = true; + value = v; }); } @@ -833,22 +837,25 @@ function DefineError(f) { } else if (!IS_UNDEFINED(m)) { this.message = ToString(m); } - var stackTraceLimit = $Error.stackTraceLimit; - if (stackTraceLimit) { - // Cap the limit to avoid extremely big traces - if (stackTraceLimit < 0 || stackTraceLimit > 10000) - stackTraceLimit = 10000; - var raw_stack = %CollectStackTrace(f, stackTraceLimit); - DefineOneShotAccessor(this, 'stack', function (obj) { - return FormatRawStackTrace(obj, raw_stack); - }); - } + captureStackTrace(this, f); } else { return new f(m); } }); } +function captureStackTrace(obj, cons_opt) { + var stackTraceLimit = $Error.stackTraceLimit; + if (!stackTraceLimit) return; + if (stackTraceLimit < 0 || stackTraceLimit > 10000) + stackTraceLimit = 10000; + var raw_stack = %CollectStackTrace(cons_opt ? cons_opt : captureStackTrace, + stackTraceLimit); + DefineOneShotAccessor(obj, 'stack', function (obj) { + return FormatRawStackTrace(obj, raw_stack); + }); +}; + $Math.__proto__ = global.Object.prototype; DefineError(function Error() { }); @@ -859,6 +866,8 @@ DefineError(function ReferenceError() { }); DefineError(function EvalError() { }); DefineError(function URIError() { }); +$Error.captureStackTrace = captureStackTrace; + // Setup extra properties of the Error.prototype object. 
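The DefineOneShotAccessor rewrite above drops the delete-and-redefine trick in favor of plain closure state: a hasBeenSet flag plus a cached value, with the setter simply overriding the lazy computation. The same memoization shape in C++, as a hedged analogue of the pattern rather than the messages.js code itself:

```cpp
#include <cassert>
#include <functional>

// A property whose value is computed on first read, and whose setter simply
// overrides the cached value, mirroring the hasBeenSet/value closure pair.
template <typename T>
class OneShot {
 public:
  explicit OneShot(std::function<T()> compute)
      : compute_(std::move(compute)), has_been_set_(false) {}

  const T& get() {
    if (!has_been_set_) {   // first read runs the (possibly expensive)
      value_ = compute_();  // computation exactly once
      has_been_set_ = true;
    }
    return value_;
  }

  void set(T v) {           // a write wins over the lazy computation
    value_ = std::move(v);
    has_been_set_ = true;
  }

 private:
  std::function<T()> compute_;
  bool has_been_set_;
  T value_{};
};

int main() {
  int calls = 0;
  OneShot<int> stack([&] { ++calls; return 42; });
  assert(stack.get() == 42 && stack.get() == 42 && calls == 1);
  stack.set(7);             // like assigning to error.stack
  assert(stack.get() == 7 && calls == 1);
}
```

This is the behavior the 'stack' property defined via captureStackTrace above relies on: formatting the raw trace is deferred until the first read, and an explicit assignment still sticks.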
$Error.prototype.message = ''; diff --git a/deps/v8/src/objects-debug.cc b/deps/v8/src/objects-debug.cc index d54f74183b..40001f9619 100644 --- a/deps/v8/src/objects-debug.cc +++ b/deps/v8/src/objects-debug.cc @@ -115,6 +115,9 @@ void HeapObject::HeapObjectPrint() { case BYTE_ARRAY_TYPE: ByteArray::cast(this)->ByteArrayPrint(); break; + case PIXEL_ARRAY_TYPE: + PixelArray::cast(this)->PixelArrayPrint(); + break; case FILLER_TYPE: PrintF("filler"); break; @@ -191,6 +194,9 @@ void HeapObject::HeapObjectVerify() { case BYTE_ARRAY_TYPE: ByteArray::cast(this)->ByteArrayVerify(); break; + case PIXEL_ARRAY_TYPE: + PixelArray::cast(this)->PixelArrayVerify(); + break; case CODE_TYPE: Code::cast(this)->CodeVerify(); break; @@ -264,11 +270,21 @@ void ByteArray::ByteArrayPrint() { } +void PixelArray::PixelArrayPrint() { + PrintF("pixel array"); +} + + void ByteArray::ByteArrayVerify() { ASSERT(IsByteArray()); } +void PixelArray::PixelArrayVerify() { + ASSERT(IsPixelArray()); +} + + void JSObject::PrintProperties() { if (HasFastProperties()) { DescriptorArray* descs = map()->instance_descriptors(); @@ -312,15 +328,30 @@ void JSObject::PrintProperties() { void JSObject::PrintElements() { - if (HasFastElements()) { - FixedArray* p = FixedArray::cast(elements()); - for (int i = 0; i < p->length(); i++) { - PrintF(" %d: ", i); - p->get(i)->ShortPrint(); - PrintF("\n"); + switch (GetElementsKind()) { + case FAST_ELEMENTS: { + // Print in array notation for non-sparse arrays. + FixedArray* p = FixedArray::cast(elements()); + for (int i = 0; i < p->length(); i++) { + PrintF(" %d: ", i); + p->get(i)->ShortPrint(); + PrintF("\n"); + } + break; } - } else { - elements()->Print(); + case PIXEL_ELEMENTS: { + PixelArray* p = PixelArray::cast(elements()); + for (int i = 0; i < p->length(); i++) { + PrintF(" %d: %d\n", i, p->get(i)); + } + break; + } + case DICTIONARY_ELEMENTS: + elements()->Print(); + break; + default: + UNREACHABLE(); + break; } } @@ -402,6 +433,7 @@ static const char* TypeToString(InstanceType type) { case LONG_EXTERNAL_STRING_TYPE: return "EXTERNAL_STRING"; case FIXED_ARRAY_TYPE: return "FIXED_ARRAY"; case BYTE_ARRAY_TYPE: return "BYTE_ARRAY"; + case PIXEL_ARRAY_TYPE: return "PIXEL_ARRAY"; case FILLER_TYPE: return "FILLER"; case JS_OBJECT_TYPE: return "JS_OBJECT"; case JS_CONTEXT_EXTENSION_OBJECT_TYPE: return "JS_CONTEXT_EXTENSION_OBJECT"; @@ -666,7 +698,7 @@ void Oddball::OddballVerify() { } else { ASSERT(number->IsSmi()); int value = Smi::cast(number)->value(); - ASSERT(value == 0 || value == 1 || value == -1); + ASSERT(value == 0 || value == 1 || value == -1 || value == -2); } } @@ -958,6 +990,7 @@ void Script::ScriptPrint() { } +#ifdef ENABLE_DEBUGGER_SUPPORT void DebugInfo::DebugInfoVerify() { CHECK(IsDebugInfo()); VerifyPointer(shared()); @@ -997,6 +1030,7 @@ void BreakPointInfo::BreakPointInfoPrint() { PrintF("\n - break_point_objects: "); break_point_objects()->ShortPrint(); } +#endif void JSObject::IncrementSpillStatistics(SpillInformation* info) { @@ -1013,21 +1047,35 @@ void JSObject::IncrementSpillStatistics(SpillInformation* info) { dict->Capacity() - dict->NumberOfElements(); } // Indexed properties - if (HasFastElements()) { - info->number_of_objects_with_fast_elements_++; - int holes = 0; - FixedArray* e = FixedArray::cast(elements()); - int len = e->length(); - for (int i = 0; i < len; i++) { - if (e->get(i) == Heap::the_hole_value()) holes++; + switch (GetElementsKind()) { + case FAST_ELEMENTS: { + info->number_of_objects_with_fast_elements_++; + int holes = 0; + FixedArray* e = 
FixedArray::cast(elements()); + int len = e->length(); + for (int i = 0; i < len; i++) { + if (e->get(i) == Heap::the_hole_value()) holes++; + } + info->number_of_fast_used_elements_ += len - holes; + info->number_of_fast_unused_elements_ += holes; + break; } - info->number_of_fast_used_elements_ += len - holes; - info->number_of_fast_unused_elements_ += holes; - } else { - NumberDictionary* dict = element_dictionary(); - info->number_of_slow_used_elements_ += dict->NumberOfElements(); - info->number_of_slow_unused_elements_ += - dict->Capacity() - dict->NumberOfElements(); + case PIXEL_ELEMENTS: { + info->number_of_objects_with_fast_elements_++; + PixelArray* e = PixelArray::cast(elements()); + info->number_of_fast_used_elements_ += e->length(); + break; + } + case DICTIONARY_ELEMENTS: { + NumberDictionary* dict = element_dictionary(); + info->number_of_slow_used_elements_ += dict->NumberOfElements(); + info->number_of_slow_unused_elements_ += + dict->Capacity() - dict->NumberOfElements(); + break; + } + default: + UNREACHABLE(); + break; } } diff --git a/deps/v8/src/objects-inl.h b/deps/v8/src/objects-inl.h index 37c9b8b61d..c7f791cab4 100644 --- a/deps/v8/src/objects-inl.h +++ b/deps/v8/src/objects-inl.h @@ -321,6 +321,12 @@ bool Object::IsByteArray() { } +bool Object::IsPixelArray() { + return Object::IsHeapObject() && + HeapObject::cast(this)->map()->instance_type() == PIXEL_ARRAY_TYPE; +} + + bool Object::IsFailure() { return HAS_FAILURE_TAG(this); } @@ -1043,7 +1049,22 @@ void HeapNumber::set_value(double value) { ACCESSORS(JSObject, properties, FixedArray, kPropertiesOffset) -ACCESSORS(JSObject, elements, FixedArray, kElementsOffset) + + +Array* JSObject::elements() { + Object* array = READ_FIELD(this, kElementsOffset); + // In the assert below Dictionary is covered under FixedArray. + ASSERT(array->IsFixedArray() || array->IsPixelArray()); + return reinterpret_cast<Array*>(array); +} + + +void JSObject::set_elements(Array* value, WriteBarrierMode mode) { + // In the assert below Dictionary is covered under FixedArray. + ASSERT(value->IsFixedArray() || value->IsPixelArray()); + WRITE_FIELD(this, kElementsOffset, value); + CONDITIONAL_WRITE_BARRIER(this, kElementsOffset, mode); +} void JSObject::initialize_properties() { @@ -1075,7 +1096,12 @@ void JSGlobalPropertyCell::set_value(Object* val, WriteBarrierMode ignored) { int JSObject::GetHeaderSize() { - switch (map()->instance_type()) { + InstanceType type = map()->instance_type(); + // Check for the most common kind of JavaScript object before + // falling into the generic switch. This speeds up the internal + // field operations considerably on average. 
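IsPixelArray and the retyped elements()/set_elements accessors above admit a second kind of element store besides FixedArray: a pixel array whose bytes live outside the managed heap (its external_pointer/get/set accessors follow in the next hunk). A minimal model of such an externally backed byte array (class and field names are illustrative, not V8's object layout):

```cpp
#include <cassert>
#include <cstdint>

// A fixed-length view over caller-owned bytes, like PixelArray's
// external_pointer()/length() pair. The view does not own the storage.
class ExternalByteArray {
 public:
  ExternalByteArray(uint8_t* external_pointer, int length)
      : external_pointer_(external_pointer), length_(length) {}

  int length() const { return length_; }

  uint8_t get(int index) const {
    assert(index >= 0 && index < length_);  // same guard as PixelArray::get
    return external_pointer_[index];
  }

  void set(int index, uint8_t value) {
    assert(index >= 0 && index < length_);
    external_pointer_[index] = value;       // writes land in external memory
  }

 private:
  uint8_t* external_pointer_;  // lives outside the managed heap
  int length_;
};

int main() {
  uint8_t pixels[4] = {0, 0, 0, 0};  // e.g. one RGBA pixel owned elsewhere
  ExternalByteArray view(pixels, 4);
  view.set(2, 200);
  assert(view.get(2) == 200 && pixels[2] == 200);
}
```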
+ if (type == JS_OBJECT_TYPE) return JSObject::kHeaderSize; + switch (type) { case JS_GLOBAL_PROXY_TYPE: return JSGlobalProxy::kSize; case JS_GLOBAL_OBJECT_TYPE: @@ -1090,7 +1116,6 @@ int JSObject::GetHeaderSize() { return JSValue::kSize; case JS_REGEXP_TYPE: return JSValue::kSize; - case JS_OBJECT_TYPE: case JS_CONTEXT_EXTENSION_OBJECT_TYPE: return JSObject::kHeaderSize; default: @@ -1498,6 +1523,7 @@ CAST_ACCESSOR(JSArray) CAST_ACCESSOR(JSRegExp) CAST_ACCESSOR(Proxy) CAST_ACCESSOR(ByteArray) +CAST_ACCESSOR(PixelArray) CAST_ACCESSOR(Struct) @@ -1856,6 +1882,32 @@ Address ByteArray::GetDataStartAddress() { } +uint8_t* PixelArray::external_pointer() { + intptr_t ptr = READ_INTPTR_FIELD(this, kExternalPointerOffset); + return reinterpret_cast<uint8_t*>(ptr); +} + + +void PixelArray::set_external_pointer(uint8_t* value, WriteBarrierMode mode) { + intptr_t ptr = reinterpret_cast<intptr_t>(value); + WRITE_INTPTR_FIELD(this, kExternalPointerOffset, ptr); +} + + +uint8_t PixelArray::get(int index) { + ASSERT((index >= 0) && (index < this->length())); + uint8_t* ptr = external_pointer(); + return ptr[index]; +} + + +void PixelArray::set(int index, uint8_t value) { + ASSERT((index >= 0) && (index < this->length())); + uint8_t* ptr = external_pointer(); + ptr[index] = value; +} + + int Map::instance_size() { return READ_BYTE_FIELD(this, kInstanceSizeOffset) << kPointerSizeLog2; } @@ -2289,6 +2341,11 @@ bool JSFunction::IsBoilerplate() { } +bool JSFunction::IsBuiltin() { + return context()->global()->IsJSBuiltinsObject(); +} + + bool JSObject::IsLoaded() { return !map()->needs_loading(); } @@ -2519,8 +2576,33 @@ void JSRegExp::SetDataAt(int index, Object* value) { } +JSObject::ElementsKind JSObject::GetElementsKind() { + Array* array = elements(); + if (array->IsFixedArray()) { + // FAST_ELEMENTS or DICTIONARY_ELEMENTS are both stored in a FixedArray. + if (array->map() == Heap::fixed_array_map()) { + return FAST_ELEMENTS; + } + ASSERT(array->IsDictionary()); + return DICTIONARY_ELEMENTS; + } + ASSERT(array->IsPixelArray()); + return PIXEL_ELEMENTS; +} + + bool JSObject::HasFastElements() { - return !elements()->IsDictionary(); + return GetElementsKind() == FAST_ELEMENTS; +} + + +bool JSObject::HasDictionaryElements() { + return GetElementsKind() == DICTIONARY_ELEMENTS; +} + + +bool JSObject::HasPixelElements() { + return GetElementsKind() == PIXEL_ELEMENTS; } @@ -2541,7 +2623,7 @@ StringDictionary* JSObject::property_dictionary() { NumberDictionary* JSObject::element_dictionary() { - ASSERT(!HasFastElements()); + ASSERT(HasDictionaryElements()); return NumberDictionary::cast(elements()); } @@ -2647,24 +2729,6 @@ bool JSObject::HasElement(uint32_t index) { } -Smi* JSObject::InterceptorPropertyLookupHint(String* name) { - // TODO(antonm): Do we want to do any shortcuts for global object? 
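The GetElementsKind added above classifies the backing store by inspecting it: a FixedArray carrying the plain fixed-array map means fast elements, a dictionary-mapped FixedArray means slow elements, and anything else must be a PixelArray. The Has*Elements predicates and the many switches in objects.cc below all key off that enum. A toy version of the classification (booleans stand in for V8's map checks):

```cpp
#include <cassert>

enum class ElementsKind { kFast, kDictionary, kPixel };

// Stand-in backing store: V8 inspects the store's map; here we carry the
// discriminating facts as plain booleans.
struct Backing {
  bool is_fixed_array;
  bool has_fixed_array_map;  // plain FixedArray map => dense fast elements
};

ElementsKind GetElementsKind(const Backing& b) {
  if (b.is_fixed_array) {
    // FAST and DICTIONARY elements are both stored in a FixedArray; only
    // the map distinguishes them.
    return b.has_fixed_array_map ? ElementsKind::kFast
                                 : ElementsKind::kDictionary;
  }
  return ElementsKind::kPixel;  // the only non-FixedArray store here
}

int main() {
  assert(GetElementsKind({true, true}) == ElementsKind::kFast);
  assert(GetElementsKind({true, false}) == ElementsKind::kDictionary);
  assert(GetElementsKind({false, false}) == ElementsKind::kPixel);
}
```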
- if (HasFastProperties()) { - LookupResult lookup; - LocalLookupRealNamedProperty(name, &lookup); - if (lookup.IsValid()) { - if (lookup.type() == FIELD && lookup.IsCacheable()) { - return Smi::FromInt(lookup.GetFieldIndex()); - } - } else { - return Smi::FromInt(kLookupInPrototype); - } - } - - return Smi::FromInt(kLookupInHolder); -} - - bool AccessorInfo::all_can_read() { return BooleanBit::get(flag(), kAllCanReadBit); } diff --git a/deps/v8/src/objects.cc b/deps/v8/src/objects.cc index a9004c924b..b3b290e9f3 100644 --- a/deps/v8/src/objects.cc +++ b/deps/v8/src/objects.cc @@ -216,6 +216,12 @@ Object* Object::GetPropertyWithDefinedGetter(Object* receiver, HandleScope scope; Handle<JSFunction> fun(JSFunction::cast(getter)); Handle<Object> self(receiver); +#ifdef ENABLE_DEBUGGER_SUPPORT + // Handle stepping into a getter if step into is active. + if (Debug::StepInActive()) { + Debug::HandleStepIn(fun, Handle<Object>::null(), 0, false); + } +#endif bool has_pending_exception; Handle<Object> result = Execution::Call(fun, self, 0, NULL, &has_pending_exception); @@ -1000,6 +1006,9 @@ void HeapObject::HeapObjectShortPrint(StringStream* accumulator) { case BYTE_ARRAY_TYPE: accumulator->Add("<ByteArray[%u]>", ByteArray::cast(this)->length()); break; + case PIXEL_ARRAY_TYPE: + accumulator->Add("<PixelArray[%u]>", PixelArray::cast(this)->length()); + break; case SHARED_FUNCTION_INFO_TYPE: accumulator->Add("<SharedFunctionInfo>"); break; @@ -1141,6 +1150,7 @@ void HeapObject::IterateBody(InstanceType type, int object_size, case HEAP_NUMBER_TYPE: case FILLER_TYPE: case BYTE_ARRAY_TYPE: + case PIXEL_ARRAY_TYPE: break; case SHARED_FUNCTION_INFO_TYPE: { SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(this); @@ -1234,7 +1244,7 @@ Object* JSObject::AddFastProperty(String* name, // hidden symbols) and is not a real identifier. StringInputBuffer buffer(name); if (!Scanner::IsIdentifier(&buffer) && name != Heap::hidden_symbol()) { - Object* obj = NormalizeProperties(CLEAR_INOBJECT_PROPERTIES); + Object* obj = NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0); if (obj->IsFailure()) return obj; return AddSlowProperty(name, value, attributes); } @@ -1272,7 +1282,7 @@ Object* JSObject::AddFastProperty(String* name, if (map()->unused_property_fields() == 0) { if (properties()->length() > kMaxFastProperties) { - Object* obj = NormalizeProperties(CLEAR_INOBJECT_PROPERTIES); + Object* obj = NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0); if (obj->IsFailure()) return obj; return AddSlowProperty(name, value, attributes); } @@ -1393,7 +1403,7 @@ Object* JSObject::AddProperty(String* name, } else { // Normalize the object to prevent very large instance descriptors. // This eliminates unwanted N^2 allocation and lookup behavior. 
- Object* obj = NormalizeProperties(CLEAR_INOBJECT_PROPERTIES); + Object* obj = NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0); if (obj->IsFailure()) return obj; } } @@ -1463,7 +1473,7 @@ Object* JSObject::ConvertDescriptorToField(String* name, PropertyAttributes attributes) { if (map()->unused_property_fields() == 0 && properties()->length() > kMaxFastProperties) { - Object* obj = NormalizeProperties(CLEAR_INOBJECT_PROPERTIES); + Object* obj = NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0); if (obj->IsFailure()) return obj; return ReplaceSlowProperty(name, new_value, attributes); } @@ -1624,6 +1634,12 @@ Object* JSObject::SetPropertyWithDefinedSetter(JSFunction* setter, Handle<Object> value_handle(value); Handle<JSFunction> fun(JSFunction::cast(setter)); Handle<JSObject> self(this); +#ifdef ENABLE_DEBUGGER_SUPPORT + // Handle stepping into a setter if step into is active. + if (Debug::StepInActive()) { + Debug::HandleStepIn(fun, Handle<Object>::null(), 0, false); + } +#endif bool has_pending_exception; Object** argv[] = { value_handle.location() }; Execution::Call(fun, self, 1, argv, &has_pending_exception); @@ -1657,7 +1673,9 @@ Object* JSObject::LookupCallbackSetterInPrototypes(uint32_t index) { for (Object* pt = GetPrototype(); pt != Heap::null_value(); pt = pt->GetPrototype()) { - if (JSObject::cast(pt)->HasFastElements()) continue; + if (!JSObject::cast(pt)->HasDictionaryElements()) { + continue; + } NumberDictionary* dictionary = JSObject::cast(pt)->element_dictionary(); int entry = dictionary->FindEntry(index); if (entry != NumberDictionary::kNotFound) { @@ -2106,12 +2124,22 @@ PropertyAttributes JSObject::GetLocalPropertyAttribute(String* name) { } -Object* JSObject::NormalizeProperties(PropertyNormalizationMode mode) { +Object* JSObject::NormalizeProperties(PropertyNormalizationMode mode, + int expected_additional_properties) { if (!HasFastProperties()) return this; - // Allocate new content + // The global object is always normalized. + ASSERT(!IsGlobalObject()); + + // Allocate new content. + int property_count = map()->NumberOfDescribedProperties(); + if (expected_additional_properties > 0) { + property_count += expected_additional_properties; + } else { + property_count += 2; // Make space for two more properties. 
+ } Object* obj = - StringDictionary::Allocate(map()->NumberOfDescribedProperties() * 2 + 4); + StringDictionary::Allocate(property_count * 2); if (obj->IsFailure()) return obj; StringDictionary* dictionary = StringDictionary::cast(obj); @@ -2123,10 +2151,6 @@ Object* JSObject::NormalizeProperties(PropertyNormalizationMode mode) { PropertyDetails d = PropertyDetails(details.attributes(), NORMAL, details.index()); Object* value = descs->GetConstantFunction(i); - if (IsGlobalObject()) { - value = Heap::AllocateJSGlobalPropertyCell(value); - if (value->IsFailure()) return value; - } Object* result = dictionary->Add(descs->GetKey(i), value, d); if (result->IsFailure()) return result; dictionary = StringDictionary::cast(result); @@ -2136,10 +2160,6 @@ Object* JSObject::NormalizeProperties(PropertyNormalizationMode mode) { PropertyDetails d = PropertyDetails(details.attributes(), NORMAL, details.index()); Object* value = FastPropertyAt(descs->GetFieldIndex(i)); - if (IsGlobalObject()) { - value = Heap::AllocateJSGlobalPropertyCell(value); - if (value->IsFailure()) return value; - } Object* result = dictionary->Add(descs->GetKey(i), value, d); if (result->IsFailure()) return result; dictionary = StringDictionary::cast(result); @@ -2149,10 +2169,6 @@ Object* JSObject::NormalizeProperties(PropertyNormalizationMode mode) { PropertyDetails d = PropertyDetails(details.attributes(), CALLBACKS, details.index()); Object* value = descs->GetCallbacksObject(i); - if (IsGlobalObject()) { - value = Heap::AllocateJSGlobalPropertyCell(value); - if (value->IsFailure()) return value; - } Object* result = dictionary->Add(descs->GetKey(i), value, d); if (result->IsFailure()) return result; dictionary = StringDictionary::cast(result); @@ -2164,9 +2180,7 @@ Object* JSObject::NormalizeProperties(PropertyNormalizationMode mode) { case INTERCEPTOR: break; default: - case NORMAL: UNREACHABLE(); - break; } } @@ -2219,7 +2233,8 @@ Object* JSObject::TransformToFastProperties(int unused_property_fields) { Object* JSObject::NormalizeElements() { - if (!HasFastElements()) return this; + ASSERT(!HasPixelElements()); + if (HasDictionaryElements()) return this; // Get number of entries. FixedArray* array = FixedArray::cast(elements()); @@ -2264,7 +2279,7 @@ Object* JSObject::DeletePropertyPostInterceptor(String* name, DeleteMode mode) { if (!result.IsValid()) return Heap::true_value(); // Normalize object if needed. - Object* obj = NormalizeProperties(CLEAR_INOBJECT_PROPERTIES); + Object* obj = NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0); if (obj->IsFailure()) return obj; return DeleteNormalizedProperty(name, mode); @@ -2305,20 +2320,28 @@ Object* JSObject::DeletePropertyWithInterceptor(String* name) { Object* JSObject::DeleteElementPostInterceptor(uint32_t index, DeleteMode mode) { - if (HasFastElements()) { - uint32_t length = IsJSArray() ? + ASSERT(!HasPixelElements()); + switch (GetElementsKind()) { + case FAST_ELEMENTS: { + uint32_t length = IsJSArray() ? 
static_cast<uint32_t>(Smi::cast(JSArray::cast(this)->length())->value()) : static_cast<uint32_t>(FixedArray::cast(elements())->length()); - if (index < length) { - FixedArray::cast(elements())->set_the_hole(index); + if (index < length) { + FixedArray::cast(elements())->set_the_hole(index); + } + break; } - return Heap::true_value(); - } - ASSERT(!HasFastElements()); - NumberDictionary* dictionary = element_dictionary(); - int entry = dictionary->FindEntry(index); - if (entry != NumberDictionary::kNotFound) { - return dictionary->DeleteProperty(entry, mode); + case DICTIONARY_ELEMENTS: { + NumberDictionary* dictionary = element_dictionary(); + int entry = dictionary->FindEntry(index); + if (entry != NumberDictionary::kNotFound) { + return dictionary->DeleteProperty(entry, mode); + } + break; + } + default: + UNREACHABLE(); + break; } return Heap::true_value(); } @@ -2380,20 +2403,31 @@ Object* JSObject::DeleteElement(uint32_t index, DeleteMode mode) { return DeleteElementWithInterceptor(index); } - if (HasFastElements()) { - uint32_t length = IsJSArray() ? + switch (GetElementsKind()) { + case FAST_ELEMENTS: { + uint32_t length = IsJSArray() ? static_cast<uint32_t>(Smi::cast(JSArray::cast(this)->length())->value()) : static_cast<uint32_t>(FixedArray::cast(elements())->length()); - if (index < length) { - FixedArray::cast(elements())->set_the_hole(index); + if (index < length) { + FixedArray::cast(elements())->set_the_hole(index); + } + break; } - return Heap::true_value(); - } else { - NumberDictionary* dictionary = element_dictionary(); - int entry = dictionary->FindEntry(index); - if (entry != NumberDictionary::kNotFound) { - return dictionary->DeleteProperty(entry, mode); + case PIXEL_ELEMENTS: { + // Pixel elements cannot be deleted. Just silently ignore here. + break; } + case DICTIONARY_ELEMENTS: { + NumberDictionary* dictionary = element_dictionary(); + int entry = dictionary->FindEntry(index); + if (entry != NumberDictionary::kNotFound) { + return dictionary->DeleteProperty(entry, mode); + } + break; + } + default: + UNREACHABLE(); + break; } return Heap::true_value(); } @@ -2442,7 +2476,7 @@ Object* JSObject::DeleteProperty(String* name, DeleteMode mode) { mode); } // Normalize object if needed. - Object* obj = NormalizeProperties(CLEAR_INOBJECT_PROPERTIES); + Object* obj = NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0); if (obj->IsFailure()) return obj; // Make sure the properties are normalized before removing the entry. return DeleteNormalizedProperty(name, mode); @@ -2471,21 +2505,32 @@ bool JSObject::ReferencesObject(Object* obj) { } // Check if the object is among the indexed properties. - if (HasFastElements()) { - int length = IsJSArray() - ? Smi::cast(JSArray::cast(this)->length())->value() - : FixedArray::cast(elements())->length(); - for (int i = 0; i < length; i++) { - Object* element = FixedArray::cast(elements())->get(i); - if (!element->IsTheHole() && element == obj) { - return true; + switch (GetElementsKind()) { + case PIXEL_ELEMENTS: + // Raw pixels do not reference other objects. + break; + case FAST_ELEMENTS: { + int length = IsJSArray() ? 
+ Smi::cast(JSArray::cast(this)->length())->value() : + FixedArray::cast(elements())->length(); + for (int i = 0; i < length; i++) { + Object* element = FixedArray::cast(elements())->get(i); + if (!element->IsTheHole() && element == obj) { + return true; + } } + break; } - } else { - key = element_dictionary()->SlowReverseLookup(obj); - if (key != Heap::undefined_value()) { - return true; + case DICTIONARY_ELEMENTS: { + key = element_dictionary()->SlowReverseLookup(obj); + if (key != Heap::undefined_value()) { + return true; + } + break; } + default: + UNREACHABLE(); + break; } // For functions check the context. Boilerplate functions do @@ -2703,20 +2748,31 @@ Object* JSObject::DefineGetterSetter(String* name, if (is_element && IsJSArray()) return Heap::undefined_value(); if (is_element) { - // Lookup the index. - if (!HasFastElements()) { - NumberDictionary* dictionary = element_dictionary(); - int entry = dictionary->FindEntry(index); - if (entry != NumberDictionary::kNotFound) { - Object* result = dictionary->ValueAt(entry); - PropertyDetails details = dictionary->DetailsAt(entry); - if (details.IsReadOnly()) return Heap::undefined_value(); - if (details.type() == CALLBACKS) { - // Only accessors allowed as elements. - ASSERT(result->IsFixedArray()); - return result; + switch (GetElementsKind()) { + case FAST_ELEMENTS: + break; + case PIXEL_ELEMENTS: + // Ignore getters and setters on pixel elements. + return Heap::undefined_value(); + case DICTIONARY_ELEMENTS: { + // Lookup the index. + NumberDictionary* dictionary = element_dictionary(); + int entry = dictionary->FindEntry(index); + if (entry != NumberDictionary::kNotFound) { + Object* result = dictionary->ValueAt(entry); + PropertyDetails details = dictionary->DetailsAt(entry); + if (details.IsReadOnly()) return Heap::undefined_value(); + if (details.type() == CALLBACKS) { + // Only accessors allowed as elements. + ASSERT(result->IsFixedArray()); + return result; + } } + break; } + default: + UNREACHABLE(); + break; } } else { // Lookup the name. @@ -2753,7 +2809,7 @@ Object* JSObject::DefineGetterSetter(String* name, set_elements(NumberDictionary::cast(dict)); } else { // Normalize object to make this operation simple. - Object* ok = NormalizeProperties(CLEAR_INOBJECT_PROPERTIES); + Object* ok = NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0); if (ok->IsFailure()) return ok; // For the global object allocate a new map to invalidate the global inline @@ -2815,9 +2871,9 @@ Object* JSObject::LookupAccessor(String* name, bool is_getter) { for (Object* obj = this; obj != Heap::null_value(); obj = JSObject::cast(obj)->GetPrototype()) { - JSObject* jsObject = JSObject::cast(obj); - if (!jsObject->HasFastElements()) { - NumberDictionary* dictionary = jsObject->element_dictionary(); + JSObject* js_object = JSObject::cast(obj); + if (js_object->HasDictionaryElements()) { + NumberDictionary* dictionary = js_object->element_dictionary(); int entry = dictionary->FindEntry(index); if (entry != NumberDictionary::kNotFound) { Object* element = dictionary->ValueAt(entry); @@ -3017,28 +3073,35 @@ static bool HasKey(FixedArray* array, Object* key) { Object* FixedArray::AddKeysFromJSArray(JSArray* array) { - if (array->HasFastElements()) { - return UnionOfKeys(array->elements()); - } - ASSERT(!array->HasFastElements()); - NumberDictionary* dict = array->element_dictionary(); - int size = dict->NumberOfElements(); - - // Allocate a temporary fixed array. 
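LookupAccessor above searches each object on the prototype chain for a CALLBACKS entry at the requested index, but only in holders with dictionary elements. The chain walk in miniature (an int id stands in for the accessor FixedArray, and a plain map for the NumberDictionary):

```cpp
#include <cassert>
#include <map>

// Stand-ins for the LookupAccessor walk above: each object may hold a
// sparse map from element index to an accessor id, plus a prototype link.
struct Obj {
  std::map<unsigned, int> element_accessors;
  Obj* prototype;
};

// Walk the prototype chain looking for an accessor defined at `index`,
// as LookupAccessor does over each holder's NumberDictionary.
int LookupElementAccessor(Obj* obj, unsigned index) {
  for (; obj != nullptr; obj = obj->prototype) {
    auto it = obj->element_accessors.find(index);
    if (it != obj->element_accessors.end()) return it->second;
  }
  return 0;  // nothing on the chain: caller sees undefined
}

int main() {
  Obj proto{{{7u, 123}}, nullptr};  // accessor with id 123 at index 7
  Obj receiver{{}, &proto};
  assert(LookupElementAccessor(&receiver, 7) == 123);
  assert(LookupElementAccessor(&receiver, 8) == 0);
}
```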
- Object* object = Heap::AllocateFixedArray(size); - if (object->IsFailure()) return object; - FixedArray* key_array = FixedArray::cast(object); - - int capacity = dict->Capacity(); - int pos = 0; - // Copy the elements from the JSArray to the temporary fixed array. - for (int i = 0; i < capacity; i++) { - if (dict->IsKey(dict->KeyAt(i))) { - key_array->set(pos++, dict->ValueAt(i)); + ASSERT(!array->HasPixelElements()); + switch (array->GetElementsKind()) { + case JSObject::FAST_ELEMENTS: + return UnionOfKeys(FixedArray::cast(array->elements())); + case JSObject::DICTIONARY_ELEMENTS: { + NumberDictionary* dict = array->element_dictionary(); + int size = dict->NumberOfElements(); + + // Allocate a temporary fixed array. + Object* object = Heap::AllocateFixedArray(size); + if (object->IsFailure()) return object; + FixedArray* key_array = FixedArray::cast(object); + + int capacity = dict->Capacity(); + int pos = 0; + // Copy the elements from the JSArray to the temporary fixed array. + for (int i = 0; i < capacity; i++) { + if (dict->IsKey(dict->KeyAt(i))) { + key_array->set(pos++, dict->ValueAt(i)); + } + } + // Compute the union of this and the temporary fixed array. + return UnionOfKeys(key_array); } + default: + UNREACHABLE(); } - // Compute the union of this and the temporary fixed array. - return UnionOfKeys(key_array); + UNREACHABLE(); + return Heap::null_value(); // Failure case needs to "return" a value. } @@ -5077,54 +5140,74 @@ void Code::Disassemble(const char* name) { void JSObject::SetFastElements(FixedArray* elems) { + // We should never end in here with a pixel array. + ASSERT(!HasPixelElements()); #ifdef DEBUG // Check the provided array is filled with the_hole. uint32_t len = static_cast<uint32_t>(elems->length()); for (uint32_t i = 0; i < len; i++) ASSERT(elems->get(i)->IsTheHole()); #endif WriteBarrierMode mode = elems->GetWriteBarrierMode(); - if (HasFastElements()) { - FixedArray* old_elements = FixedArray::cast(elements()); - uint32_t old_length = static_cast<uint32_t>(old_elements->length()); - // Fill out the new array with this content and array holes. - for (uint32_t i = 0; i < old_length; i++) { - elems->set(i, old_elements->get(i), mode); + switch (GetElementsKind()) { + case FAST_ELEMENTS: { + FixedArray* old_elements = FixedArray::cast(elements()); + uint32_t old_length = static_cast<uint32_t>(old_elements->length()); + // Fill out the new array with this content and array holes. + for (uint32_t i = 0; i < old_length; i++) { + elems->set(i, old_elements->get(i), mode); + } + break; } - } else { - NumberDictionary* dictionary = NumberDictionary::cast(elements()); - for (int i = 0; i < dictionary->Capacity(); i++) { - Object* key = dictionary->KeyAt(i); - if (key->IsNumber()) { - uint32_t entry = static_cast<uint32_t>(key->Number()); - elems->set(entry, dictionary->ValueAt(i), mode); + case DICTIONARY_ELEMENTS: { + NumberDictionary* dictionary = NumberDictionary::cast(elements()); + for (int i = 0; i < dictionary->Capacity(); i++) { + Object* key = dictionary->KeyAt(i); + if (key->IsNumber()) { + uint32_t entry = static_cast<uint32_t>(key->Number()); + elems->set(entry, dictionary->ValueAt(i), mode); + } } + break; } + default: + UNREACHABLE(); + break; } set_elements(elems); } Object* JSObject::SetSlowElements(Object* len) { + // We should never end in here with a pixel array. 
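SetFastElements above fills a pre-holed dense array either from the old dense store or, in the dictionary case, by dropping each numeric key's value at its own index. The dictionary branch sketched with standard containers (std::nullopt stands in for the_hole; V8 sizes the array from the dictionary's largest key, as the conversion in SetElementWithoutInterceptor further down shows):

```cpp
#include <cassert>
#include <map>
#include <optional>
#include <vector>

// Mirror of SetFastElements' dictionary branch: every numeric key in the
// sparse store lands at its own index in the new dense backing; all other
// slots stay holes (here: std::nullopt instead of the_hole).
std::vector<std::optional<int>> ToFastElements(
    const std::map<unsigned, int>& dictionary, unsigned new_length) {
  std::vector<std::optional<int>> elems(new_length);  // starts as all holes
  for (const auto& [key, value] : dictionary) {
    assert(key < new_length);  // caller sized the array to max key + 1
    elems[key] = value;
  }
  return elems;
}

int main() {
  std::map<unsigned, int> dict = {{0, 7}, {4, 9}};
  auto elems = ToFastElements(dict, 5);  // max_number_key() + 1 == 5
  assert(elems[0] == 7 && !elems[2].has_value() && elems[4] == 9);
}
```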
+ ASSERT(!HasPixelElements()); + uint32_t new_length = static_cast<uint32_t>(len->Number()); - if (!HasFastElements()) { - if (IsJSArray()) { - uint32_t old_length = - static_cast<uint32_t>(JSArray::cast(this)->length()->Number()); - element_dictionary()->RemoveNumberEntries(new_length, old_length), - JSArray::cast(this)->set_length(len); + switch (GetElementsKind()) { + case FAST_ELEMENTS: { + // Make sure we never try to shrink dense arrays into sparse arrays. + ASSERT(static_cast<uint32_t>(FixedArray::cast(elements())->length()) <= + new_length); + Object* obj = NormalizeElements(); + if (obj->IsFailure()) return obj; + + // Update length for JSArrays. + if (IsJSArray()) JSArray::cast(this)->set_length(len); + break; } - return this; + case DICTIONARY_ELEMENTS: { + if (IsJSArray()) { + uint32_t old_length = + static_cast<uint32_t>(JSArray::cast(this)->length()->Number()); + element_dictionary()->RemoveNumberEntries(new_length, old_length), + JSArray::cast(this)->set_length(len); + } + break; + } + default: + UNREACHABLE(); + break; } - - // Make sure we never try to shrink dense arrays into sparse arrays. - ASSERT(static_cast<uint32_t>(FixedArray::cast(elements())->length()) <= - new_length); - Object* obj = NormalizeElements(); - if (obj->IsFailure()) return obj; - - // Update length for JSArrays. - if (IsJSArray()) JSArray::cast(this)->set_length(len); return this; } @@ -5147,7 +5230,7 @@ Object* JSArray::Initialize(int capacity) { void JSArray::Expand(int required_size) { Handle<JSArray> self(this); - Handle<FixedArray> old_backing(elements()); + Handle<FixedArray> old_backing(FixedArray::cast(elements())); int old_size = old_backing->length(); // Doubling in size would be overkill, but leave some slack to avoid // constantly growing. @@ -5174,52 +5257,62 @@ static Object* ArrayLengthRangeError() { Object* JSObject::SetElementsLength(Object* len) { + // We should never end in here with a pixel array. + ASSERT(!HasPixelElements()); + Object* smi_length = len->ToSmi(); if (smi_length->IsSmi()) { int value = Smi::cast(smi_length)->value(); if (value < 0) return ArrayLengthRangeError(); - if (HasFastElements()) { - int old_capacity = FixedArray::cast(elements())->length(); - if (value <= old_capacity) { + switch (GetElementsKind()) { + case FAST_ELEMENTS: { + int old_capacity = FixedArray::cast(elements())->length(); + if (value <= old_capacity) { + if (IsJSArray()) { + int old_length = FastD2I(JSArray::cast(this)->length()->Number()); + // NOTE: We may be able to optimize this by removing the + // last part of the elements backing storage array and + // setting the capacity to the new size. + for (int i = value; i < old_length; i++) { + FixedArray::cast(elements())->set_the_hole(i); + } + JSArray::cast(this)->set_length(smi_length, SKIP_WRITE_BARRIER); + } + return this; + } + int min = NewElementsCapacity(old_capacity); + int new_capacity = value > min ? 
value : min; + if (new_capacity <= kMaxFastElementsLength || + !ShouldConvertToSlowElements(new_capacity)) { + Object* obj = Heap::AllocateFixedArrayWithHoles(new_capacity); + if (obj->IsFailure()) return obj; + if (IsJSArray()) JSArray::cast(this)->set_length(smi_length, + SKIP_WRITE_BARRIER); + SetFastElements(FixedArray::cast(obj)); + return this; + } + break; + } + case DICTIONARY_ELEMENTS: { if (IsJSArray()) { - int old_length = FastD2I(JSArray::cast(this)->length()->Number()); - // NOTE: We may be able to optimize this by removing the - // last part of the elements backing storage array and - // setting the capacity to the new size. - for (int i = value; i < old_length; i++) { - FixedArray::cast(elements())->set_the_hole(i); + if (value == 0) { + // If the length of a slow array is reset to zero, we clear + // the array and flush backing storage. This has the added + // benefit that the array returns to fast mode. + initialize_elements(); + } else { + // Remove deleted elements. + uint32_t old_length = + static_cast<uint32_t>(JSArray::cast(this)->length()->Number()); + element_dictionary()->RemoveNumberEntries(value, old_length); } JSArray::cast(this)->set_length(smi_length, SKIP_WRITE_BARRIER); } return this; } - int min = NewElementsCapacity(old_capacity); - int new_capacity = value > min ? value : min; - if (new_capacity <= kMaxFastElementsLength || - !ShouldConvertToSlowElements(new_capacity)) { - Object* obj = Heap::AllocateFixedArrayWithHoles(new_capacity); - if (obj->IsFailure()) return obj; - if (IsJSArray()) JSArray::cast(this)->set_length(smi_length, - SKIP_WRITE_BARRIER); - SetFastElements(FixedArray::cast(obj)); - return this; - } - } else { - if (IsJSArray()) { - if (value == 0) { - // If the length of a slow array is reset to zero, we clear - // the array and flush backing storage. This has the added - // benefit that the array returns to fast mode. - initialize_elements(); - } else { - // Remove deleted elements. - uint32_t old_length = - static_cast<uint32_t>(JSArray::cast(this)->length()->Number()); - element_dictionary()->RemoveNumberEntries(value, old_length); - } - JSArray::cast(this)->set_length(smi_length, SKIP_WRITE_BARRIER); - } - return this; + default: + UNREACHABLE(); + break; } } @@ -5246,20 +5339,36 @@ Object* JSObject::SetElementsLength(Object* len) { bool JSObject::HasElementPostInterceptor(JSObject* receiver, uint32_t index) { - if (HasFastElements()) { - uint32_t length = IsJSArray() ? - static_cast<uint32_t>( - Smi::cast(JSArray::cast(this)->length())->value()) : - static_cast<uint32_t>(FixedArray::cast(elements())->length()); - if ((index < length) && - !FixedArray::cast(elements())->get(index)->IsTheHole()) { - return true; + switch (GetElementsKind()) { + case FAST_ELEMENTS: { + uint32_t length = IsJSArray() ? + static_cast<uint32_t> + (Smi::cast(JSArray::cast(this)->length())->value()) : + static_cast<uint32_t>(FixedArray::cast(elements())->length()); + if ((index < length) && + !FixedArray::cast(elements())->get(index)->IsTheHole()) { + return true; + } + break; } - } else { - if (element_dictionary()->FindEntry(index) - != NumberDictionary::kNotFound) { - return true; + case PIXEL_ELEMENTS: { + // TODO(iposva): Add testcase. 
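The fast case of SetElementsLength just above shrinks in place by writing holes and grows to the larger of the requested length and NewElementsCapacity(old), bailing to dictionary elements past a size threshold. The decision is sketched below with an illustrative growth formula and constant; the real NewElementsCapacity and ShouldConvertToSlowElements heuristics live elsewhere in objects.h and differ in detail:

```cpp
#include <algorithm>
#include <cassert>

// Illustrative stand-ins, not V8's actual constants or formulas.
constexpr int kMaxFastElementsLength = 5000;

int NewElementsCapacity(int old_capacity) {
  // Grow by ~1.5x plus slack so repeated pushes don't reallocate each time.
  return old_capacity + old_capacity / 2 + 16;
}

// Decide the new dense capacity for "array.length = value", or -1 to signal
// that the array should be normalized into dictionary elements instead.
int DecideFastCapacity(int old_capacity, int value) {
  if (value <= old_capacity) return old_capacity;  // shrink: holes, no realloc
  int new_capacity = std::max(value, NewElementsCapacity(old_capacity));
  if (new_capacity <= kMaxFastElementsLength) return new_capacity;
  return -1;  // too big to stay dense: convert to slow elements
}

int main() {
  assert(DecideFastCapacity(8, 4) == 8);    // shrinking keeps the backing
  assert(DecideFastCapacity(8, 10) == 28);  // grows past the request
  assert(DecideFastCapacity(8, 100000) == -1);
}
```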
+ PixelArray* pixels = PixelArray::cast(elements()); + if (index < static_cast<uint32_t>(pixels->length())) { + return true; + } + break; + } + case DICTIONARY_ELEMENTS: { + if (element_dictionary()->FindEntry(index) + != NumberDictionary::kNotFound) { + return true; + } + break; } + default: + UNREACHABLE(); + break; } // Handle [] on String objects. @@ -5326,17 +5435,29 @@ bool JSObject::HasLocalElement(uint32_t index) { // Handle [] on String objects. if (this->IsStringObjectWithCharacterAt(index)) return true; - if (HasFastElements()) { - uint32_t length = IsJSArray() ? - static_cast<uint32_t>( - Smi::cast(JSArray::cast(this)->length())->value()) : - static_cast<uint32_t>(FixedArray::cast(elements())->length()); - return (index < length) && - !FixedArray::cast(elements())->get(index)->IsTheHole(); - } else { - return element_dictionary()->FindEntry(index) - != NumberDictionary::kNotFound; + switch (GetElementsKind()) { + case FAST_ELEMENTS: { + uint32_t length = IsJSArray() ? + static_cast<uint32_t> + (Smi::cast(JSArray::cast(this)->length())->value()) : + static_cast<uint32_t>(FixedArray::cast(elements())->length()); + return (index < length) && + !FixedArray::cast(elements())->get(index)->IsTheHole(); + } + case PIXEL_ELEMENTS: { + PixelArray* pixels = PixelArray::cast(elements()); + return (index < static_cast<uint32_t>(pixels->length())); + } + case DICTIONARY_ELEMENTS: { + return element_dictionary()->FindEntry(index) + != NumberDictionary::kNotFound; + } + default: + UNREACHABLE(); + break; } + UNREACHABLE(); + return Heap::null_value(); } @@ -5353,18 +5474,33 @@ bool JSObject::HasElementWithReceiver(JSObject* receiver, uint32_t index) { return HasElementWithInterceptor(receiver, index); } - if (HasFastElements()) { - uint32_t length = IsJSArray() ? - static_cast<uint32_t>( - Smi::cast(JSArray::cast(this)->length())->value()) : - static_cast<uint32_t>(FixedArray::cast(elements())->length()); - if ((index < length) && - !FixedArray::cast(elements())->get(index)->IsTheHole()) return true; - } else { - if (element_dictionary()->FindEntry(index) - != NumberDictionary::kNotFound) { - return true; + switch (GetElementsKind()) { + case FAST_ELEMENTS: { + uint32_t length = IsJSArray() ? + static_cast<uint32_t> + (Smi::cast(JSArray::cast(this)->length())->value()) : + static_cast<uint32_t>(FixedArray::cast(elements())->length()); + if ((index < length) && + !FixedArray::cast(elements())->get(index)->IsTheHole()) return true; + break; + } + case PIXEL_ELEMENTS: { + PixelArray* pixels = PixelArray::cast(elements()); + if (index < static_cast<uint32_t>(pixels->length())) { + return true; + } + break; + } + case DICTIONARY_ELEMENTS: { + if (element_dictionary()->FindEntry(index) + != NumberDictionary::kNotFound) { + return true; + } + break; } + default: + UNREACHABLE(); + break; } // Handle [] on String objects. @@ -5460,7 +5596,7 @@ Object* JSObject::SetFastElement(uint32_t index, Object* value) { // Otherwise default to slow case. Object* obj = NormalizeElements(); if (obj->IsFailure()) return obj; - ASSERT(!HasFastElements()); + ASSERT(HasDictionaryElements()); return SetElement(index, value); } @@ -5489,80 +5625,95 @@ Object* JSObject::SetElement(uint32_t index, Object* value) { Object* JSObject::SetElementWithoutInterceptor(uint32_t index, Object* value) { - // Fast case. - if (HasFastElements()) return SetFastElement(index, value); - - // Dictionary case. - ASSERT(!HasFastElements()); + switch (GetElementsKind()) { + case FAST_ELEMENTS: + // Fast case. 
+ return SetFastElement(index, value); + case PIXEL_ELEMENTS: { + PixelArray* pixels = PixelArray::cast(elements()); + return pixels->SetValue(index, value); + } + case DICTIONARY_ELEMENTS: { + // Insert element in the dictionary. + FixedArray* elms = FixedArray::cast(elements()); + NumberDictionary* dictionary = NumberDictionary::cast(elms); - // Insert element in the dictionary. - FixedArray* elms = FixedArray::cast(elements()); - NumberDictionary* dictionary = NumberDictionary::cast(elms); - - int entry = dictionary->FindEntry(index); - if (entry != NumberDictionary::kNotFound) { - Object* element = dictionary->ValueAt(entry); - PropertyDetails details = dictionary->DetailsAt(entry); - if (details.type() == CALLBACKS) { - // Only accessors allowed as elements. - FixedArray* structure = FixedArray::cast(element); - if (structure->get(kSetterIndex)->IsJSFunction()) { - JSFunction* setter = JSFunction::cast(structure->get(kSetterIndex)); - return SetPropertyWithDefinedSetter(setter, value); + int entry = dictionary->FindEntry(index); + if (entry != NumberDictionary::kNotFound) { + Object* element = dictionary->ValueAt(entry); + PropertyDetails details = dictionary->DetailsAt(entry); + if (details.type() == CALLBACKS) { + // Only accessors allowed as elements. + FixedArray* structure = FixedArray::cast(element); + if (structure->get(kSetterIndex)->IsJSFunction()) { + JSFunction* setter = JSFunction::cast(structure->get(kSetterIndex)); + return SetPropertyWithDefinedSetter(setter, value); + } else { + Handle<Object> self(this); + Handle<Object> key(Factory::NewNumberFromUint(index)); + Handle<Object> args[2] = { key, self }; + return Top::Throw(*Factory::NewTypeError("no_setter_in_callback", + HandleVector(args, 2))); + } + } else { + dictionary->UpdateMaxNumberKey(index); + dictionary->ValueAtPut(entry, value); + } } else { - Handle<Object> self(this); - Handle<Object> key(Factory::NewNumberFromUint(index)); - Handle<Object> args[2] = { key, self }; - return Top::Throw(*Factory::NewTypeError("no_setter_in_callback", - HandleVector(args, 2))); - } - } else { - dictionary->UpdateMaxNumberKey(index); - dictionary->ValueAtPut(entry, value); - } - } else { - // Index not already used. Look for an accessor in the prototype chain. - if (!IsJSArray()) { - Object* setter = LookupCallbackSetterInPrototypes(index); - if (setter->IsJSFunction()) { - return SetPropertyWithDefinedSetter(JSFunction::cast(setter), value); + // Index not already used. Look for an accessor in the prototype chain. + if (!IsJSArray()) { + Object* setter = LookupCallbackSetterInPrototypes(index); + if (setter->IsJSFunction()) { + return SetPropertyWithDefinedSetter(JSFunction::cast(setter), + value); + } + } + Object* result = dictionary->AtNumberPut(index, value); + if (result->IsFailure()) return result; + if (elms != FixedArray::cast(result)) { + set_elements(FixedArray::cast(result)); + } } - } - Object* result = dictionary->AtNumberPut(index, value); - if (result->IsFailure()) return result; - if (elms != FixedArray::cast(result)) { - set_elements(FixedArray::cast(result)); - } - } - // Update the array length if this JSObject is an array. - if (IsJSArray()) { - JSArray* array = JSArray::cast(this); - Object* return_value = array->JSArrayUpdateLengthFromIndex(index, value); - if (return_value->IsFailure()) return return_value; - } + // Update the array length if this JSObject is an array. 
+ if (IsJSArray()) { + JSArray* array = JSArray::cast(this); + Object* return_value = array->JSArrayUpdateLengthFromIndex(index, + value); + if (return_value->IsFailure()) return return_value; + } - // Attempt to put this object back in fast case. - if (ShouldConvertToFastElements()) { - uint32_t new_length = 0; - if (IsJSArray()) { - CHECK(Array::IndexFromObject(JSArray::cast(this)->length(), &new_length)); - JSArray::cast(this)->set_length(Smi::FromInt(new_length)); - } else { - new_length = NumberDictionary::cast(elements())->max_number_key() + 1; - } - Object* obj = Heap::AllocateFixedArrayWithHoles(new_length); - if (obj->IsFailure()) return obj; - SetFastElements(FixedArray::cast(obj)); + // Attempt to put this object back in fast case. + if (ShouldConvertToFastElements()) { + uint32_t new_length = 0; + if (IsJSArray()) { + CHECK(Array::IndexFromObject(JSArray::cast(this)->length(), + &new_length)); + JSArray::cast(this)->set_length(Smi::FromInt(new_length)); + } else { + new_length = NumberDictionary::cast(elements())->max_number_key() + 1; + } + Object* obj = Heap::AllocateFixedArrayWithHoles(new_length); + if (obj->IsFailure()) return obj; + SetFastElements(FixedArray::cast(obj)); #ifdef DEBUG - if (FLAG_trace_normalization) { - PrintF("Object elements are fast case again:\n"); - Print(); - } + if (FLAG_trace_normalization) { + PrintF("Object elements are fast case again:\n"); + Print(); + } #endif - } + } - return value; + return value; + } + default: + UNREACHABLE(); + break; + } + // All possible cases have been handled above. Add a return to avoid the + // complaints from the compiler. + UNREACHABLE(); + return Heap::null_value(); } @@ -5585,32 +5736,45 @@ Object* JSObject::GetElementPostInterceptor(JSObject* receiver, uint32_t index) { // Get element works for both JSObject and JSArray since // JSArray::length cannot change. - if (HasFastElements()) { - FixedArray* elms = FixedArray::cast(elements()); - if (index < static_cast<uint32_t>(elms->length())) { - Object* value = elms->get(index); - if (!value->IsTheHole()) return value; + switch (GetElementsKind()) { + case FAST_ELEMENTS: { + FixedArray* elms = FixedArray::cast(elements()); + if (index < static_cast<uint32_t>(elms->length())) { + Object* value = elms->get(index); + if (!value->IsTheHole()) return value; + } + break; } - } else { - NumberDictionary* dictionary = element_dictionary(); - int entry = dictionary->FindEntry(index); - if (entry != NumberDictionary::kNotFound) { - Object* element = dictionary->ValueAt(entry); - PropertyDetails details = dictionary->DetailsAt(entry); - if (details.type() == CALLBACKS) { - // Only accessors allowed as elements. - FixedArray* structure = FixedArray::cast(element); - Object* getter = structure->get(kGetterIndex); - if (getter->IsJSFunction()) { - return GetPropertyWithDefinedGetter(receiver, - JSFunction::cast(getter)); - } else { - // Getter is not a function. - return Heap::undefined_value(); + case PIXEL_ELEMENTS: { + // TODO(iposva): Add testcase and implement. + UNIMPLEMENTED(); + break; + } + case DICTIONARY_ELEMENTS: { + NumberDictionary* dictionary = element_dictionary(); + int entry = dictionary->FindEntry(index); + if (entry != NumberDictionary::kNotFound) { + Object* element = dictionary->ValueAt(entry); + PropertyDetails details = dictionary->DetailsAt(entry); + if (details.type() == CALLBACKS) { + // Only accessors allowed as elements. 
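A sketch of the callback layout that comment refers to, assuming slot indices 0 and 1 for the getter and setter (the real kGetterIndex and kSetterIndex constants are defined elsewhere in objects.cc, and the V8 types are taken from the surrounding code):

// A CALLBACKS element holds a two-slot FixedArray: the getter in one slot,
// the setter in the other. A slot that is not a JSFunction means "no
// accessor of that kind"; the load path above turns that into undefined,
// and the store path into a "no_setter_in_callback" TypeError.
static const int kGetterIndex = 0;  // assumed value
static const int kSetterIndex = 1;  // assumed value

static bool HasDefinedSetter(FixedArray* structure) {
  return structure->get(kSetterIndex)->IsJSFunction();
}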
+ FixedArray* structure = FixedArray::cast(element); + Object* getter = structure->get(kGetterIndex); + if (getter->IsJSFunction()) { + return GetPropertyWithDefinedGetter(receiver, + JSFunction::cast(getter)); + } else { + // Getter is not a function. + return Heap::undefined_value(); + } } + return element; } - return element; + break; } + default: + UNREACHABLE(); + break; } // Continue searching via the prototype chain. @@ -5669,31 +5833,44 @@ Object* JSObject::GetElementWithReceiver(JSObject* receiver, uint32_t index) { // Get element works for both JSObject and JSArray since // JSArray::length cannot change. - if (HasFastElements()) { - FixedArray* elms = FixedArray::cast(elements()); - if (index < static_cast<uint32_t>(elms->length())) { - Object* value = elms->get(index); - if (!value->IsTheHole()) return value; + switch (GetElementsKind()) { + case FAST_ELEMENTS: { + FixedArray* elms = FixedArray::cast(elements()); + if (index < static_cast<uint32_t>(elms->length())) { + Object* value = elms->get(index); + if (!value->IsTheHole()) return value; + } + break; } - } else { - NumberDictionary* dictionary = element_dictionary(); - int entry = dictionary->FindEntry(index); - if (entry != NumberDictionary::kNotFound) { - Object* element = dictionary->ValueAt(entry); - PropertyDetails details = dictionary->DetailsAt(entry); - if (details.type() == CALLBACKS) { - // Only accessors allowed as elements. - FixedArray* structure = FixedArray::cast(element); - Object* getter = structure->get(kGetterIndex); - if (getter->IsJSFunction()) { - return GetPropertyWithDefinedGetter(receiver, - JSFunction::cast(getter)); - } else { - // Getter is not a function. - return Heap::undefined_value(); + case PIXEL_ELEMENTS: { + PixelArray* pixels = PixelArray::cast(elements()); + if (index < static_cast<uint32_t>(pixels->length())) { + uint8_t value = pixels->get(index); + return Smi::FromInt(value); + } + break; + } + case DICTIONARY_ELEMENTS: { + NumberDictionary* dictionary = element_dictionary(); + int entry = dictionary->FindEntry(index); + if (entry != NumberDictionary::kNotFound) { + Object* element = dictionary->ValueAt(entry); + PropertyDetails details = dictionary->DetailsAt(entry); + if (details.type() == CALLBACKS) { + // Only accessors allowed as elements. + FixedArray* structure = FixedArray::cast(element); + Object* getter = structure->get(kGetterIndex); + if (getter->IsJSFunction()) { + return GetPropertyWithDefinedGetter(receiver, + JSFunction::cast(getter)); + } else { + // Getter is not a function. 
+ return Heap::undefined_value(); + } } + return element; } - return element; + break; } } @@ -5707,16 +5884,27 @@ bool JSObject::HasDenseElements() { int capacity = 0; int number_of_elements = 0; - if (HasFastElements()) { - FixedArray* elms = FixedArray::cast(elements()); - capacity = elms->length(); - for (int i = 0; i < capacity; i++) { - if (!elms->get(i)->IsTheHole()) number_of_elements++; + switch (GetElementsKind()) { + case FAST_ELEMENTS: { + FixedArray* elms = FixedArray::cast(elements()); + capacity = elms->length(); + for (int i = 0; i < capacity; i++) { + if (!elms->get(i)->IsTheHole()) number_of_elements++; + } + break; } - } else { - NumberDictionary* dictionary = NumberDictionary::cast(elements()); - capacity = dictionary->Capacity(); - number_of_elements = dictionary->NumberOfElements(); + case PIXEL_ELEMENTS: { + return true; + } + case DICTIONARY_ELEMENTS: { + NumberDictionary* dictionary = NumberDictionary::cast(elements()); + capacity = dictionary->Capacity(); + number_of_elements = dictionary->NumberOfElements(); + break; + } + default: + UNREACHABLE(); + break; } if (capacity == 0) return true; @@ -5735,7 +5923,7 @@ bool JSObject::ShouldConvertToSlowElements(int new_capacity) { bool JSObject::ShouldConvertToFastElements() { - ASSERT(!HasFastElements()); + ASSERT(HasDictionaryElements()); NumberDictionary* dictionary = NumberDictionary::cast(elements()); // If the elements are sparse, we should not go back to fast case. if (!HasDenseElements()) return false; @@ -5836,12 +6024,12 @@ Object* JSObject::GetPropertyPostInterceptor(JSObject* receiver, } -Object* JSObject::GetPropertyWithInterceptorProper( +Object* JSObject::GetPropertyWithInterceptor( JSObject* receiver, String* name, PropertyAttributes* attributes) { + InterceptorInfo* interceptor = GetNamedInterceptor(); HandleScope scope; - Handle<InterceptorInfo> interceptor(GetNamedInterceptor()); Handle<JSObject> receiver_handle(receiver); Handle<JSObject> holder_handle(this); Handle<String> name_handle(name); @@ -5860,85 +6048,14 @@ Object* JSObject::GetPropertyWithInterceptorProper( VMState state(EXTERNAL); result = getter(v8::Utils::ToLocal(name_handle), info); } - if (!Top::has_scheduled_exception() && !result.IsEmpty()) { + RETURN_IF_SCHEDULED_EXCEPTION(); + if (!result.IsEmpty()) { *attributes = NONE; return *v8::Utils::OpenHandle(*result); } } - *attributes = ABSENT; - return Heap::undefined_value(); -} - - -Object* JSObject::GetInterceptorPropertyWithLookupHint( - JSObject* receiver, - Smi* lookup_hint, - String* name, - PropertyAttributes* attributes) { - HandleScope scope; - Handle<JSObject> receiver_handle(receiver); - Handle<JSObject> holder_handle(this); - Handle<String> name_handle(name); - - Object* result = GetPropertyWithInterceptorProper(receiver, - name, - attributes); - if (*attributes != ABSENT) { - return result; - } - RETURN_IF_SCHEDULED_EXCEPTION(); - - int property_index = lookup_hint->value(); - if (property_index >= 0) { - result = holder_handle->FastPropertyAt(property_index); - } else { - switch (property_index) { - case kLookupInPrototype: { - Object* pt = holder_handle->GetPrototype(); - *attributes = ABSENT; - if (pt == Heap::null_value()) return Heap::undefined_value(); - result = pt->GetPropertyWithReceiver( - *receiver_handle, - *name_handle, - attributes); - RETURN_IF_SCHEDULED_EXCEPTION(); - } - break; - - case kLookupInHolder: - result = holder_handle->GetPropertyPostInterceptor( - *receiver_handle, - *name_handle, - attributes); - RETURN_IF_SCHEDULED_EXCEPTION(); - break; - - 
default: - UNREACHABLE(); - } - } - - return result; -} - - -Object* JSObject::GetPropertyWithInterceptor( - JSObject* receiver, - String* name, - PropertyAttributes* attributes) { - HandleScope scope; - Handle<JSObject> receiver_handle(receiver); - Handle<JSObject> holder_handle(this); - Handle<String> name_handle(name); - - Object* result = GetPropertyWithInterceptorProper(receiver, name, attributes); - if (*attributes != ABSENT) { - return result; - } - RETURN_IF_SCHEDULED_EXCEPTION(); - - result = holder_handle->GetPropertyPostInterceptor( + Object* result = holder_handle->GetPropertyPostInterceptor( *receiver_handle, *name_handle, attributes); @@ -5989,16 +6106,30 @@ bool JSObject::HasRealElementProperty(uint32_t index) { // Handle [] on String objects. if (this->IsStringObjectWithCharacterAt(index)) return true; - if (HasFastElements()) { - uint32_t length = IsJSArray() ? - static_cast<uint32_t>( - Smi::cast(JSArray::cast(this)->length())->value()) : - static_cast<uint32_t>(FixedArray::cast(elements())->length()); - return (index < length) && - !FixedArray::cast(elements())->get(index)->IsTheHole(); + switch (GetElementsKind()) { + case FAST_ELEMENTS: { + uint32_t length = IsJSArray() ? + static_cast<uint32_t>( + Smi::cast(JSArray::cast(this)->length())->value()) : + static_cast<uint32_t>(FixedArray::cast(elements())->length()); + return (index < length) && + !FixedArray::cast(elements())->get(index)->IsTheHole(); + } + case PIXEL_ELEMENTS: { + PixelArray* pixels = PixelArray::cast(elements()); + return index < static_cast<uint32_t>(pixels->length()); + } + case DICTIONARY_ELEMENTS: { + return element_dictionary()->FindEntry(index) + != NumberDictionary::kNotFound; + } + default: + UNREACHABLE(); + break; } - return element_dictionary()->FindEntry(index) - != NumberDictionary::kNotFound; + // All possibilities have been handled above already. + UNREACHABLE(); + return Heap::null_value(); } @@ -6181,24 +6312,43 @@ int JSObject::NumberOfEnumElements() { int JSObject::GetLocalElementKeys(FixedArray* storage, PropertyAttributes filter) { int counter = 0; - if (HasFastElements()) { - int length = IsJSArray() - ? Smi::cast(JSArray::cast(this)->length())->value() - : FixedArray::cast(elements())->length(); - for (int i = 0; i < length; i++) { - if (!FixedArray::cast(elements())->get(i)->IsTheHole()) { - if (storage) { - storage->set(counter, Smi::FromInt(i), SKIP_WRITE_BARRIER); + switch (GetElementsKind()) { + case FAST_ELEMENTS: { + int length = IsJSArray() ? 
+ Smi::cast(JSArray::cast(this)->length())->value() : + FixedArray::cast(elements())->length(); + for (int i = 0; i < length; i++) { + if (!FixedArray::cast(elements())->get(i)->IsTheHole()) { + if (storage != NULL) { + storage->set(counter, Smi::FromInt(i), SKIP_WRITE_BARRIER); + } + counter++; + } + } + ASSERT(!storage || storage->length() >= counter); + break; + } + case PIXEL_ELEMENTS: { + int length = PixelArray::cast(elements())->length(); + while (counter < length) { + if (storage != NULL) { + storage->set(counter, Smi::FromInt(counter), SKIP_WRITE_BARRIER); } counter++; } + ASSERT(!storage || storage->length() >= counter); + break; } - ASSERT(!storage || storage->length() >= counter); - } else { - if (storage) { - element_dictionary()->CopyKeysTo(storage, filter); + case DICTIONARY_ELEMENTS: { + if (storage != NULL) { + element_dictionary()->CopyKeysTo(storage, filter); + } + counter = element_dictionary()->NumberOfElementsFilterAttributes(filter); + break; } - counter = element_dictionary()->NumberOfElementsFilterAttributes(filter); + default: + UNREACHABLE(); + break; } if (this->IsJSValue()) { @@ -6657,7 +6807,7 @@ int Dictionary<StringDictionaryShape, String*>::NumberOfEnumElements(); // Collates undefined and unexisting elements below limit from position // zero of the elements. The object stays in Dictionary mode. Object* JSObject::PrepareSlowElementsForSort(uint32_t limit) { - ASSERT(!HasFastElements()); + ASSERT(HasDictionaryElements()); // Must stay in dictionary mode, either because of requires_slow_elements, // or because we are not going to sort (and therefore compact) all of the // elements. @@ -6731,7 +6881,9 @@ Object* JSObject::PrepareSlowElementsForSort(uint32_t limit) { // If the object is in dictionary mode, it is converted to fast elements // mode. Object* JSObject::PrepareElementsForSort(uint32_t limit) { - if (!HasFastElements()) { + ASSERT(!HasPixelElements()); + + if (HasDictionaryElements()) { // Convert to fast elements containing only the existing properties. // Ordering is irrelevant, since we are going to sort anyway. NumberDictionary* dict = element_dictionary(); @@ -6756,7 +6908,7 @@ Object* JSObject::PrepareElementsForSort(uint32_t limit) { // Collect holes at the end, undefined before that and the rest at the // start, and return the number of non-hole, non-undefined values. - FixedArray* elements = this->elements(); + FixedArray* elements = FixedArray::cast(this->elements()); uint32_t elements_length = static_cast<uint32_t>(elements->length()); if (limit > elements_length) { limit = elements_length ; @@ -6826,6 +6978,41 @@ Object* JSObject::PrepareElementsForSort(uint32_t limit) { } +Object* PixelArray::SetValue(uint32_t index, Object* value) { + uint8_t clamped_value = 0; + if (index < static_cast<uint32_t>(length())) { + if (value->IsSmi()) { + int int_value = Smi::cast(value)->value(); + if (int_value < 0) { + clamped_value = 0; + } else if (int_value > 255) { + clamped_value = 255; + } else { + clamped_value = static_cast<uint8_t>(int_value); + } + } else if (value->IsHeapNumber()) { + double double_value = HeapNumber::cast(value)->value(); + if (!(double_value > 0)) { + // NaN and less than zero clamp to zero. + clamped_value = 0; + } else if (double_value > 255) { + // Greater than 255 clamp to 255. + clamped_value = 255; + } else { + // Other doubles are rounded to the nearest integer. + clamped_value = static_cast<uint8_t>(double_value + 0.5); + } + } else { + // Clamp undefined to zero (default). 
All other types have been + // converted to a number type further up in the call chain. + ASSERT(value->IsUndefined()); + } + set(index, clamped_value); + } + return Smi::FromInt(clamped_value); +} + + Object* GlobalObject::GetPropertyCell(LookupResult* result) { ASSERT(!HasFastProperties()); Object* value = property_dictionary()->ValueAt(result->GetDictionaryEntry()); diff --git a/deps/v8/src/objects.h b/deps/v8/src/objects.h index 5c76e4a51b..d367f815fd 100644 --- a/deps/v8/src/objects.h +++ b/deps/v8/src/objects.h @@ -52,6 +52,7 @@ // - JSValue // - Array // - ByteArray +// - PixelArray // - FixedArray // - DescriptorArray // - HashTable @@ -95,7 +96,6 @@ // HeapObject: [32 bit direct pointer] (4 byte aligned) | 01 // Failure: [30 bit signed int] 11 - // Ecma-262 3rd 8.6.1 enum PropertyAttributes { NONE = v8::None, @@ -270,6 +270,7 @@ enum PropertyNormalizationMode { V(ODDBALL_TYPE) \ V(PROXY_TYPE) \ V(BYTE_ARRAY_TYPE) \ + V(PIXEL_ARRAY_TYPE) \ V(FILLER_TYPE) \ \ V(ACCESSOR_INFO_TYPE) \ @@ -659,6 +660,7 @@ enum InstanceType { JS_GLOBAL_PROPERTY_CELL_TYPE, PROXY_TYPE, BYTE_ARRAY_TYPE, + PIXEL_ARRAY_TYPE, FILLER_TYPE, SMI_TYPE, @@ -760,6 +762,7 @@ class Object BASE_EMBEDDED { inline bool IsNumber(); inline bool IsByteArray(); + inline bool IsPixelArray(); inline bool IsFailure(); inline bool IsRetryAfterGC(); inline bool IsOutOfMemoryFailure(); @@ -1302,6 +1305,11 @@ class HeapNumber: public HeapObject { class JSObject: public HeapObject { public: enum DeleteMode { NORMAL_DELETION, FORCE_DELETION }; + enum ElementsKind { + FAST_ELEMENTS, + DICTIONARY_ELEMENTS, + PIXEL_ELEMENTS + }; // [properties]: Backing storage for properties. // properties is a FixedArray in the fast case, and a Dictionary in the @@ -1313,10 +1321,13 @@ class JSObject: public HeapObject { // [elements]: The elements (properties with names that are integers). // elements is a FixedArray in the fast case, and a Dictionary in the slow - // case. - DECL_ACCESSORS(elements, FixedArray) // Get and set fast elements. + // case or a PixelArray in a special case. + DECL_ACCESSORS(elements, Array) // Get and set fast elements. inline void initialize_elements(); + inline ElementsKind GetElementsKind(); inline bool HasFastElements(); + inline bool HasDictionaryElements(); + inline bool HasPixelElements(); inline NumberDictionary* element_dictionary(); // Gets slow elements. // Collects elements starting at index 0. @@ -1496,14 +1507,6 @@ class JSObject: public HeapObject { Object* LookupCallbackSetterInPrototypes(uint32_t index); void LookupCallback(String* name, LookupResult* result); - inline Smi* InterceptorPropertyLookupHint(String* name); - Object* GetInterceptorPropertyWithLookupHint(JSObject* receiver, - Smi* lookup_hint, - String* name, - PropertyAttributes* attributes); - static const int kLookupInHolder = -1; - static const int kLookupInPrototype = -2; - // Returns the number of properties on this object filtering out properties // with the specified attributes (ignoring interceptors). int NumberOfLocalProperties(PropertyAttributes filter); @@ -1581,8 +1584,11 @@ class JSObject: public HeapObject { PropertyAttributes attributes); // Convert the object to use the canonical dictionary - // representation. - Object* NormalizeProperties(PropertyNormalizationMode mode); + // representation. If the object is expected to have additional properties + // added this number can be indicated to have the backing store allocated to + // an initial capacity for holding these properties. 
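The clamping rules in PixelArray::SetValue above, condensed into one free function; as the ASSERT notes, ToNumber conversion is assumed to have already happened further up the call chain:

#include <stdint.h>

// Canvas pixel writes clamp to 0..255: NaN and anything at or below zero
// store 0, anything above 255 stores 255, and in-range doubles round to
// the nearest integer (halves round up via the +0.5 truncation trick).
static uint8_t ClampPixelValue(double value) {
  if (!(value > 0)) return 0;    // the negated compare also catches NaN
  if (value > 255) return 255;
  return static_cast<uint8_t>(value + 0.5);
}

Reads are the inverse and need no conversion beyond a bounds check: the stored byte is re-boxed as a small integer, as in the Smi::FromInt(pixels->get(index)) path in GetElementWithReceiver above.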
+ Object* NormalizeProperties(PropertyNormalizationMode mode, + int expected_additional_properties); Object* NormalizeElements(); // Transform slow named properties to fast variants. @@ -1695,12 +1701,6 @@ class JSObject: public HeapObject { void LookupInDescriptor(String* name, LookupResult* result); - // Attempts to get property with a named interceptor getter. - // Sets |attributes| to ABSENT if interceptor didn't return anything - Object* GetPropertyWithInterceptorProper(JSObject* receiver, - String* name, - PropertyAttributes* attributes); - DISALLOW_IMPLICIT_CONSTRUCTORS(JSObject); }; @@ -1718,6 +1718,10 @@ class Array: public HeapObject { // Layout descriptor. static const int kLengthOffset = HeapObject::kHeaderSize; + + protected: + // No code should use the Array class directly, only its subclasses. + // Use the kHeaderSize of the appropriate subclass, which may be aligned. static const int kHeaderSize = kLengthOffset + kIntSize; static const int kAlignedSize = POINTER_SIZE_ALIGN(kHeaderSize); @@ -2427,11 +2431,54 @@ class ByteArray: public Array { void ByteArrayVerify(); #endif + // ByteArray headers are not quadword aligned. + static const int kHeaderSize = Array::kHeaderSize; + static const int kAlignedSize = Array::kAlignedSize; + private: DISALLOW_IMPLICIT_CONSTRUCTORS(ByteArray); }; +// A PixelArray represents a fixed-size byte array with special semantics +// used for implementing the CanvasPixelArray object. Please see the +// specification at: +// http://www.whatwg.org/specs/web-apps/current-work/ +// multipage/the-canvas-element.html#canvaspixelarray +// In particular, write access clamps the value written to 0 or 255 if the +// value written is outside this range. +class PixelArray: public Array { + public: + // [external_pointer]: The pointer to the external memory area backing this + // pixel array. + DECL_ACCESSORS(external_pointer, uint8_t) // Pointer to the data store. + + // Setter and getter. + inline uint8_t get(int index); + inline void set(int index, uint8_t value); + + // This accessor applies the correct conversion from Smi, HeapNumber and + // undefined and clamps the converted value between 0 and 255. + Object* SetValue(uint32_t index, Object* value); + + // Casting. + static inline PixelArray* cast(Object* obj); + +#ifdef DEBUG + void PixelArrayPrint(); + void PixelArrayVerify(); +#endif // DEBUG + + // PixelArray headers are not quadword aligned. + static const int kExternalPointerOffset = Array::kAlignedSize; + static const int kHeaderSize = kExternalPointerOffset + kPointerSize; + static const int kAlignedSize = OBJECT_SIZE_ALIGN(kHeaderSize); + + private: + DISALLOW_IMPLICIT_CONSTRUCTORS(PixelArray); +}; + + // Code describes objects with on-the-fly generated machine code. class Code: public HeapObject { public: @@ -3129,6 +3176,9 @@ class JSFunction: public JSObject { // function. inline bool IsBoilerplate(); + // Tells whether this function is builtin. + inline bool IsBuiltin(); + // [literals]: Fixed array holding the materialized literals. // // If the function contains object, regexp or array literals, the diff --git a/deps/v8/src/parser.cc b/deps/v8/src/parser.cc index 89d6d5b62f..348c12a4ac 100644 --- a/deps/v8/src/parser.cc +++ b/deps/v8/src/parser.cc @@ -834,12 +834,7 @@ class AstBuildingParserFactory : public ParserFactory { return new CallEval(expression, arguments, pos); } - virtual Statement* EmptyStatement() { - // Use a statically allocated empty statement singleton to avoid - // allocating lots and lots of empty statements. 
- static v8::internal::EmptyStatement empty; - return ∅ - } + virtual Statement* EmptyStatement(); }; @@ -1032,6 +1027,14 @@ Scope* AstBuildingParserFactory::NewScope(Scope* parent, Scope::Type type, } +Statement* AstBuildingParserFactory::EmptyStatement() { + // Use a statically allocated empty statement singleton to avoid + // allocating lots and lots of empty statements. + static v8::internal::EmptyStatement empty; + return ∅ +} + + Scope* ParserFactory::NewScope(Scope* parent, Scope::Type type, bool inside_with) { ASSERT(parent != NULL); @@ -1056,7 +1059,7 @@ VariableProxy* PreParser::Declare(Handle<String> name, Variable::Mode mode, class Target BASE_EMBEDDED { public: - Target(Parser* parser, Node* node) + Target(Parser* parser, AstNode* node) : parser_(parser), node_(node), previous_(parser_->target_stack_) { parser_->target_stack_ = this; } @@ -1066,11 +1069,11 @@ class Target BASE_EMBEDDED { } Target* previous() { return previous_; } - Node* node() { return node_; } + AstNode* node() { return node_; } private: Parser* parser_; - Node* node_; + AstNode* node_; Target* previous_; }; @@ -2367,7 +2370,7 @@ TryStatement* Parser::ParseTryStatement(bool* ok) { result = NEW(TryFinally(try_block, finally_block)); // Add the jump targets of the try block and the catch block. for (int i = 0; i < collector.targets()->length(); i++) { - catch_collector.targets()->Add(collector.targets()->at(i)); + catch_collector.AddTarget(collector.targets()->at(i)); } result->set_escaping_targets(catch_collector.targets()); } @@ -3928,7 +3931,7 @@ RegExpTree* RegExpParser::ParseDisjunction() { case '*': case '+': case '?': - ReportError(CStrVector("Nothing to repeat") CHECK_FAILED); + return ReportError(CStrVector("Nothing to repeat")); case '^': { Advance(); if (multiline_) { @@ -4003,7 +4006,7 @@ RegExpTree* RegExpParser::ParseDisjunction() { case '\\': switch (Next()) { case kEndMarker: - ReportError(CStrVector("\\ at end of pattern") CHECK_FAILED); + return ReportError(CStrVector("\\ at end of pattern")); case 'b': Advance(2); builder->AddAssertion( @@ -4490,7 +4493,7 @@ CharacterRange RegExpParser::ParseClassAtom(uc16* char_class) { return CharacterRange::Singleton(0); // Return dummy value. 
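The pattern behind these regexp-parser fixes: ReportError both records the failure and yields a value of the caller's return type, so a plain return replaces the old statement-plus-CHECK_FAILED form. A plausible shape of the helper, reconstructed for illustration (the real definition lives elsewhere in parser.cc):

// Records the failure and hands back a dummy value, so that every call
// site can bail out in a single statement: return ReportError(...);
RegExpTree* RegExpParser::ReportError(Vector<const char> message) {
  failed_ = true;
  *error_ = message;  // assumed error slot; illustrative only
  return NULL;        // dummy value, never used on the failure path
}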
} case kEndMarker: - ReportError(CStrVector("\\ at end of pattern") CHECK_FAILED); + return ReportError(CStrVector("\\ at end of pattern")); default: uc32 c = ParseClassCharacterEscape(CHECK_FAILED); return CharacterRange::Singleton(c); diff --git a/deps/v8/src/platform-linux.cc b/deps/v8/src/platform-linux.cc index bccf9e6aef..57c884fb73 100644 --- a/deps/v8/src/platform-linux.cc +++ b/deps/v8/src/platform-linux.cc @@ -223,62 +223,36 @@ PosixMemoryMappedFile::~PosixMemoryMappedFile() { } -#ifdef ENABLE_LOGGING_AND_PROFILING -static uintptr_t StringToULong(char* buffer) { - return strtoul(buffer, NULL, 16); // NOLINT -} -#endif - - void OS::LogSharedLibraryAddresses() { #ifdef ENABLE_LOGGING_AND_PROFILING - static const int MAP_LENGTH = 1024; - int fd = open("/proc/self/maps", O_RDONLY); - if (fd < 0) return; + FILE *fp; + fp = fopen("/proc/self/maps", "r"); + if (fp == NULL) return; while (true) { - char addr_buffer[11]; - addr_buffer[0] = '0'; - addr_buffer[1] = 'x'; - addr_buffer[10] = 0; - int result = read(fd, addr_buffer + 2, 8); - if (result < 8) break; - uintptr_t start = StringToULong(addr_buffer); - result = read(fd, addr_buffer + 2, 1); - if (result < 1) break; - if (addr_buffer[2] != '-') break; - result = read(fd, addr_buffer + 2, 8); - if (result < 8) break; - uintptr_t end = StringToULong(addr_buffer); - char buffer[MAP_LENGTH]; - int bytes_read = -1; - do { - bytes_read++; - if (bytes_read >= MAP_LENGTH - 1) - break; - result = read(fd, buffer + bytes_read, 1); - if (result < 1) break; - } while (buffer[bytes_read] != '\n'); - buffer[bytes_read] = 0; - // Ignore mappings that are not executable. - if (buffer[3] != 'x') continue; - char* start_of_path = index(buffer, '/'); - // If there is no filename for this line then log it as an anonymous - // mapping and use the address as its name. - if (start_of_path == NULL) { - // 40 is enough to print a 64 bit address range. - ASSERT(sizeof(buffer) > 40); - snprintf(buffer, - sizeof(buffer), - "%08" V8PRIxPTR "-%08" V8PRIxPTR, - start, - end); - LOG(SharedLibraryEvent(buffer, start, end)); - } else { - buffer[bytes_read] = 0; - LOG(SharedLibraryEvent(start_of_path, start, end)); + uintptr_t start, end; + char attr_r, attr_w, attr_x, attr_p; + if (fscanf(fp, "%" V8PRIxPTR "-%" V8PRIxPTR, &start, &end) != 2) break; + if (fscanf(fp, " %c%c%c%c", &attr_r, &attr_w, &attr_x, &attr_p) != 4) break; + int c; + if (attr_r == 'r' && attr_x == 'x') { + while (c = getc(fp), (c != EOF) && (c != '\n') && (c != '/')); + char lib_name[1024]; + bool lib_has_name = false; + if (c == '/') { + ungetc(c, fp); + lib_has_name = fgets(lib_name, sizeof(lib_name), fp) != NULL; + } + if (lib_has_name && strlen(lib_name) > 0) { + lib_name[strlen(lib_name) - 1] = '\0'; + } else { + snprintf(lib_name, sizeof(lib_name), + "%08" V8PRIxPTR "-%08" V8PRIxPTR, start, end); + } + LOG(SharedLibraryEvent(lib_name, start, end)); } + while (c = getc(fp), (c != EOF) && (c != '\n')); } - close(fd); + fclose(fp); #endif } diff --git a/deps/v8/src/platform-macos.cc b/deps/v8/src/platform-macos.cc index 880931e048..445f588d67 100644 --- a/deps/v8/src/platform-macos.cc +++ b/deps/v8/src/platform-macos.cc @@ -28,10 +28,11 @@ // Platform specific code for MacOS goes here. For the POSIX comaptible parts // the implementation is in platform-posix.cc. 
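The Linux loop above replaces hand-rolled read() parsing with fscanf over /proc/self/maps, where each line has the shape "40000000-40015000 r-xp 00000000 03:01 12845 /lib/ld-2.3.6.so". A compact sketch of the same scanning strategy using standard scanf conversions:

#include <cinttypes>
#include <cstdio>
#include <cstring>

// Walks /proc/self/maps and reports executable (r-x) mappings, keeping the
// path when the line has one and noting anonymous mappings otherwise.
static void LogExecutableMappings() {
  std::FILE* fp = std::fopen("/proc/self/maps", "r");
  if (fp == NULL) return;
  uintptr_t start = 0, end = 0;
  char perms[5] = {0};  // the "rwxp" column; %4s stops at whitespace
  char rest[1024];
  while (std::fscanf(fp, "%" SCNxPTR "-%" SCNxPTR " %4s",
                     &start, &end, perms) == 3) {
    if (std::fgets(rest, sizeof(rest), fp) == NULL) break;  // rest of line
    if (perms[0] != 'r' || perms[2] != 'x') continue;
    char* path = std::strchr(rest, '/');  // NULL for anonymous mappings
    if (path != NULL) rest[std::strcspn(rest, "\n")] = '\0';
    std::printf("%08" PRIxPTR "-%08" PRIxPTR " %s\n",
                start, end, path != NULL ? path : "(anonymous)");
  }
  std::fclose(fp);
}

As in the original, a pathname longer than the buffer would desynchronize the loop; the 1024-byte limit mirrors the old MAP_LENGTH assumption.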
-#include <ucontext.h> #include <unistd.h> #include <sys/mman.h> #include <mach/mach_init.h> +#include <mach-o/dyld.h> +#include <mach-o/getsect.h> #include <AvailabilityMacros.h> @@ -205,7 +206,19 @@ PosixMemoryMappedFile::~PosixMemoryMappedFile() { void OS::LogSharedLibraryAddresses() { - // TODO(1233579): Implement. +#ifdef ENABLE_LOGGING_AND_PROFILING + unsigned int images_count = _dyld_image_count(); + for (unsigned int i = 0; i < images_count; ++i) { + const mach_header* header = _dyld_get_image_header(i); + if (header == NULL) continue; + unsigned int size; + char* code_ptr = getsectdatafromheader(header, SEG_TEXT, SECT_TEXT, &size); + if (code_ptr == NULL) continue; + const uintptr_t slide = _dyld_get_image_vmaddr_slide(i); + const uintptr_t start = reinterpret_cast<uintptr_t>(code_ptr) + slide; + LOG(SharedLibraryEvent(_dyld_get_image_name(i), start, start + size)); + } +#endif // ENABLE_LOGGING_AND_PROFILING } @@ -411,14 +424,10 @@ class MacOSMutex : public Mutex { public: MacOSMutex() { - // For some reason the compiler doesn't allow you to write - // "this->mutex_ = PTHREAD_..." directly on mac. - pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER; pthread_mutexattr_t attr; pthread_mutexattr_init(&attr); pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE); - pthread_mutex_init(&m, &attr); - mutex_ = m; + pthread_mutex_init(&mutex_, &attr); } ~MacOSMutex() { pthread_mutex_destroy(&mutex_); } diff --git a/deps/v8/src/platform-posix.cc b/deps/v8/src/platform-posix.cc index d628a5148b..6174522f75 100644 --- a/deps/v8/src/platform-posix.cc +++ b/deps/v8/src/platform-posix.cc @@ -42,11 +42,15 @@ #include <netinet/in.h> #include <netdb.h> +#if defined(ANDROID) +#define LOG_TAG "v8" +#include <utils/Log.h> // LOG_PRI_VA +#endif + #include "v8.h" #include "platform.h" - namespace v8 { namespace internal { @@ -126,7 +130,11 @@ void OS::Print(const char* format, ...) { void OS::VPrint(const char* format, va_list args) { +#if defined(ANDROID) + LOG_PRI_VA(ANDROID_LOG_INFO, LOG_TAG, format, args); +#else vprintf(format, args); +#endif } @@ -139,7 +147,11 @@ void OS::PrintError(const char* format, ...) 
{ void OS::VPrintError(const char* format, va_list args) { +#if defined(ANDROID) + LOG_PRI_VA(ANDROID_LOG_ERROR, LOG_TAG, format, args); +#else vfprintf(stderr, format, args); +#endif } diff --git a/deps/v8/src/prettyprinter.cc b/deps/v8/src/prettyprinter.cc index 79f1883ef4..7a8af40fd3 100644 --- a/deps/v8/src/prettyprinter.cc +++ b/deps/v8/src/prettyprinter.cc @@ -417,7 +417,7 @@ void PrettyPrinter::VisitThisFunction(ThisFunction* node) { } -const char* PrettyPrinter::Print(Node* node) { +const char* PrettyPrinter::Print(AstNode* node) { Init(); Visit(node); return output_; @@ -441,7 +441,7 @@ const char* PrettyPrinter::PrintProgram(FunctionLiteral* program) { } -void PrettyPrinter::PrintOut(Node* node) { +void PrettyPrinter::PrintOut(AstNode* node) { PrettyPrinter printer; PrintF("%s", printer.Print(node)); } @@ -700,7 +700,7 @@ void AstPrinter::PrintLabelsIndented(const char* info, ZoneStringList* labels) { } -void AstPrinter::PrintIndentedVisit(const char* s, Node* node) { +void AstPrinter::PrintIndentedVisit(const char* s, AstNode* node) { IndentedScope indent(s); Visit(node); } @@ -934,6 +934,9 @@ void AstPrinter::VisitObjectLiteral(ObjectLiteral* node) { case ObjectLiteral::Property::COMPUTED: prop_kind = "PROPERTY - COMPUTED"; break; + case ObjectLiteral::Property::MATERIALIZED_LITERAL: + prop_kind = "PROPERTY - MATERIALIZED_LITERAL"; + break; case ObjectLiteral::Property::PROTOTYPE: prop_kind = "PROPERTY - PROTOTYPE"; break; @@ -945,7 +948,6 @@ void AstPrinter::VisitObjectLiteral(ObjectLiteral* node) { break; default: UNREACHABLE(); - break; } IndentedScope prop(prop_kind); PrintIndentedVisit("KEY", node->properties()->at(i)->key()); diff --git a/deps/v8/src/prettyprinter.h b/deps/v8/src/prettyprinter.h index bfce9b0332..8a6d1fbf81 100644 --- a/deps/v8/src/prettyprinter.h +++ b/deps/v8/src/prettyprinter.h @@ -42,17 +42,17 @@ class PrettyPrinter: public AstVisitor { // The following routines print a node into a string. // The result string is alive as long as the PrettyPrinter is alive. - const char* Print(Node* node); + const char* Print(AstNode* node); const char* PrintExpression(FunctionLiteral* program); const char* PrintProgram(FunctionLiteral* program); // Print a node to stdout. - static void PrintOut(Node* node); + static void PrintOut(AstNode* node); // Individual nodes #define DEF_VISIT(type) \ virtual void Visit##type(type* node); - NODE_LIST(DEF_VISIT) + AST_NODE_LIST(DEF_VISIT) #undef DEF_VISIT private: @@ -87,12 +87,12 @@ class AstPrinter: public PrettyPrinter { // Individual nodes #define DEF_VISIT(type) \ virtual void Visit##type(type* node); - NODE_LIST(DEF_VISIT) + AST_NODE_LIST(DEF_VISIT) #undef DEF_VISIT private: friend class IndentedScope; void PrintIndented(const char* txt); - void PrintIndentedVisit(const char* s, Node* node); + void PrintIndentedVisit(const char* s, AstNode* node); void PrintStatements(ZoneList<Statement*>* statements); void PrintDeclarations(ZoneList<Declaration*>* declarations); diff --git a/deps/v8/src/register-allocator.cc b/deps/v8/src/register-allocator.cc index d1b08bbc4c..d55f949d8f 100644 --- a/deps/v8/src/register-allocator.cc +++ b/deps/v8/src/register-allocator.cc @@ -44,6 +44,12 @@ Result::Result(Register reg) { } +Result::ZoneObjectList* Result::ConstantList() { + static ZoneObjectList list(10); + return &list; +} + + // ------------------------------------------------------------------------- // RegisterAllocator implementation. 
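One plausible reading of the Result::ConstantList move just above (with the header-side definition removed in the next hunk): a static local inside a header-defined method is easy to duplicate when the code is linked into more than one image, after which callers can disagree about the list's contents. A sketch of the pattern under that assumption, with illustrative names:

#include <vector>

// Header-defined: every shared object that inlines List() may end up with
// its own 'list', depending on toolchain and symbol visibility.
struct InHeader {
  static std::vector<int>* List() {
    static std::vector<int> list;
    return &list;
  }
};

// Declared in the header, defined once in a single .cc file: the object is
// pinned to one translation unit, which is what the patch does for
// Result::ConstantList().
struct OutOfLine {
  static std::vector<int>* List();  // header: declaration only
};
std::vector<int>* OutOfLine::List() {
  static std::vector<int> list;
  return &list;
}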
diff --git a/deps/v8/src/register-allocator.h b/deps/v8/src/register-allocator.h index f7167d9262..1765633cdd 100644 --- a/deps/v8/src/register-allocator.h +++ b/deps/v8/src/register-allocator.h @@ -92,10 +92,7 @@ class Result BASE_EMBEDDED { // of handles to the actual constants. typedef ZoneList<Handle<Object> > ZoneObjectList; - static ZoneObjectList* ConstantList() { - static ZoneObjectList list(10); - return &list; - } + static ZoneObjectList* ConstantList(); // Clear the constants indirection table. static void ClearConstantList() { diff --git a/deps/v8/src/rewriter.cc b/deps/v8/src/rewriter.cc index 4d1fbd9dd7..d6ea68e694 100644 --- a/deps/v8/src/rewriter.cc +++ b/deps/v8/src/rewriter.cc @@ -38,8 +38,9 @@ namespace internal { class AstOptimizer: public AstVisitor { public: - explicit AstOptimizer() {} - explicit AstOptimizer(Handle<String> enclosing_name) { + explicit AstOptimizer() : has_function_literal_(false) {} + explicit AstOptimizer(Handle<String> enclosing_name) + : has_function_literal_(false) { func_name_inferrer_.PushEnclosingName(enclosing_name); } @@ -58,7 +59,7 @@ class AstOptimizer: public AstVisitor { // Node visitors. #define DEF_VISIT(type) \ virtual void Visit##type(type* node); - NODE_LIST(DEF_VISIT) + AST_NODE_LIST(DEF_VISIT) #undef DEF_VISIT DISALLOW_COPY_AND_ASSIGN(AstOptimizer); @@ -556,7 +557,7 @@ class Processor: public AstVisitor { // Node visitors. #define DEF_VISIT(type) \ virtual void Visit##type(type* node); - NODE_LIST(DEF_VISIT) + AST_NODE_LIST(DEF_VISIT) #undef DEF_VISIT }; diff --git a/deps/v8/src/runtime.cc b/deps/v8/src/runtime.cc index 350d391bf4..0b981673bb 100644 --- a/deps/v8/src/runtime.cc +++ b/deps/v8/src/runtime.cc @@ -155,33 +155,43 @@ static Object* DeepCopyBoilerplate(JSObject* boilerplate) { } // Deep copy local elements. - if (copy->HasFastElements()) { - FixedArray* elements = copy->elements(); - WriteBarrierMode mode = elements->GetWriteBarrierMode(); - for (int i = 0; i < elements->length(); i++) { - Object* value = elements->get(i); - if (value->IsJSObject()) { - JSObject* jsObject = JSObject::cast(value); - result = DeepCopyBoilerplate(jsObject); - if (result->IsFailure()) return result; - elements->set(i, result, mode); - } - } - } else { - NumberDictionary* element_dictionary = copy->element_dictionary(); - int capacity = element_dictionary->Capacity(); - for (int i = 0; i < capacity; i++) { - Object* k = element_dictionary->KeyAt(i); - if (element_dictionary->IsKey(k)) { - Object* value = element_dictionary->ValueAt(i); + // Pixel elements cannot be created using an object literal. 
+ ASSERT(!copy->HasPixelElements()); + switch (copy->GetElementsKind()) { + case JSObject::FAST_ELEMENTS: { + FixedArray* elements = FixedArray::cast(copy->elements()); + WriteBarrierMode mode = elements->GetWriteBarrierMode(); + for (int i = 0; i < elements->length(); i++) { + Object* value = elements->get(i); if (value->IsJSObject()) { JSObject* jsObject = JSObject::cast(value); result = DeepCopyBoilerplate(jsObject); if (result->IsFailure()) return result; - element_dictionary->ValueAtPut(i, result); + elements->set(i, result, mode); } } + break; } + case JSObject::DICTIONARY_ELEMENTS: { + NumberDictionary* element_dictionary = copy->element_dictionary(); + int capacity = element_dictionary->Capacity(); + for (int i = 0; i < capacity; i++) { + Object* k = element_dictionary->KeyAt(i); + if (element_dictionary->IsKey(k)) { + Object* value = element_dictionary->ValueAt(i); + if (value->IsJSObject()) { + JSObject* jsObject = JSObject::cast(value); + result = DeepCopyBoilerplate(jsObject); + if (result->IsFailure()) return result; + element_dictionary->ValueAtPut(i, result); + } + } + } + break; + } + default: + UNREACHABLE(); + break; } return copy; } @@ -258,6 +268,7 @@ static Handle<Object> CreateObjectLiteralBoilerplate( { // Add the constant properties to the boilerplate. int length = constant_properties->length(); OptimizedObjectForAddingMultipleProperties opt(boilerplate, + length / 2, !is_result_from_cache); for (int index = 0; index < length; index +=2) { Handle<Object> key(constant_properties->get(index+0)); @@ -1637,7 +1648,7 @@ void CompiledReplacement::Apply(ReplacementStringBuilder* builder, } case SUBJECT_CAPTURE: { int capture = part.data; - FixedArray* match_info = last_match_info->elements(); + FixedArray* match_info = FixedArray::cast(last_match_info->elements()); int from = RegExpImpl::GetCapture(match_info, capture * 2); int to = RegExpImpl::GetCapture(match_info, capture * 2 + 1); if (from >= 0 && to > from) { @@ -1717,7 +1728,8 @@ static Object* StringReplaceRegExpWithString(String* subject, int start, end; { AssertNoAllocation match_info_array_is_not_in_a_handle; - FixedArray* match_info_array = last_match_info_handle->elements(); + FixedArray* match_info_array = + FixedArray::cast(last_match_info_handle->elements()); ASSERT_EQ(capture_count * 2 + 2, RegExpImpl::GetLastCaptureCount(match_info_array)); @@ -2345,7 +2357,7 @@ static Object* Runtime_StringMatch(Arguments args) { int end; { AssertNoAllocation no_alloc; - FixedArray* elements = regexp_info->elements(); + FixedArray* elements = FixedArray::cast(regexp_info->elements()); start = Smi::cast(elements->get(RegExpImpl::kFirstCapture))->value(); end = Smi::cast(elements->get(RegExpImpl::kFirstCapture + 1))->value(); } @@ -3022,7 +3034,7 @@ static Object* Runtime_ToSlowProperties(Arguments args) { Handle<Object> object = args.at<Object>(0); if (object->IsJSObject()) { Handle<JSObject> js_object = Handle<JSObject>::cast(object); - js_object->NormalizeProperties(CLEAR_INOBJECT_PROPERTIES); + js_object->NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0); } return *object; } @@ -4885,7 +4897,7 @@ static Object* Runtime_DateParseString(Arguments args) { AssertNoAllocation no_allocation; - FixedArray* output_array = output->elements(); + FixedArray* output_array = FixedArray::cast(output->elements()); RUNTIME_ASSERT(output_array->length() >= DateParser::OUTPUT_SIZE); bool result; if (str->IsAsciiRepresentation()) { @@ -5173,37 +5185,62 @@ static uint32_t IterateElements(Handle<JSObject> receiver, ArrayConcatVisitor* 
visitor) { uint32_t num_of_elements = 0; - if (receiver->HasFastElements()) { - Handle<FixedArray> elements(FixedArray::cast(receiver->elements())); - uint32_t len = elements->length(); - if (range < len) len = range; + switch (receiver->GetElementsKind()) { + case JSObject::FAST_ELEMENTS: { + Handle<FixedArray> elements(FixedArray::cast(receiver->elements())); + uint32_t len = elements->length(); + if (range < len) { + len = range; + } - for (uint32_t j = 0; j < len; j++) { - Handle<Object> e(elements->get(j)); - if (!e->IsTheHole()) { + for (uint32_t j = 0; j < len; j++) { + Handle<Object> e(elements->get(j)); + if (!e->IsTheHole()) { + num_of_elements++; + if (visitor) { + visitor->visit(j, e); + } + } + } + break; + } + case JSObject::PIXEL_ELEMENTS: { + Handle<PixelArray> pixels(PixelArray::cast(receiver->elements())); + uint32_t len = pixels->length(); + if (range < len) { + len = range; + } + + for (uint32_t j = 0; j < len; j++) { num_of_elements++; - if (visitor) + if (visitor != NULL) { + Handle<Smi> e(Smi::FromInt(pixels->get(j))); visitor->visit(j, e); + } } + break; } - - } else { - Handle<NumberDictionary> dict(receiver->element_dictionary()); - uint32_t capacity = dict->Capacity(); - for (uint32_t j = 0; j < capacity; j++) { - Handle<Object> k(dict->KeyAt(j)); - if (dict->IsKey(*k)) { - ASSERT(k->IsNumber()); - uint32_t index = static_cast<uint32_t>(k->Number()); - if (index < range) { - num_of_elements++; - if (visitor) { - visitor->visit(index, - Handle<Object>(dict->ValueAt(j))); + case JSObject::DICTIONARY_ELEMENTS: { + Handle<NumberDictionary> dict(receiver->element_dictionary()); + uint32_t capacity = dict->Capacity(); + for (uint32_t j = 0; j < capacity; j++) { + Handle<Object> k(dict->KeyAt(j)); + if (dict->IsKey(*k)) { + ASSERT(k->IsNumber()); + uint32_t index = static_cast<uint32_t>(k->Number()); + if (index < range) { + num_of_elements++; + if (visitor) { + visitor->visit(index, Handle<Object>(dict->ValueAt(j))); + } } } } + break; } + default: + UNREACHABLE(); + break; } return num_of_elements; @@ -7408,14 +7445,15 @@ static bool ShowFrameInStackTrace(StackFrame* raw_frame, Object* caller, // Not sure when this can happen but skip it just in case. if (!raw_fun->IsJSFunction()) return false; - if ((raw_fun == caller) && !(*seen_caller) && frame->IsConstructor()) { + if ((raw_fun == caller) && !(*seen_caller)) { *seen_caller = true; return false; } - // Skip the most obvious builtin calls. Some builtin calls (such as - // Number.ADD which is invoked using 'call') are very difficult to - // recognize so we're leaving them in for now. - return !frame->receiver()->IsJSBuiltinsObject(); + // Skip all frames until we've seen the caller. Also, skip the most + // obvious builtin calls. Some builtin calls (such as Number.ADD + // which is invoked using 'call') are very difficult to recognize + // so we're leaving them in for now. + return *seen_caller && !frame->receiver()->IsJSBuiltinsObject(); } @@ -7424,7 +7462,7 @@ static bool ShowFrameInStackTrace(StackFrame* raw_frame, Object* caller, // code offset. 
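The ShowFrameInStackTrace change above drops the old constructor check: frames are now suppressed until the supplied caller function has been seen once, and builtin-receiver frames stay filtered either way. A condensed restatement with simplified types:

// 'seen_caller' starts true when no caller function was supplied (see
// Runtime_CollectStackTrace below), so collection begins at the top;
// otherwise everything up to and including the caller's own frame is
// skipped, and the caller itself is not reported.
static bool ShowFrame(bool is_caller_frame,
                      bool receiver_is_builtins,
                      bool* seen_caller) {
  if (is_caller_frame && !*seen_caller) {
    *seen_caller = true;
    return false;
  }
  return *seen_caller && !receiver_is_builtins;
}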
static Object* Runtime_CollectStackTrace(Arguments args) { ASSERT_EQ(args.length(), 2); - Object* caller = args[0]; + Handle<Object> caller = args.at<Object>(0); CONVERT_NUMBER_CHECKED(int32_t, limit, Int32, args[1]); HandleScope scope; @@ -7433,12 +7471,14 @@ static Object* Runtime_CollectStackTrace(Arguments args) { Handle<JSArray> result = Factory::NewJSArray(initial_size * 3); StackFrameIterator iter; - bool seen_caller = false; + // If the caller parameter is a function we skip frames until we're + // under it before starting to collect. + bool seen_caller = !caller->IsJSFunction(); int cursor = 0; int frames_seen = 0; while (!iter.done() && frames_seen < limit) { StackFrame* raw_frame = iter.frame(); - if (ShowFrameInStackTrace(raw_frame, caller, &seen_caller)) { + if (ShowFrameInStackTrace(raw_frame, *caller, &seen_caller)) { frames_seen++; JavaScriptFrame* frame = JavaScriptFrame::cast(raw_frame); Object* recv = frame->receiver(); @@ -7446,15 +7486,17 @@ static Object* Runtime_CollectStackTrace(Arguments args) { Address pc = frame->pc(); Address start = frame->code()->address(); Smi* offset = Smi::FromInt(pc - start); - FixedArray* elements = result->elements(); + FixedArray* elements = FixedArray::cast(result->elements()); if (cursor + 2 < elements->length()) { elements->set(cursor++, recv); elements->set(cursor++, fun); elements->set(cursor++, offset, SKIP_WRITE_BARRIER); } else { HandleScope scope; - SetElement(result, cursor++, Handle<Object>(recv)); - SetElement(result, cursor++, Handle<Object>(fun)); + Handle<Object> recv_handle(recv); + Handle<Object> fun_handle(fun); + SetElement(result, cursor++, recv_handle); + SetElement(result, cursor++, fun_handle); SetElement(result, cursor++, Handle<Smi>(offset)); } } diff --git a/deps/v8/src/serialize.cc b/deps/v8/src/serialize.cc index 592cf5aaed..963138e741 100644 --- a/deps/v8/src/serialize.cc +++ b/deps/v8/src/serialize.cc @@ -1454,9 +1454,9 @@ void Deserializer::GetLog() { static void InitPagedSpace(PagedSpace* space, int capacity, List<Page*>* page_list) { - space->EnsureCapacity(capacity); - // TODO(1240712): PagedSpace::EnsureCapacity can return false due to - // a failure to allocate from the OS to expand the space. + if (!space->EnsureCapacity(capacity)) { + V8::FatalProcessOutOfMemory("InitPagedSpace"); + } PageIterator it(space, PageIterator::ALL_PAGES); while (it.has_next()) page_list->Add(it.next()); } diff --git a/deps/v8/src/spaces.cc b/deps/v8/src/spaces.cc index 2393281bd7..4f8119fe1d 100644 --- a/deps/v8/src/spaces.cc +++ b/deps/v8/src/spaces.cc @@ -133,8 +133,6 @@ PageIterator::PageIterator(PagedSpace* space, Mode mode) : space_(space) { #endif stop_page_ = space->last_page_; break; - default: - UNREACHABLE(); } } @@ -725,11 +723,15 @@ void PagedSpace::Shrink() { Page* current_page = top_page->next_page(); // Loop over the pages to the end of the space. while (current_page->is_valid()) { +#if defined(ANDROID) + // Free all chunks if possible +#else // Advance last_page_to_keep every other step to end up at the midpoint. if ((free_pages & 0x1) == 1) { pages_to_keep++; last_page_to_keep = last_page_to_keep->next_page(); } +#endif free_pages++; current_page = current_page->next_page(); } diff --git a/deps/v8/src/spaces.h b/deps/v8/src/spaces.h index ccd1d2786c..9841a5fddf 100644 --- a/deps/v8/src/spaces.h +++ b/deps/v8/src/spaces.h @@ -393,6 +393,9 @@ class MemoryAllocator : public AllStatic { // Returns the maximum available bytes of heaps. static int Available() { return capacity_ < size_ ? 
0 : capacity_ - size_; } + // Returns allocated spaces in bytes. + static int Size() { return size_; } + // Returns maximum available bytes that the old space can have. static int MaxAvailable() { return (Available() / Page::kPageSize) * Page::kObjectAreaSize; @@ -434,7 +437,11 @@ class MemoryAllocator : public AllStatic { static const int kMaxNofChunks = 1 << Page::kPageSizeBits; // If a chunk has at least 32 pages, the maximum heap size is about // 8 * 1024 * 32 * 8K = 2G bytes. +#if defined(ANDROID) + static const int kPagesPerChunk = 16; +#else static const int kPagesPerChunk = 64; +#endif static const int kChunkSize = kPagesPerChunk * Page::kPageSize; private: @@ -924,34 +931,41 @@ class PagedSpace : public Space { #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) -// HistogramInfo class for recording a single "bar" of a histogram. This -// class is used for collecting statistics to print to stdout (when compiled -// with DEBUG) or to the log file (when compiled with -// ENABLE_LOGGING_AND_PROFILING). -class HistogramInfo BASE_EMBEDDED { +class NumberAndSizeInfo BASE_EMBEDDED { public: - HistogramInfo() : number_(0), bytes_(0) {} + NumberAndSizeInfo() : number_(0), bytes_(0) {} - const char* name() { return name_; } - void set_name(const char* name) { name_ = name; } - - int number() { return number_; } + int number() const { return number_; } void increment_number(int num) { number_ += num; } - int bytes() { return bytes_; } + int bytes() const { return bytes_; } void increment_bytes(int size) { bytes_ += size; } - // Clear the number of objects and size fields, but not the name. void clear() { number_ = 0; bytes_ = 0; } private: - const char* name_; int number_; int bytes_; }; + + +// HistogramInfo class for recording a single "bar" of a histogram. This +// class is used for collecting statistics to print to stdout (when compiled +// with DEBUG) or to the log file (when compiled with +// ENABLE_LOGGING_AND_PROFILING). +class HistogramInfo: public NumberAndSizeInfo { + public: + HistogramInfo() : NumberAndSizeInfo() {} + + const char* name() { return name_; } + void set_name(const char* name) { name_ = name; } + + private: + const char* name_; +}; #endif diff --git a/deps/v8/src/string-stream.cc b/deps/v8/src/string-stream.cc index 9a137e3216..ee343a5f2f 100644 --- a/deps/v8/src/string-stream.cc +++ b/deps/v8/src/string-stream.cc @@ -44,12 +44,6 @@ char* HeapStringAllocator::allocate(unsigned bytes) { } -NoAllocationStringAllocator::NoAllocationStringAllocator(unsigned bytes) { - size_ = bytes; - space_ = NewArray<char>(bytes); -} - - NoAllocationStringAllocator::NoAllocationStringAllocator(char* memory, unsigned size) { size_ = size; diff --git a/deps/v8/src/string-stream.h b/deps/v8/src/string-stream.h index 15a72e0f3a..5732944a27 100644 --- a/deps/v8/src/string-stream.h +++ b/deps/v8/src/string-stream.h @@ -57,11 +57,10 @@ class HeapStringAllocator: public StringAllocator { // Allocator for use when no new c++ heap allocation is allowed. -// Allocates all space up front and does no allocation while building -// message. +// Given a preallocated buffer up front and does no allocation while +// building message. 
class NoAllocationStringAllocator: public StringAllocator { public: - explicit NoAllocationStringAllocator(unsigned bytes); NoAllocationStringAllocator(char* memory, unsigned size); char* allocate(unsigned bytes) { return space_; } char* grow(unsigned* bytes); diff --git a/deps/v8/src/stub-cache.cc b/deps/v8/src/stub-cache.cc index 7eb8cd3c67..b25f5b4bb5 100644 --- a/deps/v8/src/stub-cache.cc +++ b/deps/v8/src/stub-cache.cc @@ -736,22 +736,22 @@ Handle<Code> ComputeCallMiss(int argc) { Object* LoadCallbackProperty(Arguments args) { Handle<JSObject> recv = args.at<JSObject>(0); - AccessorInfo* callback = AccessorInfo::cast(args[1]); + Handle<JSObject> holder = args.at<JSObject>(1); + AccessorInfo* callback = AccessorInfo::cast(args[2]); + Handle<Object> data = args.at<Object>(3); Address getter_address = v8::ToCData<Address>(callback->getter()); v8::AccessorGetter fun = FUNCTION_CAST<v8::AccessorGetter>(getter_address); ASSERT(fun != NULL); - Handle<String> name = args.at<String>(2); - Handle<JSObject> holder = args.at<JSObject>(3); - HandleScope scope; - Handle<Object> data(callback->data()); - LOG(ApiNamedPropertyAccess("load", *recv, *name)); + Handle<String> name = args.at<String>(4); // NOTE: If we can align the structure of an AccessorInfo with the // locations of the arguments to this function maybe we don't have // to explicitly create the structure but can just pass a pointer // into the stack. + LOG(ApiNamedPropertyAccess("load", *recv, *name)); v8::AccessorInfo info(v8::Utils::ToLocal(recv), v8::Utils::ToLocal(data), v8::Utils::ToLocal(holder)); + HandleScope scope; v8::Handle<v8::Value> result; { // Leaving JavaScript. @@ -787,49 +787,129 @@ Object* StoreCallbackProperty(Arguments args) { return *value; } +/** + * Attempts to load a property with an interceptor (which must be present), + * but doesn't search the prototype chain. + * + * Returns |Heap::no_interceptor_result_sentinel()| if interceptor doesn't + * provide any value for the given name. + */ +Object* LoadPropertyWithInterceptorOnly(Arguments args) { + Handle<JSObject> receiver_handle = args.at<JSObject>(0); + Handle<JSObject> holder_handle = args.at<JSObject>(1); + Handle<String> name_handle = args.at<String>(2); + Handle<InterceptorInfo> interceptor_info = args.at<InterceptorInfo>(3); + Handle<Object> data_handle = args.at<Object>(4); + + Address getter_address = v8::ToCData<Address>(interceptor_info->getter()); + v8::NamedPropertyGetter getter = + FUNCTION_CAST<v8::NamedPropertyGetter>(getter_address); + ASSERT(getter != NULL); -Object* LoadInterceptorProperty(Arguments args) { - JSObject* recv = JSObject::cast(args[0]); - JSObject* holder = JSObject::cast(args[1]); - String* name = String::cast(args[2]); - Smi* lookup_hint = Smi::cast(args[3]); - ASSERT(holder->HasNamedInterceptor()); - PropertyAttributes attr = NONE; - - Object* result = holder->GetInterceptorPropertyWithLookupHint( - recv, lookup_hint, name, &attr); - if (result->IsFailure()) return result; + { + // Use the interceptor getter. + v8::AccessorInfo info(v8::Utils::ToLocal(receiver_handle), + v8::Utils::ToLocal(data_handle), + v8::Utils::ToLocal(holder_handle)); + HandleScope scope; + v8::Handle<v8::Value> r; + { + // Leaving JavaScript. + VMState state(EXTERNAL); + r = getter(v8::Utils::ToLocal(name_handle), info); + } + RETURN_IF_SCHEDULED_EXCEPTION(); + if (!r.IsEmpty()) { + return *v8::Utils::OpenHandle(*r); + } + } - // If the property is present, return it. 
- if (attr != ABSENT) return result; + return Heap::no_interceptor_result_sentinel(); +} - // If the top frame is an internal frame, this is really a call - // IC. In this case, we simply return the undefined result which - // will lead to an exception when trying to invoke the result as a - // function. - StackFrameIterator it; - it.Advance(); // skip exit frame - if (it.frame()->is_internal()) return result; +static Object* ThrowReferenceError(String* name) { // If the load is non-contextual, just return the undefined result. // Note that both keyed and non-keyed loads may end up here, so we // can't use either LoadIC or KeyedLoadIC constructors. IC ic(IC::NO_EXTRA_FRAME); ASSERT(ic.target()->is_load_stub() || ic.target()->is_keyed_load_stub()); - if (!ic.is_contextual()) return result; + if (!ic.is_contextual()) return Heap::undefined_value(); // Throw a reference error. + HandleScope scope; + Handle<String> name_handle(name); + Handle<Object> error = + Factory::NewReferenceError("not_defined", + HandleVector(&name_handle, 1)); + return Top::Throw(*error); +} + + +static Object* LoadWithInterceptor(Arguments* args, + PropertyAttributes* attrs) { + Handle<JSObject> receiver_handle = args->at<JSObject>(0); + Handle<JSObject> holder_handle = args->at<JSObject>(1); + Handle<String> name_handle = args->at<String>(2); + Handle<InterceptorInfo> interceptor_info = args->at<InterceptorInfo>(3); + Handle<Object> data_handle = args->at<Object>(4); + + Address getter_address = v8::ToCData<Address>(interceptor_info->getter()); + v8::NamedPropertyGetter getter = + FUNCTION_CAST<v8::NamedPropertyGetter>(getter_address); + ASSERT(getter != NULL); + { + // Use the interceptor getter. + v8::AccessorInfo info(v8::Utils::ToLocal(receiver_handle), + v8::Utils::ToLocal(data_handle), + v8::Utils::ToLocal(holder_handle)); HandleScope scope; - // We cannot use the raw name pointer here since getting the - // property might cause a GC. However, we can get the name from - // the stack using the arguments object. - Handle<String> name_handle = args.at<String>(2); - Handle<Object> error = - Factory::NewReferenceError("not_defined", - HandleVector(&name_handle, 1)); - return Top::Throw(*error); + v8::Handle<v8::Value> r; + { + // Leaving JavaScript. + VMState state(EXTERNAL); + r = getter(v8::Utils::ToLocal(name_handle), info); + } + RETURN_IF_SCHEDULED_EXCEPTION(); + if (!r.IsEmpty()) { + *attrs = NONE; + return *v8::Utils::OpenHandle(*r); + } } + + Object* result = holder_handle->GetPropertyPostInterceptor( + *receiver_handle, + *name_handle, + attrs); + RETURN_IF_SCHEDULED_EXCEPTION(); + return result; +} + + +/** + * Loads a property with an interceptor performing post interceptor + * lookup if interceptor failed. + */ +Object* LoadPropertyWithInterceptorForLoad(Arguments args) { + PropertyAttributes attr = NONE; + Object* result = LoadWithInterceptor(&args, &attr); + if (result->IsFailure()) return result; + + // If the property is present, return it. + if (attr != ABSENT) return result; + return ThrowReferenceError(String::cast(args[2])); +} + + +Object* LoadPropertyWithInterceptorForCall(Arguments args) { + PropertyAttributes attr; + Object* result = LoadWithInterceptor(&args, &attr); + RETURN_IF_SCHEDULED_EXCEPTION(); + // This is call IC. In this case, we simply return the undefined result which + // will lead to an exception when trying to invoke the result as a + // function. 
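Why the new *Only entry point returns a dedicated sentinel rather than undefined: an interceptor may legitimately answer 'undefined', so a caller probing it needs a distinct marker for "no answer at all". A sketch of the intended caller-side use under that assumption; ContinueLookupPastInterceptor is a hypothetical helper standing in for the post-interceptor lookup, and the V8 types are taken from the surrounding code:

static Object* LoadGuarded(Arguments args) {
  Object* result = LoadPropertyWithInterceptorOnly(args);
  if (result != Heap::no_interceptor_result_sentinel()) {
    return result;  // the interceptor answered, possibly with undefined
  }
  // Only the sentinel means the interceptor declined: continue with an
  // ordinary lookup past it.
  return ContinueLookupPastInterceptor(args);  // hypothetical helper
}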
+ return result; } @@ -863,6 +943,8 @@ Object* StubCompiler::CompileCallInitialize(Code::Flags flags) { Object* StubCompiler::CompileCallPreMonomorphic(Code::Flags flags) { HandleScope scope; int argc = Code::ExtractArgumentsCountFromFlags(flags); + // The code of the PreMonomorphic stub is the same as the code + // of the Initialized stub. They just differ on the code object flags. CallIC::GenerateInitialize(masm(), argc); Object* result = GetCodeWithFlags(flags, "CompileCallPreMonomorphic"); if (!result->IsFailure()) { diff --git a/deps/v8/src/stub-cache.h b/deps/v8/src/stub-cache.h index 8bee37074c..3b3caad56c 100644 --- a/deps/v8/src/stub-cache.h +++ b/deps/v8/src/stub-cache.h @@ -256,11 +256,14 @@ class StubCache : public AllStatic { } // Compute the entry for a given offset in exactly the same way as - // we done in generated code. This makes it a lot easier to avoid - // making mistakes in the hashed offset computations. + // we do in generated code. We generate a hash code that already + // ends in String::kHashShift 0s. Then we shift it so it is a multiple + // of sizeof(Entry). This makes it easier to avoid making mistakes + // in the hashed offset computations. static Entry* entry(Entry* table, int offset) { + const int shift_amount = kPointerSizeLog2 + 1 - String::kHashShift; return reinterpret_cast<Entry*>( - reinterpret_cast<Address>(table) + (offset << 1)); + reinterpret_cast<Address>(table) + (offset << shift_amount)); } }; @@ -304,7 +307,9 @@ Object* StoreCallbackProperty(Arguments args); // Support functions for IC stubs for interceptors. -Object* LoadInterceptorProperty(Arguments args); +Object* LoadPropertyWithInterceptorOnly(Arguments args); +Object* LoadPropertyWithInterceptorForLoad(Arguments args); +Object* LoadPropertyWithInterceptorForCall(Arguments args); Object* StoreInterceptorProperty(Arguments args); Object* CallInterceptorProperty(Arguments args); @@ -374,13 +379,6 @@ class StubCompiler BASE_EMBEDDED { Label* miss_label); static void GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind); - protected: - Object* GetCodeWithFlags(Code::Flags flags, const char* name); - Object* GetCodeWithFlags(Code::Flags flags, String* name); - - MacroAssembler* masm() { return &masm_; } - void set_failure(Failure* failure) { failure_ = failure; } - // Check the integrity of the prototype chain to make sure that the // current IC is still valid.
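//----------------------------------------------------------------------------
// A standalone sketch of the StubCache::entry() arithmetic introduced above,
// not code from this patch. It assumes an Entry of two pointers and takes
// kHashShift == 2 purely as an illustrative stand-in for String::kHashShift.

#include <cassert>
#include <cstdint>

struct Entry { void* key; void* value; };  // two pointers, as in StubCache

int main() {
  const int kPointerSizeLog2 = (sizeof(void*) == 8) ? 3 : 2;
  const int kHashShift = 2;  // assumed value, for illustration only
  const int shift_amount = kPointerSizeLog2 + 1 - kHashShift;
  // Every hashed offset ends in kHashShift zero bits...
  for (uint32_t offset = 0; offset < 4096; offset += (1u << kHashShift)) {
    // ...so after the shift it is a multiple of sizeof(Entry), and
    // table + shifted offset always lands on an Entry boundary.
    assert((offset << shift_amount) % sizeof(Entry) == 0);
  }
  return 0;
}
//----------------------------------------------------------------------------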
Register CheckPrototypes(JSObject* object, @@ -391,6 +389,13 @@ class StubCompiler BASE_EMBEDDED { String* name, Label* miss); + protected: + Object* GetCodeWithFlags(Code::Flags flags, const char* name); + Object* GetCodeWithFlags(Code::Flags flags, String* name); + + MacroAssembler* masm() { return &masm_; } + void set_failure(Failure* failure) { failure_ = failure; } + void GenerateLoadField(JSObject* object, JSObject* holder, Register receiver, @@ -421,7 +426,7 @@ class StubCompiler BASE_EMBEDDED { void GenerateLoadInterceptor(JSObject* object, JSObject* holder, - Smi* lookup_hint, + LookupResult* lookup, Register receiver, Register name_reg, Register scratch1, diff --git a/deps/v8/src/usage-analyzer.cc b/deps/v8/src/usage-analyzer.cc index 36464fa59b..5514f405c6 100644 --- a/deps/v8/src/usage-analyzer.cc +++ b/deps/v8/src/usage-analyzer.cc @@ -42,7 +42,7 @@ static const int InitialWeight = 100; class UsageComputer: public AstVisitor { public: - static bool Traverse(Node* node); + static bool Traverse(AstNode* node); void VisitBlock(Block* node); void VisitDeclaration(Declaration* node); @@ -116,7 +116,7 @@ class WeightScaler BASE_EMBEDDED { // ---------------------------------------------------------------------------- // Implementation of UsageComputer -bool UsageComputer::Traverse(Node* node) { +bool UsageComputer::Traverse(AstNode* node) { UsageComputer uc(InitialWeight, false); uc.Visit(node); return !uc.HasStackOverflow(); diff --git a/deps/v8/src/version.cc b/deps/v8/src/version.cc index f1833a5934..911f6e0010 100644 --- a/deps/v8/src/version.cc +++ b/deps/v8/src/version.cc @@ -33,9 +33,9 @@ // NOTE these macros are used by the SCons build script so their names // cannot be changed without changing the SCons build script. #define MAJOR_VERSION 1 -#define MINOR_VERSION 2 -#define BUILD_NUMBER 14 -#define PATCH_LEVEL 0 +#define MINOR_VERSION 3 +#define BUILD_NUMBER 1 +#define PATCH_LEVEL 1 #define CANDIDATE_VERSION false // Define SONAME to have the SCons build the put a specific SONAME into the diff --git a/deps/v8/src/x64/assembler-x64.cc b/deps/v8/src/x64/assembler-x64.cc index c4ee454836..5e70f9e065 100644 --- a/deps/v8/src/x64/assembler-x64.cc +++ b/deps/v8/src/x64/assembler-x64.cc @@ -456,13 +456,13 @@ void Assembler::arithmetic_op_32(byte opcode, Register dst, Register src) { void Assembler::arithmetic_op_32(byte opcode, - const Operand& dst, - Register src) { + Register reg, + const Operand& rm_reg) { EnsureSpace ensure_space(this); last_pc_ = pc_; - emit_optional_rex_32(src, dst); + emit_optional_rex_32(reg, rm_reg); emit(opcode); - emit_operand(src, dst); + emit_operand(reg, rm_reg); } @@ -687,6 +687,13 @@ void Assembler::call(const Operand& op) { } +void Assembler::cdq() { + EnsureSpace ensure_space(this); + last_pc_ = pc_; + emit(0x99); +} + + void Assembler::cmovq(Condition cc, Register dst, Register src) { // No need to check CpuInfo for CMOV support, it's a required part of the // 64-bit architecture. 
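//----------------------------------------------------------------------------
// A standalone sketch of what the newly added cdq/idivl pair computes, not
// code from this patch: cdq broadcasts eax's sign bit into edx, forming the
// 64-bit dividend edx:eax that the 32-bit idiv expects; idivl then leaves
// the quotient in eax and the remainder in edx, truncating toward zero.

#include <cassert>
#include <cstdint>

int main() {
  int32_t eax = -7, ebx = 2;
  int32_t edx = (eax < 0) ? -1 : 0;  // cdq: sign bit into all of edx
  int64_t dividend = int64_t(edx) * (int64_t(1) << 32) + uint32_t(eax);
  assert(dividend == -7);            // edx:eax is just eax, sign-extended
  int32_t quotient = int32_t(dividend / ebx);   // idivl: quotient  -> eax
  int32_t remainder = int32_t(dividend % ebx);  //        remainder -> edx
  assert(quotient == -3 && remainder == -1);    // truncation toward zero
  return 0;
}
//----------------------------------------------------------------------------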
@@ -773,6 +780,15 @@ void Assembler::decq(const Operand& dst) { } +void Assembler::decl(Register dst) { + EnsureSpace ensure_space(this); + last_pc_ = pc_; + emit_optional_rex_32(dst); + emit(0xFF); + emit_modrm(0x1, dst); +} + + void Assembler::decl(const Operand& dst) { EnsureSpace ensure_space(this); last_pc_ = pc_; @@ -798,7 +814,7 @@ void Assembler::hlt() { } -void Assembler::idiv(Register src) { +void Assembler::idivq(Register src) { EnsureSpace ensure_space(this); last_pc_ = pc_; emit_rex_64(src); @@ -807,6 +823,15 @@ void Assembler::idiv(Register src) { } +void Assembler::idivl(Register src) { + EnsureSpace ensure_space(this); + last_pc_ = pc_; + emit_optional_rex_32(src); + emit(0xF7); + emit_modrm(0x7, src); +} + + void Assembler::imul(Register src) { EnsureSpace ensure_space(this); last_pc_ = pc_; @@ -1115,6 +1140,9 @@ void Assembler::movq(const Operand& dst, Register src) { void Assembler::movq(Register dst, void* value, RelocInfo::Mode rmode) { + // This method must not be used with heap object references. The stored + // address is not GC safe. Use the handle version instead. + ASSERT(rmode > RelocInfo::LAST_GCED_ENUM); EnsureSpace ensure_space(this); last_pc_ = pc_; emit_rex_64(dst); @@ -1521,7 +1549,7 @@ void Assembler::store_rax(ExternalReference ref) { void Assembler::testb(Register reg, Immediate mask) { - ASSERT(is_int8(mask.value_)); + ASSERT(is_int8(mask.value_) || is_uint8(mask.value_)); EnsureSpace ensure_space(this); last_pc_ = pc_; if (reg.is(rax)) { @@ -1540,7 +1568,7 @@ void Assembler::testb(Register reg, Immediate mask) { void Assembler::testb(const Operand& op, Immediate mask) { - ASSERT(is_int8(mask.value_)); + ASSERT(is_int8(mask.value_) || is_uint8(mask.value_)); EnsureSpace ensure_space(this); last_pc_ = pc_; emit_optional_rex_32(rax, op); @@ -2183,48 +2211,3 @@ const int RelocInfo::kApplyMask = 1 << RelocInfo::INTERNAL_REFERENCE; } } // namespace v8::internal - - -// TODO(x64): Implement and move these to their correct cc-files: -#include "ast.h" -#include "bootstrapper.h" -#include "codegen-inl.h" -#include "cpu.h" -#include "debug.h" -#include "disasm.h" -#include "disassembler.h" -#include "frames-inl.h" -#include "x64/macro-assembler-x64.h" -#include "x64/regexp-macro-assembler-x64.h" -#include "ic-inl.h" -#include "log.h" -#include "macro-assembler.h" -#include "parser.h" -#include "regexp-macro-assembler.h" -#include "regexp-stack.h" -#include "register-allocator-inl.h" -#include "register-allocator.h" -#include "runtime.h" -#include "scopes.h" -#include "serialize.h" -#include "stub-cache.h" -#include "unicode.h" - -namespace v8 { -namespace internal { - - -void BreakLocationIterator::ClearDebugBreakAtReturn() { - UNIMPLEMENTED(); -} - -bool BreakLocationIterator::IsDebugBreakAtReturn() { - UNIMPLEMENTED(); - return false; -} - -void BreakLocationIterator::SetDebugBreakAtReturn() { - UNIMPLEMENTED(); -} - -} } // namespace v8::internal diff --git a/deps/v8/src/x64/assembler-x64.h b/deps/v8/src/x64/assembler-x64.h index e8953329bf..ad4721d6ad 100644 --- a/deps/v8/src/x64/assembler-x64.h +++ b/deps/v8/src/x64/assembler-x64.h @@ -292,6 +292,7 @@ enum ScaleFactor { times_4 = 2, times_8 = 3, times_int_size = times_4, + times_half_pointer_size = times_4, times_pointer_size = times_8 }; @@ -521,10 +522,6 @@ class Assembler : public Malloced { void xchg(Register dst, Register src); // Arithmetics - void addq(Register dst, Register src) { - arithmetic_op(0x03, dst, src); - } - void addl(Register dst, Register src) { arithmetic_op_32(0x03, dst, src); } @@ 
-533,14 +530,21 @@ class Assembler : public Malloced { immediate_arithmetic_op_32(0x0, dst, src); } + void addl(Register dst, const Operand& src) { + arithmetic_op_32(0x03, dst, src); + } + void addl(const Operand& dst, Immediate src) { immediate_arithmetic_op_32(0x0, dst, src); } - void addq(Register dst, const Operand& src) { + void addq(Register dst, Register src) { arithmetic_op(0x03, dst, src); } + void addq(Register dst, const Operand& src) { + arithmetic_op(0x03, dst, src); + } void addq(const Operand& dst, Register src) { arithmetic_op(0x01, src, dst); @@ -567,11 +571,11 @@ class Assembler : public Malloced { } void cmpl(Register dst, const Operand& src) { - arithmetic_op_32(0x3B, src, dst); + arithmetic_op_32(0x3B, dst, src); } void cmpl(const Operand& dst, Register src) { - arithmetic_op_32(0x39, dst, src); + arithmetic_op_32(0x39, src, dst); } void cmpl(Register dst, Immediate src) { @@ -624,13 +628,18 @@ class Assembler : public Malloced { void decq(Register dst); void decq(const Operand& dst); + void decl(Register dst); void decl(const Operand& dst); // Sign-extends rax into rdx:rax. void cqo(); + // Sign-extends eax into edx:eax. + void cdq(); // Divide rdx:rax by src. Quotient in rax, remainder in rdx. - void idiv(Register src); + void idivq(Register src); + // Divide edx:eax by lower 32 bits of src. Quotient in eax, rem. in edx. + void idivl(Register src); // Signed multiply instructions. void imul(Register src); // rdx:rax = rax * src. @@ -718,6 +727,10 @@ class Assembler : public Malloced { shift_32(dst, 0x4); } + void shll(Register dst, Immediate shift_amount) { + shift_32(dst, shift_amount, 0x4); + } + void shr(Register dst, Immediate shift_amount) { shift(dst, shift_amount, 0x5); } @@ -730,6 +743,10 @@ class Assembler : public Malloced { shift_32(dst, 0x5); } + void shrl(Register dst, Immediate shift_amount) { + shift_32(dst, shift_amount, 0x5); + } + void store_rax(void* dst, RelocInfo::Mode mode); void store_rax(ExternalReference ref); @@ -1114,8 +1131,8 @@ class Assembler : public Malloced { // ModR/M byte. void arithmetic_op(byte opcode, Register dst, Register src); void arithmetic_op_32(byte opcode, Register dst, Register src); - void arithmetic_op_32(byte opcode, const Operand& dst, Register src); - void arithmetic_op(byte opcode, Register reg, const Operand& op); + void arithmetic_op_32(byte opcode, Register reg, const Operand& rm_reg); + void arithmetic_op(byte opcode, Register reg, const Operand& rm_reg); void immediate_arithmetic_op(byte subcode, Register dst, Immediate src); void immediate_arithmetic_op(byte subcode, const Operand& dst, Immediate src); // Operate on a 32-bit word in memory or register. diff --git a/deps/v8/src/x64/builtins-x64.cc b/deps/v8/src/x64/builtins-x64.cc index 459921cd40..08f8338c8d 100644 --- a/deps/v8/src/x64/builtins-x64.cc +++ b/deps/v8/src/x64/builtins-x64.cc @@ -394,9 +394,9 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) { // If given receiver is already a JavaScript object then there's no // reason for converting it. __ CmpObjectType(rbx, FIRST_JS_OBJECT_TYPE, rcx); - __ j(less, &call_to_object); + __ j(below, &call_to_object); __ CmpInstanceType(rcx, LAST_JS_OBJECT_TYPE); - __ j(less_equal, &push_receiver); + __ j(below_equal, &push_receiver); // Convert the receiver to an object. 
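//----------------------------------------------------------------------------
// A standalone sketch of why the patch turns j(less)/j(less_equal) into
// j(below)/j(below_equal) after instance-type compares, not code from this
// patch: type codes are unsigned, and the signed and unsigned readings of
// the same compare disagree once a value has its top bit set. The concrete
// byte values below are illustrative, not V8's real type codes.

#include <cassert>
#include <cstdint>

int main() {
  uint8_t type = 0x7F;   // some instance-type code
  uint8_t bound = 0x90;  // some FIRST_..._TYPE boundary with the top bit set
  // One compare sets the flags; the condition code picks the reading:
  assert(type < bound);                   // "below" (unsigned): 127 < 144
  int8_t signed_type = int8_t(type);      // 127 under two's complement
  int8_t signed_bound = int8_t(bound);    // -112 under two's complement
  assert(!(signed_type < signed_bound));  // "less" (signed) would misjudge
  return 0;
}
//----------------------------------------------------------------------------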
__ bind(&call_to_object); @@ -562,7 +562,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { // If the type of the result (stored in its map) is less than // FIRST_JS_OBJECT_TYPE, it is not an object in the ECMA sense. __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx); - __ j(greater_equal, &exit); + __ j(above_equal, &exit); // Throw away the result of the constructor invocation and use the // on-stack receiver as the result. diff --git a/deps/v8/src/x64/codegen-x64.cc b/deps/v8/src/x64/codegen-x64.cc index e3e32e694f..3112ecc126 100644 --- a/deps/v8/src/x64/codegen-x64.cc +++ b/deps/v8/src/x64/codegen-x64.cc @@ -389,6 +389,112 @@ bool CodeGenerator::HasValidEntryRegisters() { #endif +class DeferredReferenceGetKeyedValue: public DeferredCode { + public: + explicit DeferredReferenceGetKeyedValue(Register dst, + Register receiver, + Register key, + bool is_global) + : dst_(dst), receiver_(receiver), key_(key), is_global_(is_global) { + set_comment("[ DeferredReferenceGetKeyedValue"); + } + + virtual void Generate(); + + Label* patch_site() { return &patch_site_; } + + private: + Label patch_site_; + Register dst_; + Register receiver_; + Register key_; + bool is_global_; +}; + + +void DeferredReferenceGetKeyedValue::Generate() { + __ push(receiver_); // First IC argument. + __ push(key_); // Second IC argument. + + // Calculate the delta from the IC call instruction to the map check + // movq instruction in the inlined version. This delta is stored in + // a test(rax, delta) instruction after the call so that we can find + // it in the IC initialization code and patch the movq instruction. + // This means that we cannot allow test instructions after calls to + // KeyedLoadIC stubs in other places. + Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize)); + RelocInfo::Mode mode = is_global_ + ? RelocInfo::CODE_TARGET_CONTEXT + : RelocInfo::CODE_TARGET; + __ Call(ic, mode); + // The delta from the start of the map-compare instruction to the + // test instruction. We use masm_-> directly here instead of the __ + // macro because the macro sometimes uses macro expansion to turn + // into something that can't return a value. This is encountered + // when doing generated code coverage tests. + int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site()); + // Here we use masm_-> instead of the __ macro because this is the + // instruction that gets patched and coverage code gets in the way. + // TODO(X64): Consider whether it's worth switching the test to a + // 7-byte NOP with non-zero immediate (0f 1f 80 xxxxxxxx) which won't + // be generated normally. + masm_->testl(rax, Immediate(-delta_to_patch_site)); + __ IncrementCounter(&Counters::keyed_load_inline_miss, 1); + + if (!dst_.is(rax)) __ movq(dst_, rax); + __ pop(key_); + __ pop(receiver_); +} + + +class DeferredReferenceSetKeyedValue: public DeferredCode { + public: + DeferredReferenceSetKeyedValue(Register value, + Register key, + Register receiver) + : value_(value), key_(key), receiver_(receiver) { + set_comment("[ DeferredReferenceSetKeyedValue"); + } + + virtual void Generate(); + + Label* patch_site() { return &patch_site_; } + + private: + Register value_; + Register key_; + Register receiver_; + Label patch_site_; +}; + + +void DeferredReferenceSetKeyedValue::Generate() { + __ IncrementCounter(&Counters::keyed_store_inline_miss, 1); + // Push receiver and key arguments on the stack. + __ push(receiver_); + __ push(key_); + // Move value argument to eax as expected by the IC stub. 
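//----------------------------------------------------------------------------
// A standalone sketch of the patch-site handshake described above, not code
// from this patch: the testl emitted after the IC call stores minus the
// distance back to the inlined map check, so the IC (whose return address
// is the testl) can find the movq it must patch. Offsets are illustrative.

#include <cassert>

int main() {
  int map_check_site = 7;  // start of the movq carrying the dummy map
  int test_site = 42;      // the testl right after the call to the IC
  int delta_to_patch_site = test_site - map_check_site;
  int imm32 = -delta_to_patch_site;  // testl(rax, Immediate(-delta...))
  // Reading the immediate at the return address leads straight back to
  // the instruction that needs patching:
  assert(test_site + imm32 == map_check_site);
  return 0;
}
//----------------------------------------------------------------------------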
+ if (!value_.is(rax)) __ movq(rax, value_); + // Call the IC stub. + Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize)); + __ Call(ic, RelocInfo::CODE_TARGET); + // The delta from the start of the map-compare instructions (initial movq) + // to the test instruction. We use masm_-> directly here instead of the + // __ macro because the macro sometimes uses macro expansion to turn + // into something that can't return a value. This is encountered + // when doing generated code coverage tests. + int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site()); + // Here we use masm_-> instead of the __ macro because this is the + // instruction that gets patched and coverage code gets in the way. + masm_->testl(rax, Immediate(-delta_to_patch_site)); + // Restore value (returned from store IC), key and receiver + // registers. + if (!value_.is(rax)) __ movq(value_, rax); + __ pop(key_); + __ pop(receiver_); +} + + class DeferredStackCheck: public DeferredCode { public: DeferredStackCheck() { @@ -2193,9 +2299,8 @@ void CodeGenerator::VisitAssignment(Assignment* node) { // The receiver is the argument to the runtime call. It is the // first value pushed when the reference was loaded to the // frame. - // TODO(X64): Enable this and the switch back to fast, once they work. - // frame_->PushElementAt(target.size() - 1); - // Result ignored = frame_->CallRuntime(Runtime::kToSlowProperties, 1); + frame_->PushElementAt(target.size() - 1); + Result ignored = frame_->CallRuntime(Runtime::kToSlowProperties, 1); } if (node->op() == Token::ASSIGN || node->op() == Token::INIT_VAR || @@ -2203,20 +2308,18 @@ void CodeGenerator::VisitAssignment(Assignment* node) { Load(node->value()); } else { - // Literal* literal = node->value()->AsLiteral(); + Literal* literal = node->value()->AsLiteral(); bool overwrite_value = (node->value()->AsBinaryOperation() != NULL && node->value()->AsBinaryOperation()->ResultOverwriteAllowed()); - // Variable* right_var = node->value()->AsVariableProxy()->AsVariable(); + Variable* right_var = node->value()->AsVariableProxy()->AsVariable(); // There are two cases where the target is not read in the right hand // side, that are easy to test for: the right hand side is a literal, // or the right hand side is a different variable. TakeValue invalidates // the target, with an implicit promise that it will be written to again // before it is read. - // TODO(X64): Implement TakeValue optimization. Check issue 150016. - if (false) { - // if (literal != NULL || (right_var != NULL && right_var != var)) { - // target.TakeValue(NOT_INSIDE_TYPEOF); + if (literal != NULL || (right_var != NULL && right_var != var)) { + target.TakeValue(NOT_INSIDE_TYPEOF); } else { target.GetValue(NOT_INSIDE_TYPEOF); } @@ -2247,9 +2350,8 @@ void CodeGenerator::VisitAssignment(Assignment* node) { // argument to the runtime call is the receiver, which is the // first value pushed as part of the reference, which is below // the lhs value. - // TODO(X64): Enable this once ToFastProperties works. - // frame_->PushElementAt(target.size()); - // Result ignored = frame_->CallRuntime(Runtime::kToFastProperties, 1); + frame_->PushElementAt(target.size()); + Result ignored = frame_->CallRuntime(Runtime::kToFastProperties, 1); } } } @@ -3379,7 +3481,7 @@ void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) { // functions to make sure they have 'Function' as their class. 
__ CmpObjectType(obj.reg(), FIRST_JS_OBJECT_TYPE, obj.reg()); - null.Branch(less); + null.Branch(below); // As long as JS_FUNCTION_TYPE is the last instance type and it is // right after LAST_JS_OBJECT_TYPE, we can avoid checking for @@ -3645,7 +3747,7 @@ void CodeGenerator::ToBoolean(ControlDestination* dest) { // Smi => false iff zero. ASSERT(kSmiTag == 0); - __ testq(value.reg(), value.reg()); + __ testl(value.reg(), value.reg()); dest->false_target()->Branch(zero); __ testl(value.reg(), Immediate(kSmiTagMask)); dest->true_target()->Branch(zero); @@ -3728,7 +3830,7 @@ void CodeGenerator::LoadReference(Reference* ref) { } else { // Anything else is a runtime error. Load(e); - // frame_->CallRuntime(Runtime::kThrowReferenceError, 1); + frame_->CallRuntime(Runtime::kThrowReferenceError, 1); } in_spilled_code_ = was_in_spilled_code; @@ -4130,7 +4232,7 @@ Result CodeGenerator::LoadFromGlobalSlotCheckExtensions( // A test rax instruction following the call signals that the inobject // property case was inlined. Ensure that there is not a test eax // instruction here. - __ nop(); + masm_->nop(); // Discard the global object. The result is in answer. frame_->Drop(); return answer; @@ -4700,7 +4802,7 @@ void DeferredReferenceGetNamedValue::Generate() { int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site()); // Here we use masm_-> instead of the __ macro because this is the // instruction that gets patched and coverage code gets in the way. - masm_->testq(rax, Immediate(-delta_to_patch_site)); + masm_->testl(rax, Immediate(-delta_to_patch_site)); __ IncrementCounter(&Counters::named_load_inline_miss, 1); if (!dst_.is(rax)) __ movq(dst_, rax); @@ -4851,10 +4953,8 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op, Label add_success; __ j(no_overflow, &add_success); __ subl(operand->reg(), Immediate(smi_value)); - __ movsxlq(operand->reg(), operand->reg()); deferred->Jump(); __ bind(&add_success); - __ movsxlq(operand->reg(), operand->reg()); deferred->BindExit(); frame_->Push(operand); break; @@ -4965,35 +5065,36 @@ void CodeGenerator::LikelySmiBinaryOperation(Token::Value op, } deferred->Branch(not_zero); - if (!left_is_in_rax) __ movq(rax, left->reg()); - // Sign extend rax into rdx:rax. - __ cqo(); + // All operations on the smi values are on 32-bit registers, which are + // zero-extended into 64-bits by all 32-bit operations. + if (!left_is_in_rax) __ movl(rax, left->reg()); + // Sign extend eax into edx:eax. + __ cdq(); // Check for 0 divisor. - __ testq(right->reg(), right->reg()); + __ testl(right->reg(), right->reg()); deferred->Branch(zero); // Divide rdx:rax by the right operand. - __ idiv(right->reg()); + __ idivl(right->reg()); // Complete the operation. if (op == Token::DIV) { - // Check for negative zero result. If result is zero, and divisor - // is negative, return a floating point negative zero. The - // virtual frame is unchanged in this block, so local control flow - // can use a Label rather than a JumpTarget. + // Check for negative zero result. If the result is zero, and the + // divisor is negative, return a floating point negative zero. Label non_zero_result; - __ testq(left->reg(), left->reg()); + __ testl(left->reg(), left->reg()); __ j(not_zero, &non_zero_result); - __ testq(right->reg(), right->reg()); + __ testl(right->reg(), right->reg()); deferred->Branch(negative); + // The frame is identical on all paths reaching this label. __ bind(&non_zero_result); // Check for the corner case of dividing the most negative smi by // -1. 
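//----------------------------------------------------------------------------
// A standalone sketch of the corner case named in the comment above, not
// code from this patch: the most negative smi is -2^30, and dividing it by
// -1 gives +2^30 == 0x40000000, which needs one more payload bit than a smi
// has -- hence the explicit cmpl against 0x40000000 on the quotient.

#include <cassert>
#include <cstdint>

int main() {
  const int32_t kSmiMin = -(1 << 30);  // 31-bit smi payload, tag == 0
  const int32_t kSmiMax = (1 << 30) - 1;
  int32_t quotient = kSmiMin / -1;     // fine as a 32-bit integer...
  assert(quotient == 0x40000000);      // ...but one past the smi range,
  assert(quotient > kSmiMax);          // and idiv sets no overflow flag
  return 0;
}
//----------------------------------------------------------------------------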
We cannot use the overflow flag, since it is not set by // idiv instruction. ASSERT(kSmiTag == 0 && kSmiTagSize == 1); - __ cmpq(rax, Immediate(0x40000000)); + __ cmpl(rax, Immediate(0x40000000)); deferred->Branch(equal); // Check that the remainder is zero. - __ testq(rdx, rdx); + __ testl(rdx, rdx); deferred->Branch(not_zero); // Tag the result and store it in the quotient register. ASSERT(kSmiTagSize == times_2); // adjust code if not the case @@ -5004,15 +5105,14 @@ void CodeGenerator::LikelySmiBinaryOperation(Token::Value op, frame_->Push("ient); } else { ASSERT(op == Token::MOD); - // Check for a negative zero result. If the result is zero, and - // the dividend is negative, return a floating point negative - // zero. The frame is unchanged in this block, so local control - // flow can use a Label rather than a JumpTarget. + // Check for a negative zero result. If the result is zero, and the + // dividend is negative, return a floating point negative zero. Label non_zero_result; - __ testq(rdx, rdx); + __ testl(rdx, rdx); __ j(not_zero, &non_zero_result); - __ testq(left->reg(), left->reg()); + __ testl(left->reg(), left->reg()); deferred->Branch(negative); + // The frame is identical on all paths reaching this label. __ bind(&non_zero_result); deferred->BindExit(); left->Unuse(); @@ -5056,9 +5156,9 @@ void CodeGenerator::LikelySmiBinaryOperation(Token::Value op, deferred->Branch(not_zero); // Untag both operands. - __ movq(answer.reg(), left->reg()); - __ sar(answer.reg(), Immediate(kSmiTagSize)); - __ sar(rcx, Immediate(kSmiTagSize)); + __ movl(answer.reg(), left->reg()); + __ sarl(answer.reg(), Immediate(kSmiTagSize)); + __ sarl(rcx, Immediate(kSmiTagSize)); // Perform the operation. switch (op) { case Token::SAR: @@ -5154,7 +5254,7 @@ void CodeGenerator::LikelySmiBinaryOperation(Token::Value op, ASSERT(kSmiTag == 0); // Adjust code below if not the case. // Remove smi tag from the left operand (but keep sign). // Left-hand operand has been copied into answer. - __ sar(answer.reg(), Immediate(kSmiTagSize)); + __ sarl(answer.reg(), Immediate(kSmiTagSize)); // Do multiplication of smis, leaving result in answer. __ imull(answer.reg(), right->reg()); // Go slow on overflows. @@ -5164,7 +5264,7 @@ void CodeGenerator::LikelySmiBinaryOperation(Token::Value op, // in this block, so local control flow can use a Label rather // than a JumpTarget. Label non_zero_result; - __ testq(answer.reg(), answer.reg()); + __ testl(answer.reg(), answer.reg()); __ j(not_zero, &non_zero_result); __ movq(answer.reg(), left->reg()); __ or_(answer.reg(), right->reg()); @@ -5183,6 +5283,7 @@ void CodeGenerator::LikelySmiBinaryOperation(Token::Value op, break; case Token::BIT_XOR: + ASSERT(kSmiTag == 0); // Adjust code below if not the case. __ xor_(answer.reg(), right->reg()); break; @@ -5288,7 +5389,8 @@ void Reference::GetValue(TypeofState typeof_state) { kScratchRegister); // This branch is always a forwards branch so it's always a fixed // size which allows the assert below to succeed and patching to work. - deferred->Branch(not_equal); + // Don't use deferred->Branch(...), since that might add coverage code. + masm->j(not_equal, deferred->entry_label()); // The delta from the patch label to the load offset must be // statically known. @@ -5315,25 +5417,118 @@ void Reference::GetValue(TypeofState typeof_state) { Variable* var = expression_->AsVariableProxy()->AsVariable(); bool is_global = var != NULL; ASSERT(!is_global || var->is_global()); + // Inline array load code if inside of a loop. 
We do not know // the receiver map yet, so we initially generate the code with // a check against an invalid map. In the inline cache code, we // patch the map check if appropriate. + if (cgen_->loop_nesting() > 0) { + Comment cmnt(masm, "[ Inlined load from keyed Property"); - // TODO(x64): Implement inlined loads for keyed properties. - // Comment cmnt(masm, "[ Load from keyed Property"); - - RelocInfo::Mode mode = is_global - ? RelocInfo::CODE_TARGET_CONTEXT - : RelocInfo::CODE_TARGET; - Result answer = cgen_->frame()->CallKeyedLoadIC(mode); - // Make sure that we do not have a test instruction after the - // call. A test instruction after the call is used to - // indicate that we have generated an inline version of the - // keyed load. The explicit nop instruction is here because - // the push that follows might be peep-hole optimized away. - __ nop(); - cgen_->frame()->Push(&answer); + Result key = cgen_->frame()->Pop(); + Result receiver = cgen_->frame()->Pop(); + key.ToRegister(); + receiver.ToRegister(); + + // Use a fresh temporary to load the elements without destroying + // the receiver which is needed for the deferred slow case. + Result elements = cgen_->allocator()->Allocate(); + ASSERT(elements.is_valid()); + + // Use a fresh temporary for the index and later the loaded + // value. + Result index = cgen_->allocator()->Allocate(); + ASSERT(index.is_valid()); + + DeferredReferenceGetKeyedValue* deferred = + new DeferredReferenceGetKeyedValue(index.reg(), + receiver.reg(), + key.reg(), + is_global); + + // Check that the receiver is not a smi (only needed if this + // is not a load from the global context) and that it has the + // expected map. + if (!is_global) { + __ testl(receiver.reg(), Immediate(kSmiTagMask)); + deferred->Branch(zero); + } + + // Initially, use an invalid map. The map is patched in the IC + // initialization code. + __ bind(deferred->patch_site()); + // Use masm-> here instead of the double underscore macro since extra + // coverage code can interfere with the patching. + masm->movq(kScratchRegister, Factory::null_value(), + RelocInfo::EMBEDDED_OBJECT); + masm->cmpq(FieldOperand(receiver.reg(), HeapObject::kMapOffset), + kScratchRegister); + deferred->Branch(not_equal); + + // Check that the key is a non-negative smi. + __ testl(key.reg(), + Immediate(static_cast<int32_t>(kSmiTagMask | 0x80000000u))); + deferred->Branch(not_zero); + + // Get the elements array from the receiver and check that it + // is not a dictionary. + __ movq(elements.reg(), + FieldOperand(receiver.reg(), JSObject::kElementsOffset)); + __ Cmp(FieldOperand(elements.reg(), HeapObject::kMapOffset), + Factory::fixed_array_map()); + deferred->Branch(not_equal); + + // Shift the key to get the actual index value and check that + // it is within bounds. + __ movl(index.reg(), key.reg()); + __ shrl(index.reg(), Immediate(kSmiTagSize)); + __ cmpl(index.reg(), + FieldOperand(elements.reg(), FixedArray::kLengthOffset)); + deferred->Branch(above_equal); + + // The index register holds the un-smi-tagged key. It has been + // zero-extended to 64-bits, so it can be used directly as index in the + // operand below. + // Load and check that the result is not the hole. We could + // reuse the index or elements register for the value. + // + // TODO(206): Consider whether it makes sense to try some + // heuristic about which register to reuse. For example, if + // one is rax, then we can reuse that one because the value + // coming from the deferred code will be in rax.
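//----------------------------------------------------------------------------
// A standalone sketch of the single-test check used above, not code from
// this patch: a value is a non-negative smi exactly when its tag bit
// (bit 0, since kSmiTag == 0 and kSmiTagSize == 1) and its sign bit
// (bit 31) are both clear, so one AND against the combined mask answers
// both questions at once.

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t kSmiTagMask = 1;
  const uint32_t mask = kSmiTagMask | 0x80000000u;
  uint32_t tagged_smi_3 = 3u << 1;              // smi 3: both bits clear
  uint32_t tagged_smi_neg = uint32_t(-3) << 1;  // negative smi: sign bit set
  uint32_t heap_pointer = 0x08000001u;          // odd address: tag bit set
  assert((tagged_smi_3 & mask) == 0);           // passes: non-negative smi
  assert((tagged_smi_neg & mask) != 0);         // fails: negative
  assert((heap_pointer & mask) != 0);           // fails: not a smi
  return 0;
}
//----------------------------------------------------------------------------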
+ Result value = index; + __ movq(value.reg(), + Operand(elements.reg(), + index.reg(), + times_pointer_size, + FixedArray::kHeaderSize - kHeapObjectTag)); + elements.Unuse(); + index.Unuse(); + __ Cmp(value.reg(), Factory::the_hole_value()); + deferred->Branch(equal); + __ IncrementCounter(&Counters::keyed_load_inline, 1); + + deferred->BindExit(); + // Restore the receiver and key to the frame and push the + // result on top of it. + cgen_->frame()->Push(&receiver); + cgen_->frame()->Push(&key); + cgen_->frame()->Push(&value); + + } else { + Comment cmnt(masm, "[ Load from keyed Property"); + RelocInfo::Mode mode = is_global + ? RelocInfo::CODE_TARGET_CONTEXT + : RelocInfo::CODE_TARGET; + Result answer = cgen_->frame()->CallKeyedLoadIC(mode); + // Make sure that we do not have a test instruction after the + // call. A test instruction after the call is used to + // indicate that we have generated an inline version of the + // keyed load. The explicit nop instruction is here because + // the push that follows might be peep-hole optimized away. + __ nop(); + cgen_->frame()->Push(&answer); + } break; } @@ -5400,15 +5595,105 @@ void Reference::SetValue(InitState init_state) { case KEYED: { Comment cmnt(masm, "[ Store to keyed Property"); - // TODO(x64): Implement inlined version of keyed stores. + // Generate inlined version of the keyed store if the code is in + // a loop and the key is likely to be a smi. + Property* property = expression()->AsProperty(); + ASSERT(property != NULL); + SmiAnalysis* key_smi_analysis = property->key()->type(); - Result answer = cgen_->frame()->CallKeyedStoreIC(); - // Make sure that we do not have a test instruction after the - // call. A test instruction after the call is used to - // indicate that we have generated an inline version of the - // keyed store. - __ nop(); - cgen_->frame()->Push(&answer); + if (cgen_->loop_nesting() > 0 && key_smi_analysis->IsLikelySmi()) { + Comment cmnt(masm, "[ Inlined store to keyed Property"); + + // Get the receiver, key and value into registers. + Result value = cgen_->frame()->Pop(); + Result key = cgen_->frame()->Pop(); + Result receiver = cgen_->frame()->Pop(); + + Result tmp = cgen_->allocator_->Allocate(); + ASSERT(tmp.is_valid()); + + // Determine whether the value is a constant before putting it + // in a register. + bool value_is_constant = value.is_constant(); + + // Make sure that value, key and receiver are in registers. + value.ToRegister(); + key.ToRegister(); + receiver.ToRegister(); + + DeferredReferenceSetKeyedValue* deferred = + new DeferredReferenceSetKeyedValue(value.reg(), + key.reg(), + receiver.reg()); + + // Check that the value is a smi if it is not a constant. + // We can skip the write barrier for smis and constants. + if (!value_is_constant) { + __ testl(value.reg(), Immediate(kSmiTagMask)); + deferred->Branch(not_zero); + } + + // Check that the key is a non-negative smi. + __ testl(key.reg(), + Immediate(static_cast<uint32_t>(kSmiTagMask | 0x80000000U))); + deferred->Branch(not_zero); + + // Check that the receiver is not a smi. + __ testl(receiver.reg(), Immediate(kSmiTagMask)); + deferred->Branch(zero); + + // Check that the receiver is a JSArray. + __ CmpObjectType(receiver.reg(), JS_ARRAY_TYPE, kScratchRegister); + deferred->Branch(not_equal); + + // Check that the key is within bounds. Both the key and the + // length of the JSArray are smis, so compare only low 32 bits. 
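//----------------------------------------------------------------------------
// A standalone sketch of two properties of smi tagging (tag == 0, shift
// == 1) that the inlined keyed store exploits, not code from this patch:
// comparing two tagged values orders the same as comparing the untagged
// ones, so the cmpl below can work on the tagged key and length directly,
// and a tagged key scaled by half a pointer size addresses full
// pointer-size array slots (the times_half_pointer_size factor).

#include <cassert>
#include <cstdint>

inline int32_t Tag(int32_t v) { return v << 1; }

int main() {
  // 1. Order-preserving: key < length  <=>  Tag(key) < Tag(length).
  assert((Tag(3) < Tag(7)) == (3 < 7));
  assert((Tag(9) < Tag(7)) == (9 < 7));
  // 2. Addressing: index * 8 bytes == tagged_index * 4 bytes on x64.
  int32_t index = 5;
  assert(index * 8 == Tag(index) * 4);
  return 0;
}
//----------------------------------------------------------------------------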
+ __ cmpl(key.reg(), + FieldOperand(receiver.reg(), JSArray::kLengthOffset)); + deferred->Branch(greater_equal); + + // Get the elements array from the receiver and check that it + // is a flat array (not a dictionary). + __ movq(tmp.reg(), + FieldOperand(receiver.reg(), JSObject::kElementsOffset)); + // Bind the deferred code patch site to be able to locate the + // fixed array map comparison. When debugging, we patch this + // comparison to always fail so that we will hit the IC call + // in the deferred code which will allow the debugger to + // break for fast case stores. + __ bind(deferred->patch_site()); + // Avoid using __ to ensure the distance from patch_site + // to the map address is always the same. + masm->movq(kScratchRegister, Factory::fixed_array_map(), + RelocInfo::EMBEDDED_OBJECT); + __ cmpq(FieldOperand(tmp.reg(), HeapObject::kMapOffset), + kScratchRegister); + deferred->Branch(not_equal); + + // Store the value. + ASSERT_EQ(1, kSmiTagSize); + ASSERT_EQ(0, kSmiTag); + __ movq(Operand(tmp.reg(), + key.reg(), + times_half_pointer_size, + FixedArray::kHeaderSize - kHeapObjectTag), + value.reg()); + __ IncrementCounter(&Counters::keyed_store_inline, 1); + + deferred->BindExit(); + + cgen_->frame()->Push(&receiver); + cgen_->frame()->Push(&key); + cgen_->frame()->Push(&value); + } else { + Result answer = cgen_->frame()->CallKeyedStoreIC(); + // Make sure that we do not have a test instruction after the + // call. A test instruction after the call is used to + // indicate that we have generated an inline version of the + // keyed store. + masm->nop(); + cgen_->frame()->Push(&answer); + } break; } @@ -6429,7 +6714,7 @@ void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm, __ jmp(&done); __ bind(&load_smi); - __ sar(src, Immediate(kSmiTagSize)); + __ sarl(src, Immediate(kSmiTagSize)); __ cvtlsi2sd(dst, src); __ bind(&done); @@ -6562,7 +6847,7 @@ void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) { // Smi check both operands. __ movq(rcx, rbx); - __ or_(rcx, rax); + __ or_(rcx, rax); // The value in ecx is used for negative zero test later. __ testl(rcx, Immediate(kSmiTagMask)); __ j(not_zero, slow); @@ -6570,14 +6855,12 @@ void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) { case Token::ADD: { __ addl(rax, rbx); __ j(overflow, slow); // The slow case rereads operands from the stack. - __ movsxlq(rax, rax); // Sign extend eax into rax. break; } case Token::SUB: { __ subl(rax, rbx); __ j(overflow, slow); // The slow case rereads operands from the stack. - __ movsxlq(rax, rax); // Sign extend eax into rax. break; } @@ -6585,27 +6868,25 @@ void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) { // If the smi tag is 0 we can just leave the tag on one operand. ASSERT(kSmiTag == 0); // adjust code below if not the case // Remove tag from one of the operands (but keep sign). - __ sar(rax, Immediate(kSmiTagSize)); + __ sarl(rax, Immediate(kSmiTagSize)); // Do multiplication. __ imull(rax, rbx); // multiplication of smis; result in eax // Go slow on overflows. __ j(overflow, slow); // Check for negative zero result. - __ movsxlq(rax, rax); // Sign extend eax into rax. - __ NegativeZeroTest(rax, rcx, slow); // use rcx = x | y + __ NegativeZeroTest(rax, rcx, slow); // ecx (not rcx) holds x | y. break; case Token::DIV: - // Sign extend rax into rdx:rax - // (also sign extends eax into edx if eax is Smi). - __ cqo(); + // Sign extend eax into edx:eax. + __ cdq(); // Check for 0 divisor. 
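//----------------------------------------------------------------------------
// A standalone sketch of why GenerateSmiCode can add and subtract tagged
// smis directly and drop the sign extensions, not code from this patch:
// with tag == 0 the mapping v -> 2v is linear, so 2a + 2b == 2(a + b), and
// doing the addl in 32 bits lets the hardware overflow flag catch results
// that leave the 31-bit smi payload range.

#include <cassert>
#include <cstdint>

inline int32_t Tag(int32_t v) { return v << 1; }
inline int32_t Untag(int32_t v) { return v >> 1; }

int main() {
  int32_t a = 12345, b = -678;
  int32_t sum_of_tagged = Tag(a) + Tag(b);  // one addl, no untagging
  assert(Untag(sum_of_tagged) == a + b);    // already a valid tagged smi
  // Overflow case: 2^29 + 2^29 = 2^30 is outside the smi range; the 32-bit
  // add of the tagged values (2^30 + 2^30) would set OF in hardware.
  int64_t tagged_wide = int64_t(Tag(1 << 29)) + int64_t(Tag(1 << 29));
  assert(tagged_wide > INT32_MAX);          // i.e. the addl would overflow
  return 0;
}
//----------------------------------------------------------------------------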
- __ testq(rbx, rbx); + __ testl(rbx, rbx); __ j(zero, slow); - // Divide rdx:rax by rbx (where rdx:rax is equivalent to the smi in eax). - __ idiv(rbx); + // Divide edx:eax by ebx (where edx:eax is equivalent to the smi in eax). + __ idivl(rbx); // Check that the remainder is zero. - __ testq(rdx, rdx); + __ testl(rdx, rdx); __ j(not_zero, slow); // Check for the corner case of dividing the most negative smi // by -1. We cannot use the overflow flag, since it is not set @@ -6613,28 +6894,27 @@ void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) { ASSERT(kSmiTag == 0 && kSmiTagSize == 1); // TODO(X64): TODO(Smi): Smi implementation dependent constant. // Value is Smi::fromInt(-(1<<31)) / Smi::fromInt(-1) - __ cmpq(rax, Immediate(0x40000000)); + __ cmpl(rax, Immediate(0x40000000)); __ j(equal, slow); // Check for negative zero result. - __ NegativeZeroTest(rax, rcx, slow); // use ecx = x | y + __ NegativeZeroTest(rax, rcx, slow); // ecx (not rcx) holds x | y. // Tag the result and store it in register rax. ASSERT(kSmiTagSize == times_2); // adjust code if not the case __ lea(rax, Operand(rax, rax, times_1, kSmiTag)); break; case Token::MOD: - // Sign extend rax into rdx:rax - // (also sign extends eax into edx if eax is Smi). - __ cqo(); + // Sign extend eax into edx:eax + __ cdq(); // Check for 0 divisor. - __ testq(rbx, rbx); + __ testl(rbx, rbx); __ j(zero, slow); - // Divide rdx:rax by rbx. - __ idiv(rbx); + // Divide edx:eax by ebx. + __ idivl(rbx); // Check for negative zero result. - __ NegativeZeroTest(rdx, rcx, slow); // use ecx = x | y + __ NegativeZeroTest(rdx, rcx, slow); // ecx (not rcx) holds x | y. // Move remainder to register rax. - __ movq(rax, rdx); + __ movl(rax, rdx); break; case Token::BIT_OR: @@ -6654,7 +6934,7 @@ void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) { case Token::SHR: case Token::SAR: // Move the second operand into register ecx. - __ movq(rcx, rbx); + __ movl(rcx, rbx); // Remove tags from operands (but keep sign). __ sarl(rax, Immediate(kSmiTagSize)); __ sarl(rcx, Immediate(kSmiTagSize)); diff --git a/deps/v8/src/x64/codegen-x64.h b/deps/v8/src/x64/codegen-x64.h index bb4b53880f..9e69007f31 100644 --- a/deps/v8/src/x64/codegen-x64.h +++ b/deps/v8/src/x64/codegen-x64.h @@ -361,7 +361,7 @@ class CodeGenerator: public AstVisitor { #define DEF_VISIT(type) \ void Visit##type(type* node); - NODE_LIST(DEF_VISIT) + AST_NODE_LIST(DEF_VISIT) #undef DEF_VISIT // Visit a statement and then spill the virtual frame if control flow can @@ -548,7 +548,7 @@ class CodeGenerator: public AstVisitor { // information. void CodeForFunctionPosition(FunctionLiteral* fun); void CodeForReturnPosition(FunctionLiteral* fun); - void CodeForStatementPosition(Node* node); + void CodeForStatementPosition(AstNode* node); void CodeForSourcePosition(int pos); #ifdef DEBUG diff --git a/deps/v8/src/x64/debug-x64.cc b/deps/v8/src/x64/debug-x64.cc index e94e781d48..177eb90a49 100644 --- a/deps/v8/src/x64/debug-x64.cc +++ b/deps/v8/src/x64/debug-x64.cc @@ -80,6 +80,21 @@ void Debug::GenerateStubNoRegistersDebugBreak(MacroAssembler* masm) { masm->int3(); // UNIMPLEMENTED } +void BreakLocationIterator::ClearDebugBreakAtReturn() { + // TODO(X64): Implement this when we start setting Debug breaks. + UNIMPLEMENTED(); +} + +bool BreakLocationIterator::IsDebugBreakAtReturn() { + // TODO(X64): Implement this when we start setting Debug breaks. 
+ UNIMPLEMENTED(); + return false; +} + +void BreakLocationIterator::SetDebugBreakAtReturn() { + UNIMPLEMENTED(); +} + #endif // ENABLE_DEBUGGER_SUPPORT } } // namespace v8::internal diff --git a/deps/v8/src/x64/disasm-x64.cc b/deps/v8/src/x64/disasm-x64.cc index f962c0193f..83e31492d9 100644 --- a/deps/v8/src/x64/disasm-x64.cc +++ b/deps/v8/src/x64/disasm-x64.cc @@ -34,8 +34,15 @@ namespace disasm { -enum OperandOrder { - UNSET_OP_ORDER = 0, REG_OPER_OP_ORDER, OPER_REG_OP_ORDER +enum OperandType { + UNSET_OP_ORDER = 0, + // Operand size decides between 16, 32 and 64 bit operands. + REG_OPER_OP_ORDER = 1, // Register destination, operand source. + OPER_REG_OP_ORDER = 2, // Operand destination, register source. + // Fixed 8-bit operands. + BYTE_SIZE_OPERAND_FLAG = 4, + BYTE_REG_OPER_OP_ORDER = REG_OPER_OP_ORDER | BYTE_SIZE_OPERAND_FLAG, + BYTE_OPER_REG_OP_ORDER = OPER_REG_OP_ORDER | BYTE_SIZE_OPERAND_FLAG }; //------------------------------------------------------------------ @@ -43,28 +50,53 @@ enum OperandOrder { //------------------------------------------------------------------ struct ByteMnemonic { int b; // -1 terminates, otherwise must be in range (0..255) - OperandOrder op_order_; + OperandType op_order_; const char* mnem; }; static ByteMnemonic two_operands_instr[] = { - { 0x03, REG_OPER_OP_ORDER, "add" }, - { 0x21, OPER_REG_OP_ORDER, "and" }, - { 0x23, REG_OPER_OP_ORDER, "and" }, - { 0x3B, REG_OPER_OP_ORDER, "cmp" }, - { 0x8D, REG_OPER_OP_ORDER, "lea" }, - { 0x09, OPER_REG_OP_ORDER, "or" }, - { 0x0B, REG_OPER_OP_ORDER, "or" }, - { 0x1B, REG_OPER_OP_ORDER, "sbb" }, - { 0x29, OPER_REG_OP_ORDER, "sub" }, - { 0x2B, REG_OPER_OP_ORDER, "sub" }, - { 0x85, REG_OPER_OP_ORDER, "test" }, - { 0x31, OPER_REG_OP_ORDER, "xor" }, - { 0x33, REG_OPER_OP_ORDER, "xor" }, - { 0x87, REG_OPER_OP_ORDER, "xchg" }, - { 0x8A, REG_OPER_OP_ORDER, "movb" }, - { 0x8B, REG_OPER_OP_ORDER, "mov" }, + { 0x00, BYTE_OPER_REG_OP_ORDER, "add" }, + { 0x01, OPER_REG_OP_ORDER, "add" }, + { 0x02, BYTE_REG_OPER_OP_ORDER, "add" }, + { 0x03, REG_OPER_OP_ORDER, "add" }, + { 0x08, BYTE_OPER_REG_OP_ORDER, "or" }, + { 0x09, OPER_REG_OP_ORDER, "or" }, + { 0x0A, BYTE_REG_OPER_OP_ORDER, "or" }, + { 0x0B, REG_OPER_OP_ORDER, "or" }, + { 0x10, BYTE_OPER_REG_OP_ORDER, "adc" }, + { 0x11, OPER_REG_OP_ORDER, "adc" }, + { 0x12, BYTE_REG_OPER_OP_ORDER, "adc" }, + { 0x13, REG_OPER_OP_ORDER, "adc" }, + { 0x18, BYTE_OPER_REG_OP_ORDER, "sbb" }, + { 0x19, OPER_REG_OP_ORDER, "sbb" }, + { 0x1A, BYTE_REG_OPER_OP_ORDER, "sbb" }, + { 0x1B, REG_OPER_OP_ORDER, "sbb" }, + { 0x20, BYTE_OPER_REG_OP_ORDER, "and" }, + { 0x21, OPER_REG_OP_ORDER, "and" }, + { 0x22, BYTE_REG_OPER_OP_ORDER, "and" }, + { 0x23, REG_OPER_OP_ORDER, "and" }, + { 0x28, BYTE_OPER_REG_OP_ORDER, "sub" }, + { 0x29, OPER_REG_OP_ORDER, "sub" }, + { 0x2A, BYTE_REG_OPER_OP_ORDER, "sub" }, + { 0x2B, REG_OPER_OP_ORDER, "sub" }, + { 0x30, BYTE_OPER_REG_OP_ORDER, "xor" }, + { 0x31, OPER_REG_OP_ORDER, "xor" }, + { 0x32, BYTE_REG_OPER_OP_ORDER, "xor" }, + { 0x33, REG_OPER_OP_ORDER, "xor" }, + { 0x38, BYTE_OPER_REG_OP_ORDER, "cmp" }, + { 0x39, OPER_REG_OP_ORDER, "cmp" }, + { 0x3A, BYTE_REG_OPER_OP_ORDER, "cmp" }, + { 0x3B, REG_OPER_OP_ORDER, "cmp" }, + { 0x8D, REG_OPER_OP_ORDER, "lea" }, + { 0x84, BYTE_REG_OPER_OP_ORDER, "test" }, + { 0x85, REG_OPER_OP_ORDER, "test" }, + { 0x86, BYTE_REG_OPER_OP_ORDER, "xchg" }, + { 0x87, REG_OPER_OP_ORDER, "xchg" }, + { 0x88, BYTE_OPER_REG_OP_ORDER, "mov" }, + { 0x89, OPER_REG_OP_ORDER, "mov" }, + { 0x8A, BYTE_REG_OPER_OP_ORDER, "mov" }, + { 0x8B, 
REG_OPER_OP_ORDER, "mov" }, { -1, UNSET_OP_ORDER, "" } }; @@ -97,6 +129,7 @@ static ByteMnemonic short_immediate_instr[] = { { 0x05, UNSET_OP_ORDER, "add" }, { 0x0D, UNSET_OP_ORDER, "or" }, { 0x15, UNSET_OP_ORDER, "adc" }, + { 0x1D, UNSET_OP_ORDER, "sbb" }, { 0x25, UNSET_OP_ORDER, "and" }, { 0x2D, UNSET_OP_ORDER, "sub" }, { 0x35, UNSET_OP_ORDER, "xor" }, @@ -127,7 +160,8 @@ enum InstructionType { struct InstructionDesc { const char* mnem; InstructionType type; - OperandOrder op_order_; + OperandType op_order_; + bool byte_size_operation; // Fixed 8-bit operation. }; @@ -143,7 +177,7 @@ class InstructionTable { void Clear(); void Init(); void CopyTable(ByteMnemonic bm[], InstructionType type); - void SetTableRange(InstructionType type, byte start, byte end, + void SetTableRange(InstructionType type, byte start, byte end, bool byte_size, const char* mnem); void AddJumpConditionalShort(); }; @@ -157,9 +191,10 @@ InstructionTable::InstructionTable() { void InstructionTable::Clear() { for (int i = 0; i < 256; i++) { - instructions_[i].mnem = ""; + instructions_[i].mnem = "(bad)"; instructions_[i].type = NO_INSTR; instructions_[i].op_order_ = UNSET_OP_ORDER; + instructions_[i].byte_size_operation = false; } } @@ -170,9 +205,9 @@ void InstructionTable::Init() { CopyTable(call_jump_instr, CALL_JUMP_INSTR); CopyTable(short_immediate_instr, SHORT_IMMEDIATE_INSTR); AddJumpConditionalShort(); - SetTableRange(PUSHPOP_INSTR, 0x50, 0x57, "push"); - SetTableRange(PUSHPOP_INSTR, 0x58, 0x5F, "pop"); - SetTableRange(MOVE_REG_INSTR, 0xB8, 0xBF, "mov"); + SetTableRange(PUSHPOP_INSTR, 0x50, 0x57, false, "push"); + SetTableRange(PUSHPOP_INSTR, 0x58, 0x5F, false, "pop"); + SetTableRange(MOVE_REG_INSTR, 0xB8, 0xBF, false, "mov"); } @@ -180,20 +215,27 @@ void InstructionTable::CopyTable(ByteMnemonic bm[], InstructionType type) { for (int i = 0; bm[i].b >= 0; i++) { InstructionDesc* id = &instructions_[bm[i].b]; id->mnem = bm[i].mnem; - id->op_order_ = bm[i].op_order_; - assert(id->type == NO_INSTR); // Information already entered + OperandType op_order = bm[i].op_order_; + id->op_order_ = + static_cast<OperandType>(op_order & ~BYTE_SIZE_OPERAND_FLAG); + assert(id->type == NO_INSTR); // Information not already entered id->type = type; + id->byte_size_operation = ((op_order & BYTE_SIZE_OPERAND_FLAG) != 0); } } -void InstructionTable::SetTableRange(InstructionType type, byte start, - byte end, const char* mnem) { +void InstructionTable::SetTableRange(InstructionType type, + byte start, + byte end, + bool byte_size, + const char* mnem) { for (byte b = start; b <= end; b++) { InstructionDesc* id = &instructions_[b]; assert(id->type == NO_INSTR); // Information already entered id->mnem = mnem; id->type = type; + id->byte_size_operation = byte_size; } } @@ -211,13 +253,16 @@ void InstructionTable::AddJumpConditionalShort() { static InstructionTable instruction_table; -// The X64 disassembler implementation. +//------------------------------------------------------------------------------ +// DisassemblerX64 implementation. + enum UnimplementedOpcodeAction { CONTINUE_ON_UNIMPLEMENTED_OPCODE, ABORT_ON_UNIMPLEMENTED_OPCODE }; - +// A new DisassemblerX64 object is created to disassemble each instruction. +// The object can only disassemble a single instruction. 
class DisassemblerX64 { public: DisassemblerX64(const NameConverter& converter, @@ -228,7 +273,9 @@ class DisassemblerX64 { abort_on_unimplemented_( unimplemented_action == ABORT_ON_UNIMPLEMENTED_OPCODE), rex_(0), - operand_size_(0) { + operand_size_(0), + group_1_prefix_(0), + byte_size_operand_(false) { tmp_buffer_[0] = '\0'; } @@ -240,6 +287,12 @@ class DisassemblerX64 { int InstructionDecode(v8::internal::Vector<char> buffer, byte* instruction); private: + enum OperandSize { + BYTE_SIZE = 0, + WORD_SIZE = 1, + DOUBLEWORD_SIZE = 2, + QUADWORD_SIZE = 3 + }; const NameConverter& converter_; v8::internal::EmbeddedVector<char, 128> tmp_buffer_; @@ -247,12 +300,10 @@ class DisassemblerX64 { bool abort_on_unimplemented_; // Prefixes parsed byte rex_; - byte operand_size_; - - void setOperandSizePrefix(byte prefix) { - ASSERT_EQ(0x66, prefix); - operand_size_ = prefix; - } + byte operand_size_; // 0x66 or (if no group 3 prefix is present) 0x0. + byte group_1_prefix_; // 0xF2, 0xF3, or (if no group 1 prefix is present) 0. + // Byte size operand override. + bool byte_size_operand_; void setRex(byte rex) { ASSERT_EQ(0x40, rex & 0xF0); @@ -272,12 +323,15 @@ class DisassemblerX64 { bool rex_w() { return (rex_ & 0x08) != 0; } - int operand_size() { - return rex_w() ? 64 : (operand_size_ != 0) ? 16 : 32; + OperandSize operand_size() { + if (byte_size_operand_) return BYTE_SIZE; + if (rex_w()) return QUADWORD_SIZE; + if (operand_size_ != 0) return WORD_SIZE; + return DOUBLEWORD_SIZE; } char operand_size_code() { - return rex_w() ? 'q' : (operand_size_ != 0) ? 'w' : 'l'; + return "bwlq"[operand_size()]; } const char* NameOfCPURegister(int reg) const { @@ -312,7 +366,7 @@ class DisassemblerX64 { int* base) { *scale = (data >> 6) & 3; *index = ((data >> 3) & 7) | (rex_x() ? 8 : 0); - *base = data & 7 | (rex_b() ? 8 : 0); + *base = (data & 7) | (rex_b() ? 8 : 0); } typedef const char* (DisassemblerX64::*RegisterNameMapping)(int reg) const; @@ -322,11 +376,14 @@ class DisassemblerX64 { int PrintRightOperand(byte* modrmp); int PrintRightByteOperand(byte* modrmp); int PrintOperands(const char* mnem, - OperandOrder op_order, + OperandType op_order, byte* data); + int PrintImmediate(byte* data, OperandSize size); int PrintImmediateOp(byte* data); + const char* TwoByteMnemonic(byte opcode); + int TwoByteOpcodeInstruction(byte* data); int F7Instruction(byte* data); - int D1D3C1Instruction(byte* data); + int ShiftInstruction(byte* data); int JumpShort(byte* data); int JumpConditional(byte* data); int JumpConditionalShort(byte* data); @@ -336,7 +393,7 @@ class DisassemblerX64 { void UnimplementedInstruction() { if (abort_on_unimplemented_) { - UNIMPLEMENTED(); + CHECK(false); } else { AppendToBuffer("'Unimplemented Instruction'"); } @@ -451,6 +508,36 @@ int DisassemblerX64::PrintRightOperandHelper( } +int DisassemblerX64::PrintImmediate(byte* data, OperandSize size) { + int64_t value; + int count; + switch (size) { + case BYTE_SIZE: + value = *data; + count = 1; + break; + case WORD_SIZE: + value = *reinterpret_cast<int16_t*>(data); + count = 2; + break; + case DOUBLEWORD_SIZE: + value = *reinterpret_cast<uint32_t*>(data); + count = 4; + break; + case QUADWORD_SIZE: + value = *reinterpret_cast<int32_t*>(data); + count = 4; + break; + default: + UNREACHABLE(); + value = 0; // Initialize variables on all paths to satisfy the compiler. 
+ count = 0; + } + AppendToBuffer(V8_PTR_PREFIX"x", value); + return count; +} + + int DisassemblerX64::PrintRightOperand(byte* modrmp) { return PrintRightOperandHelper(modrmp, &DisassemblerX64::NameOfCPURegister); @@ -466,25 +553,30 @@ int DisassemblerX64::PrintRightByteOperand(byte* modrmp) { // Returns number of bytes used including the current *data. // Writes instruction's mnemonic, left and right operands to 'tmp_buffer_'. int DisassemblerX64::PrintOperands(const char* mnem, - OperandOrder op_order, + OperandType op_order, byte* data) { byte modrm = *data; int mod, regop, rm; get_modrm(modrm, &mod, ®op, &rm); int advance = 0; + const char* register_name = + byte_size_operand_ ? NameOfByteCPURegister(regop) + : NameOfCPURegister(regop); switch (op_order) { case REG_OPER_OP_ORDER: { AppendToBuffer("%s%c %s,", mnem, operand_size_code(), - NameOfCPURegister(regop)); - advance = PrintRightOperand(data); + register_name); + advance = byte_size_operand_ ? PrintRightByteOperand(data) + : PrintRightOperand(data); break; } case OPER_REG_OP_ORDER: { AppendToBuffer("%s%c ", mnem, operand_size_code()); - advance = PrintRightOperand(data); - AppendToBuffer(",%s", NameOfCPURegister(regop)); + advance = byte_size_operand_ ? PrintRightByteOperand(data) + : PrintRightOperand(data); + AppendToBuffer(",%s", register_name); break; } default: @@ -498,7 +590,7 @@ int DisassemblerX64::PrintOperands(const char* mnem, // Returns number of bytes used by machine instruction, including *data byte. // Writes immediate instructions to 'tmp_buffer_'. int DisassemblerX64::PrintImmediateOp(byte* data) { - bool sign_extension_bit = (*data & 0x02) != 0; + bool byte_size_immediate = (*data & 0x02) != 0; byte modrm = *(data + 1); int mod, regop, rm; get_modrm(modrm, &mod, ®op, &rm); @@ -528,15 +620,12 @@ int DisassemblerX64::PrintImmediateOp(byte* data) { default: UnimplementedInstruction(); } - AppendToBuffer("%s ", mnem); + AppendToBuffer("%s%c ", mnem, operand_size_code()); int count = PrintRightOperand(data + 1); - if (sign_extension_bit) { - AppendToBuffer(",0x%x", *(data + 1 + count)); - return 1 + count + 1 /*int8*/; - } else { - AppendToBuffer(",0x%x", *reinterpret_cast<int32_t*>(data + 1 + count)); - return 1 + count + 4 /*int32_t*/; - } + AppendToBuffer(",0x"); + OperandSize immediate_size = byte_size_immediate ? BYTE_SIZE : operand_size(); + count += PrintImmediate(data + 1 + count, immediate_size); + return 1 + count; } @@ -589,78 +678,65 @@ int DisassemblerX64::F7Instruction(byte* data) { } -int DisassemblerX64::D1D3C1Instruction(byte* data) { - byte op = *data; - assert(op == 0xD1 || op == 0xD3 || op == 0xC1); +int DisassemblerX64::ShiftInstruction(byte* data) { + byte op = *data & (~1); + if (op != 0xD0 && op != 0xD2 && op != 0xC0) { + UnimplementedInstruction(); + return 1; + } byte modrm = *(data + 1); int mod, regop, rm; get_modrm(modrm, &mod, ®op, &rm); - ASSERT(regop < 8); + regop &= 0x7; // The REX.R bit does not affect the operation. 
int imm8 = -1; int num_bytes = 2; - if (mod == 3) { - const char* mnem = NULL; - if (op == 0xD1) { - imm8 = 1; - switch (regop) { - case 2: - mnem = "rcl"; - break; - case 7: - mnem = "sar"; - break; - case 4: - mnem = "shl"; - break; - default: - UnimplementedInstruction(); - } - } else if (op == 0xC1) { - imm8 = *(data + 2); - num_bytes = 3; - switch (regop) { - case 2: - mnem = "rcl"; - break; - case 4: - mnem = "shl"; - break; - case 5: - mnem = "shr"; - break; - case 7: - mnem = "sar"; - break; - default: - UnimplementedInstruction(); - } - } else if (op == 0xD3) { - switch (regop) { - case 4: - mnem = "shl"; - break; - case 5: - mnem = "shr"; - break; - case 7: - mnem = "sar"; - break; - default: - UnimplementedInstruction(); - } - } - assert(mnem != NULL); - AppendToBuffer("%s%c %s,", - mnem, - operand_size_code(), - NameOfCPURegister(rm)); - if (imm8 > 0) { - AppendToBuffer("%d", imm8); - } else { - AppendToBuffer("cl"); - } - } else { + if (mod != 3) { UnimplementedInstruction(); + return num_bytes; + } + const char* mnem = NULL; + switch (regop) { + case 0: + mnem = "rol"; + break; + case 1: + mnem = "ror"; + break; + case 2: + mnem = "rcl"; + break; + case 3: + mnem = "rcr"; + break; + case 4: + mnem = "shl"; + break; + case 5: + mnem = "shr"; + break; + case 7: + mnem = "sar"; + break; + default: + UnimplementedInstruction(); + return num_bytes; + } + assert(mnem != NULL); + if (op == 0xD0) { + imm8 = 1; + } else if (op == 0xC0) { + imm8 = *(data + 2); + num_bytes = 3; + } + AppendToBuffer("%s%c %s,", + mnem, + operand_size_code(), + byte_size_operand_ ? NameOfByteCPURegister(rm) + : NameOfCPURegister(rm)); + if (op == 0xD2) { + AppendToBuffer("cl"); + } else { + AppendToBuffer("%d", imm8); } return num_bytes; } @@ -716,20 +792,14 @@ int DisassemblerX64::FPUInstruction(byte* data) { if (b1 == 0xD9) { const char* mnem = NULL; switch (b2) { - case 0xE8: - mnem = "fld1"; - break; - case 0xEE: - mnem = "fldz"; + case 0xE0: + mnem = "fchs"; break; case 0xE1: mnem = "fabs"; break; - case 0xE0: - mnem = "fchs"; - break; - case 0xF8: - mnem = "fprem"; + case 0xE4: + mnem = "ftst"; break; case 0xF5: mnem = "fprem1"; @@ -737,8 +807,14 @@ int DisassemblerX64::FPUInstruction(byte* data) { case 0xF7: mnem = "fincstp"; break; - case 0xE4: - mnem = "ftst"; + case 0xE8: + mnem = "fld1"; + break; + case 0xEE: + mnem = "fldz"; + break; + case 0xF8: + mnem = "fprem"; break; } if (mnem != NULL) { @@ -862,38 +938,146 @@ int DisassemblerX64::FPUInstruction(byte* data) { return 2; } -// Mnemonics for instructions 0xF0 byte. + +// Handle all two-byte opcodes, which start with 0x0F. +// These instructions may be affected by an 0x66, 0xF2, or 0xF3 prefix. +// We do not use any three-byte opcodes, which start with 0x0F38 or 0x0F3A. +int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) { + byte opcode = *(data + 1); + byte* current = data + 2; + // At return, "current" points to the start of the next instruction. + const char* mnemonic = TwoByteMnemonic(opcode); + if (opcode == 0x1F) { + // NOP + int mod, regop, rm; + get_modrm(*current, &mod, ®op, &rm); + current++; + if (regop == 4) { // SIB byte present. + current++; + } + if (mod == 1) { // Byte displacement. + current += 1; + } else if (mod == 2) { // 32-bit displacement. + current += 4; + } // else no immediate displacement. + AppendToBuffer("nop"); + + } else if (opcode == 0xA2 || opcode == 0x31) { + // RDTSC or CPUID + AppendToBuffer("%s", mnemonic); + + } else if ((opcode & 0xF0) == 0x80) { + // Jcc: Conditional jump (branch). 
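//----------------------------------------------------------------------------
// A standalone sketch of the prefix handling in TwoByteOpcodeInstruction
// above, not code from this patch: for 0x0F-escaped opcodes, a group 1
// prefix of 0xF2 selects the scalar-double SSE2 forms (movsd, addsd, ...),
// so the mnemonic depends on the (prefix, opcode) pair rather than on the
// opcode alone. The table below is a tiny illustrative subset.

#include <cassert>
#include <cstring>

static const char* TwoByteMnemonicSketch(unsigned char group_1_prefix,
                                         unsigned char opcode) {
  if (group_1_prefix == 0xF2) {
    switch (opcode) {
      case 0x10: case 0x11: return "movsd";
      case 0x58: return "addsd";
      case 0x59: return "mulsd";
      case 0x5C: return "subsd";
      case 0x5E: return "divsd";
    }
  }
  if (opcode == 0xAF) return "imul";
  return "(bad)";
}

int main() {
  assert(std::strcmp(TwoByteMnemonicSketch(0xF2, 0x58), "addsd") == 0);
  assert(std::strcmp(TwoByteMnemonicSketch(0x00, 0xAF), "imul") == 0);
  return 0;
}
//----------------------------------------------------------------------------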
+    current = data + JumpConditional(data);
+
+  } else if (opcode == 0xBE || opcode == 0xBF || opcode == 0xB6 ||
+             opcode == 0xB7 || opcode == 0xAF) {
+    // Size-extending moves, IMUL.
+    current += PrintOperands(mnemonic, REG_OPER_OP_ORDER, current);
+
+  } else if ((opcode & 0xF0) == 0x90) {
+    // SETcc: Set byte on condition. Needs pointer to beginning of instruction.
+    current = data + SetCC(data);
+
+  } else if (opcode == 0xAB || opcode == 0xA5 || opcode == 0xAD) {
+    // SHLD, SHRD (double-precision shift), BTS (bit set).
+    AppendToBuffer("%s ", mnemonic);
+    int mod, regop, rm;
+    get_modrm(*current, &mod, &regop, &rm);
+    current += PrintRightOperand(current);
+    if (opcode == 0xAB) {
+      AppendToBuffer(",%s", NameOfCPURegister(regop));
+    } else {
+      AppendToBuffer(",%s,cl", NameOfCPURegister(regop));
+    }
+  } else if (group_1_prefix_ == 0xF2) {
+    // Beginning of instructions with prefix 0xF2.
+
+    if (opcode == 0x11 || opcode == 0x10) {
+      // MOVSD: Move scalar double-precision fp to/from/between XMM registers.
+      AppendToBuffer("movsd ");
+      int mod, regop, rm;
+      get_modrm(*current, &mod, &regop, &rm);
+      if (opcode == 0x11) {
+        current += PrintRightOperand(current);
+        AppendToBuffer(",%s", NameOfXMMRegister(regop));
+      } else {
+        AppendToBuffer("%s,", NameOfXMMRegister(regop));
+        current += PrintRightOperand(current);
+      }
+    } else if (opcode == 0x2A) {
+      // CVTSI2SD: integer to XMM double conversion.
+      int mod, regop, rm;
+      get_modrm(*current, &mod, &regop, &rm);
+      AppendToBuffer("%s %s,", mnemonic, NameOfXMMRegister(regop));
+      current += PrintRightOperand(current);
+    } else if ((opcode & 0xF8) == 0x58) {
+      // XMM arithmetic. Mnemonic was retrieved at the start of this function.
+      int mod, regop, rm;
+      get_modrm(*current, &mod, &regop, &rm);
+      AppendToBuffer("%s %s,%s", mnemonic, NameOfXMMRegister(regop),
+                     NameOfXMMRegister(rm));
+    } else {
+      UnimplementedInstruction();
+    }
+  } else if (opcode == 0x2C && group_1_prefix_ == 0xF3) {
+    // Instruction with prefix 0xF3.
+
+    // CVTTSS2SI: Convert scalar single-precision FP to dword integer.
+    // Assert that mod is not 3, so source is memory, not an XMM register.
+    ASSERT((*current & 0xC0) != 0xC0);
+    current += PrintOperands("cvttss2si", REG_OPER_OP_ORDER, current);
+  } else {
+    UnimplementedInstruction();
+  }
+  return current - data;
+}
+
+
+// Mnemonics for two-byte opcode instructions starting with 0x0F.
+// The argument is the second byte of the two-byte opcode.
 // Returns NULL if the instruction is not handled here.
-static const char* F0Mnem(byte f0byte) {
-  switch (f0byte) {
+const char* DisassemblerX64::TwoByteMnemonic(byte opcode) {
+  switch (opcode) {
     case 0x1F:
       return "nop";
+    case 0x2A:  // F2 prefix.
+      return "cvtsi2sd";
     case 0x31:
      return "rdtsc";
+    case 0x58:  // F2 prefix.
+      return "addsd";
+    case 0x59:  // F2 prefix.
+      return "mulsd";
+    case 0x5C:  // F2 prefix.
+      return "subsd";
+    case 0x5E:  // F2 prefix.
+      return "divsd";
     case 0xA2:
       return "cpuid";
-    case 0xBE:
-      return "movsxb";
-    case 0xBF:
-      return "movsxw";
-    case 0xB6:
-      return "movzxb";
-    case 0xB7:
-      return "movzxw";
-    case 0xAF:
-      return "imul";
     case 0xA5:
       return "shld";
-    case 0xAD:
-      return "shrd";
     case 0xAB:
       return "bts";
+    case 0xAD:
+      return "shrd";
+    case 0xAF:
+      return "imul";
+    case 0xB6:
+      return "movzxb";
+    case 0xB7:
+      return "movzxw";
+    case 0xBE:
+      return "movsxb";
+    case 0xBF:
+      return "movsxw";
     default:
       return NULL;
   }
 }

-// Disassembled instruction '*instr' and writes it into 'out_buffer'.
+
+// Disassembles the instruction at instr, and writes it into out_buffer.
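
// The prefix scan in InstructionDecode below keys off the REX byte layout,
// 0100WRXB. A sketch of the bit tests behind setRex/rex_w/rex_b (the helper
// names here are illustrative, not the V8 members):
static bool IsRexPrefix(unsigned char b) { return (b & 0xF0) == 0x40; }
static bool RexW(unsigned char rex) { return (rex & 8) != 0; }  // 64-bit size
static bool RexR(unsigned char rex) { return (rex & 4) != 0; }  // extends reg
static bool RexX(unsigned char rex) { return (rex & 2) != 0; }  // extends SIB index
static bool RexB(unsigned char rex) { return (rex & 1) != 0; }  // extends rm/base
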
int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer, byte* instr) { tmp_buffer_pos_ = 0; // starting to write as position 0 @@ -905,19 +1089,21 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer, // Scan for prefixes. while (true) { current = *data; - if (current == 0x66) { - setOperandSizePrefix(current); - data++; - } else if ((current & 0xF0) == 0x40) { + if (current == 0x66) { // Group 3 prefix. + operand_size_ = current; + } else if ((current & 0xF0) == 0x40) { // REX prefix. setRex(current); if (rex_w()) AppendToBuffer("REX.W "); - data++; - } else { + } else if ((current & 0xFE) == 0xF2) { // Group 1 prefix. + group_1_prefix_ = current; + } else { // Not a prefix - an opcode. break; } + data++; } const InstructionDesc& idesc = instruction_table.Get(current); + byte_size_operand_ = idesc.byte_size_operation; switch (idesc.type) { case ZERO_OPERANDS_INSTR: AppendToBuffer(idesc.mnem); @@ -949,15 +1135,15 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer, case MOVE_REG_INSTR: { byte* addr = NULL; switch (operand_size()) { - case 16: + case WORD_SIZE: addr = reinterpret_cast<byte*>(*reinterpret_cast<int16_t*>(data + 1)); data += 3; break; - case 32: + case DOUBLEWORD_SIZE: addr = reinterpret_cast<byte*>(*reinterpret_cast<int32_t*>(data + 1)); data += 5; break; - case 64: + case QUADWORD_SIZE: addr = reinterpret_cast<byte*>(*reinterpret_cast<int64_t*>(data + 1)); data += 9; break; @@ -1012,8 +1198,8 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer, AppendToBuffer("imul %s,%s,0x%x", NameOfCPURegister(regop), NameOfCPURegister(rm), imm); data += 2 + (*data == 0x6B ? 1 : 4); - } break; + } case 0xF6: { int mod, regop, rm; @@ -1024,63 +1210,16 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer, UnimplementedInstruction(); } data += 3; - } break; + } case 0x81: // fall through case 0x83: // 0x81 with sign extension bit set data += PrintImmediateOp(data); break; - case 0x0F: { - byte f0byte = *(data + 1); - const char* f0mnem = F0Mnem(f0byte); - if (f0byte == 0x1F) { - data += 1; - byte modrm = *data; - data += 1; - if (((modrm >> 3) & 7) == 4) { - // SIB byte present. - data += 1; - } - int mod = modrm >> 6; - if (mod == 1) { - // Byte displacement. - data += 1; - } else if (mod == 2) { - // 32-bit displacement. - data += 4; - } - AppendToBuffer("nop"); - } else if (f0byte == 0xA2 || f0byte == 0x31) { - AppendToBuffer("%s", f0mnem); - data += 2; - } else if ((f0byte & 0xF0) == 0x80) { - data += JumpConditional(data); - } else if (f0byte == 0xBE || f0byte == 0xBF || f0byte == 0xB6 || f0byte - == 0xB7 || f0byte == 0xAF) { - data += 2; - data += PrintOperands(f0mnem, REG_OPER_OP_ORDER, data); - } else if ((f0byte & 0xF0) == 0x90) { - data += SetCC(data); - } else { - data += 2; - if (f0byte == 0xAB || f0byte == 0xA5 || f0byte == 0xAD) { - // shrd, shld, bts - AppendToBuffer("%s ", f0mnem); - int mod, regop, rm; - get_modrm(*data, &mod, ®op, &rm); - data += PrintRightOperand(data); - if (f0byte == 0xAB) { - AppendToBuffer(",%s", NameOfCPURegister(regop)); - } else { - AppendToBuffer(",%s,cl", NameOfCPURegister(regop)); - } - } else { - UnimplementedInstruction(); - } - } - } + case 0x0F: + data += TwoByteOpcodeInstruction(data); break; case 0x8F: { @@ -1170,13 +1309,13 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer, case 0x95: case 0x96: case 0x97: { - int reg = current & 0x7 | (rex_b() ? 
8 : 0);
+        int reg = (current & 0x7) | (rex_b() ? 8 : 0);
         if (reg == 0) {
           AppendToBuffer("nop");  // Common name for xchg rax,rax.
         } else {
           AppendToBuffer("xchg%c rax, %s",
                          operand_size_code(),
-                         NameOfByteCPURegister(reg));
+                         NameOfCPURegister(reg));
         }
       }
@@ -1204,22 +1343,77 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
         data += 2;
         break;

+      case 0xA1:  // Fall through.
+      case 0xA3:
+        switch (operand_size()) {
+          case DOUBLEWORD_SIZE: {
+            const char* memory_location = NameOfAddress(
+                reinterpret_cast<byte*>(
+                    *reinterpret_cast<int32_t*>(data + 1)));
+            if (*data == 0xA1) {  // Opcode 0xA1: load rax from memory.
+              AppendToBuffer("movzxlq rax,(%s)", memory_location);
+            } else {  // Opcode 0xA3: store rax to memory.
+              AppendToBuffer("movzxlq (%s),rax", memory_location);
+            }
+            data += 5;
+            break;
+          }
+          case QUADWORD_SIZE: {
+            // New x64 instruction mov rax,(imm_64).
+            const char* memory_location = NameOfAddress(
+                *reinterpret_cast<byte**>(data + 1));
+            if (*data == 0xA1) {  // Opcode 0xA1: load rax from memory.
+              AppendToBuffer("movq rax,(%s)", memory_location);
+            } else {  // Opcode 0xA3: store rax to memory.
+              AppendToBuffer("movq (%s),rax", memory_location);
+            }
+            data += 9;
+            break;
+          }
+          default:
+            UnimplementedInstruction();
+            data += 2;
+        }
+        break;
+
       case 0xA8:
         AppendToBuffer("test al,0x%x", *reinterpret_cast<uint8_t*>(data + 1));
         data += 2;
         break;

-      case 0xA9:
-        AppendToBuffer("test%c rax,0x%x",  // CHECKME!
+      case 0xA9: {
+        int64_t value = 0;
+        switch (operand_size()) {
+          case WORD_SIZE:
+            value = *reinterpret_cast<uint16_t*>(data + 1);
+            data += 3;
+            break;
+          case DOUBLEWORD_SIZE:
+            value = *reinterpret_cast<uint32_t*>(data + 1);
+            data += 5;
+            break;
+          case QUADWORD_SIZE:
+            value = *reinterpret_cast<int32_t*>(data + 1);
+            data += 5;
+            break;
+          default:
+            UNREACHABLE();
+        }
+        AppendToBuffer("test%c rax,0x%"V8_PTR_PREFIX"ux",
                        operand_size_code(),
-                       *reinterpret_cast<int32_t*>(data + 1));
-        data += 5;
+                       value);
         break;
-
+      }
       case 0xD1:  // fall through
       case 0xD3:  // fall through
       case 0xC1:
-        data += D1D3C1Instruction(data);
+        data += ShiftInstruction(data);
+        break;
+      case 0xD0:  // fall through
+      case 0xD2:  // fall through
+      case 0xC0:
+        byte_size_operand_ = true;
+        data += ShiftInstruction(data);
         break;

       case 0xD9:  // fall through
@@ -1236,73 +1430,13 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
         data += JumpShort(data);
         break;

-      case 0xF2:
-        if (*(data + 1) == 0x0F) {
-          byte b2 = *(data + 2);
-          if (b2 == 0x11) {
-            AppendToBuffer("movsd ");
-            data += 3;
-            int mod, regop, rm;
-            get_modrm(*data, &mod, &regop, &rm);
-            data += PrintRightOperand(data);
-            AppendToBuffer(",%s", NameOfXMMRegister(regop));
-          } else if (b2 == 0x10) {
-            data += 3;
-            int mod, regop, rm;
-            get_modrm(*data, &mod, &regop, &rm);
-            AppendToBuffer("movsd %s,", NameOfXMMRegister(regop));
-            data += PrintRightOperand(data);
-          } else {
-            const char* mnem = "?";
-            switch (b2) {
-              case 0x2A:
-                mnem = "cvtsi2sd";
-                break;
-              case 0x58:
-                mnem = "addsd";
-                break;
-              case 0x59:
-                mnem = "mulsd";
-                break;
-              case 0x5C:
-                mnem = "subsd";
-                break;
-              case 0x5E:
-                mnem = "divsd";
-                break;
-            }
-            data += 3;
-            int mod, regop, rm;
-            get_modrm(*data, &mod, &regop, &rm);
-            if (b2 == 0x2A) {
-              AppendToBuffer("%s %s,", mnem, NameOfXMMRegister(regop));
-              data += PrintRightOperand(data);
-            } else {
-              AppendToBuffer("%s %s,%s", mnem, NameOfXMMRegister(regop),
-                             NameOfXMMRegister(rm));
-              data++;
-            }
-          }
-        } else {
-          UnimplementedInstruction();
-        }
-        break;
-
-      case 0xF3:
-        if (*(data + 1) == 0x0F && *(data + 2) == 0x2C) {
-          data += 3;
-          data += PrintOperands("cvttss2si",
REG_OPER_OP_ORDER, data); - } else { - UnimplementedInstruction(); - } - break; - case 0xF7: data += F7Instruction(data); break; default: UnimplementedInstruction(); + data += 1; } } // !processed diff --git a/deps/v8/src/x64/ic-x64.cc b/deps/v8/src/x64/ic-x64.cc index 7b8699f6d3..0ef75f87e1 100644 --- a/deps/v8/src/x64/ic-x64.cc +++ b/deps/v8/src/x64/ic-x64.cc @@ -42,16 +42,181 @@ namespace internal { #define __ ACCESS_MASM(masm) +// Helper function used to load a property from a dictionary backing storage. +// This function may return false negatives, so miss_label +// must always call a backup property load that is complete. +// This function is safe to call if the receiver has fast properties, +// or if name is not a symbol, and will jump to the miss_label in that case. +static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss_label, + Register r0, Register r1, Register r2, + Register name) { + // Register use: + // + // r0 - used to hold the property dictionary. + // + // r1 - initially the receiver + // - used for the index into the property dictionary + // - holds the result on exit. + // + // r2 - used to hold the capacity of the property dictionary. + // + // name - holds the name of the property and is unchanged. + + Label done; + + // Check for the absence of an interceptor. + // Load the map into r0. + __ movq(r0, FieldOperand(r1, JSObject::kMapOffset)); + // Test the has_named_interceptor bit in the map. + __ testl(FieldOperand(r0, Map::kInstanceAttributesOffset), + Immediate(1 << (Map::kHasNamedInterceptor + (3 * 8)))); + + // Jump to miss if the interceptor bit is set. + __ j(not_zero, miss_label); + + // Bail out if we have a JS global proxy object. + __ movzxbq(r0, FieldOperand(r0, Map::kInstanceTypeOffset)); + __ cmpb(r0, Immediate(JS_GLOBAL_PROXY_TYPE)); + __ j(equal, miss_label); + + // Possible work-around for http://crbug.com/16276. + __ cmpb(r0, Immediate(JS_GLOBAL_OBJECT_TYPE)); + __ j(equal, miss_label); + __ cmpb(r0, Immediate(JS_BUILTINS_OBJECT_TYPE)); + __ j(equal, miss_label); + + // Check that the properties array is a dictionary. + __ movq(r0, FieldOperand(r1, JSObject::kPropertiesOffset)); + __ Cmp(FieldOperand(r0, HeapObject::kMapOffset), Factory::hash_table_map()); + __ j(not_equal, miss_label); + + // Compute the capacity mask. + const int kCapacityOffset = + StringDictionary::kHeaderSize + + StringDictionary::kCapacityIndex * kPointerSize; + __ movq(r2, FieldOperand(r0, kCapacityOffset)); + __ shrl(r2, Immediate(kSmiTagSize)); // convert smi to int + __ decl(r2); + + // Generate an unrolled loop that performs a few probes before + // giving up. Measurements done on Gmail indicate that 2 probes + // cover ~93% of loads from dictionaries. + static const int kProbes = 4; + const int kElementsStartOffset = + StringDictionary::kHeaderSize + + StringDictionary::kElementsStartIndex * kPointerSize; + for (int i = 0; i < kProbes; i++) { + // Compute the masked index: (hash + i + i * i) & mask. + __ movl(r1, FieldOperand(name, String::kLengthOffset)); + __ shrl(r1, Immediate(String::kHashShift)); + if (i > 0) { + __ addl(r1, Immediate(StringDictionary::GetProbeOffset(i))); + } + __ and_(r1, r2); + + // Scale the index by multiplying by the entry size. + ASSERT(StringDictionary::kEntrySize == 3); + __ lea(r1, Operand(r1, r1, times_2, 0)); // r1 = r1 * 3 + + // Check if the key is identical to the name. 
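
// A rough C++ restatement of the unrolled probe loop above. The hash comes
// from the string's length/hash field, mask is capacity - 1, and entries are
// StringDictionary::kEntrySize (3) pointers wide; GetProbeOffset(i) is
// assumed to match the "(hash + i + i * i) & mask" comment, so treat this as
// a sketch rather than the runtime's exact probing formula.
static int ProbeIndexSketch(unsigned hash, unsigned mask, int i) {
  unsigned index = (hash + i + i * i) & mask;  // masked probe index
  return index * 3;                            // scaled by entry size, as the lea does
}
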
+ __ cmpq(name, Operand(r0, r1, times_pointer_size, + kElementsStartOffset - kHeapObjectTag)); + if (i != kProbes - 1) { + __ j(equal, &done); + } else { + __ j(not_equal, miss_label); + } + } + + // Check that the value is a normal property. + __ bind(&done); + const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize; + __ testl(Operand(r0, r1, times_pointer_size, kDetailsOffset - kHeapObjectTag), + Immediate(PropertyDetails::TypeField::mask() << kSmiTagSize)); + __ j(not_zero, miss_label); + + // Get the value at the masked, scaled index. + const int kValueOffset = kElementsStartOffset + kPointerSize; + __ movq(r1, + Operand(r0, r1, times_pointer_size, kValueOffset - kHeapObjectTag)); +} + + +// Helper function used to check that a value is either not an object +// or is loaded if it is an object. +static void GenerateCheckNonObjectOrLoaded(MacroAssembler* masm, Label* miss, + Register value) { + Label done; + // Check if the value is a Smi. + __ testl(value, Immediate(kSmiTagMask)); + __ j(zero, &done); + // Check if the object has been loaded. + __ movq(kScratchRegister, FieldOperand(value, JSFunction::kMapOffset)); + __ testb(FieldOperand(kScratchRegister, Map::kBitField2Offset), + Immediate(1 << Map::kNeedsLoading)); + __ j(not_zero, miss); + __ bind(&done); +} + + +// One byte opcode for test eax,0xXXXXXXXX. +static const byte kTestEaxByte = 0xA9; + + +static bool PatchInlinedMapCheck(Address address, Object* map) { + // Arguments are address of start of call sequence that called + // the IC, + Address test_instruction_address = + address + Assembler::kTargetAddrToReturnAddrDist; + // The keyed load has a fast inlined case if the IC call instruction + // is immediately followed by a test instruction. + if (*test_instruction_address != kTestEaxByte) return false; + + // Fetch the offset from the test instruction to the map compare + // instructions (starting with the 64-bit immediate mov of the map + // address). This offset is stored in the last 4 bytes of the 5 + // byte test instruction. + Address delta_address = test_instruction_address + 1; + int delta = *reinterpret_cast<int*>(delta_address); + // Compute the map address. The map address is in the last 8 bytes + // of the 10-byte immediate mov instruction (incl. REX prefix), so we add 2 + // to the offset to get the map address. + Address map_address = test_instruction_address + delta + 2; + // Patch the map check. + *(reinterpret_cast<Object**>(map_address)) = map; + return true; +} + + +bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) { + return PatchInlinedMapCheck(address, map); +} + + +bool KeyedStoreIC::PatchInlinedStore(Address address, Object* map) { + return PatchInlinedMapCheck(address, map); +} + + void KeyedLoadIC::ClearInlinedVersion(Address address) { - UNIMPLEMENTED(); + // Insert null as the map to check for to make sure the map check fails + // sending control flow to the IC instead of the inlined version. + PatchInlinedLoad(address, Heap::null_value()); } + void KeyedStoreIC::ClearInlinedVersion(Address address) { - UNIMPLEMENTED(); + // Insert null as the elements map to check for. This will make + // sure that the elements fast-case map check fails so that control + // flows to the IC instead of the inlined version. + PatchInlinedStore(address, Heap::null_value()); } + void KeyedStoreIC::RestoreInlinedVersion(Address address) { - UNIMPLEMENTED(); + // Restore the fast-case elements map check so that the inlined + // version can be used again. 
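
// PatchInlinedMapCheck relies on a fixed code shape after the IC call: a
// test-eax instruction whose 32-bit "immediate" is really a delta to a
// 10-byte movq holding the map. A sketch of the pointer arithmetic with
// simplified types (the real code uses V8's Address and Object*, and starts
// from address + Assembler::kTargetAddrToReturnAddrDist):
static bool PatchMapCheckSketch(unsigned char* return_address, void* map) {
  if (*return_address != 0xA9) return false;   // no test eax => not inlined
  int delta = *reinterpret_cast<int*>(return_address + 1);
  // Skip the REX prefix and opcode of the movq to reach its imm64 payload.
  void** map_slot = reinterpret_cast<void**>(return_address + delta + 2);
  *map_slot = map;                             // e.g. null_value() to disable
  return true;
}
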
+ PatchInlinedStore(address, Heap::fixed_array_map()); } @@ -65,83 +230,133 @@ void KeyedLoadIC::Generate(MacroAssembler* masm, __ movq(rax, Operand(rsp, kPointerSize)); __ movq(rcx, Operand(rsp, 2 * kPointerSize)); - - // Move the return address below the arguments. __ pop(rbx); - __ push(rcx); - __ push(rax); - __ push(rbx); + __ push(rcx); // receiver + __ push(rax); // name + __ push(rbx); // return address // Perform tail call to the entry. __ TailCallRuntime(f, 2); } -void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { - masm->int3(); // UNIMPLEMENTED. - masm->movq(kScratchRegister, Immediate(0xC0AB)); // Debugging aid. -} - -void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) { - masm->int3(); // UNIMPLEMENTED. - masm->movq(kScratchRegister, Immediate(0xC1AB)); // Debugging aid. -} - -bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) { - UNIMPLEMENTED(); - return false; +#ifdef DEBUG +// For use in assert below. +static int TenToThe(int exponent) { + ASSERT(exponent <= 9); + ASSERT(exponent >= 1); + int answer = 10; + for (int i = 1; i < exponent; i++) answer *= 10; + return answer; } +#endif -bool KeyedStoreIC::PatchInlinedStore(Address address, Object* map) { - UNIMPLEMENTED(); - return false; -} - -Object* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) { - UNIMPLEMENTED(); - return NULL; -} -Object* KeyedLoadStubCompiler::CompileLoadCallback(String* name, - JSObject* object, - JSObject* holder, - AccessorInfo* callback) { - UNIMPLEMENTED(); - return NULL; -} +void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { + // ----------- S t a t e ------------- + // -- rsp[0] : return address + // -- rsp[8] : name + // -- rsp[16] : receiver + // ----------------------------------- + Label slow, fast, check_string, index_int, index_string; -Object* KeyedLoadStubCompiler::CompileLoadConstant(String* name, - JSObject* object, - JSObject* holder, - Object* callback) { - UNIMPLEMENTED(); - return NULL; -} + // Load name and receiver. + __ movq(rax, Operand(rsp, kPointerSize)); + __ movq(rcx, Operand(rsp, 2 * kPointerSize)); -Object* KeyedLoadStubCompiler::CompileLoadField(String* name, - JSObject* object, - JSObject* holder, - int index) { - UNIMPLEMENTED(); - return NULL; + // Check that the object isn't a smi. + __ testl(rcx, Immediate(kSmiTagMask)); + __ j(zero, &slow); + + // Check that the object is some kind of JS object EXCEPT JS Value type. + // In the case that the object is a value-wrapper object, + // we enter the runtime system to make sure that indexing + // into string objects work as intended. + ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE); + __ CmpObjectType(rcx, JS_OBJECT_TYPE, rdx); + __ j(below, &slow); + // Check that the receiver does not require access checks. We need + // to check this explicitly since this generic stub does not perform + // map checks. The map is already in rdx. + __ testb(FieldOperand(rdx, Map::kBitFieldOffset), + Immediate(1 << Map::kIsAccessCheckNeeded)); + __ j(not_zero, &slow); + + // Check that the key is a smi. + __ testl(rax, Immediate(kSmiTagMask)); + __ j(not_zero, &check_string); + __ sarl(rax, Immediate(kSmiTagSize)); + // Get the elements array of the object. + __ bind(&index_int); + __ movq(rcx, FieldOperand(rcx, JSObject::kElementsOffset)); + // Check that the object is in fast mode (not dictionary). + __ Cmp(FieldOperand(rcx, HeapObject::kMapOffset), Factory::fixed_array_map()); + __ j(not_equal, &slow); + // Check that the key (index) is within bounds. 
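
// The testl/sarl pair in GenerateGeneric below is the standard smi check and
// untag; with the one-bit tag layout this code assumes (kSmiTag == 0,
// kSmiTagSize == 1), it reduces to the following sketch:
static bool IsSmiSketch(int value) { return (value & 1) == 0; }
static int UntagSmiSketch(int smi) { return smi >> 1; }  // arithmetic shift
static int TagSmiSketch(int value) { return value << 1; }
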
+  __ cmpl(rax, FieldOperand(rcx, FixedArray::kLengthOffset));
+  __ j(below, &fast);  // Unsigned comparison rejects negative indices.
+
+  // Slow case: Load name and receiver from stack and jump to runtime.
+  __ bind(&slow);
+  __ IncrementCounter(&Counters::keyed_load_generic_slow, 1);
+  KeyedLoadIC::Generate(masm, ExternalReference(Runtime::kKeyedGetProperty));
+
+  __ bind(&check_string);
+  // The key is not a smi.
+  // Is it a string?
+  __ CmpObjectType(rax, FIRST_NONSTRING_TYPE, rdx);
+  __ j(above_equal, &slow);
+  // Is the string an array index, with cached numeric value?
+  __ movl(rbx, FieldOperand(rax, String::kLengthOffset));
+  __ testl(rbx, Immediate(String::kIsArrayIndexMask));
+
+  // If the string is a symbol, do a quick inline probe of the receiver's
+  // dictionary, if it exists.
+  __ j(not_zero, &index_string);  // The value in rbx is used at jump target.
+  __ testb(FieldOperand(rdx, Map::kInstanceTypeOffset),
+           Immediate(kIsSymbolMask));
+  __ j(zero, &slow);
+  // Probe the dictionary leaving result in rcx.
+  GenerateDictionaryLoad(masm, &slow, rbx, rcx, rdx, rax);
+  GenerateCheckNonObjectOrLoaded(masm, &slow, rcx);
+  __ movq(rax, rcx);
+  __ IncrementCounter(&Counters::keyed_load_generic_symbol, 1);
+  __ ret(0);
+  // Array index string: If short enough use cache in length/hash field (rbx).
+  // We assert that there are enough bits in an int32_t after the hash shift
+  // bits have been subtracted to allow space for the length and the cached
+  // array index.
+  ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
+         (1 << (String::kShortLengthShift - String::kHashShift)));
+  __ bind(&index_string);
+  const int kLengthFieldLimit =
+      (String::kMaxCachedArrayIndexLength + 1) << String::kShortLengthShift;
+  __ cmpl(rbx, Immediate(kLengthFieldLimit));
+  __ j(above_equal, &slow);
+  __ movl(rax, rbx);
+  __ and_(rax, Immediate((1 << String::kShortLengthShift) - 1));
+  __ shrl(rax, Immediate(String::kLongLengthShift));
+  __ jmp(&index_int);
+  // Fast case: Do the load.
+  __ bind(&fast);
+  __ movq(rax, Operand(rcx, rax, times_pointer_size,
+                       FixedArray::kHeaderSize - kHeapObjectTag));
+  __ Cmp(rax, Factory::the_hole_value());
+  // In case the loaded value is the_hole we have to consult GetProperty
+  // to ensure the prototype chain is searched.
+  __ j(equal, &slow);
+  __ IncrementCounter(&Counters::keyed_load_generic_smi, 1);
+  __ ret(0);
 }

-Object* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) {
-  UNIMPLEMENTED();
-  return NULL;
-}

-Object* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* object,
-                                                      JSObject* holder,
-                                                      String* name) {
-  UNIMPLEMENTED();
-  return NULL;
+void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- rsp[0] : return address
+  //  -- rsp[8] : name
+  //  -- rsp[16] : receiver
+  // -----------------------------------
+  Generate(masm, ExternalReference(IC_Utility(kKeyedLoadIC_Miss)));
 }

-Object* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) {
-  UNIMPLEMENTED();
-  return NULL;
-}

 void KeyedStoreIC::Generate(MacroAssembler* masm, ExternalReference const& f) {
   // ----------- S t a t e -------------
@@ -151,33 +366,148 @@ void KeyedStoreIC::Generate(MacroAssembler* masm, ExternalReference const& f) {
   // -- rsp[16] : receiver
   // -----------------------------------

-  // Move the return address below the arguments.
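
// The TenToThe assertion above, restated as a compile-time check. The
// constant values here are illustrative stand-ins, not the real header
// values; the point is only that every cached index of up to
// kMaxCachedArrayIndexLength decimal digits must fit between the shifts.
constexpr int kHashShiftSketch = 2;                 // assumption
constexpr int kShortLengthShiftSketch = 3 * 8;      // assumption
constexpr int kMaxCachedArrayIndexLengthSketch = 6; // assumption
constexpr long long TenToTheSketch(int e) {
  return e == 0 ? 1 : 10 * TenToTheSketch(e - 1);
}
static_assert(TenToTheSketch(kMaxCachedArrayIndexLengthSketch) <
                  (1LL << (kShortLengthShiftSketch - kHashShiftSketch)),
              "cached array index must fit below the short length shift");
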
__ pop(rcx); - __ push(Operand(rsp, 1 * kPointerSize)); - __ push(Operand(rsp, 1 * kPointerSize)); - __ push(rax); - __ push(rcx); + __ push(Operand(rsp, 1 * kPointerSize)); // receiver + __ push(Operand(rsp, 1 * kPointerSize)); // key + __ push(rax); // value + __ push(rcx); // return address // Do tail-call to runtime routine. __ TailCallRuntime(f, 3); } + void KeyedStoreIC::GenerateExtendStorage(MacroAssembler* masm) { - masm->int3(); // UNIMPLEMENTED. - masm->movq(kScratchRegister, Immediate(0xC2AB)); // Debugging aid. + // ----------- S t a t e ------------- + // -- rax : value + // -- rcx : transition map + // -- rsp[0] : return address + // -- rsp[8] : key + // -- rsp[16] : receiver + // ----------------------------------- + + __ pop(rbx); + __ push(Operand(rsp, 1 * kPointerSize)); // receiver + __ push(rcx); // transition map + __ push(rax); // value + __ push(rbx); // return address + + // Do tail-call to runtime routine. + __ TailCallRuntime( + ExternalReference(IC_Utility(kSharedStoreIC_ExtendStorage)), 3); } + void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) { - masm->int3(); // UNIMPLEMENTED. - masm->movq(kScratchRegister, Immediate(0xC3AB)); // Debugging aid. -} + // ----------- S t a t e ------------- + // -- rax : value + // -- rsp[0] : return address + // -- rsp[8] : key + // -- rsp[16] : receiver + // ----------------------------------- + Label slow, fast, array, extra; -Object* KeyedStoreStubCompiler::CompileStoreField(JSObject* object, - int index, - Map* transition, - String* name) { - UNIMPLEMENTED(); - return NULL; + // Get the receiver from the stack. + __ movq(rdx, Operand(rsp, 2 * kPointerSize)); // 2 ~ return address, key + // Check that the object isn't a smi. + __ testl(rdx, Immediate(kSmiTagMask)); + __ j(zero, &slow); + // Get the map from the receiver. + __ movq(rcx, FieldOperand(rdx, HeapObject::kMapOffset)); + // Check that the receiver does not require access checks. We need + // to do this because this generic stub does not perform map checks. + __ testb(FieldOperand(rcx, Map::kBitFieldOffset), + Immediate(1 << Map::kIsAccessCheckNeeded)); + __ j(not_zero, &slow); + // Get the key from the stack. + __ movq(rbx, Operand(rsp, 1 * kPointerSize)); // 1 ~ return address + // Check that the key is a smi. + __ testl(rbx, Immediate(kSmiTagMask)); + __ j(not_zero, &slow); + + __ CmpInstanceType(rcx, JS_ARRAY_TYPE); + __ j(equal, &array); + // Check that the object is some kind of JS object. + __ CmpInstanceType(rcx, FIRST_JS_OBJECT_TYPE); + __ j(below, &slow); + + // Object case: Check key against length in the elements array. + // rax: value + // rdx: JSObject + // rbx: index (as a smi) + __ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset)); + // Check that the object is in fast mode (not dictionary). + __ Cmp(FieldOperand(rcx, HeapObject::kMapOffset), Factory::fixed_array_map()); + __ j(not_equal, &slow); + // Untag the key (for checking against untagged length in the fixed array). + __ movl(rdx, rbx); + __ sarl(rdx, Immediate(kSmiTagSize)); + __ cmpl(rdx, FieldOperand(rcx, Array::kLengthOffset)); + // rax: value + // rcx: FixedArray + // rbx: index (as a smi) + __ j(below, &fast); + + + // Slow case: Push extra copies of the arguments (3). + __ bind(&slow); + __ pop(rcx); + __ push(Operand(rsp, 1 * kPointerSize)); + __ push(Operand(rsp, 1 * kPointerSize)); + __ push(rax); + __ push(rcx); + // Do tail-call to runtime routine. 
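
// Every IC entry point in this file performs the same shuffle before
// TailCallRuntime: pop the return address, lay the arguments out in the
// order the runtime expects, then push the return address back so the
// runtime returns straight to the original caller. A toy model (top of
// stack is the back of the vector; names are illustrative):
#include <vector>
static void ShuffleForTailCall(std::vector<long>* stack, long value) {
  long ret = stack->back();                     // __ pop(rcx)
  stack->pop_back();
  long key = (*stack)[stack->size() - 1];       // rsp[0] after the pop
  long receiver = (*stack)[stack->size() - 2];  // rsp[8] after the pop
  stack->push_back(receiver);                   // args in runtime order
  stack->push_back(key);
  stack->push_back(value);                      // value arrives in rax
  stack->push_back(ret);                        // return address on top
}
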
+ __ TailCallRuntime(ExternalReference(Runtime::kSetProperty), 3); + + + // Extra capacity case: Check if there is extra capacity to + // perform the store and update the length. Used for adding one + // element to the array by writing to array[array.length]. + __ bind(&extra); + // rax: value + // rdx: JSArray + // rcx: FixedArray + // rbx: index (as a smi) + // flags: compare (rbx, rdx.length()) + __ j(not_equal, &slow); // do not leave holes in the array + __ sarl(rbx, Immediate(kSmiTagSize)); // untag + __ cmpl(rbx, FieldOperand(rcx, FixedArray::kLengthOffset)); + __ j(above_equal, &slow); + // Restore tag and increment. + __ lea(rbx, Operand(rbx, rbx, times_1, 1 << kSmiTagSize)); + __ movq(FieldOperand(rdx, JSArray::kLengthOffset), rbx); + __ subl(rbx, Immediate(1 << kSmiTagSize)); // decrement rbx again + __ jmp(&fast); + + + // Array case: Get the length and the elements array from the JS + // array. Check that the array is in fast mode; if it is the + // length is always a smi. + __ bind(&array); + // rax: value + // rdx: JSArray + // rbx: index (as a smi) + __ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset)); + __ Cmp(FieldOperand(rcx, HeapObject::kMapOffset), Factory::fixed_array_map()); + __ j(not_equal, &slow); + + // Check the key against the length in the array, compute the + // address to store into and fall through to fast case. + __ cmpl(rbx, FieldOperand(rdx, JSArray::kLengthOffset)); + __ j(above_equal, &extra); + + + // Fast case: Do the store. + __ bind(&fast); + // rax: value + // rcx: FixedArray + // rbx: index (as a smi) + __ movq(Operand(rcx, rbx, times_4, FixedArray::kHeaderSize - kHeapObjectTag), + rax); + // Update write barrier for the elements array address. + __ movq(rdx, rax); + __ RecordWrite(rcx, 0, rdx, rbx); + __ ret(0); } @@ -228,20 +558,27 @@ void CallIC::Generate(MacroAssembler* masm, __ InvokeFunction(rdi, actual, JUMP_FUNCTION); } -void CallIC::GenerateMegamorphic(MacroAssembler* a, int b) { - UNIMPLEMENTED(); +void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) { + // Cache miss: Jump to runtime. + Generate(masm, argc, ExternalReference(IC_Utility(kCallIC_Miss))); } -void CallIC::GenerateNormal(MacroAssembler* a, int b) { - UNIMPLEMENTED(); +void CallIC::GenerateNormal(MacroAssembler* masm, int argc) { + // Cache miss: Jump to runtime. + Generate(masm, argc, ExternalReference(IC_Utility(kCallIC_Miss))); } +// The offset from the inlined patch site to the start of the +// inlined load instruction. const int LoadIC::kOffsetToLoadInstruction = 20; void LoadIC::ClearInlinedVersion(Address address) { - UNIMPLEMENTED(); + // Reset the map check of the inlined inobject property load (if + // present) to guarantee failure by holding an invalid map (the null + // value). The offset can be patched to anything. + PatchInlinedLoad(address, Heap::null_value(), kMaxInt); } @@ -254,11 +591,10 @@ void LoadIC::Generate(MacroAssembler* masm, ExternalReference const& f) { __ movq(rax, Operand(rsp, kPointerSize)); - // Move the return address below the arguments. __ pop(rbx); - __ push(rax); - __ push(rcx); - __ push(rbx); + __ push(rax); // receiver + __ push(rcx); // name + __ push(rbx); // return address // Perform tail call to the entry. __ TailCallRuntime(f, 2); @@ -266,38 +602,79 @@ void LoadIC::Generate(MacroAssembler* masm, ExternalReference const& f) { void LoadIC::GenerateArrayLength(MacroAssembler* masm) { - masm->int3(); // UNIMPLEMENTED. - masm->movq(kScratchRegister, Immediate(0xC4AB)); // Debugging aid. 
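
// One trick in the extra-capacity path above deserves a note: with
// smi(n) == n << 1, the lea retags an untagged index and increments it in a
// single instruction, since n + n + 2 == 2 * (n + 1). A sketch:
static int RetagAndIncrementSketch(int untagged_index) {
  return untagged_index + untagged_index + (1 << 1);  // lea rbx,[rbx+rbx*1+2]
}
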
+  Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
 }

 void LoadIC::GenerateFunctionPrototype(MacroAssembler* masm) {
-  masm->int3();  // UNIMPLEMENTED.
-  masm->movq(kScratchRegister, Immediate(0xC5AB));  // Debugging aid.
+  Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
 }

+
 void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
-  masm->int3();  // UNIMPLEMENTED.
-  masm->movq(kScratchRegister, Immediate(0xC6AB));  // Debugging aid.
+  // ----------- S t a t e -------------
+  //  -- rcx    : name
+  //  -- rsp[0] : return address
+  //  -- rsp[8] : receiver
+  // -----------------------------------
+
+  __ movq(rax, Operand(rsp, kPointerSize));
+
+  // Probe the stub cache.
+  Code::Flags flags = Code::ComputeFlags(Code::LOAD_IC,
+                                         NOT_IN_LOOP,
+                                         MONOMORPHIC);
+  StubCache::GenerateProbe(masm, flags, rax, rcx, rbx, rdx);
+
+  // Cache miss: Jump to runtime.
+  Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
 }

+
 void LoadIC::GenerateMiss(MacroAssembler* masm) {
-  masm->int3();  // UNIMPLEMENTED.
-  masm->movq(kScratchRegister, Immediate(0xC7AB));  // Debugging aid.
+  // ----------- S t a t e -------------
+  //  -- rcx    : name
+  //  -- rsp[0] : return address
+  //  -- rsp[8] : receiver
+  // -----------------------------------
+
+  Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
 }

 void LoadIC::GenerateNormal(MacroAssembler* masm) {
-  masm->int3();  // UNIMPLEMENTED.
-  masm->movq(kScratchRegister, Immediate(0xC8AB));  // Debugging aid.
+  Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
 }

+
 void LoadIC::GenerateStringLength(MacroAssembler* masm) {
-  masm->int3();  // UNIMPLEMENTED.
-  masm->movq(kScratchRegister, Immediate(0xC9AB));  // Debugging aid.
+  Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
 }

-bool LoadIC::PatchInlinedLoad(Address address, Object* map, int index) {
-  UNIMPLEMENTED();
-  return false;
+
+bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
+  // The address of the instruction following the call.
+  Address test_instruction_address =
+      address + Assembler::kTargetAddrToReturnAddrDist;
+  // If the instruction following the call is not a test eax, nothing
+  // was inlined.
+  if (*test_instruction_address != kTestEaxByte) return false;
+
+  Address delta_address = test_instruction_address + 1;
+  // The delta to the start of the map check instruction.
+  int delta = *reinterpret_cast<int*>(delta_address);
+
+  // The map address is the last 8 bytes of the 10-byte
+  // immediate move instruction, so we add 2 to get the
+  // offset to the last 8 bytes.
+  Address map_address = test_instruction_address + delta + 2;
+  *(reinterpret_cast<Object**>(map_address)) = map;
+
+  // The offset is in the 32-bit displacement of a seven byte
+  // memory-to-register move instruction (REX.W 0x8B ModR/M disp32),
+  // so we add 3 to get the offset of the displacement.
+  Address offset_address =
+      test_instruction_address + delta + kOffsetToLoadInstruction + 3;
+  *reinterpret_cast<int*>(offset_address) = offset - kHeapObjectTag;
+  return true;
 }

 void StoreIC::Generate(MacroAssembler* masm, ExternalReference const& f) {
@@ -307,25 +684,37 @@ void StoreIC::Generate(MacroAssembler* masm, ExternalReference const& f) {
   // -- rsp[0] : return address
   // -- rsp[8] : receiver
   // -----------------------------------
-  // Move the return address below the arguments.
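
// Putting PatchInlinedLoad's offsets together, the code layout it assumes is
// roughly the following (a sketch; assuming the usual REX.W 0x8B encoding
// for a 64-bit memory-to-register movq, whose disp32 sits at byte offset 3):
//   test eax, <delta>                      ; at the return address
//   <delta> bytes later: movq scr, imm64   ; map in bytes 2..9
//   kOffsetToLoadInstruction after that: movq dst, [obj + disp32]
static int* OffsetSlotSketch(unsigned char* test_address, int delta,
                             int offset_to_load_instruction) {
  return reinterpret_cast<int*>(
      test_address + delta + offset_to_load_instruction + 3);
}
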
__ pop(rbx); - __ push(Operand(rsp, 0)); - __ push(rcx); - __ push(rax); - __ push(rbx); + __ push(Operand(rsp, 0)); // receiver + __ push(rcx); // name + __ push(rax); // value + __ push(rbx); // return address // Perform tail call to the entry. __ TailCallRuntime(f, 3); } void StoreIC::GenerateExtendStorage(MacroAssembler* masm) { - masm->int3(); // UNIMPLEMENTED. - masm->movq(kScratchRegister, Immediate(0xCAAB)); // Debugging aid. + // ----------- S t a t e ------------- + // -- rax : value + // -- rcx : Map (target of map transition) + // -- rsp[0] : return address + // -- rsp[8] : receiver + // ----------------------------------- + + __ pop(rbx); + __ push(Operand(rsp, 0)); // receiver + __ push(rcx); // transition map + __ push(rax); // value + __ push(rbx); // return address + + // Perform tail call to the entry. + __ TailCallRuntime( + ExternalReference(IC_Utility(kSharedStoreIC_ExtendStorage)), 3); } void StoreIC::GenerateMegamorphic(MacroAssembler* masm) { - masm->int3(); // UNIMPLEMENTED. - masm->movq(kScratchRegister, Immediate(0xCBAB)); // Debugging aid. + Generate(masm, ExternalReference(IC_Utility(kStoreIC_Miss))); } diff --git a/deps/v8/src/x64/macro-assembler-x64.cc b/deps/v8/src/x64/macro-assembler-x64.cc index 099a46103c..f58e1cdf9b 100644 --- a/deps/v8/src/x64/macro-assembler-x64.cc +++ b/deps/v8/src/x64/macro-assembler-x64.cc @@ -71,9 +71,9 @@ void MacroAssembler::NegativeZeroTest(Register result, Register op, Label* then_label) { Label ok; - testq(result, result); + testl(result, result); j(not_zero, &ok); - testq(op, op); + testl(op, op); j(sign, then_label); bind(&ok); } @@ -151,6 +151,13 @@ void MacroAssembler::CallRuntime(Runtime::Function* f, int num_arguments) { void MacroAssembler::TailCallRuntime(ExternalReference const& ext, int num_arguments) { + // ----------- S t a t e ------------- + // -- rsp[0] : return address + // -- rsp[8] : argument num_arguments - 1 + // ... + // -- rsp[8 * num_arguments] : argument 0 (receiver) + // ----------------------------------- + // TODO(1236192): Most runtime routines don't need the number of // arguments passed in because it is constant. At some point we // should remove this need and make the runtime routine entry code @@ -311,6 +318,17 @@ void MacroAssembler::Push(Handle<Object> source) { } +void MacroAssembler::Push(Smi* source) { + if (IsUnsafeSmi(source)) { + LoadUnsafeSmi(kScratchRegister, source); + push(kScratchRegister); + } else { + int32_t smi = static_cast<int32_t>(reinterpret_cast<intptr_t>(source)); + push(Immediate(smi)); + } +} + + void MacroAssembler::Jump(ExternalReference ext) { movq(kScratchRegister, ext); jmp(kScratchRegister); @@ -356,6 +374,7 @@ void MacroAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) { ASSERT(RelocInfo::IsCodeTarget(rmode)); movq(kScratchRegister, code_object, rmode); #ifdef DEBUG + // Patch target is kPointer size bytes *before* target label. Label target; bind(&target); #endif @@ -882,4 +901,154 @@ void MacroAssembler::LeaveExitFrame(StackFrame::Type type) { } +Register MacroAssembler::CheckMaps(JSObject* object, Register object_reg, + JSObject* holder, Register holder_reg, + Register scratch, + Label* miss) { + // Make sure there's no overlap between scratch and the other + // registers. + ASSERT(!scratch.is(object_reg) && !scratch.is(holder_reg)); + + // Keep track of the current object in register reg. On the first + // iteration, reg is an alias for object_reg, on later iterations, + // it is an alias for holder_reg. 
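
// Push(Smi*) above never dereferences its argument: a Smi* is a tagged word
// whose integer payload lives in the pointer bits themselves. A sketch of
// that reinterpretation, assuming the one-bit tag used by this code
// (helper names illustrative):
#include <cstdint>
static int32_t SmiAsImmediateSketch(void* smi) {
  intptr_t word = reinterpret_cast<intptr_t>(smi);  // no load from memory
  return static_cast<int32_t>(word);                // as in push(Immediate())
}
static intptr_t SmiPayloadSketch(void* smi) {
  return reinterpret_cast<intptr_t>(smi) >> 1;      // untag the integer
}
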
+ Register reg = object_reg; + int depth = 1; + + // Check the maps in the prototype chain. + // Traverse the prototype chain from the object and do map checks. + while (object != holder) { + depth++; + + // Only global objects and objects that do not require access + // checks are allowed in stubs. + ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded()); + + JSObject* prototype = JSObject::cast(object->GetPrototype()); + if (Heap::InNewSpace(prototype)) { + // Get the map of the current object. + movq(scratch, FieldOperand(reg, HeapObject::kMapOffset)); + Cmp(scratch, Handle<Map>(object->map())); + // Branch on the result of the map check. + j(not_equal, miss); + // Check access rights to the global object. This has to happen + // after the map check so that we know that the object is + // actually a global object. + if (object->IsJSGlobalProxy()) { + CheckAccessGlobalProxy(reg, scratch, miss); + + // Restore scratch register to be the map of the object. + // We load the prototype from the map in the scratch register. + movq(scratch, FieldOperand(reg, HeapObject::kMapOffset)); + } + // The prototype is in new space; we cannot store a reference + // to it in the code. Load it from the map. + reg = holder_reg; // from now the object is in holder_reg + movq(reg, FieldOperand(scratch, Map::kPrototypeOffset)); + + } else { + // Check the map of the current object. + Cmp(FieldOperand(reg, HeapObject::kMapOffset), + Handle<Map>(object->map())); + // Branch on the result of the map check. + j(not_equal, miss); + // Check access rights to the global object. This has to happen + // after the map check so that we know that the object is + // actually a global object. + if (object->IsJSGlobalProxy()) { + CheckAccessGlobalProxy(reg, scratch, miss); + } + // The prototype is in old space; load it directly. + reg = holder_reg; // from now the object is in holder_reg + Move(reg, Handle<JSObject>(prototype)); + } + + // Go to the next object in the prototype chain. + object = prototype; + } + + // Check the holder map. + Cmp(FieldOperand(reg, HeapObject::kMapOffset), + Handle<Map>(holder->map())); + j(not_equal, miss); + + // Log the check depth. + LOG(IntEvent("check-maps-depth", depth)); + + // Perform security check for access to the global object and return + // the holder register. + ASSERT(object == holder); + ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded()); + if (object->IsJSGlobalProxy()) { + CheckAccessGlobalProxy(reg, scratch, miss); + } + return reg; +} + + + + +void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg, + Register scratch, + Label* miss) { + Label same_contexts; + + ASSERT(!holder_reg.is(scratch)); + ASSERT(!scratch.is(kScratchRegister)); + // Load current lexical context from the stack frame. + movq(scratch, Operand(rbp, StandardFrameConstants::kContextOffset)); + + // When generating debug code, make sure the lexical context is set. + if (FLAG_debug_code) { + cmpq(scratch, Immediate(0)); + Check(not_equal, "we should not have an empty lexical context"); + } + // Load the global context of the current context. + int offset = Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize; + movq(scratch, FieldOperand(scratch, offset)); + movq(scratch, FieldOperand(scratch, GlobalObject::kGlobalContextOffset)); + + // Check the context is a global context. 
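
// Stripped of the assembler, the chain of loads in CheckAccessGlobalProxy
// reduces to roughly the following (the field names are descriptive
// stand-ins for the Context slot offsets used above, so treat this as a
// sketch of the control flow, not the object layout):
struct ContextSketch {
  ContextSketch* global_context;  // via GLOBAL_INDEX + kGlobalContextOffset
  void* security_token;           // via SECURITY_TOKEN_INDEX
};
static bool AccessAllowedSketch(ContextSketch* lexical,
                                ContextSketch* holder_context) {
  ContextSketch* gc = lexical->global_context;
  if (gc == holder_context) return true;  // same contexts: fast path
  return gc->security_token == holder_context->security_token;
}
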
+ if (FLAG_debug_code) { + Cmp(FieldOperand(scratch, HeapObject::kMapOffset), + Factory::global_context_map()); + Check(equal, "JSGlobalObject::global_context should be a global context."); + } + + // Check if both contexts are the same. + cmpq(scratch, FieldOperand(holder_reg, JSGlobalProxy::kContextOffset)); + j(equal, &same_contexts); + + // Compare security tokens. + // Check that the security token in the calling global object is + // compatible with the security token in the receiving global + // object. + + // Check the context is a global context. + if (FLAG_debug_code) { + // Preserve original value of holder_reg. + push(holder_reg); + movq(holder_reg, FieldOperand(holder_reg, JSGlobalProxy::kContextOffset)); + Cmp(holder_reg, Factory::null_value()); + Check(not_equal, "JSGlobalProxy::context() should not be null."); + + // Read the first word and compare to global_context_map(), + movq(holder_reg, FieldOperand(holder_reg, HeapObject::kMapOffset)); + Cmp(holder_reg, Factory::global_context_map()); + Check(equal, "JSGlobalObject::global_context should be a global context."); + pop(holder_reg); + } + + movq(kScratchRegister, + FieldOperand(holder_reg, JSGlobalProxy::kContextOffset)); + int token_offset = Context::kHeaderSize + + Context::SECURITY_TOKEN_INDEX * kPointerSize; + movq(scratch, FieldOperand(scratch, token_offset)); + cmpq(scratch, FieldOperand(kScratchRegister, token_offset)); + j(not_equal, miss); + + bind(&same_contexts); +} + + } } // namespace v8::internal diff --git a/deps/v8/src/x64/macro-assembler-x64.h b/deps/v8/src/x64/macro-assembler-x64.h index f13a7adae9..cba55eb27e 100644 --- a/deps/v8/src/x64/macro-assembler-x64.h +++ b/deps/v8/src/x64/macro-assembler-x64.h @@ -164,6 +164,7 @@ class MacroAssembler: public Assembler { void Cmp(Register dst, Handle<Object> source); void Cmp(const Operand& dst, Handle<Object> source); void Push(Handle<Object> source); + void Push(Smi* smi); // Control Flow void Jump(Address destination, RelocInfo::Mode rmode); @@ -175,11 +176,13 @@ class MacroAssembler: public Assembler { void Call(Handle<Code> code_object, RelocInfo::Mode rmode); // Compare object type for heap object. + // Always use unsigned comparisons: above and below, not less and greater. // Incoming register is heap_object and outgoing register is map. // They may be the same register, and may be kScratchRegister. void CmpObjectType(Register heap_object, InstanceType type, Register map); // Compare instance type for map. + // Always use unsigned comparisons: above and below, not less and greater. void CmpInstanceType(Register map, InstanceType type); // FCmp is similar to integer cmp, but requires unsigned @@ -212,7 +215,8 @@ class MacroAssembler: public Assembler { // Generate code for checking access rights - used for security checks // on access to global objects across environments. The holder register - // is left untouched, but the scratch register is clobbered. + // is left untouched, but the scratch register and kScratchRegister, + // which must be different, are clobbered. void CheckAccessGlobalProxy(Register holder_reg, Register scratch, Label* miss); diff --git a/deps/v8/src/x64/register-allocator-x64-inl.h b/deps/v8/src/x64/register-allocator-x64-inl.h index 926dd64c9b..54729d6d9c 100644 --- a/deps/v8/src/x64/register-allocator-x64-inl.h +++ b/deps/v8/src/x64/register-allocator-x64-inl.h @@ -46,7 +46,7 @@ bool RegisterAllocator::IsReserved(Register reg) { // non-reserved assembler registers. 
int RegisterAllocator::ToNumber(Register reg) { ASSERT(reg.is_valid() && !IsReserved(reg)); - static const int numbers[] = { + const int kNumbers[] = { 0, // rax 2, // rcx 3, // rdx @@ -64,15 +64,15 @@ int RegisterAllocator::ToNumber(Register reg) { 8, // r14 9 // r15 }; - return numbers[reg.code()]; + return kNumbers[reg.code()]; } Register RegisterAllocator::ToRegister(int num) { ASSERT(num >= 0 && num < kNumRegisters); - static Register registers[] = + const Register kRegisters[] = { rax, rbx, rcx, rdx, rdi, r8, r9, r11, r14, r15, r13, r12 }; - return registers[num]; + return kRegisters[num]; } diff --git a/deps/v8/src/x64/stub-cache-x64.cc b/deps/v8/src/x64/stub-cache-x64.cc index c5776159d6..091c826ccc 100644 --- a/deps/v8/src/x64/stub-cache-x64.cc +++ b/deps/v8/src/x64/stub-cache-x64.cc @@ -36,32 +36,892 @@ namespace v8 { namespace internal { -#define __ ACCESS_MASM((&masm_)) +//----------------------------------------------------------------------------- +// StubCompiler static helper functions +#define __ ACCESS_MASM(masm) -Object* CallStubCompiler::CompileCallConstant(Object* a, - JSObject* b, - JSFunction* c, - String* d, - StubCompiler::CheckType e) { - UNIMPLEMENTED(); - return NULL; + +static void ProbeTable(MacroAssembler* masm, + Code::Flags flags, + StubCache::Table table, + Register name, + Register offset) { + ExternalReference key_offset(SCTableReference::keyReference(table)); + Label miss; + + __ movq(kScratchRegister, key_offset); + // Check that the key in the entry matches the name. + __ cmpl(name, Operand(kScratchRegister, offset, times_4, 0)); + __ j(not_equal, &miss); + // Get the code entry from the cache. + // Use key_offset + kPointerSize, rather than loading value_offset. + __ movq(kScratchRegister, + Operand(kScratchRegister, offset, times_4, kPointerSize)); + // Check that the flags match what we're looking for. + __ movl(offset, FieldOperand(kScratchRegister, Code::kFlagsOffset)); + __ and_(offset, Immediate(~Code::kFlagsNotUsedInLookup)); + __ cmpl(offset, Immediate(flags)); + __ j(not_equal, &miss); + + // Jump to the first instruction in the code stub. + __ addq(kScratchRegister, Immediate(Code::kHeaderSize - kHeapObjectTag)); + __ jmp(kScratchRegister); + + __ bind(&miss); +} + + +void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) { + ASSERT(kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC); + Code* code = NULL; + if (kind == Code::LOAD_IC) { + code = Builtins::builtin(Builtins::LoadIC_Miss); + } else { + code = Builtins::builtin(Builtins::KeyedLoadIC_Miss); + } + + Handle<Code> ic(code); + __ Jump(ic, RelocInfo::CODE_TARGET); +} + + +void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm, + int index, + Register prototype) { + // Load the global or builtins object from the current context. + __ movq(prototype, + Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX))); + // Load the global context from the global or builtins object. + __ movq(prototype, + FieldOperand(prototype, GlobalObject::kGlobalContextOffset)); + // Load the function from the global context. + __ movq(prototype, Operand(prototype, Context::SlotOffset(index))); + // Load the initial map. The global functions all have initial maps. + __ movq(prototype, + FieldOperand(prototype, JSFunction::kPrototypeOrInitialMapOffset)); + // Load the prototype from the initial map. + __ movq(prototype, FieldOperand(prototype, Map::kPrototypeOffset)); +} + + +// Load a fast property out of a holder object (src). 
In-object properties +// are loaded directly otherwise the property is loaded from the properties +// fixed array. +void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm, + Register dst, Register src, + JSObject* holder, int index) { + // Adjust for the number of properties stored in the holder. + index -= holder->map()->inobject_properties(); + if (index < 0) { + // Get the property straight out of the holder. + int offset = holder->map()->instance_size() + (index * kPointerSize); + __ movq(dst, FieldOperand(src, offset)); + } else { + // Calculate the offset into the properties array. + int offset = index * kPointerSize + FixedArray::kHeaderSize; + __ movq(dst, FieldOperand(src, JSObject::kPropertiesOffset)); + __ movq(dst, FieldOperand(dst, offset)); + } +} + + +template <typename Pushable> +static void PushInterceptorArguments(MacroAssembler* masm, + Register receiver, + Register holder, + Pushable name, + JSObject* holder_obj) { + __ push(receiver); + __ push(holder); + __ push(name); + InterceptorInfo* interceptor = holder_obj->GetNamedInterceptor(); + __ movq(kScratchRegister, Handle<Object>(interceptor), + RelocInfo::EMBEDDED_OBJECT); + __ push(kScratchRegister); + __ push(FieldOperand(kScratchRegister, InterceptorInfo::kDataOffset)); +} + + +void StubCache::GenerateProbe(MacroAssembler* masm, + Code::Flags flags, + Register receiver, + Register name, + Register scratch, + Register extra) { + Label miss; + USE(extra); // The register extra is not used on the X64 platform. + // Make sure that code is valid. The shifting code relies on the + // entry size being 16. + ASSERT(sizeof(Entry) == 16); + + // Make sure the flags do not name a specific type. + ASSERT(Code::ExtractTypeFromFlags(flags) == 0); + + // Make sure that there are no register conflicts. + ASSERT(!scratch.is(receiver)); + ASSERT(!scratch.is(name)); + + // Check that the receiver isn't a smi. + __ testl(receiver, Immediate(kSmiTagMask)); + __ j(zero, &miss); + + // Get the map of the receiver and compute the hash. + __ movl(scratch, FieldOperand(name, String::kLengthOffset)); + // Use only the low 32 bits of the map pointer. + __ addl(scratch, FieldOperand(receiver, HeapObject::kMapOffset)); + __ xor_(scratch, Immediate(flags)); + __ and_(scratch, Immediate((kPrimaryTableSize - 1) << kHeapObjectTagSize)); + + // Probe the primary table. + ProbeTable(masm, flags, kPrimary, name, scratch); + + // Primary miss: Compute hash for secondary probe. + __ movl(scratch, FieldOperand(name, String::kLengthOffset)); + __ addl(scratch, FieldOperand(receiver, HeapObject::kMapOffset)); + __ xor_(scratch, Immediate(flags)); + __ and_(scratch, Immediate((kPrimaryTableSize - 1) << kHeapObjectTagSize)); + __ subl(scratch, name); + __ addl(scratch, Immediate(flags)); + __ and_(scratch, Immediate((kSecondaryTableSize - 1) << kHeapObjectTagSize)); + + // Probe the secondary table. + ProbeTable(masm, flags, kSecondary, name, scratch); + + // Cache miss: Fall-through and let caller handle the miss by + // entering the runtime system. + __ bind(&miss); +} + + +void StubCompiler::GenerateStoreField(MacroAssembler* masm, + Builtins::Name storage_extend, + JSObject* object, + int index, + Map* transition, + Register receiver_reg, + Register name_reg, + Register scratch, + Label* miss_label) { + // Check that the object isn't a smi. + __ testl(receiver_reg, Immediate(kSmiTagMask)); + __ j(zero, miss_label); + + // Check that the map of the object hasn't changed. 
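
// The probe hashing in StubCache::GenerateProbe above, restated as a sketch.
// Only the low 32 bits of the map and name pointers participate (the
// addl/subl are 32-bit operations), and the masks keep the offsets inside
// the primary and secondary tables; table masks are parameters here because
// the real constants live elsewhere.
static unsigned PrimaryOffsetSketch(unsigned name_field, unsigned map_low,
                                    unsigned flags, unsigned table_mask) {
  return ((name_field + map_low) ^ flags) & table_mask;
}
static unsigned SecondaryOffsetSketch(unsigned primary, unsigned name_low,
                                      unsigned flags, unsigned table_mask) {
  return ((primary - name_low) + flags) & table_mask;
}
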
+ __ Cmp(FieldOperand(receiver_reg, HeapObject::kMapOffset), + Handle<Map>(object->map())); + __ j(not_equal, miss_label); + + // Perform global security token check if needed. + if (object->IsJSGlobalProxy()) { + __ CheckAccessGlobalProxy(receiver_reg, scratch, miss_label); + } + + // Stub never generated for non-global objects that require access + // checks. + ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded()); + + // Perform map transition for the receiver if necessary. + if ((transition != NULL) && (object->map()->unused_property_fields() == 0)) { + // The properties must be extended before we can store the value. + // We jump to a runtime call that extends the properties array. + __ Move(rcx, Handle<Map>(transition)); + Handle<Code> ic(Builtins::builtin(storage_extend)); + __ Jump(ic, RelocInfo::CODE_TARGET); + return; + } + + if (transition != NULL) { + // Update the map of the object; no write barrier updating is + // needed because the map is never in new space. + __ Move(FieldOperand(receiver_reg, HeapObject::kMapOffset), + Handle<Map>(transition)); + } + + // Adjust for the number of properties stored in the object. Even in the + // face of a transition we can use the old map here because the size of the + // object and the number of in-object properties is not going to change. + index -= object->map()->inobject_properties(); + + if (index < 0) { + // Set the property straight into the object. + int offset = object->map()->instance_size() + (index * kPointerSize); + __ movq(FieldOperand(receiver_reg, offset), rax); + + // Update the write barrier for the array address. + // Pass the value being stored in the now unused name_reg. + __ movq(name_reg, rax); + __ RecordWrite(receiver_reg, offset, name_reg, scratch); + } else { + // Write to the properties array. + int offset = index * kPointerSize + FixedArray::kHeaderSize; + // Get the properties array (optimistically). + __ movq(scratch, FieldOperand(receiver_reg, JSObject::kPropertiesOffset)); + __ movq(FieldOperand(scratch, offset), rax); + + // Update the write barrier for the array address. + // Pass the value being stored in the now unused name_reg. + __ movq(name_reg, rax); + __ RecordWrite(scratch, offset, name_reg, receiver_reg); + } + + // Return the value (register rax). + __ ret(0); +} + + +void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm, + Register receiver, + Register scratch, + Label* miss_label) { + // Check that the receiver isn't a smi. + __ testl(receiver, Immediate(kSmiTagMask)); + __ j(zero, miss_label); + + // Check that the object is a JS array. + __ CmpObjectType(receiver, JS_ARRAY_TYPE, scratch); + __ j(not_equal, miss_label); + + // Load length directly from the JS array. + __ movq(rax, FieldOperand(receiver, JSArray::kLengthOffset)); + __ ret(0); +} + + +// Generate code to check if an object is a string. If the object is +// a string, the map's instance type is left in the scratch register. +static void GenerateStringCheck(MacroAssembler* masm, + Register receiver, + Register scratch, + Label* smi, + Label* non_string_object) { + // Check that the object isn't a smi. + __ testl(receiver, Immediate(kSmiTagMask)); + __ j(zero, smi); + + // Check that the object is a string. 
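
// GenerateStoreField above and GenerateFastPropertyLoad earlier share one
// offset rule: after subtracting the in-object property count, negative
// indices address the object body and the rest address the properties
// FixedArray. Restated as a sketch (parameter names are illustrative):
static int FieldOffsetSketch(int index, int inobject_properties,
                             int instance_size, int header_size,
                             bool* in_object) {
  const int kPointerSizeSketch = 8;  // x64
  index -= inobject_properties;
  *in_object = index < 0;
  return *in_object ? instance_size + index * kPointerSizeSketch
                    : index * kPointerSizeSketch + header_size;
}
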
+ __ movq(scratch, FieldOperand(receiver, HeapObject::kMapOffset)); + __ movzxbq(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset)); + ASSERT(kNotStringTag != 0); + __ testl(scratch, Immediate(kNotStringTag)); + __ j(not_zero, non_string_object); +} + + +void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm, + Register receiver, + Register scratch, + Label* miss) { + Label load_length, check_wrapper; + + // Check if the object is a string leaving the instance type in the + // scratch register. + GenerateStringCheck(masm, receiver, scratch, miss, &check_wrapper); + + // Load length directly from the string. + __ bind(&load_length); + __ and_(scratch, Immediate(kStringSizeMask)); + __ movl(rax, FieldOperand(receiver, String::kLengthOffset)); + // rcx is also the receiver. + __ lea(rcx, Operand(scratch, String::kLongLengthShift)); + __ shr(rax); // rcx is implicit shift register. + __ shl(rax, Immediate(kSmiTagSize)); + __ ret(0); + + // Check if the object is a JSValue wrapper. + __ bind(&check_wrapper); + __ cmpl(scratch, Immediate(JS_VALUE_TYPE)); + __ j(not_equal, miss); + + // Check if the wrapped value is a string and load the length + // directly if it is. + __ movq(receiver, FieldOperand(receiver, JSValue::kValueOffset)); + GenerateStringCheck(masm, receiver, scratch, miss, miss); + __ jmp(&load_length); +} + + +template <class Pushable> +static void CompileCallLoadPropertyWithInterceptor(MacroAssembler* masm, + Register receiver, + Register holder, + Pushable name, + JSObject* holder_obj) { + PushInterceptorArguments(masm, receiver, holder, name, holder_obj); + + ExternalReference ref = + ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorOnly)); + __ movq(rax, Immediate(5)); + __ movq(rbx, ref); + + CEntryStub stub; + __ CallStub(&stub); +} + + + +void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm, + Register receiver, + Register result, + Register scratch, + Label* miss_label) { + __ TryGetFunctionPrototype(receiver, result, miss_label); + if (!result.is(rax)) __ movq(rax, result); + __ ret(0); +} + + +static void LookupPostInterceptor(JSObject* holder, + String* name, + LookupResult* lookup) { + holder->LocalLookupRealNamedProperty(name, lookup); + if (lookup->IsNotFound()) { + Object* proto = holder->GetPrototype(); + if (proto != Heap::null_value()) { + proto->Lookup(name, lookup); + } + } +} + + +class LoadInterceptorCompiler BASE_EMBEDDED { + public: + explicit LoadInterceptorCompiler(Register name) : name_(name) {} + + void CompileCacheable(MacroAssembler* masm, + StubCompiler* stub_compiler, + Register receiver, + Register holder, + Register scratch1, + Register scratch2, + JSObject* holder_obj, + LookupResult* lookup, + String* name, + Label* miss_label) { + AccessorInfo* callback = 0; + bool optimize = false; + // So far the most popular follow ups for interceptor loads are FIELD + // and CALLBACKS, so inline only them, other cases may be added + // later. + if (lookup->type() == FIELD) { + optimize = true; + } else if (lookup->type() == CALLBACKS) { + Object* callback_object = lookup->GetCallbackObject(); + if (callback_object->IsAccessorInfo()) { + callback = AccessorInfo::cast(callback_object); + optimize = callback->getter() != NULL; + } + } + + if (!optimize) { + CompileRegular(masm, receiver, holder, scratch2, holder_obj, miss_label); + return; + } + + // Note: starting a frame here makes GC aware of pointers pushed below. 
+ __ EnterInternalFrame(); + + if (lookup->type() == CALLBACKS) { + __ push(receiver); + } + __ push(holder); + __ push(name_); + + CompileCallLoadPropertyWithInterceptor(masm, + receiver, + holder, + name_, + holder_obj); + + Label interceptor_failed; + __ Cmp(rax, Factory::no_interceptor_result_sentinel()); + __ j(equal, &interceptor_failed); + __ LeaveInternalFrame(); + __ ret(0); + + __ bind(&interceptor_failed); + __ pop(name_); + __ pop(holder); + if (lookup->type() == CALLBACKS) { + __ pop(receiver); + } + + __ LeaveInternalFrame(); + + if (lookup->type() == FIELD) { + holder = stub_compiler->CheckPrototypes(holder_obj, + holder, + lookup->holder(), + scratch1, + scratch2, + name, + miss_label); + stub_compiler->GenerateFastPropertyLoad(masm, + rax, + holder, + lookup->holder(), + lookup->GetFieldIndex()); + __ ret(0); + } else { + ASSERT(lookup->type() == CALLBACKS); + ASSERT(lookup->GetCallbackObject()->IsAccessorInfo()); + ASSERT(callback != NULL); + ASSERT(callback->getter() != NULL); + + Label cleanup; + __ pop(scratch2); + __ push(receiver); + __ push(scratch2); + + holder = stub_compiler->CheckPrototypes(holder_obj, holder, + lookup->holder(), scratch1, + scratch2, + name, + &cleanup); + + __ pop(scratch2); // save old return address + __ push(holder); + __ Move(holder, Handle<AccessorInfo>(callback)); + __ push(holder); + __ push(FieldOperand(holder, AccessorInfo::kDataOffset)); + __ push(name_); + __ push(scratch2); // restore old return address + + ExternalReference ref = + ExternalReference(IC_Utility(IC::kLoadCallbackProperty)); + __ TailCallRuntime(ref, 5); + + __ bind(&cleanup); + __ pop(scratch1); + __ pop(scratch2); + __ push(scratch1); + } + } + + + void CompileRegular(MacroAssembler* masm, + Register receiver, + Register holder, + Register scratch, + JSObject* holder_obj, + Label* miss_label) { + __ pop(scratch); // save old return address + PushInterceptorArguments(masm, receiver, holder, name_, holder_obj); + __ push(scratch); // restore old return address + + ExternalReference ref = ExternalReference( + IC_Utility(IC::kLoadPropertyWithInterceptorForLoad)); + __ TailCallRuntime(ref, 5); + } + + private: + Register name_; +}; + + +template <class Compiler> +static void CompileLoadInterceptor(Compiler* compiler, + StubCompiler* stub_compiler, + MacroAssembler* masm, + JSObject* object, + JSObject* holder, + String* name, + LookupResult* lookup, + Register receiver, + Register scratch1, + Register scratch2, + Label* miss) { + ASSERT(holder->HasNamedInterceptor()); + ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined()); + + // Check that the receiver isn't a smi. + __ testl(receiver, Immediate(kSmiTagMask)); + __ j(zero, miss); + + // Check that the maps haven't changed. 
+ Register reg = + stub_compiler->CheckPrototypes(object, receiver, holder, + scratch1, scratch2, name, miss); + + if (lookup->IsValid() && lookup->IsCacheable()) { + compiler->CompileCacheable(masm, + stub_compiler, + receiver, + reg, + scratch1, + scratch2, + holder, + lookup, + name, + miss); + } else { + compiler->CompileRegular(masm, + receiver, + reg, + scratch2, + holder, + miss); + } } -Object* CallStubCompiler::CompileCallField(Object* a, - JSObject* b, - int c, - String* d) { - UNIMPLEMENTED(); - return NULL; + +class CallInterceptorCompiler BASE_EMBEDDED { + public: + explicit CallInterceptorCompiler(const ParameterCount& arguments) + : arguments_(arguments), argc_(arguments.immediate()) {} + + void CompileCacheable(MacroAssembler* masm, + StubCompiler* stub_compiler, + Register receiver, + Register holder, + Register scratch1, + Register scratch2, + JSObject* holder_obj, + LookupResult* lookup, + String* name, + Label* miss_label) { + JSFunction* function = 0; + bool optimize = false; + // So far the most popular case for failed interceptor is + // CONSTANT_FUNCTION sitting below. + if (lookup->type() == CONSTANT_FUNCTION) { + function = lookup->GetConstantFunction(); + // JSArray holder is a special case for call constant function + // (see the corresponding code). + if (function->is_compiled() && !holder_obj->IsJSArray()) { + optimize = true; + } + } + + if (!optimize) { + CompileRegular(masm, receiver, holder, scratch2, holder_obj, miss_label); + return; + } + + __ EnterInternalFrame(); + __ push(holder); // save the holder + + CompileCallLoadPropertyWithInterceptor( + masm, + receiver, + holder, + // Under EnterInternalFrame this refers to name. + Operand(rbp, (argc_ + 3) * kPointerSize), + holder_obj); + + __ pop(receiver); // restore holder + __ LeaveInternalFrame(); + + __ Cmp(rax, Factory::no_interceptor_result_sentinel()); + Label invoke; + __ j(not_equal, &invoke); + + stub_compiler->CheckPrototypes(holder_obj, receiver, + lookup->holder(), scratch1, + scratch2, + name, + miss_label); + if (lookup->holder()->IsGlobalObject()) { + __ movq(rdx, Operand(rsp, (argc_ + 1) * kPointerSize)); + __ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalReceiverOffset)); + __ movq(Operand(rsp, (argc_ + 1) * kPointerSize), rdx); + } + + ASSERT(function->is_compiled()); + // Get the function and setup the context. + __ Move(rdi, Handle<JSFunction>(function)); + __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset)); + + // Jump to the cached code (tail call). 
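+    // (JUMP_FUNCTION makes InvokeCode emit a jump rather than a call,
+    // so the callee returns straight to this stub's caller; if the
+    // actual argument count differs from the expected one, an adaptor
+    // frame is presumably set up first.)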
+    ASSERT(function->is_compiled());
+    Handle<Code> code(function->code());
+    ParameterCount expected(function->shared()->formal_parameter_count());
+    __ InvokeCode(code, expected, arguments_,
+                  RelocInfo::CODE_TARGET, JUMP_FUNCTION);
+
+    __ bind(&invoke);
+  }
+
+  void CompileRegular(MacroAssembler* masm,
+                      Register receiver,
+                      Register holder,
+                      Register scratch,
+                      JSObject* holder_obj,
+                      Label* miss_label) {
+    __ EnterInternalFrame();
+
+    PushInterceptorArguments(masm,
+                             receiver,
+                             holder,
+                             Operand(rbp, (argc_ + 3) * kPointerSize),
+                             holder_obj);
+
+    ExternalReference ref = ExternalReference(
+        IC_Utility(IC::kLoadPropertyWithInterceptorForCall));
+    __ movq(rax, Immediate(5));
+    __ movq(rbx, ref);
+
+    CEntryStub stub;
+    __ CallStub(&stub);
+
+    __ LeaveInternalFrame();
+  }
+
+ private:
+  const ParameterCount& arguments_;
+  int argc_;
+};
+
+
+#undef __
+
+#define __ ACCESS_MASM((masm()))
+
+
+Object* CallStubCompiler::CompileCallConstant(Object* object,
+                                              JSObject* holder,
+                                              JSFunction* function,
+                                              String* name,
+                                              StubCompiler::CheckType check) {
+  // ----------- S t a t e -------------
+  // -----------------------------------
+  // rsp[0] return address
+  // rsp[8] argument argc
+  // rsp[16] argument argc - 1
+  // ...
+  // rsp[argc * 8] argument 1
+  // rsp[(argc + 1) * 8] argument 0 = receiver
+  // rsp[(argc + 2) * 8] function name
+
+  Label miss;
+
+  // Get the receiver from the stack.
+  const int argc = arguments().immediate();
+  __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+
+  // Check that the receiver isn't a smi.
+  if (check != NUMBER_CHECK) {
+    __ testl(rdx, Immediate(kSmiTagMask));
+    __ j(zero, &miss);
+  }
+
+  // Make sure that it's okay not to patch the on stack receiver
+  // unless we're doing a receiver map check.
+  ASSERT(!object->IsGlobalObject() || check == RECEIVER_MAP_CHECK);
+
+  switch (check) {
+    case RECEIVER_MAP_CHECK:
+      // Check that the maps haven't changed.
+      CheckPrototypes(JSObject::cast(object), rdx, holder,
+                      rbx, rcx, name, &miss);
+
+      // Patch the receiver on the stack with the global proxy if
+      // necessary.
+      if (object->IsGlobalObject()) {
+        __ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalReceiverOffset));
+        __ movq(Operand(rsp, (argc + 1) * kPointerSize), rdx);
+      }
+      break;
+
+    case STRING_CHECK:
+      // Check that the object is a two-byte string or a symbol.
+      __ CmpObjectType(rdx, FIRST_NONSTRING_TYPE, rcx);
+      __ j(above_equal, &miss);
+      // Check that the maps starting from the prototype haven't changed.
+      GenerateLoadGlobalFunctionPrototype(masm(),
+                                          Context::STRING_FUNCTION_INDEX,
+                                          rcx);
+      CheckPrototypes(JSObject::cast(object->GetPrototype()), rcx, holder,
+                      rbx, rdx, name, &miss);
+      break;
+
+    case NUMBER_CHECK: {
+      Label fast;
+      // Check that the object is a smi or a heap number.
+      __ testl(rdx, Immediate(kSmiTagMask));
+      __ j(zero, &fast);
+      __ CmpObjectType(rdx, HEAP_NUMBER_TYPE, rcx);
+      __ j(not_equal, &miss);
+      __ bind(&fast);
+      // Check that the maps starting from the prototype haven't changed.
+      GenerateLoadGlobalFunctionPrototype(masm(),
+                                          Context::NUMBER_FUNCTION_INDEX,
+                                          rcx);
+      CheckPrototypes(JSObject::cast(object->GetPrototype()), rcx, holder,
+                      rbx, rdx, name, &miss);
+      break;
+    }
+
+    case BOOLEAN_CHECK: {
+      Label fast;
+      // Check that the object is a boolean.
+      __ Cmp(rdx, Factory::true_value());
+      __ j(equal, &fast);
+      __ Cmp(rdx, Factory::false_value());
+      __ j(not_equal, &miss);
+      __ bind(&fast);
+      // Check that the maps starting from the prototype haven't changed.
+ GenerateLoadGlobalFunctionPrototype(masm(), + Context::BOOLEAN_FUNCTION_INDEX, + rcx); + CheckPrototypes(JSObject::cast(object->GetPrototype()), rcx, holder, + rbx, rdx, name, &miss); + break; + } + + case JSARRAY_HAS_FAST_ELEMENTS_CHECK: + CheckPrototypes(JSObject::cast(object), rdx, holder, + rbx, rcx, name, &miss); + // Make sure object->HasFastElements(). + // Get the elements array of the object. + __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset)); + // Check that the object is in fast mode (not dictionary). + __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset), + Factory::fixed_array_map()); + __ j(not_equal, &miss); + break; + + default: + UNREACHABLE(); + } + + // Get the function and setup the context. + __ Move(rdi, Handle<JSFunction>(function)); + __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset)); + + // Jump to the cached code (tail call). + ASSERT(function->is_compiled()); + Handle<Code> code(function->code()); + ParameterCount expected(function->shared()->formal_parameter_count()); + __ InvokeCode(code, expected, arguments(), + RelocInfo::CODE_TARGET, JUMP_FUNCTION); + + // Handle call cache miss. + __ bind(&miss); + Handle<Code> ic = ComputeCallMiss(arguments().immediate()); + __ Jump(ic, RelocInfo::CODE_TARGET); + + // Return the generated code. + String* function_name = NULL; + if (function->shared()->name()->IsString()) { + function_name = String::cast(function->shared()->name()); + } + return GetCode(CONSTANT_FUNCTION, function_name); } -Object* CallStubCompiler::CompileCallInterceptor(Object* a, - JSObject* b, - String* c) { - UNIMPLEMENTED(); - return NULL; +Object* CallStubCompiler::CompileCallField(Object* object, + JSObject* holder, + int index, + String* name) { + // ----------- S t a t e ------------- + // ----------------------------------- + // rsp[0] return address + // rsp[8] argument argc + // rsp[16] argument argc - 1 + // ... + // rsp[argc * 8] argument 1 + // rsp[(argc + 1) * 8] argument 0 = receiver + // rsp[(argc + 2) * 8] function name + Label miss; + + // Get the receiver from the stack. + const int argc = arguments().immediate(); + __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize)); + + // Check that the receiver isn't a smi. + __ testl(rdx, Immediate(kSmiTagMask)); + __ j(zero, &miss); + + // Do the right check and compute the holder register. + Register reg = + CheckPrototypes(JSObject::cast(object), rdx, holder, + rbx, rcx, name, &miss); + + GenerateFastPropertyLoad(masm(), rdi, reg, holder, index); + + // Check that the function really is a function. + __ testl(rdi, Immediate(kSmiTagMask)); + __ j(zero, &miss); + __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rbx); + __ j(not_equal, &miss); + + // Patch the receiver on the stack with the global proxy if + // necessary. + if (object->IsGlobalObject()) { + __ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalReceiverOffset)); + __ movq(Operand(rsp, (argc + 1) * kPointerSize), rdx); + } + + // Invoke the function. + __ InvokeFunction(rdi, arguments(), JUMP_FUNCTION); + + // Handle call cache miss. + __ bind(&miss); + Handle<Code> ic = ComputeCallMiss(arguments().immediate()); + __ Jump(ic, RelocInfo::CODE_TARGET); + + // Return the generated code. + return GetCode(FIELD, name); +} + + +Object* CallStubCompiler::CompileCallInterceptor(Object* object, + JSObject* holder, + String* name) { + // ----------- S t a t e ------------- + // ----------------------------------- + Label miss; + + // Get the number of arguments. 
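+  // (The state comment above is empty, but the stack layout is the
+  // same as for the other call stubs in this file: return address at
+  // rsp[0], then argc arguments, the receiver, and the function name.)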
+  const int argc = arguments().immediate();
+
+  LookupResult lookup;
+  LookupPostInterceptor(holder, name, &lookup);
+
+  // Get the receiver from the stack.
+  __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+
+  CallInterceptorCompiler compiler(arguments());
+  CompileLoadInterceptor(&compiler,
+                         this,
+                         masm(),
+                         JSObject::cast(object),
+                         holder,
+                         name,
+                         &lookup,
+                         rdx,
+                         rbx,
+                         rcx,
+                         &miss);
+
+  // Restore receiver.
+  __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+
+  // Check that the function really is a function.
+  __ testl(rax, Immediate(kSmiTagMask));
+  __ j(zero, &miss);
+  __ CmpObjectType(rax, JS_FUNCTION_TYPE, rbx);
+  __ j(not_equal, &miss);
+
+  // Patch the receiver on the stack with the global proxy if
+  // necessary.
+  if (object->IsGlobalObject()) {
+    __ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalReceiverOffset));
+    __ movq(Operand(rsp, (argc + 1) * kPointerSize), rdx);
+  }
+
+  // Invoke the function.
+  __ movq(rdi, rax);
+  __ InvokeFunction(rdi, arguments(), JUMP_FUNCTION);
+
+  // Handle call cache miss.
+  __ bind(&miss);
+  Handle<Code> ic = ComputeCallMiss(argc);
+  __ Jump(ic, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
+  return GetCode(INTERCEPTOR, name);
 }
 
 
@@ -71,43 +931,165 @@ Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
                                            JSGlobalPropertyCell* cell,
                                            JSFunction* function,
                                            String* name) {
-  UNIMPLEMENTED();
-  return NULL;
+  // ----------- S t a t e -------------
+  // -----------------------------------
+  // rsp[0] return address
+  // rsp[8] argument argc
+  // rsp[16] argument argc - 1
+  // ...
+  // rsp[argc * 8] argument 1
+  // rsp[(argc + 1) * 8] argument 0 = receiver
+  // rsp[(argc + 2) * 8] function name
+  Label miss;
+
+  // Get the number of arguments.
+  const int argc = arguments().immediate();
+
+  // Get the receiver from the stack.
+  __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+
+  // If the object is the holder then we know that it's a global
+  // object which can only happen for contextual calls. In this case,
+  // the receiver cannot be a smi.
+  if (object != holder) {
+    __ testl(rdx, Immediate(kSmiTagMask));
+    __ j(zero, &miss);
+  }
+
+  // Check that the maps haven't changed.
+  CheckPrototypes(object, rdx, holder, rbx, rcx, name, &miss);
+
+  // Get the value from the cell.
+  __ Move(rdi, Handle<JSGlobalPropertyCell>(cell));
+  __ movq(rdi, FieldOperand(rdi, JSGlobalPropertyCell::kValueOffset));
+
+  // Check that the cell contains the same function.
+  __ Cmp(rdi, Handle<JSFunction>(function));
+  __ j(not_equal, &miss);
+
+  // Patch the receiver on the stack with the global proxy.
+  if (object->IsGlobalObject()) {
+    __ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalReceiverOffset));
+    __ movq(Operand(rsp, (argc + 1) * kPointerSize), rdx);
+  }
+
+  // Setup the context (function already in rdi).
+  __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+
+  // Jump to the cached code (tail call).
+  __ IncrementCounter(&Counters::call_global_inline, 1);
+  ASSERT(function->is_compiled());
+  Handle<Code> code(function->code());
+  ParameterCount expected(function->shared()->formal_parameter_count());
+  __ InvokeCode(code, expected, arguments(),
+                RelocInfo::CODE_TARGET, JUMP_FUNCTION);
+
+  // Handle call cache miss.
+  __ bind(&miss);
+  __ IncrementCounter(&Counters::call_global_inline_miss, 1);
+  Handle<Code> ic = ComputeCallMiss(arguments().immediate());
+  __ Jump(ic, RelocInfo::CODE_TARGET);
+
+  // Return the generated code.
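+  // (GetCode finalizes the assembled buffer into a Code object; the
+  // flags presumably record the IC kind and property type so that the
+  // stub cache can key on them.)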
+ return GetCode(NORMAL, name); } -Object* LoadStubCompiler::CompileLoadCallback(JSObject* a, - JSObject* b, - AccessorInfo* c, - String* d) { - UNIMPLEMENTED(); - return NULL; +Object* LoadStubCompiler::CompileLoadCallback(JSObject* object, + JSObject* holder, + AccessorInfo* callback, + String* name) { + // ----------- S t a t e ------------- + // -- rcx : name + // -- rsp[0] : return address + // -- rsp[8] : receiver + // ----------------------------------- + Label miss; + + __ movq(rax, Operand(rsp, kPointerSize)); + GenerateLoadCallback(object, holder, rax, rcx, rbx, rdx, + callback, name, &miss); + __ bind(&miss); + GenerateLoadMiss(masm(), Code::LOAD_IC); + + // Return the generated code. + return GetCode(CALLBACKS, name); } -Object* LoadStubCompiler::CompileLoadConstant(JSObject* a, - JSObject* b, - Object* c, - String* d) { - UNIMPLEMENTED(); - return NULL; +Object* LoadStubCompiler::CompileLoadConstant(JSObject* object, + JSObject* holder, + Object* value, + String* name) { + // ----------- S t a t e ------------- + // -- rcx : name + // -- rsp[0] : return address + // -- rsp[8] : receiver + // ----------------------------------- + Label miss; + + __ movq(rax, Operand(rsp, kPointerSize)); + GenerateLoadConstant(object, holder, rax, rbx, rdx, value, name, &miss); + __ bind(&miss); + GenerateLoadMiss(masm(), Code::LOAD_IC); + + // Return the generated code. + return GetCode(CONSTANT_FUNCTION, name); } -Object* LoadStubCompiler::CompileLoadField(JSObject* a, - JSObject* b, - int c, - String* d) { - UNIMPLEMENTED(); - return NULL; +Object* LoadStubCompiler::CompileLoadField(JSObject* object, + JSObject* holder, + int index, + String* name) { + // ----------- S t a t e ------------- + // -- rcx : name + // -- rsp[0] : return address + // -- rsp[8] : receiver + // ----------------------------------- + Label miss; + + __ movq(rax, Operand(rsp, kPointerSize)); + GenerateLoadField(object, holder, rax, rbx, rdx, index, name, &miss); + __ bind(&miss); + GenerateLoadMiss(masm(), Code::LOAD_IC); + + // Return the generated code. + return GetCode(FIELD, name); } -Object* LoadStubCompiler::CompileLoadInterceptor(JSObject* a, - JSObject* b, - String* c) { - UNIMPLEMENTED(); - return NULL; +Object* LoadStubCompiler::CompileLoadInterceptor(JSObject* receiver, + JSObject* holder, + String* name) { + // ----------- S t a t e ------------- + // -- rcx : name + // -- rsp[0] : return address + // -- rsp[8] : receiver + // ----------------------------------- + Label miss; + + LookupResult lookup; + LookupPostInterceptor(holder, name, &lookup); + + __ movq(rax, Operand(rsp, kPointerSize)); + // TODO(368): Compile in the whole chain: all the interceptors in + // prototypes and ultimate answer. + GenerateLoadInterceptor(receiver, + holder, + &lookup, + rax, + rcx, + rdx, + rbx, + name, + &miss); + + __ bind(&miss); + GenerateLoadMiss(masm(), Code::LOAD_IC); + + // Return the generated code. + return GetCode(INTERCEPTOR, name); } @@ -116,39 +1098,477 @@ Object* LoadStubCompiler::CompileLoadGlobal(JSObject* object, JSGlobalPropertyCell* cell, String* name, bool is_dont_delete) { - UNIMPLEMENTED(); - return NULL; + // ----------- S t a t e ------------- + // -- rcx : name + // -- rsp[0] : return address + // -- rsp[8] : receiver + // ----------------------------------- + Label miss; + + // Get the receiver from the stack. + __ movq(rax, Operand(rsp, kPointerSize)); + + // If the object is the holder then we know that it's a global + // object which can only happen for contextual loads. 
In this case, + // the receiver cannot be a smi. + if (object != holder) { + __ testl(rax, Immediate(kSmiTagMask)); + __ j(zero, &miss); + } + + // Check that the maps haven't changed. + CheckPrototypes(object, rax, holder, rbx, rdx, name, &miss); + + // Get the value from the cell. + __ Move(rax, Handle<JSGlobalPropertyCell>(cell)); + __ movq(rax, FieldOperand(rax, JSGlobalPropertyCell::kValueOffset)); + + // Check for deleted property if property can actually be deleted. + if (!is_dont_delete) { + __ Cmp(rax, Factory::the_hole_value()); + __ j(equal, &miss); + } else if (FLAG_debug_code) { + __ Cmp(rax, Factory::the_hole_value()); + __ Check(not_equal, "DontDelete cells can't contain the hole"); + } + + __ IncrementCounter(&Counters::named_load_global_inline, 1); + __ ret(0); + + __ bind(&miss); + __ IncrementCounter(&Counters::named_load_global_inline_miss, 1); + GenerateLoadMiss(masm(), Code::LOAD_IC); + + // Return the generated code. + return GetCode(NORMAL, name); } -Object* StoreStubCompiler::CompileStoreCallback(JSObject* a, - AccessorInfo* b, - String* c) { - UNIMPLEMENTED(); - return NULL; +Object* KeyedLoadStubCompiler::CompileLoadCallback(String* name, + JSObject* receiver, + JSObject* holder, + AccessorInfo* callback) { + // ----------- S t a t e ------------- + // -- rsp[0] : return address + // -- rsp[8] : name + // -- rsp[16] : receiver + // ----------------------------------- + Label miss; + + __ movq(rax, Operand(rsp, kPointerSize)); + __ movq(rcx, Operand(rsp, 2 * kPointerSize)); + __ IncrementCounter(&Counters::keyed_load_callback, 1); + + // Check that the name has not changed. + __ Cmp(rax, Handle<String>(name)); + __ j(not_equal, &miss); + + GenerateLoadCallback(receiver, holder, rcx, rax, rbx, rdx, + callback, name, &miss); + __ bind(&miss); + __ DecrementCounter(&Counters::keyed_load_callback, 1); + GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC); + + // Return the generated code. + return GetCode(CALLBACKS, name); } -Object* StoreStubCompiler::CompileStoreField(JSObject* a, - int b, - Map* c, - String* d) { - UNIMPLEMENTED(); - return NULL; +Object* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) { + // ----------- S t a t e ------------- + // -- rsp[0] : return address + // -- rsp[8] : name + // -- rsp[16] : receiver + // ----------------------------------- + Label miss; + + __ movq(rax, Operand(rsp, kPointerSize)); + __ movq(rcx, Operand(rsp, 2 * kPointerSize)); + __ IncrementCounter(&Counters::keyed_load_array_length, 1); + + // Check that the name has not changed. + __ Cmp(rax, Handle<String>(name)); + __ j(not_equal, &miss); + + GenerateLoadArrayLength(masm(), rcx, rdx, &miss); + __ bind(&miss); + __ DecrementCounter(&Counters::keyed_load_array_length, 1); + GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC); + + // Return the generated code. + return GetCode(CALLBACKS, name); } -Object* StoreStubCompiler::CompileStoreInterceptor(JSObject* a, String* b) { - UNIMPLEMENTED(); - return NULL; +Object* KeyedLoadStubCompiler::CompileLoadConstant(String* name, + JSObject* receiver, + JSObject* holder, + Object* value) { + // ----------- S t a t e ------------- + // -- rsp[0] : return address + // -- rsp[8] : name + // -- rsp[16] : receiver + // ----------------------------------- + Label miss; + + __ movq(rax, Operand(rsp, kPointerSize)); + __ movq(rcx, Operand(rsp, 2 * kPointerSize)); + __ IncrementCounter(&Counters::keyed_load_constant_function, 1); + + // Check that the name has not changed. 
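+  // (Keyed stubs are compiled for one specific property name, so the
+  // fast path is guarded by a direct comparison of the key register
+  // against that name; any other key falls through to the generic
+  // miss handler.)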
+ __ Cmp(rax, Handle<String>(name)); + __ j(not_equal, &miss); + + GenerateLoadConstant(receiver, holder, rcx, rbx, rdx, + value, name, &miss); + __ bind(&miss); + __ DecrementCounter(&Counters::keyed_load_constant_function, 1); + GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC); + + // Return the generated code. + return GetCode(CONSTANT_FUNCTION, name); +} + + +Object* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) { + // ----------- S t a t e ------------- + // -- rsp[0] : return address + // -- rsp[8] : name + // -- rsp[16] : receiver + // ----------------------------------- + Label miss; + + __ movq(rax, Operand(rsp, kPointerSize)); + __ movq(rcx, Operand(rsp, 2 * kPointerSize)); + __ IncrementCounter(&Counters::keyed_load_function_prototype, 1); + + // Check that the name has not changed. + __ Cmp(rax, Handle<String>(name)); + __ j(not_equal, &miss); + + GenerateLoadFunctionPrototype(masm(), rcx, rdx, rbx, &miss); + __ bind(&miss); + __ DecrementCounter(&Counters::keyed_load_function_prototype, 1); + GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC); + + // Return the generated code. + return GetCode(CALLBACKS, name); +} + + +Object* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver, + JSObject* holder, + String* name) { + // ----------- S t a t e ------------- + // -- rsp[0] : return address + // -- rsp[8] : name + // -- rsp[16] : receiver + // ----------------------------------- + Label miss; + + __ movq(rax, Operand(rsp, kPointerSize)); + __ movq(rcx, Operand(rsp, 2 * kPointerSize)); + __ IncrementCounter(&Counters::keyed_load_interceptor, 1); + + // Check that the name has not changed. + __ Cmp(rax, Handle<String>(name)); + __ j(not_equal, &miss); + + LookupResult lookup; + LookupPostInterceptor(holder, name, &lookup); + GenerateLoadInterceptor(receiver, + holder, + &lookup, + rcx, + rax, + rdx, + rbx, + name, + &miss); + __ bind(&miss); + __ DecrementCounter(&Counters::keyed_load_interceptor, 1); + GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC); + + // Return the generated code. + return GetCode(INTERCEPTOR, name); +} + + +Object* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) { + // ----------- S t a t e ------------- + // -- rsp[0] : return address + // -- rsp[8] : name + // -- rsp[16] : receiver + // ----------------------------------- + Label miss; + + __ movq(rax, Operand(rsp, kPointerSize)); + __ movq(rcx, Operand(rsp, 2 * kPointerSize)); + __ IncrementCounter(&Counters::keyed_load_string_length, 1); + + // Check that the name has not changed. + __ Cmp(rax, Handle<String>(name)); + __ j(not_equal, &miss); + + GenerateLoadStringLength(masm(), rcx, rdx, &miss); + __ bind(&miss); + __ DecrementCounter(&Counters::keyed_load_string_length, 1); + GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC); + + // Return the generated code. + return GetCode(CALLBACKS, name); +} + + +Object* StoreStubCompiler::CompileStoreCallback(JSObject* object, + AccessorInfo* callback, + String* name) { + // ----------- S t a t e ------------- + // -- rax : value + // -- rcx : name + // -- rsp[0] : return address + // -- rsp[8] : receiver + // ----------------------------------- + Label miss; + + // Get the object from the stack. + __ movq(rbx, Operand(rsp, 1 * kPointerSize)); + + // Check that the object isn't a smi. + __ testl(rbx, Immediate(kSmiTagMask)); + __ j(zero, &miss); + + // Check that the map of the object hasn't changed. 
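+  // (The map comparison is the usual shape guard: if the object has
+  // gained or lost properties since this stub was compiled, its map
+  // word differs and the stub bails out to the miss path.)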
+ __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset), + Handle<Map>(object->map())); + __ j(not_equal, &miss); + + // Perform global security token check if needed. + if (object->IsJSGlobalProxy()) { + __ CheckAccessGlobalProxy(rbx, rdx, &miss); + } + + // Stub never generated for non-global objects that require access + // checks. + ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded()); + + __ pop(rbx); // remove the return address + __ push(Operand(rsp, 0)); // receiver + __ Push(Handle<AccessorInfo>(callback)); // callback info + __ push(rcx); // name + __ push(rax); // value + __ push(rbx); // restore return address + + // Do tail-call to the runtime system. + ExternalReference store_callback_property = + ExternalReference(IC_Utility(IC::kStoreCallbackProperty)); + __ TailCallRuntime(store_callback_property, 4); + + // Handle store cache miss. + __ bind(&miss); + __ Move(rcx, Handle<String>(name)); // restore name + Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss)); + __ Jump(ic, RelocInfo::CODE_TARGET); + + // Return the generated code. + return GetCode(CALLBACKS, name); +} + + +Object* StoreStubCompiler::CompileStoreField(JSObject* object, + int index, + Map* transition, + String* name) { + // ----------- S t a t e ------------- + // -- rax : value + // -- rcx : name + // -- rsp[0] : return address + // -- rsp[8] : receiver + // ----------------------------------- + Label miss; + + // Get the object from the stack. + __ movq(rbx, Operand(rsp, 1 * kPointerSize)); + + // Generate store field code. Trashes the name register. + GenerateStoreField(masm(), + Builtins::StoreIC_ExtendStorage, + object, + index, + transition, + rbx, rcx, rdx, + &miss); + + // Handle store cache miss. + __ bind(&miss); + __ Move(rcx, Handle<String>(name)); // restore name + Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss)); + __ Jump(ic, RelocInfo::CODE_TARGET); + + // Return the generated code. + return GetCode(transition == NULL ? FIELD : MAP_TRANSITION, name); +} + + +Object* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver, + String* name) { + // ----------- S t a t e ------------- + // -- rax : value + // -- rcx : name + // -- rsp[0] : return address + // -- rsp[8] : receiver + // ----------------------------------- + Label miss; + + // Get the object from the stack. + __ movq(rbx, Operand(rsp, 1 * kPointerSize)); + + // Check that the object isn't a smi. + __ testl(rbx, Immediate(kSmiTagMask)); + __ j(zero, &miss); + + // Check that the map of the object hasn't changed. + __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset), + Handle<Map>(receiver->map())); + __ j(not_equal, &miss); + + // Perform global security token check if needed. + if (receiver->IsJSGlobalProxy()) { + __ CheckAccessGlobalProxy(rbx, rdx, &miss); + } + + // Stub never generated for non-global objects that require access + // checks. + ASSERT(receiver->IsJSGlobalProxy() || !receiver->IsAccessCheckNeeded()); + + __ pop(rbx); // remove the return address + __ push(Operand(rsp, 0)); // receiver + __ push(rcx); // name + __ push(rax); // value + __ push(rbx); // restore return address + + // Do tail-call to the runtime system. + ExternalReference store_ic_property = + ExternalReference(IC_Utility(IC::kStoreInterceptorProperty)); + __ TailCallRuntime(store_ic_property, 3); + + // Handle store cache miss. 
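+  // (rcx is reloaded with the name before jumping to the generic
+  // StoreIC miss builtin, which appears to expect the name register
+  // to be intact on entry.)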
+ __ bind(&miss); + __ Move(rcx, Handle<String>(name)); // restore name + Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss)); + __ Jump(ic, RelocInfo::CODE_TARGET); + + // Return the generated code. + return GetCode(INTERCEPTOR, name); } Object* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object, JSGlobalPropertyCell* cell, String* name) { - UNIMPLEMENTED(); - return NULL; + // ----------- S t a t e ------------- + // -- rax : value + // -- rcx : name + // -- rsp[0] : return address + // -- rsp[8] : receiver + // ----------------------------------- + Label miss; + + // Check that the map of the global has not changed. + __ movq(rbx, Operand(rsp, kPointerSize)); + __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset), + Handle<Map>(object->map())); + __ j(not_equal, &miss); + + // Store the value in the cell. + __ Move(rcx, Handle<JSGlobalPropertyCell>(cell)); + __ movq(FieldOperand(rcx, JSGlobalPropertyCell::kValueOffset), rax); + + // Return the value (register rax). + __ IncrementCounter(&Counters::named_store_global_inline, 1); + __ ret(0); + + // Handle store cache miss. + __ bind(&miss); + __ IncrementCounter(&Counters::named_store_global_inline_miss, 1); + Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss)); + __ Jump(ic, RelocInfo::CODE_TARGET); + + // Return the generated code. + return GetCode(NORMAL, name); +} + + +Object* KeyedLoadStubCompiler::CompileLoadField(String* name, + JSObject* receiver, + JSObject* holder, + int index) { + // ----------- S t a t e ------------- + // -- rsp[0] : return address + // -- rsp[8] : name + // -- rsp[16] : receiver + // ----------------------------------- + Label miss; + + __ movq(rax, Operand(rsp, kPointerSize)); + __ movq(rcx, Operand(rsp, 2 * kPointerSize)); + __ IncrementCounter(&Counters::keyed_load_field, 1); + + // Check that the name has not changed. + __ Cmp(rax, Handle<String>(name)); + __ j(not_equal, &miss); + + GenerateLoadField(receiver, holder, rcx, rbx, rdx, index, name, &miss); + + __ bind(&miss); + __ DecrementCounter(&Counters::keyed_load_field, 1); + GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC); + + // Return the generated code. + return GetCode(FIELD, name); +} + + +Object* KeyedStoreStubCompiler::CompileStoreField(JSObject* object, + int index, + Map* transition, + String* name) { + // ----------- S t a t e ------------- + // -- rax : value + // -- rsp[0] : return address + // -- rsp[8] : key + // -- rsp[16] : receiver + // ----------------------------------- + Label miss; + + __ IncrementCounter(&Counters::keyed_store_field, 1); + + // Get the name from the stack. + __ movq(rcx, Operand(rsp, 1 * kPointerSize)); + // Check that the name has not changed. + __ Cmp(rcx, Handle<String>(name)); + __ j(not_equal, &miss); + + // Get the object from the stack. + __ movq(rbx, Operand(rsp, 2 * kPointerSize)); + + // Generate store field code. Trashes the name register. + GenerateStoreField(masm(), + Builtins::KeyedStoreIC_ExtendStorage, + object, + index, + transition, + rbx, rcx, rdx, + &miss); + + // Handle store cache miss. + __ bind(&miss); + __ DecrementCounter(&Counters::keyed_store_field, 1); + Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Miss)); + __ Jump(ic, RelocInfo::CODE_TARGET); + + // Return the generated code. + return GetCode(transition == NULL ? 
FIELD : MAP_TRANSITION, name); } @@ -175,7 +1595,149 @@ Object* StubCompiler::CompileLazyCompile(Code::Flags flags) { return GetCodeWithFlags(flags, "LazyCompileStub"); } -#undef __ +void StubCompiler::GenerateLoadInterceptor(JSObject* object, + JSObject* holder, + LookupResult* lookup, + Register receiver, + Register name_reg, + Register scratch1, + Register scratch2, + String* name, + Label* miss) { + LoadInterceptorCompiler compiler(name_reg); + CompileLoadInterceptor(&compiler, + this, + masm(), + object, + holder, + name, + lookup, + receiver, + scratch1, + scratch2, + miss); +} + + +void StubCompiler::GenerateLoadCallback(JSObject* object, + JSObject* holder, + Register receiver, + Register name_reg, + Register scratch1, + Register scratch2, + AccessorInfo* callback, + String* name, + Label* miss) { + // Check that the receiver isn't a smi. + __ testl(receiver, Immediate(kSmiTagMask)); + __ j(zero, miss); + + // Check that the maps haven't changed. + Register reg = + CheckPrototypes(object, receiver, holder, + scratch1, scratch2, name, miss); + + // Push the arguments on the JS stack of the caller. + __ pop(scratch2); // remove return address + __ push(receiver); // receiver + __ push(reg); // holder + __ Move(reg, Handle<AccessorInfo>(callback)); // callback data + __ push(reg); + __ push(FieldOperand(reg, AccessorInfo::kDataOffset)); + __ push(name_reg); // name + __ push(scratch2); // restore return address + + // Do tail-call to the runtime system. + ExternalReference load_callback_property = + ExternalReference(IC_Utility(IC::kLoadCallbackProperty)); + __ TailCallRuntime(load_callback_property, 5); +} + + +Register StubCompiler::CheckPrototypes(JSObject* object, + Register object_reg, + JSObject* holder, + Register holder_reg, + Register scratch, + String* name, + Label* miss) { + // Check that the maps haven't changed. + Register result = + __ CheckMaps(object, object_reg, holder, holder_reg, scratch, miss); + + // If we've skipped any global objects, it's not enough to verify + // that their maps haven't changed. + while (object != holder) { + if (object->IsGlobalObject()) { + GlobalObject* global = GlobalObject::cast(object); + Object* probe = global->EnsurePropertyCell(name); + if (probe->IsFailure()) { + set_failure(Failure::cast(probe)); + return result; + } + JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(probe); + ASSERT(cell->value()->IsTheHole()); + __ Move(scratch, Handle<Object>(cell)); + __ Cmp(FieldOperand(scratch, JSGlobalPropertyCell::kValueOffset), + Factory::the_hole_value()); + __ j(not_equal, miss); + } + object = JSObject::cast(object->GetPrototype()); + } + + // Return the register containing the holder. + return result; +} + + +void StubCompiler::GenerateLoadField(JSObject* object, + JSObject* holder, + Register receiver, + Register scratch1, + Register scratch2, + int index, + String* name, + Label* miss) { + // Check that the receiver isn't a smi. + __ testl(receiver, Immediate(kSmiTagMask)); + __ j(zero, miss); + + // Check the prototype chain. + Register reg = + CheckPrototypes(object, receiver, holder, + scratch1, scratch2, name, miss); + + // Get the value from the properties. + GenerateFastPropertyLoad(masm(), rax, reg, holder, index); + __ ret(0); +} + + +void StubCompiler::GenerateLoadConstant(JSObject* object, + JSObject* holder, + Register receiver, + Register scratch1, + Register scratch2, + Object* value, + String* name, + Label* miss) { + // Check that the receiver isn't a smi. 
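+  // (Smis are immediates with a zero low tag bit, so anding with
+  // kSmiTagMask and branching on zero weeds out values that have no
+  // map to inspect.)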
+ __ testl(receiver, Immediate(kSmiTagMask)); + __ j(zero, miss); + + // Check that the maps haven't changed. + Register reg = + CheckPrototypes(object, receiver, holder, + scratch1, scratch2, name, miss); + + // Return the constant value. + __ Move(rax, Handle<Object>(value)); + __ ret(0); +} + + +#undef __ + } } // namespace v8::internal diff --git a/deps/v8/src/zone-inl.h b/deps/v8/src/zone-inl.h index 9af6251bf1..b3141a4810 100644 --- a/deps/v8/src/zone-inl.h +++ b/deps/v8/src/zone-inl.h @@ -68,6 +68,223 @@ void Zone::adjust_segment_bytes_allocated(int delta) { } +template <typename C> +bool ZoneSplayTree<C>::Insert(const Key& key, Locator* locator) { + if (is_empty()) { + // If the tree is empty, insert the new node. + root_ = new Node(key, C::kNoValue); + } else { + // Splay on the key to move the last node on the search path + // for the key to the root of the tree. + Splay(key); + // Ignore repeated insertions with the same key. + int cmp = C::Compare(key, root_->key_); + if (cmp == 0) { + locator->bind(root_); + return false; + } + // Insert the new node. + Node* node = new Node(key, C::kNoValue); + if (cmp > 0) { + node->left_ = root_; + node->right_ = root_->right_; + root_->right_ = NULL; + } else { + node->right_ = root_; + node->left_ = root_->left_; + root_->left_ = NULL; + } + root_ = node; + } + locator->bind(root_); + return true; +} + + +template <typename C> +bool ZoneSplayTree<C>::Find(const Key& key, Locator* locator) { + if (is_empty()) + return false; + Splay(key); + if (C::Compare(key, root_->key_) == 0) { + locator->bind(root_); + return true; + } else { + return false; + } +} + + +template <typename C> +bool ZoneSplayTree<C>::FindGreatestLessThan(const Key& key, + Locator* locator) { + if (is_empty()) + return false; + // Splay on the key to move the node with the given key or the last + // node on the search path to the top of the tree. + Splay(key); + // Now the result is either the root node or the greatest node in + // the left subtree. + int cmp = C::Compare(root_->key_, key); + if (cmp <= 0) { + locator->bind(root_); + return true; + } else { + Node* temp = root_; + root_ = root_->left_; + bool result = FindGreatest(locator); + root_ = temp; + return result; + } +} + + +template <typename C> +bool ZoneSplayTree<C>::FindLeastGreaterThan(const Key& key, + Locator* locator) { + if (is_empty()) + return false; + // Splay on the key to move the node with the given key or the last + // node on the search path to the top of the tree. + Splay(key); + // Now the result is either the root node or the least node in + // the right subtree. + int cmp = C::Compare(root_->key_, key); + if (cmp >= 0) { + locator->bind(root_); + return true; + } else { + Node* temp = root_; + root_ = root_->right_; + bool result = FindLeast(locator); + root_ = temp; + return result; + } +} + + +template <typename C> +bool ZoneSplayTree<C>::FindGreatest(Locator* locator) { + if (is_empty()) + return false; + Node* current = root_; + while (current->right_ != NULL) + current = current->right_; + locator->bind(current); + return true; +} + + +template <typename C> +bool ZoneSplayTree<C>::FindLeast(Locator* locator) { + if (is_empty()) + return false; + Node* current = root_; + while (current->left_ != NULL) + current = current->left_; + locator->bind(current); + return true; +} + + +template <typename C> +bool ZoneSplayTree<C>::Remove(const Key& key) { + // Bail if the tree is empty + if (is_empty()) + return false; + // Splay on the key to move the node with the given key to the top. 
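+  // (Classic splay-tree removal: after the splay the key, if present,
+  // sits at the root, so deleting reduces to re-joining the root's two
+  // subtrees; see Splay() below for the top-down variant used here.)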
+ Splay(key); + // Bail if the key is not in the tree + if (C::Compare(key, root_->key_) != 0) + return false; + if (root_->left_ == NULL) { + // No left child, so the new tree is just the right child. + root_ = root_->right_; + } else { + // Left child exists. + Node* right = root_->right_; + // Make the original left child the new root. + root_ = root_->left_; + // Splay to make sure that the new root has an empty right child. + Splay(key); + // Insert the original right child as the right child of the new + // root. + root_->right_ = right; + } + return true; +} + + +template <typename C> +void ZoneSplayTree<C>::Splay(const Key& key) { + if (is_empty()) + return; + Node dummy_node(C::kNoKey, C::kNoValue); + // Create a dummy node. The use of the dummy node is a bit + // counter-intuitive: The right child of the dummy node will hold + // the L tree of the algorithm. The left child of the dummy node + // will hold the R tree of the algorithm. Using a dummy node, left + // and right will always be nodes and we avoid special cases. + Node* dummy = &dummy_node; + Node* left = dummy; + Node* right = dummy; + Node* current = root_; + while (true) { + int cmp = C::Compare(key, current->key_); + if (cmp < 0) { + if (current->left_ == NULL) + break; + if (C::Compare(key, current->left_->key_) < 0) { + // Rotate right. + Node* temp = current->left_; + current->left_ = temp->right_; + temp->right_ = current; + current = temp; + if (current->left_ == NULL) + break; + } + // Link right. + right->left_ = current; + right = current; + current = current->left_; + } else if (cmp > 0) { + if (current->right_ == NULL) + break; + if (C::Compare(key, current->right_->key_) > 0) { + // Rotate left. + Node* temp = current->right_; + current->right_ = temp->left_; + temp->left_ = current; + current = temp; + if (current->right_ == NULL) + break; + } + // Link left. + left->right_ = current; + left = current; + current = current->right_; + } else { + break; + } + } + // Assemble. + left->right_ = current->left_; + right->left_ = current->right_; + current->left_ = dummy->right_; + current->right_ = dummy->left_; + root_ = current; +} + + +template <typename Node, class Callback> +static void DoForEach(Node* node, Callback* callback) { + if (node == NULL) return; + DoForEach<Node, Callback>(node->left(), callback); + callback->Call(node->key(), node->value()); + DoForEach<Node, Callback>(node->right(), callback); +} + + } } // namespace v8::internal #endif // V8_ZONE_INL_H_ diff --git a/deps/v8/src/zone.cc b/deps/v8/src/zone.cc index d78c19b891..33fe5571f1 100644 --- a/deps/v8/src/zone.cc +++ b/deps/v8/src/zone.cc @@ -176,7 +176,10 @@ Address Zone::NewExpand(int size) { new_size = Max(kSegmentOverhead + size, kMaximumSegmentSize); } Segment* segment = Segment::New(new_size); - if (segment == NULL) V8::FatalProcessOutOfMemory("Zone"); + if (segment == NULL) { + V8::FatalProcessOutOfMemory("Zone"); + return NULL; + } // Recompute 'top' and 'limit' based on the new segment. Address result = RoundUp(segment->start(), kAlignment); diff --git a/deps/v8/src/zone.h b/deps/v8/src/zone.h index a8b26e9fd2..cdbab32821 100644 --- a/deps/v8/src/zone.h +++ b/deps/v8/src/zone.h @@ -204,6 +204,108 @@ class ZoneScope BASE_EMBEDDED { }; +template <typename Node, class Callback> +static void DoForEach(Node* node, Callback* callback); + + +// A zone splay tree. 
The config type parameter encapsulates the +// different configurations of a concrete splay tree: +// +// typedef Key: the key type +// typedef Value: the value type +// static const kNoKey: the dummy key used when no key is set +// static const kNoValue: the dummy value used to initialize nodes +// int (Compare)(Key& a, Key& b) -> {-1, 0, 1}: comparison function +// +template <typename Config> +class ZoneSplayTree : public ZoneObject { + public: + typedef typename Config::Key Key; + typedef typename Config::Value Value; + + class Locator; + + ZoneSplayTree() : root_(NULL) { } + + // Inserts the given key in this tree with the given value. Returns + // true if a node was inserted, otherwise false. If found the locator + // is enabled and provides access to the mapping for the key. + bool Insert(const Key& key, Locator* locator); + + // Looks up the key in this tree and returns true if it was found, + // otherwise false. If the node is found the locator is enabled and + // provides access to the mapping for the key. + bool Find(const Key& key, Locator* locator); + + // Finds the mapping with the greatest key less than or equal to the + // given key. + bool FindGreatestLessThan(const Key& key, Locator* locator); + + // Find the mapping with the greatest key in this tree. + bool FindGreatest(Locator* locator); + + // Finds the mapping with the least key greater than or equal to the + // given key. + bool FindLeastGreaterThan(const Key& key, Locator* locator); + + // Find the mapping with the least key in this tree. + bool FindLeast(Locator* locator); + + // Remove the node with the given key from the tree. + bool Remove(const Key& key); + + bool is_empty() { return root_ == NULL; } + + // Perform the splay operation for the given key. Moves the node with + // the given key to the top of the tree. If no node has the given + // key, the last node on the search path is moved to the top of the + // tree. + void Splay(const Key& key); + + class Node : public ZoneObject { + public: + Node(const Key& key, const Value& value) + : key_(key), + value_(value), + left_(NULL), + right_(NULL) { } + Key key() { return key_; } + Value value() { return value_; } + Node* left() { return left_; } + Node* right() { return right_; } + private: + friend class ZoneSplayTree; + friend class Locator; + Key key_; + Value value_; + Node* left_; + Node* right_; + }; + + // A locator provides access to a node in the tree without actually + // exposing the node. 
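+  // (Typical use, as a sketch: tree.Insert(key, &loc) binds loc to the
+  // node for key, after which loc.set_value(v) updates the mapping in
+  // place without a second lookup.)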
+ class Locator { + public: + explicit Locator(Node* node) : node_(node) { } + Locator() : node_(NULL) { } + const Key& key() { return node_->key_; } + Value& value() { return node_->value_; } + void set_value(const Value& value) { node_->value_ = value; } + inline void bind(Node* node) { node_ = node; } + private: + Node* node_; + }; + + template <class Callback> + void ForEach(Callback* c) { + DoForEach<typename ZoneSplayTree<Config>::Node, Callback>(root_, c); + } + + private: + Node* root_; +}; + + } } // namespace v8::internal #endif // V8_ZONE_H_ diff --git a/deps/v8/test/cctest/cctest.status b/deps/v8/test/cctest/cctest.status index bb82fc83c6..4fc2f3a5ee 100644 --- a/deps/v8/test/cctest/cctest.status +++ b/deps/v8/test/cctest/cctest.status @@ -63,7 +63,7 @@ test-api/TryCatchInTryFinally: FAIL [ $arch == x64 ] -test-regexp/Graph: CRASH || FAIL +test-regexp/Graph: PASS || CRASH || FAIL test-decls/Present: CRASH || FAIL test-decls/Unknown: CRASH || FAIL test-decls/Appearing: CRASH || FAIL @@ -108,17 +108,17 @@ test-debug/StepWithException: CRASH || FAIL test-debug/DebugBreak: CRASH || FAIL test-debug/DisableBreak: CRASH || FAIL test-debug/MessageQueues: CRASH || FAIL -test-debug/CallFunctionInDebugger: CRASH || FAIL +test-debug/CallFunctionInDebugger: SKIP test-debug/RecursiveBreakpoints: CRASH || FAIL test-debug/DebuggerUnload: CRASH || FAIL -test-debug/DebuggerClearMessageHandler: CRASH || FAIL -test-debug/DebuggerClearMessageHandlerWhileActive: CRASH || FAIL test-debug/DebuggerHostDispatch: CRASH || FAIL test-debug/DebugBreakInMessageHandler: CRASH || FAIL +test-debug/NoDebugBreakInAfterCompileMessageHandler: CRASH || FAIL test-api/HugeConsStringOutOfMemory: CRASH || FAIL test-api/OutOfMemory: CRASH || FAIL test-api/OutOfMemoryNested: CRASH || FAIL test-api/Threading: CRASH || FAIL +test-api/Threading2: PASS || TIMEOUT test-api/TryCatchSourceInfo: CRASH || FAIL -test-api/RegExpInterruption: CRASH || FAIL -test-api/RegExpStringModification: CRASH || FAIL +test-api/RegExpInterruption: PASS || TIMEOUT +test-api/RegExpStringModification: PASS || TIMEOUT diff --git a/deps/v8/test/cctest/test-api.cc b/deps/v8/test/cctest/test-api.cc index 5b04b2cd8e..e3f52b4769 100644 --- a/deps/v8/test/cctest/test-api.cc +++ b/deps/v8/test/cctest/test-api.cc @@ -1266,6 +1266,38 @@ THREADED_TEST(InternalFields) { } +THREADED_TEST(InternalFieldsNativePointers) { + v8::HandleScope scope; + LocalContext env; + + Local<v8::FunctionTemplate> templ = v8::FunctionTemplate::New(); + Local<v8::ObjectTemplate> instance_templ = templ->InstanceTemplate(); + instance_templ->SetInternalFieldCount(1); + Local<v8::Object> obj = templ->GetFunction()->NewInstance(); + CHECK_EQ(1, obj->InternalFieldCount()); + CHECK(obj->GetPointerFromInternalField(0) == NULL); + + char* data = new char[100]; + + void* aligned = data; + CHECK_EQ(0, reinterpret_cast<uintptr_t>(aligned) & 0x1); + void* unaligned = data + 1; + CHECK_EQ(1, reinterpret_cast<uintptr_t>(unaligned) & 0x1); + + // Check reading and writing aligned pointers. + obj->SetPointerInInternalField(0, aligned); + i::Heap::CollectAllGarbage(); + CHECK_EQ(aligned, obj->GetPointerFromInternalField(0)); + + // Check reading and writing unaligned pointers. 
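+  // (A pointer with its low bit set cannot be stored as an ordinary
+  // tagged value, so the implementation presumably has to box it; the
+  // GC runs below check that both encodings survive a collection.)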
+ obj->SetPointerInInternalField(0, unaligned); + i::Heap::CollectAllGarbage(); + CHECK_EQ(unaligned, obj->GetPointerFromInternalField(0)); + + delete[] data; +} + + THREADED_TEST(IdentityHash) { v8::HandleScope scope; LocalContext env; @@ -5024,6 +5056,236 @@ THREADED_TEST(InterceptorLoadICWithOverride) { } +// Test the case when we stored field into +// a stub, but interceptor produced value on its own. +THREADED_TEST(InterceptorLoadICFieldNotNeeded) { + CheckInterceptorLoadIC(InterceptorLoadXICGetter, + "proto = new Object();" + "o.__proto__ = proto;" + "proto.x = 239;" + "for (var i = 0; i < 1000; i++) {" + " o.x;" + // Now it should be ICed and keep a reference to x defined on proto + "}" + "var result = 0;" + "for (var i = 0; i < 1000; i++) {" + " result += o.x;" + "}" + "result;", + 42 * 1000); +} + + +// Test the case when we stored field into +// a stub, but it got invalidated later on. +THREADED_TEST(InterceptorLoadICInvalidatedField) { + CheckInterceptorLoadIC(InterceptorLoadXICGetter, + "proto1 = new Object();" + "proto2 = new Object();" + "o.__proto__ = proto1;" + "proto1.__proto__ = proto2;" + "proto2.y = 239;" + "for (var i = 0; i < 1000; i++) {" + " o.y;" + // Now it should be ICed and keep a reference to y defined on proto2 + "}" + "proto1.y = 42;" + "var result = 0;" + "for (var i = 0; i < 1000; i++) {" + " result += o.y;" + "}" + "result;", + 42 * 1000); +} + + +// Test the case when we stored field into +// a stub, but it got invalidated later on due to override on +// global object which is between interceptor and fields' holders. +THREADED_TEST(InterceptorLoadICInvalidatedFieldViaGlobal) { + CheckInterceptorLoadIC(InterceptorLoadXICGetter, + "o.__proto__ = this;" // set a global to be a proto of o. + "this.__proto__.y = 239;" + "for (var i = 0; i < 10; i++) {" + " if (o.y != 239) throw 'oops: ' + o.y;" + // Now it should be ICed and keep a reference to y defined on field_holder. + "}" + "this.y = 42;" // Assign on a global. 
+ "var result = 0;" + "for (var i = 0; i < 10; i++) {" + " result += o.y;" + "}" + "result;", + 42 * 10); +} + + +static v8::Handle<Value> Return239(Local<String> name, const AccessorInfo&) { + ApiTestFuzzer::Fuzz(); + return v8_num(239); +} + + +static void SetOnThis(Local<String> name, + Local<Value> value, + const AccessorInfo& info) { + info.This()->ForceSet(name, value); +} + + +THREADED_TEST(InterceptorLoadICWithCallbackOnHolder) { + v8::HandleScope scope; + v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(); + templ->SetNamedPropertyHandler(InterceptorLoadXICGetter); + templ->SetAccessor(v8_str("y"), Return239); + LocalContext context; + context->Global()->Set(v8_str("o"), templ->NewInstance()); + v8::Handle<Value> value = CompileRun( + "var result = 0;" + "for (var i = 0; i < 7; i++) {" + " result = o.y;" + "}"); + CHECK_EQ(239, value->Int32Value()); +} + + +THREADED_TEST(InterceptorLoadICWithCallbackOnProto) { + v8::HandleScope scope; + v8::Handle<v8::ObjectTemplate> templ_o = ObjectTemplate::New(); + templ_o->SetNamedPropertyHandler(InterceptorLoadXICGetter); + v8::Handle<v8::ObjectTemplate> templ_p = ObjectTemplate::New(); + templ_p->SetAccessor(v8_str("y"), Return239); + + LocalContext context; + context->Global()->Set(v8_str("o"), templ_o->NewInstance()); + context->Global()->Set(v8_str("p"), templ_p->NewInstance()); + + v8::Handle<Value> value = CompileRun( + "o.__proto__ = p;" + "var result = 0;" + "for (var i = 0; i < 7; i++) {" + " result = o.x + o.y;" + "}"); + CHECK_EQ(239 + 42, value->Int32Value()); +} + + +THREADED_TEST(InterceptorLoadICForCallbackWithOverride) { + v8::HandleScope scope; + v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(); + templ->SetNamedPropertyHandler(InterceptorLoadXICGetter); + templ->SetAccessor(v8_str("y"), Return239); + + LocalContext context; + context->Global()->Set(v8_str("o"), templ->NewInstance()); + + v8::Handle<Value> value = CompileRun( + "fst = new Object(); fst.__proto__ = o;" + "snd = new Object(); snd.__proto__ = fst;" + "var result1 = 0;" + "for (var i = 0; i < 7; i++) {" + " result1 = snd.x;" + "}" + "fst.x = 239;" + "var result = 0;" + "for (var i = 0; i < 7; i++) {" + " result = snd.x;" + "}" + "result + result1"); + CHECK_EQ(239 + 42, value->Int32Value()); +} + + +// Test the case when we stored callback into +// a stub, but interceptor produced value on its own. +THREADED_TEST(InterceptorLoadICCallbackNotNeeded) { + v8::HandleScope scope; + v8::Handle<v8::ObjectTemplate> templ_o = ObjectTemplate::New(); + templ_o->SetNamedPropertyHandler(InterceptorLoadXICGetter); + v8::Handle<v8::ObjectTemplate> templ_p = ObjectTemplate::New(); + templ_p->SetAccessor(v8_str("y"), Return239); + + LocalContext context; + context->Global()->Set(v8_str("o"), templ_o->NewInstance()); + context->Global()->Set(v8_str("p"), templ_p->NewInstance()); + + v8::Handle<Value> value = CompileRun( + "o.__proto__ = p;" + "for (var i = 0; i < 7; i++) {" + " o.x;" + // Now it should be ICed and keep a reference to x defined on p + "}" + "var result = 0;" + "for (var i = 0; i < 7; i++) {" + " result += o.x;" + "}" + "result"); + CHECK_EQ(42 * 7, value->Int32Value()); +} + + +// Test the case when we stored callback into +// a stub, but it got invalidated later on. 
+THREADED_TEST(InterceptorLoadICInvalidatedCallback) { + v8::HandleScope scope; + v8::Handle<v8::ObjectTemplate> templ_o = ObjectTemplate::New(); + templ_o->SetNamedPropertyHandler(InterceptorLoadXICGetter); + v8::Handle<v8::ObjectTemplate> templ_p = ObjectTemplate::New(); + templ_p->SetAccessor(v8_str("y"), Return239, SetOnThis); + + LocalContext context; + context->Global()->Set(v8_str("o"), templ_o->NewInstance()); + context->Global()->Set(v8_str("p"), templ_p->NewInstance()); + + v8::Handle<Value> value = CompileRun( + "inbetween = new Object();" + "o.__proto__ = inbetween;" + "inbetween.__proto__ = p;" + "for (var i = 0; i < 10; i++) {" + " o.y;" + // Now it should be ICed and keep a reference to y defined on p + "}" + "inbetween.y = 42;" + "var result = 0;" + "for (var i = 0; i < 10; i++) {" + " result += o.y;" + "}" + "result"); + CHECK_EQ(42 * 10, value->Int32Value()); +} + + +// Test the case when we stored callback into +// a stub, but it got invalidated later on due to override on +// global object which is between interceptor and callbacks' holders. +THREADED_TEST(InterceptorLoadICInvalidatedCallbackViaGlobal) { + v8::HandleScope scope; + v8::Handle<v8::ObjectTemplate> templ_o = ObjectTemplate::New(); + templ_o->SetNamedPropertyHandler(InterceptorLoadXICGetter); + v8::Handle<v8::ObjectTemplate> templ_p = ObjectTemplate::New(); + templ_p->SetAccessor(v8_str("y"), Return239, SetOnThis); + + LocalContext context; + context->Global()->Set(v8_str("o"), templ_o->NewInstance()); + context->Global()->Set(v8_str("p"), templ_p->NewInstance()); + + v8::Handle<Value> value = CompileRun( + "o.__proto__ = this;" + "this.__proto__ = p;" + "for (var i = 0; i < 10; i++) {" + " if (o.y != 239) throw 'oops: ' + o.y;" + // Now it should be ICed and keep a reference to y defined on p + "}" + "this.y = 42;" + "var result = 0;" + "for (var i = 0; i < 10; i++) {" + " result += o.y;" + "}" + "result"); + CHECK_EQ(42 * 10, value->Int32Value()); +} + + static v8::Handle<Value> InterceptorLoadICGetter0(Local<String> name, const AccessorInfo& info) { ApiTestFuzzer::Fuzz(); @@ -5108,6 +5370,192 @@ THREADED_TEST(InterceptorCallIC) { CHECK_EQ(42, value->Int32Value()); } + +// This test checks that if interceptor doesn't provide +// a value, we can fetch regular value. 
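+// (NoBlockGetterX, judging by its use here, returns an empty handle,
+// which tells the runtime the interceptor declined and the lookup
+// should continue to the real property on the object.)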
+THREADED_TEST(InterceptorCallICSeesOthers) { + v8::HandleScope scope; + v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(); + templ->SetNamedPropertyHandler(NoBlockGetterX); + LocalContext context; + context->Global()->Set(v8_str("o"), templ->NewInstance()); + v8::Handle<Value> value = CompileRun( + "o.x = function f(x) { return x + 1; };" + "var result = 0;" + "for (var i = 0; i < 7; i++) {" + " result = o.x(41);" + "}"); + CHECK_EQ(42, value->Int32Value()); +} + + +static v8::Handle<Value> call_ic_function4; +static v8::Handle<Value> InterceptorCallICGetter4(Local<String> name, + const AccessorInfo& info) { + ApiTestFuzzer::Fuzz(); + CHECK(v8_str("x")->Equals(name)); + return call_ic_function4; +} + + +// This test checks that if interceptor provides a function, +// even if we cached shadowed variant, interceptor's function +// is invoked +THREADED_TEST(InterceptorCallICCacheableNotNeeded) { + v8::HandleScope scope; + v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(); + templ->SetNamedPropertyHandler(InterceptorCallICGetter4); + LocalContext context; + context->Global()->Set(v8_str("o"), templ->NewInstance()); + call_ic_function4 = + v8_compile("function f(x) { return x - 1; }; f")->Run(); + v8::Handle<Value> value = CompileRun( + "o.__proto__.x = function(x) { return x + 1; };" + "var result = 0;" + "for (var i = 0; i < 1000; i++) {" + " result = o.x(42);" + "}"); + CHECK_EQ(41, value->Int32Value()); +} + + +// Test the case when we stored cacheable lookup into +// a stub, but it got invalidated later on +THREADED_TEST(InterceptorCallICInvalidatedCacheable) { + v8::HandleScope scope; + v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(); + templ->SetNamedPropertyHandler(NoBlockGetterX); + LocalContext context; + context->Global()->Set(v8_str("o"), templ->NewInstance()); + v8::Handle<Value> value = CompileRun( + "proto1 = new Object();" + "proto2 = new Object();" + "o.__proto__ = proto1;" + "proto1.__proto__ = proto2;" + "proto2.y = function(x) { return x + 1; };" + // Invoke it many times to compile a stub + "for (var i = 0; i < 7; i++) {" + " o.y(42);" + "}" + "proto1.y = function(x) { return x - 1; };" + "var result = 0;" + "for (var i = 0; i < 7; i++) {" + " result += o.y(42);" + "}"); + CHECK_EQ(41 * 7, value->Int32Value()); +} + + +static v8::Handle<Value> call_ic_function5; +static v8::Handle<Value> InterceptorCallICGetter5(Local<String> name, + const AccessorInfo& info) { + ApiTestFuzzer::Fuzz(); + if (v8_str("x")->Equals(name)) + return call_ic_function5; + else + return Local<Value>(); +} + + +// This test checks that if interceptor doesn't provide a function, +// cached constant function is used +THREADED_TEST(InterceptorCallICConstantFunctionUsed) { + v8::HandleScope scope; + v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(); + templ->SetNamedPropertyHandler(NoBlockGetterX); + LocalContext context; + context->Global()->Set(v8_str("o"), templ->NewInstance()); + v8::Handle<Value> value = CompileRun( + "function inc(x) { return x + 1; };" + "inc(1);" + "o.x = inc;" + "var result = 0;" + "for (var i = 0; i < 1000; i++) {" + " result = o.x(42);" + "}"); + CHECK_EQ(43, value->Int32Value()); +} + + +// This test checks that if interceptor provides a function, +// even if we cached constant function, interceptor's function +// is invoked +THREADED_TEST(InterceptorCallICConstantFunctionNotNeeded) { + v8::HandleScope scope; + v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(); + 
templ->SetNamedPropertyHandler(InterceptorCallICGetter5); + LocalContext context; + context->Global()->Set(v8_str("o"), templ->NewInstance()); + call_ic_function5 = + v8_compile("function f(x) { return x - 1; }; f")->Run(); + v8::Handle<Value> value = CompileRun( + "function inc(x) { return x + 1; };" + "inc(1);" + "o.x = inc;" + "var result = 0;" + "for (var i = 0; i < 1000; i++) {" + " result = o.x(42);" + "}"); + CHECK_EQ(41, value->Int32Value()); +} + + +// Test the case when we stored constant function into +// a stub, but it got invalidated later on +THREADED_TEST(InterceptorCallICInvalidatedConstantFunction) { + v8::HandleScope scope; + v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(); + templ->SetNamedPropertyHandler(NoBlockGetterX); + LocalContext context; + context->Global()->Set(v8_str("o"), templ->NewInstance()); + v8::Handle<Value> value = CompileRun( + "function inc(x) { return x + 1; };" + "inc(1);" + "proto1 = new Object();" + "proto2 = new Object();" + "o.__proto__ = proto1;" + "proto1.__proto__ = proto2;" + "proto2.y = inc;" + // Invoke it many times to compile a stub + "for (var i = 0; i < 7; i++) {" + " o.y(42);" + "}" + "proto1.y = function(x) { return x - 1; };" + "var result = 0;" + "for (var i = 0; i < 7; i++) {" + " result += o.y(42);" + "}"); + CHECK_EQ(41 * 7, value->Int32Value()); +} + + +// Test the case when we stored constant function into +// a stub, but it got invalidated later on due to override on +// global object which is between interceptor and constant function' holders. +THREADED_TEST(InterceptorCallICInvalidatedConstantFunctionViaGlobal) { + v8::HandleScope scope; + v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(); + templ->SetNamedPropertyHandler(NoBlockGetterX); + LocalContext context; + context->Global()->Set(v8_str("o"), templ->NewInstance()); + v8::Handle<Value> value = CompileRun( + "function inc(x) { return x + 1; };" + "inc(1);" + "o.__proto__ = this;" + "this.__proto__.y = inc;" + // Invoke it many times to compile a stub + "for (var i = 0; i < 7; i++) {" + " if (o.y(42) != 43) throw 'oops: ' + o.y(42);" + "}" + "this.y = function(x) { return x - 1; };" + "var result = 0;" + "for (var i = 0; i < 7; i++) {" + " result += o.y(42);" + "}"); + CHECK_EQ(41 * 7, value->Int32Value()); +} + + static int interceptor_call_count = 0; static v8::Handle<Value> InterceptorICRefErrorGetter(Local<String> name, @@ -5768,6 +6216,7 @@ THREADED_TEST(NestedHandleScopeAndContexts) { THREADED_TEST(ExternalAllocatedMemory) { v8::HandleScope outer; + v8::Persistent<Context> env = Context::New(); const int kSize = 1024*1024; CHECK_EQ(v8::V8::AdjustAmountOfExternalAllocatedMemory(kSize), kSize); CHECK_EQ(v8::V8::AdjustAmountOfExternalAllocatedMemory(-kSize), 0); @@ -7081,3 +7530,164 @@ THREADED_TEST(ReplaceConstantFunction) { obj_clone->Set(foo_string, v8::String::New("Hello")); CHECK(!obj->Get(foo_string)->IsUndefined()); } + + +// Regression test for http://crbug.com/16276. +THREADED_TEST(Regress16276) { + v8::HandleScope scope; + LocalContext context; + // Force the IC in f to be a dictionary load IC. + CompileRun("function f(obj) { return obj.x; }\n" + "var obj = { x: { foo: 42 }, y: 87 };\n" + "var x = obj.x;\n" + "delete obj.y;\n" + "for (var i = 0; i < 5; i++) f(obj);"); + // Detach the global object to make 'this' refer directly to the + // global object (not the proxy), and make sure that the dictionary + // load IC doesn't mess up loading directly from the global object. 
+ context->DetachGlobal(); + CHECK_EQ(42, CompileRun("f(this).foo")->Int32Value()); +} + + +THREADED_TEST(PixelArray) { + v8::HandleScope scope; + LocalContext context; + const int kElementCount = 40; + uint8_t* pixel_data = reinterpret_cast<uint8_t*>(malloc(kElementCount)); + i::Handle<i::PixelArray> pixels = i::Factory::NewPixelArray(kElementCount, + pixel_data); + i::Heap::CollectAllGarbage(); // Force GC to trigger verification. + for (int i = 0; i < kElementCount; i++) { + pixels->set(i, i); + } + i::Heap::CollectAllGarbage(); // Force GC to trigger verification. + for (int i = 0; i < kElementCount; i++) { + CHECK_EQ(i, pixels->get(i)); + CHECK_EQ(i, pixel_data[i]); + } + + v8::Handle<v8::Object> obj = v8::Object::New(); + i::Handle<i::JSObject> jsobj = v8::Utils::OpenHandle(*obj); + // Set the elements to be the pixels. + // jsobj->set_elements(*pixels); + obj->SetIndexedPropertiesToPixelData(pixel_data, kElementCount); + CHECK_EQ(1, i::Smi::cast(jsobj->GetElement(1))->value()); + obj->Set(v8_str("field"), v8::Int32::New(1503)); + context->Global()->Set(v8_str("pixels"), obj); + v8::Handle<v8::Value> result = CompileRun("pixels.field"); + CHECK_EQ(1503, result->Int32Value()); + result = CompileRun("pixels[1]"); + CHECK_EQ(1, result->Int32Value()); + result = CompileRun("var sum = 0;" + "for (var i = 0; i < 8; i++) {" + " sum += pixels[i];" + "}" + "sum;"); + CHECK_EQ(28, result->Int32Value()); + + i::Handle<i::Smi> value(i::Smi::FromInt(2)); + i::SetElement(jsobj, 1, value); + CHECK_EQ(2, i::Smi::cast(jsobj->GetElement(1))->value()); + *value.location() = i::Smi::FromInt(256); + i::SetElement(jsobj, 1, value); + CHECK_EQ(255, i::Smi::cast(jsobj->GetElement(1))->value()); + *value.location() = i::Smi::FromInt(-1); + i::SetElement(jsobj, 1, value); + CHECK_EQ(0, i::Smi::cast(jsobj->GetElement(1))->value()); + + result = CompileRun("for (var i = 0; i < 8; i++) {" + " pixels[i] = (i * 65) - 109;" + "}" + "pixels[1] + pixels[6];"); + CHECK_EQ(255, result->Int32Value()); + CHECK_EQ(0, i::Smi::cast(jsobj->GetElement(0))->value()); + CHECK_EQ(0, i::Smi::cast(jsobj->GetElement(1))->value()); + CHECK_EQ(21, i::Smi::cast(jsobj->GetElement(2))->value()); + CHECK_EQ(86, i::Smi::cast(jsobj->GetElement(3))->value()); + CHECK_EQ(151, i::Smi::cast(jsobj->GetElement(4))->value()); + CHECK_EQ(216, i::Smi::cast(jsobj->GetElement(5))->value()); + CHECK_EQ(255, i::Smi::cast(jsobj->GetElement(6))->value()); + CHECK_EQ(255, i::Smi::cast(jsobj->GetElement(7))->value()); + result = CompileRun("var sum = 0;" + "for (var i = 0; i < 8; i++) {" + " sum += pixels[i];" + "}" + "sum;"); + CHECK_EQ(984, result->Int32Value()); + + result = CompileRun("for (var i = 0; i < 8; i++) {" + " pixels[i] = (i * 1.1);" + "}" + "pixels[1] + pixels[6];"); + CHECK_EQ(8, result->Int32Value()); + CHECK_EQ(0, i::Smi::cast(jsobj->GetElement(0))->value()); + CHECK_EQ(1, i::Smi::cast(jsobj->GetElement(1))->value()); + CHECK_EQ(2, i::Smi::cast(jsobj->GetElement(2))->value()); + CHECK_EQ(3, i::Smi::cast(jsobj->GetElement(3))->value()); + CHECK_EQ(4, i::Smi::cast(jsobj->GetElement(4))->value()); + CHECK_EQ(6, i::Smi::cast(jsobj->GetElement(5))->value()); + CHECK_EQ(7, i::Smi::cast(jsobj->GetElement(6))->value()); + CHECK_EQ(8, i::Smi::cast(jsobj->GetElement(7))->value()); + + result = CompileRun("for (var i = 0; i < 8; i++) {" + " pixels[7] = undefined;" + "}" + "pixels[7];"); + CHECK_EQ(0, result->Int32Value()); + CHECK_EQ(0, i::Smi::cast(jsobj->GetElement(7))->value()); + + result = CompileRun("for (var i = 0; i < 8; i++) {" + " 
pixels[6] = '2.3';" + "}" + "pixels[6];"); + CHECK_EQ(2, result->Int32Value()); + CHECK_EQ(2, i::Smi::cast(jsobj->GetElement(6))->value()); + + result = CompileRun("for (var i = 0; i < 8; i++) {" + " pixels[5] = NaN;" + "}" + "pixels[5];"); + CHECK_EQ(0, result->Int32Value()); + CHECK_EQ(0, i::Smi::cast(jsobj->GetElement(5))->value()); + + result = CompileRun("for (var i = 0; i < 8; i++) {" + " pixels[8] = Infinity;" + "}" + "pixels[8];"); + CHECK_EQ(255, result->Int32Value()); + CHECK_EQ(255, i::Smi::cast(jsobj->GetElement(8))->value()); + + result = CompileRun("for (var i = 0; i < 8; i++) {" + " pixels[9] = -Infinity;" + "}" + "pixels[9];"); + CHECK_EQ(0, result->Int32Value()); + CHECK_EQ(0, i::Smi::cast(jsobj->GetElement(9))->value()); + + result = CompileRun("pixels[3] = 33;" + "delete pixels[3];" + "pixels[3];"); + CHECK_EQ(33, result->Int32Value()); + + result = CompileRun("pixels[0] = 10; pixels[1] = 11;" + "pixels[2] = 12; pixels[3] = 13;" + "pixels.__defineGetter__('2'," + "function() { return 120; });" + "pixels[2];"); + CHECK_EQ(12, result->Int32Value()); + + result = CompileRun("var js_array = new Array(40);" + "js_array[0] = 77;" + "js_array;"); + CHECK_EQ(77, v8::Object::Cast(*result)->Get(v8_str("0"))->Int32Value()); + + result = CompileRun("pixels[1] = 23;" + "pixels.__proto__ = [];" + "js_array.__proto__ = pixels;" + "js_array.concat(pixels);"); + CHECK_EQ(77, v8::Object::Cast(*result)->Get(v8_str("0"))->Int32Value()); + CHECK_EQ(23, v8::Object::Cast(*result)->Get(v8_str("1"))->Int32Value()); + + free(pixel_data); +} diff --git a/deps/v8/test/cctest/test-ast.cc b/deps/v8/test/cctest/test-ast.cc index 205434806c..9931f5607a 100644 --- a/deps/v8/test/cctest/test-ast.cc +++ b/deps/v8/test/cctest/test-ast.cc @@ -35,11 +35,11 @@ using namespace v8::internal; TEST(List) { - List<Node*>* list = new List<Node*>(0); + List<AstNode*>* list = new List<AstNode*>(0); CHECK_EQ(0, list->length()); ZoneScope zone_scope(DELETE_ON_EXIT); - Node* node = new EmptyStatement(); + AstNode* node = new EmptyStatement(); list->Add(node); CHECK_EQ(1, list->length()); CHECK_EQ(node, list->at(0)); diff --git a/deps/v8/test/cctest/test-debug.cc b/deps/v8/test/cctest/test-debug.cc index fddd000289..9e2c38dbee 100644 --- a/deps/v8/test/cctest/test-debug.cc +++ b/deps/v8/test/cctest/test-debug.cc @@ -4875,7 +4875,7 @@ TEST(DebugBreakInMessageHandler) { v8::Debug::SetMessageHandler2(DebugBreakMessageHandler); // Test functions. - const char* script = "function f() { debugger; } function g() { }"; + const char* script = "function f() { debugger; g(); } function g() { }"; CompileRun(script); v8::Local<v8::Function> f = v8::Local<v8::Function>::Cast(env->Global()->Get(v8::String::New("f"))); @@ -4954,8 +4954,10 @@ TEST(RegExpDebugBreak) { v8::Debug::DebugBreak(); result = f->Call(env->Global(), argc, argv); - CHECK_EQ(20, break_point_hit_count); - CHECK_EQ("exec", last_function_hit); + // Check that there was only one break event. Matching RegExp should not + // cause Break events. + CHECK_EQ(1, break_point_hit_count); + CHECK_EQ("f", last_function_hit); } #endif // V8_NATIVE_REGEXP @@ -5295,3 +5297,63 @@ TEST(ProvisionalBreakpointOnLineOutOfRange) { ClearBreakPointFromJS(sbp2); v8::Debug::SetMessageHandler2(NULL); } + + +static void BreakMessageHandler(const v8::Debug::Message& message) { + if (message.IsEvent() && message.GetEvent() == v8::Break) { + // Count the number of breaks. 
+ break_point_hit_count++; + + v8::HandleScope scope; + v8::Handle<v8::String> json = message.GetJSON(); + + SendContinueCommand(); + } else if (message.IsEvent() && message.GetEvent() == v8::AfterCompile) { + v8::HandleScope scope; + + bool is_debug_break = i::StackGuard::IsDebugBreak(); + // Force DebugBreak flag while serializer is working. + i::StackGuard::DebugBreak(); + + // Force serialization to trigger some internal JS execution. + v8::Handle<v8::String> json = message.GetJSON(); + + // Restore previous state. + if (is_debug_break) { + i::StackGuard::DebugBreak(); + } else { + i::StackGuard::Continue(i::DEBUGBREAK); + } + } +} + + +// Test that if DebugBreak is forced it is ignored when code from +// debug-delay.js is executed. +TEST(NoDebugBreakInAfterCompileMessageHandler) { + v8::HandleScope scope; + DebugLocalContext env; + + // Register a debug event listener which sets the break flag and counts. + v8::Debug::SetMessageHandler2(BreakMessageHandler); + + // Set the debug break flag. + v8::Debug::DebugBreak(); + + // Create a function for testing stepping. + const char* src = "function f() { eval('var x = 10;'); } "; + v8::Local<v8::Function> f = CompileFunction(&env, src, "f"); + + // There should be only one break event. + CHECK_EQ(1, break_point_hit_count); + + // Set the debug break flag again. + v8::Debug::DebugBreak(); + f->Call(env->Global(), 0, NULL); + // There should be one more break event when the script is evaluated in 'f'. + CHECK_EQ(2, break_point_hit_count); + + // Get rid of the debug message handler. + v8::Debug::SetMessageHandler2(NULL); + CheckDebuggerUnloaded(); +} diff --git a/deps/v8/test/cctest/test-heap.cc b/deps/v8/test/cctest/test-heap.cc index 396bcc50b6..6b5907c2e8 100644 --- a/deps/v8/test/cctest/test-heap.cc +++ b/deps/v8/test/cctest/test-heap.cc @@ -36,7 +36,7 @@ TEST(HeapMaps) { InitializeVM(); CheckMap(Heap::meta_map(), MAP_TYPE, Map::kSize); CheckMap(Heap::heap_number_map(), HEAP_NUMBER_TYPE, HeapNumber::kSize); - CheckMap(Heap::fixed_array_map(), FIXED_ARRAY_TYPE, Array::kAlignedSize); + CheckMap(Heap::fixed_array_map(), FIXED_ARRAY_TYPE, FixedArray::kHeaderSize); CheckMap(Heap::long_string_map(), LONG_STRING_TYPE, SeqTwoByteString::kAlignedSize); } @@ -653,7 +653,7 @@ TEST(JSArray) { uint32_t int_length = 0; CHECK(Array::IndexFromObject(length, &int_length)); CHECK_EQ(length, array->length()); - CHECK(!array->HasFastElements()); // Must be in slow mode. + CHECK(array->HasDictionaryElements()); // Must be in slow mode. // array[length] = name. array->SetElement(int_length, name); diff --git a/deps/v8/test/cctest/test-log.cc b/deps/v8/test/cctest/test-log.cc index f3f7efc710..46ae13b180 100644 --- a/deps/v8/test/cctest/test-log.cc +++ b/deps/v8/test/cctest/test-log.cc @@ -4,8 +4,12 @@ #ifdef ENABLE_LOGGING_AND_PROFILING -#include "v8.h" +#ifdef __linux__ +#include <signal.h> +#include <unistd.h> +#endif +#include "v8.h" #include "log.h" #include "cctest.h" @@ -144,6 +148,23 @@ class LoggerTestHelper : public AllStatic { using v8::internal::LoggerTestHelper; +// Under Linux, we need to check if signals were delivered to avoid false +// positives. Under other platforms profiling is done via a high-priority +// thread, so this case never happen. 
+static bool was_sigprof_received = true; +#ifdef __linux__ + +struct sigaction old_sigprof_handler; + +static void SigProfSignalHandler(int signal, siginfo_t* info, void* context) { + if (signal != SIGPROF) return; + was_sigprof_received = true; + old_sigprof_handler.sa_sigaction(signal, info, context); +} + +#endif // __linux__ + + static int CheckThatProfilerWorks(int log_pos) { Logger::ResumeProfiler(); CHECK(LoggerTestHelper::IsSamplerActive()); @@ -160,6 +181,18 @@ static int CheckThatProfilerWorks(int log_pos) { const char* code_creation = "\ncode-creation,"; // eq. to /^code-creation,/ CHECK_NE(NULL, strstr(buffer.start(), code_creation)); +#ifdef __linux__ + // Intercept SIGPROF handler to make sure that the test process + // had received it. Under load, system can defer it causing test failure. + // It is important to execute this after 'ResumeProfiler'. + was_sigprof_received = false; + struct sigaction sa; + sa.sa_sigaction = SigProfSignalHandler; + sigemptyset(&sa.sa_mask); + sa.sa_flags = SA_SIGINFO; + CHECK_EQ(0, sigaction(SIGPROF, &sa, &old_sigprof_handler)); +#endif // __linux__ + // Force compiler to generate new code by parametrizing source. EmbeddedVector<char, 100> script_src; i::OS::SNPrintF(script_src, @@ -170,6 +203,8 @@ static int CheckThatProfilerWorks(int log_pos) { const double end_time = i::OS::TimeCurrentMillis() + 200; while (i::OS::TimeCurrentMillis() < end_time) { CompileAndRunScript(script_src.start()); + // Yield CPU to give Profiler thread a chance to process ticks. + i::OS::Sleep(1); } Logger::PauseProfiler(); @@ -189,7 +224,8 @@ static int CheckThatProfilerWorks(int log_pos) { buffer[log_size] = '\0'; const char* tick = "\ntick,"; CHECK_NE(NULL, strstr(buffer.start(), code_creation)); - CHECK_NE(NULL, strstr(buffer.start(), tick)); + const bool ticks_found = strstr(buffer.start(), tick) != NULL; + CHECK_EQ(was_sigprof_received, ticks_found); return log_pos; } diff --git a/deps/v8/test/cctest/test-mark-compact.cc b/deps/v8/test/cctest/test-mark-compact.cc index 8db7339b54..743375d3ec 100644 --- a/deps/v8/test/cctest/test-mark-compact.cc +++ b/deps/v8/test/cctest/test-mark-compact.cc @@ -86,7 +86,8 @@ TEST(Promotion) { v8::HandleScope sc; // Allocate a fixed array in the new space. - int array_size = (Heap::MaxObjectSizeInPagedSpace() - Array::kHeaderSize) / + int array_size = + (Heap::MaxObjectSizeInPagedSpace() - FixedArray::kHeaderSize) / (kPointerSize * 4); Object* obj = Heap::AllocateFixedArray(array_size); CHECK(!obj->IsFailure()); @@ -118,7 +119,7 @@ TEST(NoPromotion) { CHECK(Heap::CollectGarbage(0, OLD_POINTER_SPACE)); // Allocate a big Fixed array in the new space. 
- int size = (Heap::MaxObjectSizeInPagedSpace() - Array::kHeaderSize) / + int size = (Heap::MaxObjectSizeInPagedSpace() - FixedArray::kHeaderSize) / kPointerSize; Object* obj = Heap::AllocateFixedArray(size); diff --git a/deps/v8/test/cctest/test-regexp.cc b/deps/v8/test/cctest/test-regexp.cc index 33a83c7795..8d8326ccad 100644 --- a/deps/v8/test/cctest/test-regexp.cc +++ b/deps/v8/test/cctest/test-regexp.cc @@ -35,7 +35,7 @@ #include "zone-inl.h" #include "parser.h" #include "ast.h" -#include "jsregexp-inl.h" +#include "jsregexp.h" #include "regexp-macro-assembler.h" #include "regexp-macro-assembler-irregexp.h" #ifdef V8_TARGET_ARCH_ARM diff --git a/deps/v8/test/message/message.status b/deps/v8/test/message/message.status index d40151e8d4..9afaa0f148 100644 --- a/deps/v8/test/message/message.status +++ b/deps/v8/test/message/message.status @@ -32,13 +32,13 @@ bugs: FAIL [ $arch == x64 ] -simple-throw.js: FAIL -try-catch-finally-throw-in-catch-and-finally.js: FAIL -try-catch-finally-throw-in-catch.js: FAIL -try-catch-finally-throw-in-finally.js: FAIL -try-finally-throw-in-finally.js: FAIL -try-finally-throw-in-try-and-finally.js: FAIL -try-finally-throw-in-try.js: FAIL -overwritten-builtins.js: FAIL -regress-73.js: FAIL -regress-75.js: FAIL +simple-throw: FAIL +try-catch-finally-throw-in-catch-and-finally: FAIL +try-catch-finally-throw-in-catch: FAIL +try-catch-finally-throw-in-finally: FAIL +try-finally-throw-in-finally: FAIL +try-finally-throw-in-try-and-finally: FAIL +try-finally-throw-in-try: FAIL +overwritten-builtins: FAIL +regress/regress-73: FAIL +regress/regress-75: FAIL diff --git a/deps/v8/test/mjsunit/debug-stepin-accessor.js b/deps/v8/test/mjsunit/debug-stepin-accessor.js new file mode 100644 index 0000000000..8b24c3c14f --- /dev/null +++ b/deps/v8/test/mjsunit/debug-stepin-accessor.js @@ -0,0 +1,248 @@ +// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-debug-as debug
+
+// Get the Debug object exposed from the debug context global object.
+Debug = debug.Debug;
+
+var exception = null;
+var state = 1;
+var expected_source_line_text = null;
+var expected_function_name = null;
+
+// Simple debug event handler which on the first break causes a 'step in'
+// action and then, on the second break, checks that execution is paused
+// inside the expected accessor (expected_function_name) at the expected
+// source line (expected_source_line_text).
+function listener(event, exec_state, event_data, data) {
+ try {
+ if (event == Debug.DebugEvent.Break) {
+ if (state == 1) {
+ exec_state.prepareStep(Debug.StepAction.StepIn, 2);
+ state = 2;
+ } else if (state == 2) {
+ assertEquals(expected_source_line_text,
+ event_data.sourceLineText());
+ assertEquals(expected_function_name, event_data.func().name());
+ state = 3;
+ }
+ }
+ } catch(e) {
+ exception = e;
+ }
+};
+
+// Add the debug event listener.
+Debug.setListener(listener);
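+
+// How the state machine above plays out (a descriptive note): state 1 is
+// the initial 'debugger' break, where the listener asks for two 'step in'
+// actions, enough to leave the 'debugger' statement and enter the accessor
+// used on the following line. State 2 is the resulting break, where the
+// pause location is checked, and state 3 marks success. The driver loop at
+// the bottom of this file requires state to reach 3 for every test.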
+
+
+var c = {
+ name: 'name ',
+ get getter1() {
+ return this.name; // getter 1
+ },
+ get getter2() {
+ return { // getter 2
+ 'a': c.name
+ };
+ },
+ set setter1(n) {
+ this.name = n; // setter 1
+ }
+};
+
+c.__defineGetter__('y', function getterY() {
+ return this.name; // getter y
+});
+
+c.__defineGetter__(3, function getter3() {
+ return this.name; // getter 3
+});
+
+c.__defineSetter__('y', function setterY(n) {
+ this.name = n; // setter y
+});
+
+c.__defineSetter__(3, function setter3(n) {
+ this.name = n; // setter 3
+});
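+
+// Note that the accessors are installed in two different ways: via get/set
+// syntax in the object literal above, and via __defineGetter__ and
+// __defineSetter__ here. The numeric name 3 gives 'c' indexed accessors,
+// so keyed loads and stores through accessors are exercised as well.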
+
+var d = {
+ 'c': c,
+};
+
+function testGetter1_1() {
+ expected_function_name = 'getter1';
+ expected_source_line_text = ' return this.name; // getter 1';
+ debugger;
+ var x = c.getter1;
+}
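+
+// Every test function below follows the same shape as testGetter1_1:
+// record which accessor and source line the listener should land on, break
+// with 'debugger', and then touch the property through a different
+// syntactic path (dot access, bracket access, a property name computed at
+// run time, a nested object, or an accessor inherited from the prototype).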
+
+function testGetter1_2() {
+ expected_function_name = 'getter1';
+ expected_source_line_text = ' return this.name; // getter 1';
+ debugger;
+ var x = c['getter1'];
+}
+
+function testGetter1_3() {
+ expected_function_name = 'getter1';
+ expected_source_line_text = ' return this.name; // getter 1';
+ debugger;
+ for (var i = 1; i < 2; i++) {
+ var x = c['getter' + i];
+ }
+}
+
+function testGetter1_4() {
+ expected_function_name = 'getter1';
+ expected_source_line_text = ' return this.name; // getter 1';
+ debugger;
+ var x = d.c.getter1;
+}
+
+function testGetter1_5() {
+ expected_function_name = 'getter1';
+ expected_source_line_text = ' return this.name; // getter 1';
+ for (var i = 2; i != 1; i--);
+ debugger;
+ var x = d.c['getter' + i];
+}
+
+function testGetter2_1() {
+ expected_function_name = 'getter2';
+ expected_source_line_text = ' return { // getter 2';
+ for (var i = 2; i != 1; i--);
+ debugger;
+ var t = d.c.getter2.name;
+}
+
+
+function testGetterY_1() {
+ expected_function_name = 'getterY';
+ expected_source_line_text = ' return this.name; // getter y';
+ debugger;
+ var t = d.c.y;
+}
+
+function testIndexedGetter3_1() {
+ expected_function_name = 'getter3';
+ expected_source_line_text = ' return this.name; // getter 3';
+ debugger;
+ var r = d.c[3];
+}
+
+function testSetterY_1() {
+ expected_function_name = 'setterY';
+ expected_source_line_text = ' this.name = n; // setter y';
+ debugger;
+ d.c.y = 'www';
+}
+
+function testIndexedSetter3_1() {
+ expected_function_name = 'setter3';
+ expected_source_line_text = ' this.name = n; // setter 3';
+ var i = 3;
+ debugger;
+ d.c[3] = 'www';
+}
+
+function testSetter1_1() {
+ expected_function_name = 'setter1';
+ expected_source_line_text = ' this.name = n; // setter 1';
+ debugger;
+ d.c.setter1 = 'aa';
+}
+
+function testSetter1_2() {
+ expected_function_name = 'setter1';
+ expected_source_line_text = ' this.name = n; // setter 1';
+ debugger;
+ d.c['setter1'] = 'bb';
+}
+
+function testSetter1_3() {
+ expected_function_name = 'setter1';
+ expected_source_line_text = ' this.name = n; // setter 1';
+ for (var i = 2; i != 1; i--);
+ debugger;
+ d.c['setter' + i] = i;
+}
+
+var e = {
+ name: 'e'
+};
+e.__proto__ = c;
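+
+// 'e' has no accessors of its own; it inherits those of 'c' through the
+// prototype chain, so the testProto* functions below verify that stepping
+// also enters getters and setters found on a prototype.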
+
+function testProtoGetter1_1() {
+ expected_function_name = 'getter1';
+ expected_source_line_text = ' return this.name; // getter 1';
+ debugger;
+ var x = e.getter1;
+}
+
+function testProtoSetter1_1() {
+ expected_function_name = 'setter1';
+ expected_source_line_text = ' this.name = n; // setter 1';
+ debugger;
+ e.setter1 = 'aa';
+}
+
+function testProtoIndexedGetter3_1() {
+ expected_function_name = 'getter3';
+ expected_source_line_text = ' return this.name; // getter 3';
+ debugger;
+ var x = e[3];
+}
+
+function testProtoIndexedSetter3_1() {
+ expected_function_name = 'setter3';
+ expected_source_line_text = ' this.name = n; // setter 3';
+ debugger;
+ e[3] = 'new val';
+}
+
+function testProtoSetter1_2() {
+ expected_function_name = 'setter1';
+ expected_source_line_text = ' this.name = n; // setter 1';
+ for (var i = 2; i != 1; i--);
+ debugger;
+ e['setter' + i] = 'aa';
+}
+
+for (var n in this) {
+ if (n.substr(0, 4) != 'test') {
+ continue;
+ }
+ state = 1;
+ this[n]();
+ assertNull(exception);
+ assertEquals(3, state);
+}
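+
+// The loop above auto-discovers the tests: it runs every global function
+// whose name starts with 'test', resetting the state machine each time and
+// requiring that the listener reached state 3 without throwing.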
+
+// Get rid of the debug event listener.
+Debug.setListener(null);
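+
+// Illustrative sketch, not part of the original test: with the listener
+// removed, the accessors defined above behave like plain property access.
+c.setter1 = 'plain';
+assertEquals('plain', c.getter1);
+assertEquals('plain', c[3]);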
diff --git a/deps/v8/test/mjsunit/debug-stepin-builtin.js b/deps/v8/test/mjsunit/debug-stepin-builtin.js new file mode 100644 index 0000000000..c6a97eac01 --- /dev/null +++ b/deps/v8/test/mjsunit/debug-stepin-builtin.js @@ -0,0 +1,78 @@ +// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-debug-as debug
+
+// Get the Debug object exposed from the debug context global object.
+Debug = debug.Debug;
+
+var exception = null;
+var state = 1;
+var expected_source_line_text = null;
+var expected_function_name = null;
+
+// Simple debug event handler which on the first break causes a 'step in'
+// action and then, on the second break, checks that execution is paused
+// inside the function named by expected_function_name.
+function listener(event, exec_state, event_data, data) {
+ try {
+ if (event == Debug.DebugEvent.Break) {
+ if (state == 1) {
+ exec_state.prepareStep(Debug.StepAction.StepIn, 2);
+ state = 2;
+ } else if (state == 2) {
+ assertEquals(expected_function_name, event_data.func().name());
+ assertEquals(expected_source_line_text,
+ event_data.sourceLineText());
+ state = 3;
+ }
+ }
+ } catch(e) {
+ exception = e;
+ }
+};
+
+// Add the debug event listener.
+Debug.setListener(listener);
+
+var a = [1,2,3,4,5];
+
+// Test step into function call from a function without local variables.
+function testStepInArraySlice() {
+ expected_function_name = 'testStepInArraySlice';
+ expected_source_line_text = '} // expected line';
+ debugger;
+ var s = Array.prototype.slice.call(a, 2, 3);
+} // expected line
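+
+// Array.prototype.slice is a built-in, so the 'step in' requested by the
+// listener does not stop inside it; the debugger instead pauses after the
+// call completes, on the closing brace marked above.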
+
+state = 1;
+testStepInArraySlice();
+assertNull(exception);
+assertEquals(3, state);
+
+// Get rid of the debug event listener.
+Debug.setListener(null);
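+
+// Illustrative sketch, not part of the original test: the call under test
+// extracts the one-element slice [3].
+var s2 = Array.prototype.slice.call(a, 2, 3);
+assertEquals(1, s2.length);
+assertEquals(3, s2[0]);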
diff --git a/deps/v8/test/mjsunit/mjsunit.status b/deps/v8/test/mjsunit/mjsunit.status index 962e4d313c..0e9102c553 100644 --- a/deps/v8/test/mjsunit/mjsunit.status +++ b/deps/v8/test/mjsunit/mjsunit.status @@ -58,6 +58,8 @@ debug-ignore-breakpoints: CRASH || FAIL debug-multiple-breakpoints: CRASH || FAIL debug-setbreakpoint: CRASH || FAIL || PASS debug-step-stub-callfunction: SKIP +debug-stepin-accessor: CRASH || FAIL +debug-stepin-builtin: CRASH || FAIL debug-stepin-constructor: CRASH, FAIL debug-stepin-function-call: CRASH || FAIL debug-step: SKIP @@ -69,40 +71,37 @@ regress/regress-269: SKIP # Fails on real ARM hardware but not on the simulator. string-compare-alignment: PASS || FAIL +# Times out often in release mode on ARM. +array-splice: PASS || TIMEOUT [ $arch == x64 ] -debug-backtrace.js: CRASH || FAIL -date-parse.js: CRASH || FAIL -debug-backtrace-text.js: CRASH || FAIL -debug-multiple-breakpoints.js: CRASH || FAIL -debug-breakpoints.js: CRASH || FAIL -debug-changebreakpoint.js: CRASH || FAIL -debug-clearbreakpoint.js: CRASH || FAIL -debug-conditional-breakpoints.js: CRASH || FAIL -debug-constructor.js: CRASH || FAIL -debug-continue.js: CRASH || FAIL -debug-enable-disable-breakpoints.js: CRASH || FAIL -debug-evaluate-recursive.js: CRASH || FAIL -debug-event-listener.js: CRASH || FAIL -debug-evaluate.js: CRASH || FAIL -debug-ignore-breakpoints.js: CRASH || FAIL -debug-setbreakpoint.js: CRASH || FAIL -debug-step-stub-callfunction.js: CRASH || FAIL -debug-step.js: CRASH || FAIL -mirror-date.js: CRASH || FAIL -invalid-lhs.js: CRASH || FAIL -debug-stepin-constructor.js: CRASH || FAIL -new.js: CRASH || FAIL -fuzz-natives.js: CRASH || FAIL -greedy.js: CRASH || FAIL -debug-handle.js: CRASH || FAIL -string-indexof.js: CRASH || FAIL -debug-clearbreakpointgroup.js: CRASH || FAIL -regress/regress-269.js: CRASH || FAIL -div-mod.js: CRASH || FAIL -unicode-test.js: CRASH || FAIL -regress/regress-392.js: CRASH || FAIL -regress/regress-1200351.js: CRASH || FAIL -regress/regress-998565.js: CRASH || FAIL -tools/tickprocessor.js: CRASH || FAIL +debug-backtrace: CRASH || FAIL +debug-backtrace-text: CRASH || FAIL +debug-multiple-breakpoints: CRASH || FAIL +debug-breakpoints: CRASH || FAIL +debug-changebreakpoint: CRASH || FAIL +debug-clearbreakpoint: CRASH || FAIL +debug-conditional-breakpoints: CRASH || FAIL +debug-constructor: CRASH || FAIL +debug-continue: CRASH || FAIL +debug-enable-disable-breakpoints: CRASH || FAIL +debug-evaluate-recursive: CRASH || FAIL +debug-event-listener: CRASH || FAIL +debug-evaluate: CRASH || FAIL +debug-ignore-breakpoints: CRASH || FAIL +debug-setbreakpoint: CRASH || FAIL +debug-step-stub-callfunction: CRASH || FAIL +debug-step: CRASH || FAIL +debug-stepin-builtin: CRASH || FAIL +debug-stepin-constructor: CRASH || FAIL +debug-stepin-function-call: CRASH || FAIL +debug-stepin-accessor: CRASH || FAIL +fuzz-natives: PASS || TIMEOUT +debug-handle: CRASH || FAIL +debug-clearbreakpointgroup: CRASH || FAIL +regress/regress-269: CRASH || FAIL +regress/regress-392: CRASH || FAIL +regress/regress-1200351: CRASH || FAIL +regress/regress-998565: CRASH || FAIL +tools/tickprocessor: PASS || CRASH || FAIL diff --git a/deps/v8/test/mjsunit/regexp-call-as-function.js b/deps/v8/test/mjsunit/regexp-call-as-function.js new file mode 100644 index 0000000000..4cbe7f94f3 --- /dev/null +++ b/deps/v8/test/mjsunit/regexp-call-as-function.js @@ -0,0 +1,36 @@ +// Copyright 2009 the V8 project authors. All rights reserved. 
+// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Test that regular expressions can be called as functions. Calling +// a regular expression as a function corresponds to calling it's exec +// method. + +var regexp = /a(b)(c)/; +var subject = "xyzabcde"; +var expected = 'abc,b,c'; +assertEquals(expected, String(regexp.exec(subject))); +assertEquals(expected, String(regexp(subject))); diff --git a/deps/v8/test/mjsunit/regress/regress-155924.js b/deps/v8/test/mjsunit/regress/regress-155924.js new file mode 100644 index 0000000000..666e3ba5ce --- /dev/null +++ b/deps/v8/test/mjsunit/regress/regress-155924.js @@ -0,0 +1,46 @@ +// Copyright 2009 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// A HeapNumber with certain bits in the mantissa of the floating point +// value should not be able to masquerade as a string in a keyed lookup +// inline cache stub. See http://codereview.chromium.org/155924. + +A = [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 ]; + +function foo() { + x = 1 << 26; + x = x * x; + // The following floating-point heap number has a second word similar + // to that of the string "5": + // 2^52 + index << cached_index_shift + cached_index_tag + x = x + (5 << 2) + (1 << 1); + return A[x]; +} + +assertEquals(undefined, foo(), "First lookup A[bad_float]"); +assertEquals(undefined, foo(), "Second lookup A[bad_float]"); +assertEquals(undefined, foo(), "Third lookup A[bad_float]"); diff --git a/deps/v8/test/mjsunit/regress/regress-345.js b/deps/v8/test/mjsunit/regress/regress-345.js new file mode 100644 index 0000000000..f7f28a1a2d --- /dev/null +++ b/deps/v8/test/mjsunit/regress/regress-345.js @@ -0,0 +1,51 @@ +// Copyright 2009 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Escaping to the same target from both the try and catch blocks of +// try...catch...finally should not fail at compile-time. +// +// Reported by nth10sd. 
+// See http://code.google.com/p/v8/issues/detail?id=345 + +do { + try { + continue; + } catch (e) { + continue; + } finally { + } +} while (false); + + +L: { + try { + break L; + } catch (e) { + break L; + } finally { + } +} diff --git a/deps/v8/test/mjsunit/regress/regress-406.js b/deps/v8/test/mjsunit/regress/regress-406.js new file mode 100644 index 0000000000..f48a5de916 --- /dev/null +++ b/deps/v8/test/mjsunit/regress/regress-406.js @@ -0,0 +1,69 @@ +// Copyright 2009 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Test of constant folding of boolean-valued expressions. + +// See http://code.google.com/p/v8/issues/detail?id=406 + +assertFalse(typeof(0) == "zero"); +assertTrue(typeof(0) != "zero"); + +// The and and or truth tables with both operands constant. +assertFalse(typeof(0) == "zero" && typeof(0) == "zero"); +assertFalse(typeof(0) == "zero" && typeof(0) != "zero"); +assertFalse(typeof(0) != "zero" && typeof(0) == "zero"); +assertTrue(typeof(0) != "zero" && typeof(0) != "zero"); + +assertFalse(typeof(0) == "zero" || typeof(0) == "zero"); +assertTrue(typeof(0) == "zero" || typeof(0) != "zero"); +assertTrue(typeof(0) != "zero" || typeof(0) == "zero"); +assertTrue(typeof(0) != "zero" || typeof(0) != "zero"); + +// Same with just the left operand constant. +// Helper function to prevent simple constant folding. +function one() { return 1; } + +assertFalse(typeof(0) == "zero" && one() < 0); +assertFalse(typeof(0) == "zero" && one() > 0); +assertFalse(typeof(0) != "zero" && one() < 0); +assertTrue(typeof(0) != "zero" && one() > 0); + +assertFalse(typeof(0) == "zero" || one() < 0); +assertTrue(typeof(0) == "zero" || one() > 0); +assertTrue(typeof(0) != "zero" || one() < 0); +assertTrue(typeof(0) != "zero" || one() > 0); + +// Same with just the right operand constant. 
+assertFalse(one() < 0 && typeof(0) == "zero"); +assertFalse(one() < 0 && typeof(0) != "zero"); +assertFalse(one() > 0 && typeof(0) == "zero"); +assertTrue(one() > 0 && typeof(0) != "zero"); + +assertFalse(one() < 0 || typeof(0) == "zero"); +assertTrue(one() < 0 || typeof(0) != "zero"); +assertTrue(one() > 0 || typeof(0) == "zero"); +assertTrue(one() > 0 || typeof(0) != "zero"); diff --git a/deps/v8/test/mjsunit/stack-traces.js b/deps/v8/test/mjsunit/stack-traces.js index e457ece3c5..3bb5755aed 100644 --- a/deps/v8/test/mjsunit/stack-traces.js +++ b/deps/v8/test/mjsunit/stack-traces.js @@ -84,9 +84,26 @@ function testAnonymousMethod() { (function () { FAIL }).call([1, 2, 3]); } +function CustomError(message, stripPoint) { + this.message = message; + Error.captureStackTrace(this, stripPoint); +} + +CustomError.prototype.toString = function () { + return "CustomError: " + this.message; +}; + +function testDefaultCustomError() { + throw new CustomError("hep-hey", undefined); +} + +function testStrippedCustomError() { + throw new CustomError("hep-hey", CustomError); +} + // Utility function for testing that the expected strings occur // in the stack trace produced when running the given function. -function testTrace(fun, expected) { +function testTrace(fun, expected, unexpected) { var threw = false; try { fun(); @@ -94,6 +111,11 @@ function testTrace(fun, expected) { for (var i = 0; i < expected.length; i++) { assertTrue(e.stack.indexOf(expected[i]) != -1); } + if (unexpected) { + for (var i = 0; i < unexpected.length; i++) { + assertEquals(e.stack.indexOf(unexpected[i]), -1); + } + } threw = true; } assertTrue(threw); @@ -165,6 +187,10 @@ testTrace(testValue, ["at Number.causeError"]); testTrace(testConstructor, ["new Plonk"]); testTrace(testRenamedMethod, ["Wookie.a$b$c$d [as d]"]); testTrace(testAnonymousMethod, ["Array.<anonymous>"]); +testTrace(testDefaultCustomError, ["hep-hey", "new CustomError"], + ["collectStackTrace"]); +testTrace(testStrippedCustomError, ["hep-hey"], ["new CustomError", + "collectStackTrace"]); testCallerCensorship(); testUnintendedCallerCensorship(); diff --git a/deps/v8/test/mjsunit/tools/codemap.js b/deps/v8/test/mjsunit/tools/codemap.js index 55b8758835..06a91e8102 100644 --- a/deps/v8/test/mjsunit/tools/codemap.js +++ b/deps/v8/test/mjsunit/tools/codemap.js @@ -46,11 +46,11 @@ function assertNoEntry(codeMap, addr) { }; -(function testStaticCode() { +(function testLibrariesAndStaticCode() { var codeMap = new devtools.profiler.CodeMap(); - codeMap.addStaticCode(0x1500, newCodeEntry(0x3000, 'lib1')); - codeMap.addStaticCode(0x15500, newCodeEntry(0x5000, 'lib2')); - codeMap.addStaticCode(0x155500, newCodeEntry(0x10000, 'lib3')); + codeMap.addLibrary(0x1500, newCodeEntry(0x3000, 'lib1')); + codeMap.addLibrary(0x15500, newCodeEntry(0x5000, 'lib2')); + codeMap.addLibrary(0x155500, newCodeEntry(0x10000, 'lib3')); assertNoEntry(codeMap, 0); assertNoEntry(codeMap, 0x1500 - 1); assertEntry(codeMap, 'lib1', 0x1500); @@ -71,6 +71,28 @@ function assertNoEntry(codeMap, addr) { assertEntry(codeMap, 'lib3', 0x155500 + 0x10000 - 1); assertNoEntry(codeMap, 0x155500 + 0x10000); assertNoEntry(codeMap, 0xFFFFFFFF); + + codeMap.addStaticCode(0x1510, newCodeEntry(0x30, 'lib1-f1')); + codeMap.addStaticCode(0x1600, newCodeEntry(0x50, 'lib1-f2')); + codeMap.addStaticCode(0x15520, newCodeEntry(0x100, 'lib2-f1')); + assertEntry(codeMap, 'lib1', 0x1500); + assertEntry(codeMap, 'lib1', 0x1510 - 1); + assertEntry(codeMap, 'lib1-f1', 0x1510); + assertEntry(codeMap, 'lib1-f1', 0x1510 + 0x15); + 
assertEntry(codeMap, 'lib1-f1', 0x1510 + 0x30 - 1); + assertEntry(codeMap, 'lib1', 0x1510 + 0x30); + assertEntry(codeMap, 'lib1', 0x1600 - 1); + assertEntry(codeMap, 'lib1-f2', 0x1600); + assertEntry(codeMap, 'lib1-f2', 0x1600 + 0x30); + assertEntry(codeMap, 'lib1-f2', 0x1600 + 0x50 - 1); + assertEntry(codeMap, 'lib1', 0x1600 + 0x50); + assertEntry(codeMap, 'lib2', 0x15500); + assertEntry(codeMap, 'lib2', 0x15520 - 1); + assertEntry(codeMap, 'lib2-f1', 0x15520); + assertEntry(codeMap, 'lib2-f1', 0x15520 + 0x80); + assertEntry(codeMap, 'lib2-f1', 0x15520 + 0x100 - 1); + assertEntry(codeMap, 'lib2', 0x15520 + 0x100); + })(); diff --git a/deps/v8/test/mjsunit/tools/profile.js b/deps/v8/test/mjsunit/tools/profile.js index 49eef3b0aa..9ed851b1af 100644 --- a/deps/v8/test/mjsunit/tools/profile.js +++ b/deps/v8/test/mjsunit/tools/profile.js @@ -72,10 +72,10 @@ ProfileTestDriver.prototype.funcAddrs_ = { ProfileTestDriver.prototype.addFunctions_ = function() { - this.profile.addStaticCode('lib1', 0x11000, 0x12000); + this.profile.addLibrary('lib1', 0x11000, 0x12000); this.profile.addStaticCode('lib1-f1', 0x11100, 0x11900); this.profile.addStaticCode('lib1-f2', 0x11200, 0x11500); - this.profile.addStaticCode('lib2', 0x21000, 0x22000); + this.profile.addLibrary('lib2', 0x21000, 0x22000); this.profile.addStaticCode('lib2-f1', 0x21100, 0x21900); this.profile.addStaticCode('lib2-f2', 0x21200, 0x21500); this.profile.addCode('T', 'F1', 0x50100, 0x100); diff --git a/deps/v8/test/mjsunit/tools/tickprocessor-test.default b/deps/v8/test/mjsunit/tools/tickprocessor-test.default index a689ea8c38..702f4bcae8 100644 --- a/deps/v8/test/mjsunit/tools/tickprocessor-test.default +++ b/deps/v8/test/mjsunit/tools/tickprocessor-test.default @@ -6,20 +6,19 @@ Statistical profiling result from v8.log, (13 ticks, 2 unaccounted, 0 excluded). [Shared libraries]: ticks total nonlib name - 2 15.4% 0.0% /lib32/libm-2.7.so + 3 23.1% 0.0% /lib32/libm-2.7.so 1 7.7% 0.0% ffffe000-fffff000 [JavaScript]: ticks total nonlib name - 1 7.7% 10.0% LazyCompile: exp native math.js:41 + 1 7.7% 11.1% LazyCompile: exp native math.js:41 [C++]: ticks total nonlib name - 2 15.4% 20.0% v8::internal::Runtime_Math_exp(v8::internal::Arguments) - 1 7.7% 10.0% v8::internal::JSObject::LocalLookupRealNamedProperty(v8::internal::String*, v8::internal::LookupResult*) - 1 7.7% 10.0% v8::internal::HashTable<v8::internal::StringDictionaryShape, v8::internal::String*>::FindEntry(v8::internal::String*) - 1 7.7% 10.0% fegetexcept - 1 7.7% 10.0% exp + 2 15.4% 22.2% v8::internal::Runtime_Math_exp(v8::internal::Arguments) + 1 7.7% 11.1% v8::internal::JSObject::LocalLookupRealNamedProperty(v8::internal::String*, v8::internal::LookupResult*) + 1 7.7% 11.1% v8::internal::HashTable<v8::internal::StringDictionaryShape, v8::internal::String*>::FindEntry(v8::internal::String*) + 1 7.7% 11.1% exp [GC]: ticks total nonlib name @@ -31,11 +30,11 @@ Statistical profiling result from v8.log, (13 ticks, 2 unaccounted, 0 excluded). Callers occupying less than 2.0% are not shown. 
ticks parent name - 2 15.4% v8::internal::Runtime_Math_exp(v8::internal::Arguments) - 2 100.0% LazyCompile: exp native math.js:41 - 2 100.0% Script: exp.js + 3 23.1% /lib32/libm-2.7.so + 3 100.0% LazyCompile: exp native math.js:41 + 3 100.0% Script: exp.js - 2 15.4% /lib32/libm-2.7.so + 2 15.4% v8::internal::Runtime_Math_exp(v8::internal::Arguments) 2 100.0% LazyCompile: exp native math.js:41 2 100.0% Script: exp.js @@ -47,10 +46,6 @@ Statistical profiling result from v8.log, (13 ticks, 2 unaccounted, 0 excluded). 1 7.7% ffffe000-fffff000 - 1 7.7% fegetexcept - 1 100.0% LazyCompile: exp native math.js:41 - 1 100.0% Script: exp.js - 1 7.7% exp 1 100.0% LazyCompile: exp native math.js:41 1 100.0% Script: exp.js diff --git a/deps/v8/test/mjsunit/tools/tickprocessor-test.ignore-unknown b/deps/v8/test/mjsunit/tools/tickprocessor-test.ignore-unknown index 87beb08427..306d646c1a 100644 --- a/deps/v8/test/mjsunit/tools/tickprocessor-test.ignore-unknown +++ b/deps/v8/test/mjsunit/tools/tickprocessor-test.ignore-unknown @@ -2,20 +2,19 @@ Statistical profiling result from v8.log, (13 ticks, 2 unaccounted, 0 excluded). [Shared libraries]: ticks total nonlib name - 2 18.2% 0.0% /lib32/libm-2.7.so + 3 27.3% 0.0% /lib32/libm-2.7.so 1 9.1% 0.0% ffffe000-fffff000 [JavaScript]: ticks total nonlib name - 1 9.1% 12.5% LazyCompile: exp native math.js:41 + 1 9.1% 14.3% LazyCompile: exp native math.js:41 [C++]: ticks total nonlib name - 2 18.2% 25.0% v8::internal::Runtime_Math_exp(v8::internal::Arguments) - 1 9.1% 12.5% v8::internal::JSObject::LocalLookupRealNamedProperty(v8::internal::String*, v8::internal::LookupResult*) - 1 9.1% 12.5% v8::internal::HashTable<v8::internal::StringDictionaryShape, v8::internal::String*>::FindEntry(v8::internal::String*) - 1 9.1% 12.5% fegetexcept - 1 9.1% 12.5% exp + 2 18.2% 28.6% v8::internal::Runtime_Math_exp(v8::internal::Arguments) + 1 9.1% 14.3% v8::internal::JSObject::LocalLookupRealNamedProperty(v8::internal::String*, v8::internal::LookupResult*) + 1 9.1% 14.3% v8::internal::HashTable<v8::internal::StringDictionaryShape, v8::internal::String*>::FindEntry(v8::internal::String*) + 1 9.1% 14.3% exp [GC]: ticks total nonlib name @@ -27,11 +26,11 @@ Statistical profiling result from v8.log, (13 ticks, 2 unaccounted, 0 excluded). Callers occupying less than 2.0% are not shown. ticks parent name - 2 18.2% v8::internal::Runtime_Math_exp(v8::internal::Arguments) - 2 100.0% LazyCompile: exp native math.js:41 - 2 100.0% Script: exp.js + 3 27.3% /lib32/libm-2.7.so + 3 100.0% LazyCompile: exp native math.js:41 + 3 100.0% Script: exp.js - 2 18.2% /lib32/libm-2.7.so + 2 18.2% v8::internal::Runtime_Math_exp(v8::internal::Arguments) 2 100.0% LazyCompile: exp native math.js:41 2 100.0% Script: exp.js @@ -43,10 +42,6 @@ Statistical profiling result from v8.log, (13 ticks, 2 unaccounted, 0 excluded). 1 9.1% ffffe000-fffff000 - 1 9.1% fegetexcept - 1 100.0% LazyCompile: exp native math.js:41 - 1 100.0% Script: exp.js - 1 9.1% exp 1 100.0% LazyCompile: exp native math.js:41 1 100.0% Script: exp.js diff --git a/deps/v8/test/mjsunit/tools/tickprocessor-test.separate-ic b/deps/v8/test/mjsunit/tools/tickprocessor-test.separate-ic index 7eb3d9a7ae..3a2041b52f 100644 --- a/deps/v8/test/mjsunit/tools/tickprocessor-test.separate-ic +++ b/deps/v8/test/mjsunit/tools/tickprocessor-test.separate-ic @@ -6,22 +6,21 @@ Statistical profiling result from v8.log, (13 ticks, 2 unaccounted, 0 excluded). 
[Shared libraries]: ticks total nonlib name - 2 15.4% 0.0% /lib32/libm-2.7.so + 3 23.1% 0.0% /lib32/libm-2.7.so 1 7.7% 0.0% ffffe000-fffff000 [JavaScript]: ticks total nonlib name - 1 7.7% 10.0% LoadIC: j - 1 7.7% 10.0% LoadIC: i - 1 7.7% 10.0% LazyCompile: exp native math.js:41 + 1 7.7% 11.1% LoadIC: j + 1 7.7% 11.1% LoadIC: i + 1 7.7% 11.1% LazyCompile: exp native math.js:41 [C++]: ticks total nonlib name - 2 15.4% 20.0% v8::internal::Runtime_Math_exp(v8::internal::Arguments) - 1 7.7% 10.0% v8::internal::JSObject::LocalLookupRealNamedProperty(v8::internal::String*, v8::internal::LookupResult*) - 1 7.7% 10.0% v8::internal::HashTable<v8::internal::StringDictionaryShape, v8::internal::String*>::FindEntry(v8::internal::String*) - 1 7.7% 10.0% fegetexcept - 1 7.7% 10.0% exp + 2 15.4% 22.2% v8::internal::Runtime_Math_exp(v8::internal::Arguments) + 1 7.7% 11.1% v8::internal::JSObject::LocalLookupRealNamedProperty(v8::internal::String*, v8::internal::LookupResult*) + 1 7.7% 11.1% v8::internal::HashTable<v8::internal::StringDictionaryShape, v8::internal::String*>::FindEntry(v8::internal::String*) + 1 7.7% 11.1% exp [GC]: ticks total nonlib name @@ -33,11 +32,11 @@ Statistical profiling result from v8.log, (13 ticks, 2 unaccounted, 0 excluded). Callers occupying less than 2.0% are not shown. ticks parent name - 2 15.4% v8::internal::Runtime_Math_exp(v8::internal::Arguments) - 2 100.0% LazyCompile: exp native math.js:41 - 2 100.0% Script: exp.js + 3 23.1% /lib32/libm-2.7.so + 3 100.0% LazyCompile: exp native math.js:41 + 3 100.0% Script: exp.js - 2 15.4% /lib32/libm-2.7.so + 2 15.4% v8::internal::Runtime_Math_exp(v8::internal::Arguments) 2 100.0% LazyCompile: exp native math.js:41 2 100.0% Script: exp.js @@ -49,10 +48,6 @@ Statistical profiling result from v8.log, (13 ticks, 2 unaccounted, 0 excluded). 
1 7.7% ffffe000-fffff000 - 1 7.7% fegetexcept - 1 100.0% LazyCompile: exp native math.js:41 - 1 100.0% Script: exp.js - 1 7.7% exp 1 100.0% LazyCompile: exp native math.js:41 1 100.0% Script: exp.js diff --git a/deps/v8/test/mjsunit/tools/tickprocessor.js b/deps/v8/test/mjsunit/tools/tickprocessor.js index 587106ac0a..00c3fb176b 100644 --- a/deps/v8/test/mjsunit/tools/tickprocessor.js +++ b/deps/v8/test/mjsunit/tools/tickprocessor.js @@ -31,6 +31,7 @@ // Files: tools/logreader.js tools/tickprocessor.js // Env: TEST_FILE_NAME + (function testArgumentsProcessor() { var p_default = new ArgumentsProcessor([]); assertTrue(p_default.parse()); @@ -69,12 +70,12 @@ ' U operator delete[](void*)@@GLIBCXX_3.4', '08049790 T _init', '08049f50 T _start', - '08139150 t v8::internal::Runtime_StringReplaceRegExpWithString(v8::internal::Arguments)', - '08139ca0 T v8::internal::Runtime::GetElementOrCharAt(v8::internal::Handle<v8::internal::Object>, unsigned int)', - '0813a0b0 t v8::internal::Runtime_DebugGetPropertyDetails(v8::internal::Arguments)', - '08181d30 W v8::internal::RegExpMacroAssemblerIrregexp::stack_limit_slack()', + '08139150 00000b4b t v8::internal::Runtime_StringReplaceRegExpWithString(v8::internal::Arguments)', + '08139ca0 000003f1 T v8::internal::Runtime::GetElementOrCharAt(v8::internal::Handle<v8::internal::Object>, unsigned int)', + '0813a0b0 00000855 t v8::internal::Runtime_DebugGetPropertyDetails(v8::internal::Arguments)', + '0818b220 00000036 W v8::internal::RegExpMacroAssembler::CheckPosition(int, v8::internal::Label*)', ' w __gmon_start__', - '081f08a0 B stdout' + '081f08a0 00000004 B stdout\n' ].join('\n'), '']; }; @@ -87,22 +88,22 @@ assertEquals( [['_init', 0x08049790, 0x08049f50], ['_start', 0x08049f50, 0x08139150], - ['v8::internal::Runtime_StringReplaceRegExpWithString(v8::internal::Arguments)', 0x08139150, 0x08139ca0], - ['v8::internal::Runtime::GetElementOrCharAt(v8::internal::Handle<v8::internal::Object>, unsigned int)', 0x08139ca0, 0x0813a0b0], - ['v8::internal::Runtime_DebugGetPropertyDetails(v8::internal::Arguments)', 0x0813a0b0, 0x08181d30], - ['v8::internal::RegExpMacroAssemblerIrregexp::stack_limit_slack()', 0x08181d30, 0x081ee000]], + ['v8::internal::Runtime_StringReplaceRegExpWithString(v8::internal::Arguments)', 0x08139150, 0x08139150 + 0xb4b], + ['v8::internal::Runtime::GetElementOrCharAt(v8::internal::Handle<v8::internal::Object>, unsigned int)', 0x08139ca0, 0x08139ca0 + 0x3f1], + ['v8::internal::Runtime_DebugGetPropertyDetails(v8::internal::Arguments)', 0x0813a0b0, 0x0813a0b0 + 0x855], + ['v8::internal::RegExpMacroAssembler::CheckPosition(int, v8::internal::Label*)', 0x0818b220, 0x0818b220 + 0x36]], shell_syms); // libc library UnixCppEntriesProvider.prototype.loadSymbols = function(libName) { this.symbols = [[ - '000162a0 T __libc_init_first', - '0002a5f0 T __isnan', - '0002a5f0 W isnan', - '0002aaa0 W scalblnf', - '0002aaa0 W scalbnf', - '0011a340 T __libc_thread_freeres', - '00128860 R _itoa_lower_digits'].join('\n'), '']; + '000162a0 00000005 T __libc_init_first', + '0002a5f0 0000002d T __isnan', + '0002a5f0 0000002d W isnan', + '0002aaa0 0000000d W scalblnf', + '0002aaa0 0000000d W scalbnf', + '0011a340 00000048 T __libc_thread_freeres', + '00128860 00000024 R _itoa_lower_digits\n'].join('\n'), '']; }; var libc_prov = new UnixCppEntriesProvider(); var libc_syms = []; @@ -110,17 +111,81 @@ function (name, start, end) { libc_syms.push(Array.prototype.slice.apply(arguments, [0])); }); - assertEquals( - [['__libc_init_first', 0xf7c5c000 + 0x000162a0, 0xf7c5c000 + 
0x0002a5f0], - ['isnan', 0xf7c5c000 + 0x0002a5f0, 0xf7c5c000 + 0x0002aaa0], - ['scalbnf', 0xf7c5c000 + 0x0002aaa0, 0xf7c5c000 + 0x0011a340], - ['__libc_thread_freeres', 0xf7c5c000 + 0x0011a340, 0xf7da5000]], - libc_syms); + var libc_ref_syms = [['__libc_init_first', 0x000162a0, 0x000162a0 + 0x5], + ['__isnan', 0x0002a5f0, 0x0002a5f0 + 0x2d], + ['scalblnf', 0x0002aaa0, 0x0002aaa0 + 0xd], + ['__libc_thread_freeres', 0x0011a340, 0x0011a340 + 0x48]]; + for (var i = 0; i < libc_ref_syms.length; ++i) { + libc_ref_syms[i][1] += 0xf7c5c000; + libc_ref_syms[i][2] += 0xf7c5c000; + } + assertEquals(libc_ref_syms, libc_syms); UnixCppEntriesProvider.prototype.loadSymbols = oldLoadSymbols; })(); +(function testMacCppEntriesProvider() { + var oldLoadSymbols = MacCppEntriesProvider.prototype.loadSymbols; + + // shell executable + MacCppEntriesProvider.prototype.loadSymbols = function(libName) { + this.symbols = [[ + ' U operator delete[]', + '00001000 A __mh_execute_header', + '00001b00 T start', + '00001b40 t dyld_stub_binding_helper', + '0011b710 T v8::internal::RegExpMacroAssembler::CheckPosition', + '00134250 t v8::internal::Runtime_StringReplaceRegExpWithString', + '00137220 T v8::internal::Runtime::GetElementOrCharAt', + '00137400 t v8::internal::Runtime_DebugGetPropertyDetails', + '001c1a80 b _private_mem\n' + ].join('\n'), '']; + }; + + var shell_prov = new MacCppEntriesProvider(); + var shell_syms = []; + shell_prov.parseVmSymbols('shell', 0x00001b00, 0x00163156, + function (name, start, end) { + shell_syms.push(Array.prototype.slice.apply(arguments, [0])); + }); + assertEquals( + [['start', 0x00001b00, 0x00001b40], + ['dyld_stub_binding_helper', 0x00001b40, 0x0011b710], + ['v8::internal::RegExpMacroAssembler::CheckPosition', 0x0011b710, 0x00134250], + ['v8::internal::Runtime_StringReplaceRegExpWithString', 0x00134250, 0x00137220], + ['v8::internal::Runtime::GetElementOrCharAt', 0x00137220, 0x00137400], + ['v8::internal::Runtime_DebugGetPropertyDetails', 0x00137400, 0x00163156]], + shell_syms); + + // stdc++ library + MacCppEntriesProvider.prototype.loadSymbols = function(libName) { + this.symbols = [[ + '0000107a T __gnu_cxx::balloc::__mini_vector<std::pair<__gnu_cxx::bitmap_allocator<char>::_Alloc_block*, __gnu_cxx::bitmap_allocator<char>::_Alloc_block*> >::__mini_vector', + '0002c410 T std::basic_streambuf<char, std::char_traits<char> >::pubseekoff', + '0002c488 T std::basic_streambuf<char, std::char_traits<char> >::pubseekpos', + '000466aa T ___cxa_pure_virtual\n'].join('\n'), '']; + }; + var stdc_prov = new MacCppEntriesProvider(); + var stdc_syms = []; + stdc_prov.parseVmSymbols('stdc++', 0x95728fb4, 0x95770005, + function (name, start, end) { + stdc_syms.push(Array.prototype.slice.apply(arguments, [0])); + }); + var stdc_ref_syms = [['__gnu_cxx::balloc::__mini_vector<std::pair<__gnu_cxx::bitmap_allocator<char>::_Alloc_block*, __gnu_cxx::bitmap_allocator<char>::_Alloc_block*> >::__mini_vector', 0x0000107a, 0x0002c410], + ['std::basic_streambuf<char, std::char_traits<char> >::pubseekoff', 0x0002c410, 0x0002c488], + ['std::basic_streambuf<char, std::char_traits<char> >::pubseekpos', 0x0002c488, 0x000466aa], + ['___cxa_pure_virtual', 0x000466aa, 0x95770005 - 0x95728fb4]]; + for (var i = 0; i < stdc_ref_syms.length; ++i) { + stdc_ref_syms[i][1] += 0x95728fb4; + stdc_ref_syms[i][2] += 0x95728fb4; + } + assertEquals(stdc_ref_syms, stdc_syms); + + MacCppEntriesProvider.prototype.loadSymbols = oldLoadSymbols; +})(); + + (function testWindowsCppEntriesProvider() { var oldLoadSymbols = 
WindowsCppEntriesProvider.prototype.loadSymbols; @@ -174,8 +239,8 @@ CppEntriesProviderMock.prototype.parseVmSymbols = function( ['v8::internal::HashTable<v8::internal::StringDictionaryShape, v8::internal::String*>::FindEntry(v8::internal::String*)', 0x080f8210, 0x080f8800], ['v8::internal::Runtime_Math_exp(v8::internal::Arguments)', 0x08123b20, 0x08123b80]], '/lib32/libm-2.7.so': - [['exp', startAddr + 0x00009e80, startAddr + 0x00009f30], - ['fegetexcept', startAddr + 0x000061e0, startAddr + 0x00008b10]], + [['exp', startAddr + 0x00009e80, startAddr + 0x00009e80 + 0xa3], + ['fegetexcept', startAddr + 0x000061e0, startAddr + 0x000061e0 + 0x15]], 'ffffe000-fffff000': []}; assertTrue(name in symbols); var syms = symbols[name]; @@ -191,6 +256,7 @@ function PrintMonitor(outputOrFileName) { var outputPos = 0; var diffs = this.diffs = []; var realOut = this.realOut = []; + var unexpectedOut = this.unexpectedOut = null; this.oldPrint = print; print = function(str) { @@ -198,13 +264,15 @@ function PrintMonitor(outputOrFileName) { for (var i = 0; i < strSplit.length; ++i) { s = strSplit[i]; realOut.push(s); - assertTrue(outputPos < expectedOut.length, - 'unexpected output: "' + s + '"'); - if (expectedOut[outputPos] != s) { - diffs.push('line ' + outputPos + ': expected <' + - expectedOut[outputPos] + '> found <' + s + '>\n'); + if (outputPos < expectedOut.length) { + if (expectedOut[outputPos] != s) { + diffs.push('line ' + outputPos + ': expected <' + + expectedOut[outputPos] + '> found <' + s + '>\n'); + } + outputPos++; + } else { + unexpectedOut = true; } - outputPos++; } }; }; @@ -218,9 +286,10 @@ PrintMonitor.prototype.loadExpectedOutput = function(fileName) { PrintMonitor.prototype.finish = function() { print = this.oldPrint; - if (this.diffs.length > 0) { + if (this.diffs.length > 0 || this.unexpectedOut != null) { print(this.realOut.join('\n')); assertEquals([], this.diffs); + assertNull(this.unexpectedOut); } }; diff --git a/deps/v8/test/mozilla/mozilla.status b/deps/v8/test/mozilla/mozilla.status index 13ae29c8e4..538b0a8144 100644 --- a/deps/v8/test/mozilla/mozilla.status +++ b/deps/v8/test/mozilla/mozilla.status @@ -321,10 +321,6 @@ js1_5/Regress/regress-179524: FAIL_OK js1_5/Regress/regress-172699: FAIL_OK -# Calls regexp objects with function call syntax; non-ECMA behavior. -js1_2/Objects/toString-001: FAIL_OK - - # Assumes that the prototype of a function is enumerable. Non-ECMA, # see section 15.3.3.1, page 86. ecma/GlobalObject/15.1.2.2-1: FAIL_OK @@ -338,6 +334,7 @@ ecma/GlobalObject/15.1.2.7: FAIL_OK # Tests that rely on specific details of function decompilation or # print strings for errors. Non-ECMA behavior. js1_2/function/tostring-2: FAIL_OK +js1_2/Objects/toString-001: FAIL_OK js1_5/Exceptions/regress-332472: FAIL_OK js1_5/Regress/regress-173067: FAIL_OK js1_5/Regress/regress-355556: FAIL_OK @@ -561,23 +558,11 @@ js1_5/Array/regress-350256-02: FAIL ecma_3/Function/regress-137181: FAIL -# Calls regexp objects with function call syntax; non-ECMA behavior. -ecma_2/RegExp/regress-001: FAIL -js1_2/regexp/regress-6359: FAIL -js1_2/regexp/regress-9141: FAIL -js1_5/Regress/regress-224956: FAIL -js1_5/Regress/regress-325925: FAIL -js1_2/regexp/simple_form: FAIL - - # Tests that rely on specific details of function decompilation or # print strings for errors. Non-ECMA behavior. js1_4/Regress/function-003: FAIL -# Relies on JavaScript 1.2 / 1.3 deprecated features. -js1_2/function/regexparg-1: FAIL - # 'export' and 'import' are not keywords in V8. 
ecma_2/Exceptions/lexical-010: FAIL ecma_2/Exceptions/lexical-022: FAIL diff --git a/deps/v8/tools/codemap.js b/deps/v8/tools/codemap.js index d6df7fa969..404127f236 100644 --- a/deps/v8/tools/codemap.js +++ b/deps/v8/tools/codemap.js @@ -48,11 +48,16 @@ devtools.profiler.CodeMap = function() { this.dynamicsNameGen_ = new devtools.profiler.CodeMap.NameGenerator(); /** - * Static code entries. Used for libraries code. + * Static code entries. Used for statically compiled code. */ this.statics_ = new goog.structs.SplayTree(); /** + * Library entries. Used for whole statically compiled libraries. + */ + this.libraries_ = new goog.structs.SplayTree(); + + /** * Map of memory pages occupied with static code. */ this.pages_ = []; @@ -108,6 +113,19 @@ devtools.profiler.CodeMap.prototype.deleteCode = function(start) { /** + * Adds a library entry. + * + * @param {number} start The starting address. + * @param {devtools.profiler.CodeMap.CodeEntry} codeEntry Code entry object. + */ +devtools.profiler.CodeMap.prototype.addLibrary = function( + start, codeEntry) { + this.markPages_(start, start + codeEntry.size); + this.libraries_.insert(start, codeEntry); +}; + + +/** * Adds a static code entry. * * @param {number} start The starting address. @@ -115,7 +133,6 @@ devtools.profiler.CodeMap.prototype.deleteCode = function(start) { */ devtools.profiler.CodeMap.prototype.addStaticCode = function( start, codeEntry) { - this.markPages_(start, start + codeEntry.size); this.statics_.insert(start, codeEntry); }; @@ -157,7 +174,10 @@ devtools.profiler.CodeMap.prototype.findInTree_ = function(tree, addr) { devtools.profiler.CodeMap.prototype.findEntry = function(addr) { var pageAddr = addr >>> devtools.profiler.CodeMap.PAGE_ALIGNMENT; if (pageAddr in this.pages_) { - return this.findInTree_(this.statics_, addr); + // Static code entries can contain "holes" of unnamed code. + // In this case, the whole library is assigned to this address. + return this.findInTree_(this.statics_, addr) || + this.findInTree_(this.libraries_, addr); } var min = this.dynamics_.findMin(); var max = this.dynamics_.findMax(); @@ -176,7 +196,7 @@ devtools.profiler.CodeMap.prototype.findEntry = function(addr) { /** - * Returns an array of all dynamic code entries, including deleted ones. + * Returns an array of all dynamic code entries. */ devtools.profiler.CodeMap.prototype.getAllDynamicEntries = function() { return this.dynamics_.exportValues(); @@ -192,6 +212,14 @@ devtools.profiler.CodeMap.prototype.getAllStaticEntries = function() { /** + * Returns an array of all library entries. + */ +devtools.profiler.CodeMap.prototype.getAllLibrariesEntries = function() { + return this.libraries_.exportValues(); +}; + + /** * Creates a code entry object. * * @param {number} size Code entry size in bytes.
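The codemap.js change above splits static code into two splay trees: named static entries and whole-library entries, with findEntry falling back to the library when an address lands in an unnamed "hole". A minimal sketch of the intended lookup behavior (the addresses and names below are invented for illustration; CodeEntry is the codemap.js constructor that takes a size and a name):

  var map = new devtools.profiler.CodeMap();
  // The whole library covers [0x10000, 0x30000).
  map.addLibrary(0x10000, new devtools.profiler.CodeMap.CodeEntry(0x20000, 'libm'));
  // A named symbol covers only the first 0x100 bytes of it.
  map.addStaticCode(0x10000, new devtools.profiler.CodeMap.CodeEntry(0x100, 'exp'));
  map.findEntry(0x10050);  // Returns the named static entry 'exp'.
  map.findEntry(0x20050);  // A "hole": no static entry matches, so the
                           // lookup falls back to the 'libm' library entry.

Note that addLibrary, not addStaticCode, now marks the code pages, which is why a library entry must cover all the named symbols inside it.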
diff --git a/deps/v8/tools/gyp/v8.gyp b/deps/v8/tools/gyp/v8.gyp index b11a7ffaa1..837aa56ed9 100644 --- a/deps/v8/tools/gyp/v8.gyp +++ b/deps/v8/tools/gyp/v8.gyp @@ -52,6 +52,11 @@ 'V8_NATIVE_REGEXP', ], }], + ['target_arch=="x64"', { + 'defines': [ + 'V8_TARGET_ARCH_X64', + ], + }], ], 'configurations': { 'Debug': { @@ -254,6 +259,7 @@ '../../src/frames-inl.h', '../../src/frames.cc', '../../src/frames.h', + '../../src/frame-element.cc', '../../src/frame-element.h', '../../src/func-name-inferrer.cc', '../../src/func-name-inferrer.h', @@ -276,7 +282,6 @@ '../../src/jump-target.cc', '../../src/jump-target.h', '../../src/jump-target-inl.h', - '../../src/jsregexp-inl.h', '../../src/jsregexp.cc', '../../src/jsregexp.h', '../../src/list-inl.h', @@ -427,6 +432,34 @@ '../../src/ia32/virtual-frame-ia32.h', ], }], + ['target_arch=="x64"', { + 'include_dirs+': [ + '../../src/x64', + ], + 'sources': [ + '../../src/x64/assembler-x64-inl.h', + '../../src/x64/assembler-x64.cc', + '../../src/x64/assembler-x64.h', + '../../src/x64/builtins-x64.cc', + '../../src/x64/codegen-x64.cc', + '../../src/x64/codegen-x64.h', + '../../src/x64/cpu-x64.cc', + '../../src/x64/debug-x64.cc', + '../../src/x64/disasm-x64.cc', + '../../src/x64/frames-x64.cc', + '../../src/x64/frames-x64.h', + '../../src/x64/ic-x64.cc', + '../../src/x64/jump-target-x64.cc', + '../../src/x64/macro-assembler-x64.cc', + '../../src/x64/macro-assembler-x64.h', + #'../../src/x64/regexp-macro-assembler-x64.cc', + #'../../src/x64/regexp-macro-assembler-x64.h', + '../../src/x64/register-allocator-x64.cc', + '../../src/x64/stub-cache-x64.cc', + '../../src/x64/virtual-frame-x64.cc', + '../../src/x64/virtual-frame-x64.h', + ], + }], ['OS=="linux"', { 'link_settings': { 'libraries': [ diff --git a/deps/v8/tools/mac-nm b/deps/v8/tools/mac-nm new file mode 100755 index 0000000000..9c18177978 --- /dev/null +++ b/deps/v8/tools/mac-nm @@ -0,0 +1,18 @@ +#!/bin/sh + +# This script is a wrapper for the OS X nm(1) tool. nm(1) doesn't demangle C++ +# function names, so we pipe its output to the c++filt(1) tool, which does. +# But c++filt(1) comes with Xcode (as part of GNU binutils), so it isn't +# guaranteed to exist on a system. +# +# An alternative approach is to perform demangling in the tick processor, but +# for the GNU C++ ABI this is a complex process (see the cp-demangle.c sources) +# that can't be done partially: term boundaries are plain-text symbols, such +# as 'N' and 'E', so one can't just search through a mangled name; it really +# needs to be parsed, which requires coding in a lot of grammar knowledge. + +if [ "`which c++filt`" == "" ]; then + nm $@ +else + nm $@ | c++filt -p -i +fi diff --git a/deps/v8/tools/mac-tick-processor b/deps/v8/tools/mac-tick-processor new file mode 100755 index 0000000000..5fba622c9a --- /dev/null +++ b/deps/v8/tools/mac-tick-processor @@ -0,0 +1,6 @@ +#!/bin/sh + +# A wrapper script to call 'linux-tick-processor' with Mac-specific settings. + +tools_path=`cd $(dirname "$0");pwd` +$tools_path/linux-tick-processor --mac --nm=$tools_path/mac-nm $@
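The mac-nm comment above deserves a concrete illustration. In the GNU C++ (Itanium) mangling scheme, the components of a nested name are length-prefixed, so component boundaries are only known once the name has been parsed from its start; that is why demangling cannot be done partially. A toy JavaScript parser for just the plain _ZN<length><id>...E form (no templates or argument types; real demangling, as in cp-demangle.c, must handle the full grammar):

  function demangleNestedName(sym) {
    // Handles only the simple nested-name case, e.g. '_ZN2v88internal7RuntimeE'.
    if (sym.indexOf('_ZN') != 0) return null;
    var s = sym.substring(3), parts = [], pos = 0;
    while (pos < s.length && s.charAt(pos) != 'E') {
      var len = parseInt(s.substring(pos), 10);  // leading decimal length prefix
      if (isNaN(len)) return null;
      pos += String(len).length;  // skip the digits of the prefix
      parts.push(s.substr(pos, len));
      pos += len;
    }
    return parts.join('::');
  }
  // demangleNestedName('_ZN2v88internal7RuntimeE') => 'v8::internal::Runtime'.
  // Note the '88' run: only a left-to-right parse can tell which digits are
  // length prefixes and which belong to an identifier.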
diff --git a/deps/v8/tools/process-heap-prof.py b/deps/v8/tools/process-heap-prof.py new file mode 100755 index 0000000000..ff83952e0e --- /dev/null +++ b/deps/v8/tools/process-heap-prof.py @@ -0,0 +1,80 @@ +#!/usr/bin/env python +# +# Copyright 2009 the V8 project authors. All rights reserved. +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following +# disclaimer in the documentation and/or other materials provided +# with the distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# This is a utility for converting V8 heap logs into .hp files that can +# be further processed using the 'hp2ps' tool (bundled with GHC and Valgrind) +# to produce heap usage histograms. + +# Sample usage: +# $ ./shell --log-gc script.js +# $ tools/process-heap-prof.py v8.log | hp2ps -c > script-heap-graph.ps +# ('-c' enables color, see the hp2ps manual page for more options) +# or +# $ tools/process-heap-prof.py --js-cons-profile v8.log | hp2ps -c > script-heap-graph.ps +# to get a JS constructor profile + + +import csv, sys, time + +def process_logfile(filename, itemname): + first_call_time = None + sample_time = 0.0 + sampling = False + try: + logfile = open(filename, 'rb') + try: + logreader = csv.reader(logfile) + + print('JOB "v8"') + print('DATE "%s"' % time.asctime(time.localtime())) + print('SAMPLE_UNIT "seconds"') + print('VALUE_UNIT "bytes"') + + for row in logreader: + if row[0] == 'heap-sample-begin' and row[1] == 'Heap': + sample_time = float(row[3])/1000.0 + if first_call_time is None: + first_call_time = sample_time + sample_time -= first_call_time + print('BEGIN_SAMPLE %.2f' % sample_time) + sampling = True + elif row[0] == 'heap-sample-end' and row[1] == 'Heap': + print('END_SAMPLE %.2f' % sample_time) + sampling = False + elif row[0] == itemname and sampling: + print('%s %d' % (row[1], int(row[3]))) + finally: + logfile.close() + except IOError: + sys.exit('can\'t open %s' % filename) + +if sys.argv[1] == '--js-cons-profile': + process_logfile(sys.argv[2], 'heap-js-cons-item') +else: + process_logfile(sys.argv[1], 'heap-sample-item')
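To make the conversion concrete, here is a hypothetical --log-gc excerpt (all field values invented) followed by the .hp records the script prints for it; hp2ps then turns a series of such samples into a histogram:

  heap-sample-begin,"Heap","allocated",10500
  heap-sample-item,"Code",42,98304
  heap-sample-item,"Map",17,65536
  heap-sample-end,"Heap","allocated"

With the timestamp rebased to the first sample and converted from milliseconds to seconds, this becomes:

  BEGIN_SAMPLE 0.00
  Code 98304
  Map 65536
  END_SAMPLE 0.00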
diff --git a/deps/v8/tools/profile.js b/deps/v8/tools/profile.js index 614c635576..db4b542ff2 100644 --- a/deps/v8/tools/profile.js +++ b/deps/v8/tools/profile.js @@ -86,7 +86,23 @@ devtools.profiler.Profile.prototype.handleUnknownCode = function( /** - * Registers static (library) code entry. + * Registers a library. + * + * @param {string} name Code entry name. + * @param {number} startAddr Starting address. + * @param {number} endAddr Ending address. + */ +devtools.profiler.Profile.prototype.addLibrary = function( + name, startAddr, endAddr) { + var entry = new devtools.profiler.CodeMap.CodeEntry( + endAddr - startAddr, name); + this.codeMap_.addLibrary(startAddr, entry); + return entry; +}; + + +/** + * Registers a statically compiled code entry. + * + * @param {string} name Code entry name. + * @param {number} startAddr Starting address. diff --git a/deps/v8/tools/test.py b/deps/v8/tools/test.py index 05eb9fdc00..c1b8b80f66 100755 --- a/deps/v8/tools/test.py +++ b/deps/v8/tools/test.py @@ -1136,6 +1136,7 @@ def ProcessOptions(options): # was found, set the arch to the guess. if options.arch == 'none': options.arch = ARCH_GUESS + options.scons_flags.append("arch=" + options.arch) return True diff --git a/deps/v8/tools/tickprocessor-driver.js b/deps/v8/tools/tickprocessor-driver.js index f7cfd13f09..dc6779607a 100644 --- a/deps/v8/tools/tickprocessor-driver.js +++ b/deps/v8/tools/tickprocessor-driver.js @@ -37,11 +37,15 @@ function processArguments(args) { } } +var entriesProviders = { + 'unix': UnixCppEntriesProvider, + 'windows': WindowsCppEntriesProvider, + 'mac': MacCppEntriesProvider +}; var params = processArguments(arguments); var tickProcessor = new TickProcessor( - params.platform == 'unix' ? new UnixCppEntriesProvider(params.nm) : - new WindowsCppEntriesProvider(), + new (entriesProviders[params.platform])(params.nm), params.separateIc, params.ignoreUnknown, params.stateFilter); diff --git a/deps/v8/tools/tickprocessor.js b/deps/v8/tools/tickprocessor.js index c95a4e616e..34c6195d9a 100644 --- a/deps/v8/tools/tickprocessor.js +++ b/deps/v8/tools/tickprocessor.js @@ -174,7 +174,7 @@ TickProcessor.prototype.processLogFile = function(fileName) { TickProcessor.prototype.processSharedLibrary = function( name, startAddr, endAddr) { - var entry = this.profile_.addStaticCode(name, startAddr, endAddr); + var entry = this.profile_.addLibrary(name, startAddr, endAddr); this.setCodeType(entry.getName(), 'SHARED_LIB'); var self = this; @@ -380,14 +380,21 @@ CppEntriesProvider.prototype.parseVmSymbols = function( var prevEntry; - function addPrevEntry(end) { + function addEntry(funcInfo) { // Several functions can be mapped onto the same address. To avoid // creating zero-sized entries, skip such duplicates. // Also double-check that function belongs to the library address space.
- if (prevEntry && prevEntry.start < end && - prevEntry.start >= libStart && end <= libEnd) { - processorFunc(prevEntry.name, prevEntry.start, end); + if (prevEntry && !prevEntry.end && + prevEntry.start < funcInfo.start && + prevEntry.start >= libStart && funcInfo.start <= libEnd) { + processorFunc(prevEntry.name, prevEntry.start, funcInfo.start); } + if (funcInfo.end && + (!prevEntry || prevEntry.start != funcInfo.start) && + funcInfo.start >= libStart && funcInfo.end <= libEnd) { + processorFunc(funcInfo.name, funcInfo.start, funcInfo.end); + } + prevEntry = funcInfo; } while (true) { @@ -400,10 +407,12 @@ CppEntriesProvider.prototype.parseVmSymbols = function( if (funcInfo.start < libStart && funcInfo.start < libEnd - libStart) { funcInfo.start += libStart; } - addPrevEntry(funcInfo.start); - prevEntry = funcInfo; + if (funcInfo.size) { + funcInfo.end = funcInfo.start + funcInfo.size; + } + addEntry(funcInfo); } - addPrevEntry(libEnd); + addEntry({name: '', start: libEnd}); }; @@ -420,19 +429,17 @@ function UnixCppEntriesProvider(nmExec) { this.symbols = []; this.parsePos = 0; this.nmExec = nmExec; + this.FUNC_RE = /^([0-9a-fA-F]{8,16}) ([0-9a-fA-F]{8,16} )?[tTwW] (.*)$/; }; inherits(UnixCppEntriesProvider, CppEntriesProvider); -UnixCppEntriesProvider.FUNC_RE = /^([0-9a-fA-F]{8}) [tTwW] (.*)$/; - - UnixCppEntriesProvider.prototype.loadSymbols = function(libName) { this.parsePos = 0; try { this.symbols = [ - os.system(this.nmExec, ['-C', '-n', libName], -1, -1), - os.system(this.nmExec, ['-C', '-n', '-D', libName], -1, -1) + os.system(this.nmExec, ['-C', '-n', '-S', libName], -1, -1), + os.system(this.nmExec, ['-C', '-n', '-S', '-D', libName], -1, -1) ]; } catch (e) { // If the library cannot be found on this system let's not panic. @@ -454,8 +461,34 @@ UnixCppEntriesProvider.prototype.parseNextLine = function() { var line = this.symbols[0].substring(this.parsePos, lineEndPos); this.parsePos = lineEndPos + 1; - var fields = line.match(UnixCppEntriesProvider.FUNC_RE); - return fields ? { name: fields[2], start: parseInt(fields[1], 16) } : null; + var fields = line.match(this.FUNC_RE); + var funcInfo = null; + if (fields) { + funcInfo = { name: fields[3], start: parseInt(fields[1], 16) }; + if (fields[2]) { + funcInfo.size = parseInt(fields[2], 16); + } + } + return funcInfo; +}; + + +function MacCppEntriesProvider(nmExec) { + UnixCppEntriesProvider.call(this, nmExec); + // Note an empty group. It is required, as UnixCppEntriesProvider expects 3 groups. + this.FUNC_RE = /^([0-9a-fA-F]{8}) ()[iItT] (.*)$/; +}; +inherits(MacCppEntriesProvider, UnixCppEntriesProvider); + + +MacCppEntriesProvider.prototype.loadSymbols = function(libName) { + this.parsePos = 0; + try { + this.symbols = [os.system(this.nmExec, ['-n', '-f', libName], -1, -1), '']; + } catch (e) { + // If the library cannot be found on this system let's not panic. + this.symbols = ''; + } }; @@ -538,6 +571,8 @@ function ArgumentsProcessor(args) { 'Specify that we are running on *nix platform'], '--windows': ['platform', 'windows', 'Specify that we are running on Windows platform'], + '--mac': ['platform', 'mac', + 'Specify that we are running on Mac OS X platform'], '--nm': ['nm', 'nm', 'Specify the \'nm\' executable to use (e.g. 
--nm=/my_dir/nm)'] }; diff --git a/deps/v8/tools/v8.xcodeproj/project.pbxproj b/deps/v8/tools/v8.xcodeproj/project.pbxproj index 368ba3f3e6..45e63612e4 100755..100644 --- a/deps/v8/tools/v8.xcodeproj/project.pbxproj +++ b/deps/v8/tools/v8.xcodeproj/project.pbxproj @@ -59,6 +59,8 @@ 896FD03A0E78D717003DFB6A /* libv8-arm.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 89F23C870E78D5B2006B2466 /* libv8-arm.a */; }; 897F767F0E71B690007ACF34 /* shell.cc in Sources */ = {isa = PBXBuildFile; fileRef = 897FF1B50E719C0900D62E90 /* shell.cc */; }; 897F76850E71B6B1007ACF34 /* libv8.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 8970F2F00E719FB2006AE7B5 /* libv8.a */; }; + 8981F6001010501900D1520E /* frame-element.cc in Sources */ = {isa = PBXBuildFile; fileRef = 8981F5FE1010500F00D1520E /* frame-element.cc */; }; + 8981F6011010502800D1520E /* frame-element.cc in Sources */ = {isa = PBXBuildFile; fileRef = 8981F5FE1010500F00D1520E /* frame-element.cc */; }; 898BD20E0EF6CC930068B00A /* debug-ia32.cc in Sources */ = {isa = PBXBuildFile; fileRef = 898BD20D0EF6CC850068B00A /* debug-ia32.cc */; }; 898BD20F0EF6CC9A0068B00A /* debug-arm.cc in Sources */ = {isa = PBXBuildFile; fileRef = 898BD20C0EF6CC850068B00A /* debug-arm.cc */; }; 89A15C7B0EE466EB00B48DEB /* regexp-macro-assembler-ia32.cc in Sources */ = {isa = PBXBuildFile; fileRef = 89A15C720EE466D000B48DEB /* regexp-macro-assembler-ia32.cc */; }; @@ -503,12 +505,13 @@ 897FF1B70E719C2E00D62E90 /* macros.py */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.script.python; name = macros.py; path = ../src/macros.py; sourceTree = "<group>"; }; 897FF32F0FAA0ED200136CF6 /* version.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = version.cc; sourceTree = "<group>"; }; 897FF3300FAA0ED200136CF6 /* version.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = version.h; sourceTree = "<group>"; }; + 8981F5FE1010500F00D1520E /* frame-element.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "frame-element.cc"; sourceTree = "<group>"; }; + 8981F5FF1010500F00D1520E /* frame-element.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "frame-element.h"; sourceTree = "<group>"; }; 898BD20C0EF6CC850068B00A /* debug-arm.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "debug-arm.cc"; path = "arm/debug-arm.cc"; sourceTree = "<group>"; }; 898BD20D0EF6CC850068B00A /* debug-ia32.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "debug-ia32.cc"; path = "ia32/debug-ia32.cc"; sourceTree = "<group>"; }; 89A15C630EE4661A00B48DEB /* bytecodes-irregexp.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "bytecodes-irregexp.h"; sourceTree = "<group>"; }; 89A15C660EE4665300B48DEB /* interpreter-irregexp.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "interpreter-irregexp.cc"; sourceTree = "<group>"; }; 89A15C670EE4665300B48DEB /* interpreter-irregexp.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "interpreter-irregexp.h"; sourceTree = "<group>"; }; - 89A15C680EE4665300B48DEB /* jsregexp-inl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "jsregexp-inl.h"; sourceTree = "<group>"; }; 89A15C6D0EE466A900B48DEB 
/* platform-freebsd.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "platform-freebsd.cc"; sourceTree = "<group>"; }; 89A15C700EE466D000B48DEB /* regexp-macro-assembler-arm.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = "regexp-macro-assembler-arm.cc"; path = "arm/regexp-macro-assembler-arm.cc"; sourceTree = "<group>"; }; 89A15C710EE466D000B48DEB /* regexp-macro-assembler-arm.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "regexp-macro-assembler-arm.h"; path = "arm/regexp-macro-assembler-arm.h"; sourceTree = "<group>"; }; @@ -700,6 +703,8 @@ 89471C7F0EB23EE400B6874B /* flag-definitions.h */, 897FF1350E719B8F00D62E90 /* flags.cc */, 897FF1360E719B8F00D62E90 /* flags.h */, + 8981F5FE1010500F00D1520E /* frame-element.cc */, + 8981F5FF1010500F00D1520E /* frame-element.h */, 897FF1370E719B8F00D62E90 /* frames-arm.cc */, 897FF1380E719B8F00D62E90 /* frames-arm.h */, 897FF1390E719B8F00D62E90 /* frames-ia32.cc */, @@ -727,7 +732,6 @@ 897FF14D0E719B8F00D62E90 /* ic.h */, 89A15C660EE4665300B48DEB /* interpreter-irregexp.cc */, 89A15C670EE4665300B48DEB /* interpreter-irregexp.h */, - 89A15C680EE4665300B48DEB /* jsregexp-inl.h */, 897FF14E0E719B8F00D62E90 /* jsregexp.cc */, 897FF14F0E719B8F00D62E90 /* jsregexp.h */, 58950D4E0F55514900F3E8BA /* jump-target-arm.cc */, @@ -1196,6 +1200,7 @@ 58950D670F5551C400F3E8BA /* virtual-frame-ia32.cc in Sources */, 89A88E2E0E71A6D60043BA31 /* zone.cc in Sources */, 9F4B7B890FCC877A00DC4117 /* log-utils.cc in Sources */, + 8981F6001010501900D1520E /* frame-element.cc in Sources */, ); runOnlyForDeploymentPostprocessing = 0; }; @@ -1300,6 +1305,7 @@ 58950D690F5551CE00F3E8BA /* virtual-frame-arm.cc in Sources */, 89F23C820E78D5B2006B2466 /* zone.cc in Sources */, 9F4B7B8A0FCC877A00DC4117 /* log-utils.cc in Sources */, + 8981F6011010502800D1520E /* frame-element.cc in Sources */, ); runOnlyForDeploymentPostprocessing = 0; }; diff --git a/deps/v8/tools/visual_studio/v8_base.vcproj b/deps/v8/tools/visual_studio/v8_base.vcproj index bfdcec922c..9ebb1288b0 100644 --- a/deps/v8/tools/visual_studio/v8_base.vcproj +++ b/deps/v8/tools/visual_studio/v8_base.vcproj @@ -397,19 +397,23 @@ > </File> <File - RelativePath="..\..\src\ia32\frames-ia32.cc" + RelativePath="..\..\src\frame-element.cc" > </File> <File - RelativePath="..\..\src\ia32\frames-ia32.h" + RelativePath="..\..\src\frame-element.h" > </File> <File - RelativePath="..\..\src\frames-inl.h" + RelativePath="..\..\src\ia32\frames-ia32.cc" > </File> <File - RelativePath="..\..\src\frame-element.h" + RelativePath="..\..\src\ia32\frames-ia32.h" + > + </File> + <File + RelativePath="..\..\src\frames-inl.h" > </File> <File @@ -517,10 +521,6 @@ > </File> <File - RelativePath="..\..\src\jsregexp-inl.h" - > - </File> - <File RelativePath="..\..\src\jsregexp.cc" > </File> diff --git a/deps/v8/tools/visual_studio/v8_base_arm.vcproj b/deps/v8/tools/visual_studio/v8_base_arm.vcproj index 8ebe386c39..83ebb9b27f 100644 --- a/deps/v8/tools/visual_studio/v8_base_arm.vcproj +++ b/deps/v8/tools/visual_studio/v8_base_arm.vcproj @@ -401,6 +401,14 @@ > </File> <File + RelativePath="..\..\src\frame-element.cc" + > + </File> + <File + RelativePath="..\..\src\frame-element.h" + > + </File> + <File RelativePath="..\..\src\arm\frames-arm.cc" > </File> @@ -513,10 +521,6 @@ > </File> <File - RelativePath="..\..\src\jsregexp-inl.h" - > - </File> - <File RelativePath="..\..\src\jsregexp.cc" > </File> |
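A final note on the tickprocessor.js changes above: UnixCppEntriesProvider now invokes nm with -S, so a symbol line can carry an explicit size field, and a function's end address no longer has to be guessed from the next symbol's start. A short sketch of how the new FUNC_RE yields a sized entry, reusing the libm 'exp' symbol from the test data above (the composed nm line itself is illustrative):

  var FUNC_RE = /^([0-9a-fA-F]{8,16}) ([0-9a-fA-F]{8,16} )?[tTwW] (.*)$/;
  var fields = '00009e80 000000a3 t exp'.match(FUNC_RE);
  var funcInfo = { name: fields[3], start: parseInt(fields[1], 16) };
  if (fields[2]) {
    // The size column is optional, so old-style nm output still parses.
    funcInfo.size = parseInt(fields[2], 16);
  }
  // parseVmSymbols then computes funcInfo.end = funcInfo.start + funcInfo.size
  // (0x9e80 + 0xa3) instead of waiting for the next symbol's start address.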