Diffstat (limited to 'deps/v8/src')
289 files changed, 21028 insertions, 21457 deletions
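The api.cc changes below drop the IsDeadCheck()/dead-VM plumbing and push the public API toward explicit isolates: v8::ThrowException(value) now just forwards to Isolate::ThrowException(), v8::Undefined() and v8::True() take an Isolate*, and the new EscapableHandleScope lets a handle be returned out of a scope via a pre-reserved escape slot. A minimal caller-side sketch against this revision's v8.h (the helper name is illustrative, and the no-argument v8::Object::New() overload is assumed for this API era):

v8::Local<v8::Object> MakeBoxedObject(v8::Isolate* isolate) {
  // EscapableHandleScope reserves one extra slot (escape_slot_) up front;
  // Escape() copies the value into it so the handle outlives the scope.
  v8::EscapableHandleScope scope(isolate);
  v8::Local<v8::Object> result = v8::Object::New();  // assumed no-isolate overload
  if (result.IsEmpty()) {
    // Isolate-scoped throw, replacing the global v8::ThrowException(),
    // which the patch turns into a thin forwarder.
    isolate->ThrowException(v8::Undefined(isolate));
    return v8::Local<v8::Object>();
  }
  return scope.Escape(result);
}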
diff --git a/deps/v8/src/OWNERS b/deps/v8/src/OWNERS new file mode 100644 index 0000000000..f38fecad4e --- /dev/null +++ b/deps/v8/src/OWNERS @@ -0,0 +1,2 @@ +per-file i18n.*=cira@chromium.org +per-file i18n.*=mnita@google.com diff --git a/deps/v8/src/accessors.cc b/deps/v8/src/accessors.cc index 669c02baf3..50232661c1 100644 --- a/deps/v8/src/accessors.cc +++ b/deps/v8/src/accessors.cc @@ -78,6 +78,61 @@ MaybeObject* Accessors::ReadOnlySetAccessor(Isolate* isolate, } +static V8_INLINE bool CheckForName(Handle<String> name, + String* property_name, + int offset, + int* object_offset) { + if (name->Equals(property_name)) { + *object_offset = offset; + return true; + } + return false; +} + + +bool Accessors::IsJSObjectFieldAccessor( + Handle<Map> map, Handle<String> name, + int* object_offset) { + Isolate* isolate = map->GetIsolate(); + switch (map->instance_type()) { + case JS_ARRAY_TYPE: + return + CheckForName(name, isolate->heap()->length_string(), + JSArray::kLengthOffset, object_offset); + case JS_TYPED_ARRAY_TYPE: + return + CheckForName(name, isolate->heap()->length_string(), + JSTypedArray::kLengthOffset, object_offset) || + CheckForName(name, isolate->heap()->byte_length_string(), + JSTypedArray::kByteLengthOffset, object_offset) || + CheckForName(name, isolate->heap()->byte_offset_string(), + JSTypedArray::kByteOffsetOffset, object_offset) || + CheckForName(name, isolate->heap()->buffer_string(), + JSTypedArray::kBufferOffset, object_offset); + case JS_ARRAY_BUFFER_TYPE: + return + CheckForName(name, isolate->heap()->byte_length_string(), + JSArrayBuffer::kByteLengthOffset, object_offset); + case JS_DATA_VIEW_TYPE: + return + CheckForName(name, isolate->heap()->byte_length_string(), + JSDataView::kByteLengthOffset, object_offset) || + CheckForName(name, isolate->heap()->byte_offset_string(), + JSDataView::kByteOffsetOffset, object_offset) || + CheckForName(name, isolate->heap()->buffer_string(), + JSDataView::kBufferOffset, object_offset); + default: { + if (map->instance_type() < FIRST_NONSTRING_TYPE) { + return + CheckForName(name, isolate->heap()->length_string(), + String::kLengthOffset, object_offset); + } + return false; + } + } +} + + // // Accessors::ArrayLength // diff --git a/deps/v8/src/accessors.h b/deps/v8/src/accessors.h index d9a2130f61..b2dee27932 100644 --- a/deps/v8/src/accessors.h +++ b/deps/v8/src/accessors.h @@ -86,6 +86,13 @@ class Accessors : public AllStatic { static Handle<AccessorInfo> MakeModuleExport( Handle<String> name, int index, PropertyAttributes attributes); + // Returns true for properties that are accessors to object fields. + // If true, *object_offset contains offset of object field. + static bool IsJSObjectFieldAccessor( + Handle<Map> map, Handle<String> name, + int* object_offset); + + private: // Accessor functions only used through the descriptor. static MaybeObject* FunctionSetPrototype(Isolate* isolate, diff --git a/deps/v8/src/allocation-site-scopes.cc b/deps/v8/src/allocation-site-scopes.cc new file mode 100644 index 0000000000..8097045b27 --- /dev/null +++ b/deps/v8/src/allocation-site-scopes.cc @@ -0,0 +1,108 @@ +// Copyright 2013 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#include "allocation-site-scopes.h" + +namespace v8 { +namespace internal { + + +Handle<AllocationSite> AllocationSiteCreationContext::EnterNewScope() { + Handle<AllocationSite> scope_site; + if (top().is_null()) { + // We are creating the top level AllocationSite as opposed to a nested + // AllocationSite. + InitializeTraversal(isolate()->factory()->NewAllocationSite()); + scope_site = Handle<AllocationSite>(*top(), isolate()); + if (FLAG_trace_creation_allocation_sites) { + PrintF("*** Creating top level AllocationSite %p\n", + static_cast<void*>(*scope_site)); + } + } else { + ASSERT(!current().is_null()); + scope_site = isolate()->factory()->NewAllocationSite(); + if (FLAG_trace_creation_allocation_sites) { + PrintF("Creating nested site (top, current, new) (%p, %p, %p)\n", + static_cast<void*>(*top()), + static_cast<void*>(*current()), + static_cast<void*>(*scope_site)); + } + current()->set_nested_site(*scope_site); + update_current_site(*scope_site); + } + ASSERT(!scope_site.is_null()); + return scope_site; +} + + +void AllocationSiteCreationContext::ExitScope( + Handle<AllocationSite> scope_site, + Handle<JSObject> object) { + if (!object.is_null() && !object->IsFailure()) { + bool top_level = !scope_site.is_null() && + top().is_identical_to(scope_site); + + scope_site->set_transition_info(*object); + if (FLAG_trace_creation_allocation_sites) { + if (top_level) { + PrintF("*** Setting AllocationSite %p transition_info %p\n", + static_cast<void*>(*scope_site), + static_cast<void*>(*object)); + } else { + PrintF("Setting AllocationSite (%p, %p) transition_info %p\n", + static_cast<void*>(*top()), + static_cast<void*>(*scope_site), + static_cast<void*>(*object)); + } + } + } +} + + +Handle<AllocationSite> AllocationSiteUsageContext::EnterNewScope() { + if (top().is_null()) { + InitializeTraversal(top_site_); + } else { + // Advance current site + Object* nested_site = current()->nested_site(); + // Something is wrong if we advance to the end of the list here. 
+ ASSERT(nested_site->IsAllocationSite()); + update_current_site(AllocationSite::cast(nested_site)); + } + return Handle<AllocationSite>(*current(), isolate()); +} + + +void AllocationSiteUsageContext::ExitScope( + Handle<AllocationSite> scope_site, + Handle<JSObject> object) { + // This assert ensures that we are pointing at the right sub-object in a + // recursive walk of a nested literal. + ASSERT(object.is_null() || *object == scope_site->transition_info()); +} + +} } // namespace v8::internal diff --git a/deps/v8/src/allocation-site-scopes.h b/deps/v8/src/allocation-site-scopes.h new file mode 100644 index 0000000000..1c3afdf369 --- /dev/null +++ b/deps/v8/src/allocation-site-scopes.h @@ -0,0 +1,115 @@ +// Copyright 2013 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef V8_ALLOCATION_SITE_SCOPES_H_ +#define V8_ALLOCATION_SITE_SCOPES_H_ + +#include "ast.h" +#include "handles.h" +#include "objects.h" +#include "zone.h" + +namespace v8 { +namespace internal { + + +// AllocationSiteContext is the base class for walking and copying a nested +// boilerplate with AllocationSite and AllocationMemento support. +class AllocationSiteContext { + public: + AllocationSiteContext(Isolate* isolate, bool activated) { + isolate_ = isolate; + activated_ = activated; + }; + virtual ~AllocationSiteContext() {} + + Handle<AllocationSite> top() { return top_; } + Handle<AllocationSite> current() { return current_; } + + // If activated, then recursively create mementos + bool activated() const { return activated_; } + + // Returns the AllocationSite that matches this scope. 
+ virtual Handle<AllocationSite> EnterNewScope() = 0; + + // scope_site should be the handle returned by the matching EnterNewScope() + virtual void ExitScope(Handle<AllocationSite> scope_site, + Handle<JSObject> object) = 0; + + protected: + void update_current_site(AllocationSite* site) { + *(current_.location()) = site; + } + + Isolate* isolate() { return isolate_; } + void InitializeTraversal(Handle<AllocationSite> site) { + top_ = site; + current_ = Handle<AllocationSite>(*top_, isolate()); + } + + private: + Isolate* isolate_; + Handle<AllocationSite> top_; + Handle<AllocationSite> current_; + bool activated_; +}; + + +// AllocationSiteCreationContext aids in the creation of AllocationSites to +// accompany object literals. +class AllocationSiteCreationContext : public AllocationSiteContext { + public: + explicit AllocationSiteCreationContext(Isolate* isolate) + : AllocationSiteContext(isolate, true) { } + + virtual Handle<AllocationSite> EnterNewScope() V8_OVERRIDE; + virtual void ExitScope(Handle<AllocationSite> site, + Handle<JSObject> object) V8_OVERRIDE; +}; + + +// AllocationSiteUsageContext aids in the creation of AllocationMementos placed +// behind some/all components of a copied object literal. +class AllocationSiteUsageContext : public AllocationSiteContext { + public: + AllocationSiteUsageContext(Isolate* isolate, Handle<AllocationSite> site, + bool activated) + : AllocationSiteContext(isolate, activated), + top_site_(site) { } + + virtual Handle<AllocationSite> EnterNewScope() V8_OVERRIDE; + virtual void ExitScope(Handle<AllocationSite> site, + Handle<JSObject> object) V8_OVERRIDE; + + private: + Handle<AllocationSite> top_site_; +}; + + +} } // namespace v8::internal + +#endif // V8_ALLOCATION_SITE_SCOPES_H_ diff --git a/deps/v8/src/allocation-tracker.cc b/deps/v8/src/allocation-tracker.cc new file mode 100644 index 0000000000..586ce3c45a --- /dev/null +++ b/deps/v8/src/allocation-tracker.cc @@ -0,0 +1,279 @@ +// Copyright 2013 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
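// Usage sketch (hypothetical driver code, not part of the patch; in-tree, the
// heap profiler owns the ids/names arguments and calls NewObjectEvent() on
// every tracked allocation):
//
//   AllocationTracker tracker(ids, names);   // HeapObjectsMap*, StringsStorage*
//   tracker.NewObjectEvent(addr, size);      // walk the JS stack and charge
//                                            // `size` to the SnapshotObjectId path
//   tracker.PrepareForSerialization();       // resolve deferred line/column info
//   tracker.trace_tree()->Print(&tracker);   // debug dump of aggregated traces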
+ +#include "v8.h" + +#include "allocation-tracker.h" + +#include "heap-snapshot-generator.h" +#include "frames-inl.h" + +namespace v8 { +namespace internal { + +AllocationTraceNode::AllocationTraceNode( + AllocationTraceTree* tree, SnapshotObjectId shared_function_info_id) + : tree_(tree), + function_id_(shared_function_info_id), + total_size_(0), + allocation_count_(0), + id_(tree->next_node_id()) { +} + + +AllocationTraceNode::~AllocationTraceNode() { +} + + +AllocationTraceNode* AllocationTraceNode::FindChild(SnapshotObjectId id) { + for (int i = 0; i < children_.length(); i++) { + AllocationTraceNode* node = children_[i]; + if (node->function_id() == id) return node; + } + return NULL; +} + + +AllocationTraceNode* AllocationTraceNode::FindOrAddChild(SnapshotObjectId id) { + AllocationTraceNode* child = FindChild(id); + if (child == NULL) { + child = new AllocationTraceNode(tree_, id); + children_.Add(child); + } + return child; +} + + +void AllocationTraceNode::AddAllocation(unsigned size) { + total_size_ += size; + ++allocation_count_; +} + + +void AllocationTraceNode::Print(int indent, AllocationTracker* tracker) { + OS::Print("%10u %10u %*c", total_size_, allocation_count_, indent, ' '); + if (tracker != NULL) { + const char* name = "<unknown function>"; + if (function_id_ != 0) { + AllocationTracker::FunctionInfo* info = + tracker->GetFunctionInfo(function_id_); + if (info != NULL) { + name = info->name; + } + } + OS::Print("%s #%u", name, id_); + } else { + OS::Print("%u #%u", function_id_, id_); + } + OS::Print("\n"); + indent += 2; + for (int i = 0; i < children_.length(); i++) { + children_[i]->Print(indent, tracker); + } +} + + +AllocationTraceTree::AllocationTraceTree() + : next_node_id_(1), + root_(this, 0) { +} + + +AllocationTraceTree::~AllocationTraceTree() { +} + + +AllocationTraceNode* AllocationTraceTree::AddPathFromEnd( + const Vector<SnapshotObjectId>& path) { + AllocationTraceNode* node = root(); + for (SnapshotObjectId* entry = path.start() + path.length() - 1; + entry != path.start() - 1; + --entry) { + node = node->FindOrAddChild(*entry); + } + return node; +} + + +void AllocationTraceTree::Print(AllocationTracker* tracker) { + OS::Print("[AllocationTraceTree:]\n"); + OS::Print("Total size | Allocation count | Function id | id\n"); + root()->Print(0, tracker); +} + +void AllocationTracker::DeleteUnresolvedLocation( + UnresolvedLocation** location) { + delete *location; +} + + +AllocationTracker::FunctionInfo::FunctionInfo() + : name(""), + script_name(""), + script_id(0), + line(-1), + column(-1) { +} + + +static bool AddressesMatch(void* key1, void* key2) { + return key1 == key2; +} + + +AllocationTracker::AllocationTracker( + HeapObjectsMap* ids, StringsStorage* names) + : ids_(ids), + names_(names), + id_to_function_info_(AddressesMatch) { +} + + +AllocationTracker::~AllocationTracker() { + unresolved_locations_.Iterate(DeleteUnresolvedLocation); +} + + +void AllocationTracker::PrepareForSerialization() { + List<UnresolvedLocation*> copy(unresolved_locations_.length()); + copy.AddAll(unresolved_locations_); + unresolved_locations_.Clear(); + for (int i = 0; i < copy.length(); i++) { + copy[i]->Resolve(); + delete copy[i]; + } +} + + +void AllocationTracker::NewObjectEvent(Address addr, int size) { + DisallowHeapAllocation no_allocation; + Heap* heap = ids_->heap(); + + // Mark the new block as FreeSpace to make sure the heap is iterable + // while we are capturing stack trace. 
+ FreeListNode::FromAddress(addr)->set_size(heap, size); + ASSERT_EQ(HeapObject::FromAddress(addr)->Size(), size); + ASSERT(FreeListNode::IsFreeListNode(HeapObject::FromAddress(addr))); + + Isolate* isolate = heap->isolate(); + int length = 0; + StackTraceFrameIterator it(isolate); + while (!it.done() && length < kMaxAllocationTraceLength) { + JavaScriptFrame* frame = it.frame(); + SharedFunctionInfo* shared = frame->function()->shared(); + SnapshotObjectId id = ids_->FindEntry(shared->address()); + allocation_trace_buffer_[length++] = id; + AddFunctionInfo(shared, id); + it.Advance(); + } + AllocationTraceNode* top_node = trace_tree_.AddPathFromEnd( + Vector<SnapshotObjectId>(allocation_trace_buffer_, length)); + top_node->AddAllocation(size); +} + + +static uint32_t SnapshotObjectIdHash(SnapshotObjectId id) { + return ComputeIntegerHash(static_cast<uint32_t>(id), + v8::internal::kZeroHashSeed); +} + + +AllocationTracker::FunctionInfo* AllocationTracker::GetFunctionInfo( + SnapshotObjectId id) { + HashMap::Entry* entry = id_to_function_info_.Lookup( + reinterpret_cast<void*>(id), SnapshotObjectIdHash(id), false); + if (entry == NULL) { + return NULL; + } + return reinterpret_cast<FunctionInfo*>(entry->value); +} + + +void AllocationTracker::AddFunctionInfo(SharedFunctionInfo* shared, + SnapshotObjectId id) { + HashMap::Entry* entry = id_to_function_info_.Lookup( + reinterpret_cast<void*>(id), SnapshotObjectIdHash(id), true); + if (entry->value == NULL) { + FunctionInfo* info = new FunctionInfo(); + info->name = names_->GetFunctionName(shared->DebugName()); + if (shared->script()->IsScript()) { + Script* script = Script::cast(shared->script()); + if (script->name()->IsName()) { + Name* name = Name::cast(script->name()); + info->script_name = names_->GetName(name); + } + info->script_id = script->id()->value(); + // Converting start offset into line and column may cause heap + // allocations so we postpone them until snapshot serialization. + unresolved_locations_.Add(new UnresolvedLocation( + script, + shared->start_position(), + info)); + } + entry->value = info; + } +} + + +AllocationTracker::UnresolvedLocation::UnresolvedLocation( + Script* script, int start, FunctionInfo* info) + : start_position_(start), + info_(info) { + script_ = Handle<Script>::cast( + script->GetIsolate()->global_handles()->Create(script)); + GlobalHandles::MakeWeak( + reinterpret_cast<Object**>(script_.location()), + this, &HandleWeakScript); +} + + +AllocationTracker::UnresolvedLocation::~UnresolvedLocation() { + if (!script_.is_null()) { + script_->GetIsolate()->global_handles()->Destroy( + reinterpret_cast<Object**>(script_.location())); + } +} + + +void AllocationTracker::UnresolvedLocation::Resolve() { + if (script_.is_null()) return; + info_->line = GetScriptLineNumber(script_, start_position_); + info_->column = GetScriptColumnNumber(script_, start_position_); +} + + +void AllocationTracker::UnresolvedLocation::HandleWeakScript( + v8::Isolate* isolate, + v8::Persistent<v8::Value>* obj, + void* data) { + UnresolvedLocation* location = reinterpret_cast<UnresolvedLocation*>(data); + location->script_ = Handle<Script>::null(); + obj->Dispose(); +} + + +} } // namespace v8::internal diff --git a/deps/v8/src/allocation-tracker.h b/deps/v8/src/allocation-tracker.h new file mode 100644 index 0000000000..617cf902e8 --- /dev/null +++ b/deps/v8/src/allocation-tracker.h @@ -0,0 +1,138 @@ +// Copyright 2013 the V8 project authors. All rights reserved. 
+// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef V8_ALLOCATION_TRACKER_H_ +#define V8_ALLOCATION_TRACKER_H_ + +namespace v8 { +namespace internal { + +class HeapObjectsMap; + +class AllocationTraceTree; + +class AllocationTraceNode { + public: + AllocationTraceNode(AllocationTraceTree* tree, + SnapshotObjectId shared_function_info_id); + ~AllocationTraceNode(); + AllocationTraceNode* FindChild(SnapshotObjectId shared_function_info_id); + AllocationTraceNode* FindOrAddChild(SnapshotObjectId shared_function_info_id); + void AddAllocation(unsigned size); + + SnapshotObjectId function_id() const { return function_id_; } + unsigned allocation_size() const { return total_size_; } + unsigned allocation_count() const { return allocation_count_; } + unsigned id() const { return id_; } + Vector<AllocationTraceNode*> children() const { return children_.ToVector(); } + + void Print(int indent, AllocationTracker* tracker); + + private: + AllocationTraceTree* tree_; + SnapshotObjectId function_id_; + unsigned total_size_; + unsigned allocation_count_; + unsigned id_; + List<AllocationTraceNode*> children_; + + DISALLOW_COPY_AND_ASSIGN(AllocationTraceNode); +}; + + +class AllocationTraceTree { + public: + AllocationTraceTree(); + ~AllocationTraceTree(); + AllocationTraceNode* AddPathFromEnd(const Vector<SnapshotObjectId>& path); + AllocationTraceNode* root() { return &root_; } + unsigned next_node_id() { return next_node_id_++; } + void Print(AllocationTracker* tracker); + + private: + unsigned next_node_id_; + AllocationTraceNode root_; + + DISALLOW_COPY_AND_ASSIGN(AllocationTraceTree); +}; + + +class AllocationTracker { + public: + struct FunctionInfo { + FunctionInfo(); + const char* name; + const char* script_name; + int script_id; + int line; + int column; + }; + + AllocationTracker(HeapObjectsMap* ids, StringsStorage* names); + ~AllocationTracker(); + + void PrepareForSerialization(); + void NewObjectEvent(Address addr, int size); + + AllocationTraceTree* trace_tree() { return &trace_tree_; } + HashMap* id_to_function_info() { return 
&id_to_function_info_; } + FunctionInfo* GetFunctionInfo(SnapshotObjectId id); + + private: + void AddFunctionInfo(SharedFunctionInfo* info, SnapshotObjectId id); + + class UnresolvedLocation { + public: + UnresolvedLocation(Script* script, int start, FunctionInfo* info); + ~UnresolvedLocation(); + void Resolve(); + + private: + static void HandleWeakScript(v8::Isolate* isolate, + v8::Persistent<v8::Value>* obj, + void* data); + Handle<Script> script_; + int start_position_; + FunctionInfo* info_; + }; + static void DeleteUnresolvedLocation(UnresolvedLocation** location); + + static const int kMaxAllocationTraceLength = 64; + HeapObjectsMap* ids_; + StringsStorage* names_; + AllocationTraceTree trace_tree_; + SnapshotObjectId allocation_trace_buffer_[kMaxAllocationTraceLength]; + HashMap id_to_function_info_; + List<UnresolvedLocation*> unresolved_locations_; + + DISALLOW_COPY_AND_ASSIGN(AllocationTracker); +}; + +} } // namespace v8::internal + +#endif // V8_ALLOCATION_TRACKER_H_ + diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc index 71a8f4a6cf..8a73877eed 100644 --- a/deps/v8/src/api.cc +++ b/deps/v8/src/api.cc @@ -77,8 +77,7 @@ namespace v8 { #define ON_BAILOUT(isolate, location, code) \ - if (IsDeadCheck(isolate, location) || \ - IsExecutionTerminatingCheck(isolate)) { \ + if (IsExecutionTerminatingCheck(isolate)) { \ code; \ UNREACHABLE(); \ } @@ -253,13 +252,6 @@ static inline bool ApiCheck(bool condition, } -static bool ReportV8Dead(const char* location) { - FatalErrorCallback callback = GetFatalErrorHandler(); - callback(location, "V8 is no longer usable"); - return true; -} - - static bool ReportEmptyHandle(const char* location) { FatalErrorCallback callback = GetFatalErrorHandler(); callback(location, "Reading from empty handle"); @@ -267,24 +259,6 @@ static bool ReportEmptyHandle(const char* location) { } -/** - * IsDeadCheck checks that the vm is usable. If, for instance, the vm has been - * out of memory at some point this check will fail. It should be called on - * entry to all methods that touch anything in the heap, except destructors - * which you sometimes can't avoid calling after the vm has crashed. Functions - * that call EnsureInitialized or ON_BAILOUT don't have to also call - * IsDeadCheck. ON_BAILOUT has the advantage over EnsureInitialized that you - * can arrange to return if the VM is dead. This is needed to ensure that no VM - * heap allocations are attempted on a dead VM. EnsureInitialized has the - * advantage over ON_BAILOUT that it actually initializes the VM if this has not - * yet been done. - */ -static inline bool IsDeadCheck(i::Isolate* isolate, const char* location) { - return !isolate->IsInitialized() - && isolate->IsDead() ? 
ReportV8Dead(location) : false; -} - - static inline bool IsExecutionTerminatingCheck(i::Isolate* isolate) { if (!isolate->IsInitialized()) return false; if (isolate->has_scheduled_exception()) { @@ -321,7 +295,6 @@ static bool InitializeHelper(i::Isolate* isolate) { static inline bool EnsureInitializedForIsolate(i::Isolate* isolate, const char* location) { - if (IsDeadCheck(isolate, location)) return false; if (isolate != NULL) { if (isolate->IsInitialized()) return true; } @@ -500,19 +473,7 @@ void V8::SetFlagsFromCommandLine(int* argc, char** argv, bool remove_flags) { v8::Handle<Value> ThrowException(v8::Handle<v8::Value> value) { - i::Isolate* isolate = i::Isolate::Current(); - if (IsDeadCheck(isolate, "v8::ThrowException()")) { - return v8::Handle<Value>(); - } - ENTER_V8(isolate); - // If we're passed an empty handle, we throw an undefined exception - // to deal more gracefully with out of memory situations. - if (value.IsEmpty()) { - isolate->ScheduleThrow(isolate->heap()->undefined_value()); - } else { - isolate->ScheduleThrow(*Utils::OpenHandle(*value)); - } - return v8::Undefined(); + return v8::Isolate::GetCurrent()->ThrowException(value); } @@ -602,8 +563,7 @@ ResourceConstraints::ResourceConstraints() : max_young_space_size_(0), max_old_space_size_(0), max_executable_size_(0), - stack_limit_(NULL), - is_memory_constrained_() { } + stack_limit_(NULL) { } bool SetResourceConstraints(ResourceConstraints* constraints) { @@ -614,7 +574,8 @@ bool SetResourceConstraints(ResourceConstraints* constraints) { int max_executable_size = constraints->max_executable_size(); if (young_space_size != 0 || old_gen_size != 0 || max_executable_size != 0) { // After initialization it's too late to change Heap constraints. - ASSERT(!isolate->IsInitialized()); + // TODO(rmcilroy): fix this assert. 
+ // ASSERT(!isolate->IsInitialized()); bool result = isolate->heap()->ConfigureHeap(young_space_size / 2, old_gen_size, max_executable_size); @@ -624,16 +585,11 @@ bool SetResourceConstraints(ResourceConstraints* constraints) { uintptr_t limit = reinterpret_cast<uintptr_t>(constraints->stack_limit()); isolate->stack_guard()->SetStackLimit(limit); } - if (constraints->is_memory_constrained().has_value) { - isolate->set_is_memory_constrained( - constraints->is_memory_constrained().value); - } return true; } i::Object** V8::GlobalizeReference(i::Isolate* isolate, i::Object** obj) { - if (IsDeadCheck(isolate, "V8::Persistent::New")) return NULL; LOG_API(isolate, "Persistent::New"); i::Handle<i::Object> result = isolate->global_handles()->Create(*obj); #ifdef DEBUG @@ -728,50 +684,58 @@ int HandleScope::NumberOfHandles() { } -i::Object** HandleScope::CreateHandle(i::Object* value) { - return i::HandleScope::CreateHandle(i::Isolate::Current(), value); +i::Object** HandleScope::CreateHandle(i::Isolate* isolate, i::Object* value) { + return i::HandleScope::CreateHandle(isolate, value); +} + + +i::Object** HandleScope::CreateHandle(i::HeapObject* heap_object, + i::Object* value) { + ASSERT(heap_object->IsHeapObject()); + return i::HandleScope::CreateHandle(heap_object->GetIsolate(), value); } -i::Object** HandleScope::CreateHandle(i::Isolate* isolate, i::Object* value) { - ASSERT(isolate == i::Isolate::Current()); - return i::HandleScope::CreateHandle(isolate, value); +EscapableHandleScope::EscapableHandleScope(Isolate* v8_isolate) { + i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate); + escape_slot_ = CreateHandle(isolate, isolate->heap()->the_hole_value()); + Initialize(v8_isolate); } -i::Object** HandleScope::CreateHandle(i::HeapObject* value) { - ASSERT(value->IsHeapObject()); - return reinterpret_cast<i::Object**>( - i::HandleScope::CreateHandle(value->GetIsolate(), value)); +i::Object** EscapableHandleScope::Escape(i::Object** escape_value) { + ApiCheck(*escape_slot_ == isolate_->heap()->the_hole_value(), + "EscapeableHandleScope::Escape", + "Escape value set twice"); + if (escape_value == NULL) { + *escape_slot_ = isolate_->heap()->undefined_value(); + return NULL; + } + *escape_slot_ = *escape_value; + return escape_slot_; } void Context::Enter() { i::Handle<i::Context> env = Utils::OpenHandle(this); i::Isolate* isolate = env->GetIsolate(); - if (IsDeadCheck(isolate, "v8::Context::Enter()")) return; ENTER_V8(isolate); - isolate->handle_scope_implementer()->EnterContext(env); - isolate->handle_scope_implementer()->SaveContext(isolate->context()); isolate->set_context(*env); } void Context::Exit() { - // Exit is essentially a static function and doesn't use the - // receiver, so we have to get the current isolate from the thread - // local. + // TODO(dcarney): fix this once chrome is fixed. i::Isolate* isolate = i::Isolate::Current(); - if (!isolate->IsInitialized()) return; - - if (!ApiCheck(isolate->handle_scope_implementer()->LeaveLastContext(), + i::Handle<i::Context> context = i::Handle<i::Context>::null(); + ENTER_V8(isolate); + if (!ApiCheck(isolate->handle_scope_implementer()->LeaveContext(context), "v8::Context::Exit()", "Cannot exit non-entered context")) { return; } - // Content of 'last_context' could be NULL. 
i::Context* last_context = isolate->handle_scope_implementer()->RestoreContext(); @@ -797,7 +761,7 @@ static i::Handle<i::FixedArray> EmbedderDataFor(Context* context, bool can_grow, const char* location) { i::Handle<i::Context> env = Utils::OpenHandle(context); - bool ok = !IsDeadCheck(env->GetIsolate(), location) && + bool ok = ApiCheck(env->IsNativeContext(), location, "Not a native context") && ApiCheck(index >= 0, location, "Negative index"); if (!ok) return i::Handle<i::FixedArray>(); @@ -974,7 +938,6 @@ void Template::Set(v8::Handle<String> name, v8::Handle<Data> value, v8::PropertyAttribute attribute) { i::Isolate* isolate = i::Isolate::Current(); - if (IsDeadCheck(isolate, "v8::Template::Set()")) return; ENTER_V8(isolate); i::HandleScope scope(isolate); const int kSize = 3; @@ -993,7 +956,6 @@ void Template::SetAccessorProperty( v8::PropertyAttribute attribute, v8::AccessControl access_control) { i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); - if (IsDeadCheck(isolate, "v8::Template::SetAccessor()")) return; ENTER_V8(isolate); ASSERT(!name.IsEmpty()); ASSERT(!getter.IsEmpty() || !setter.IsEmpty()); @@ -1019,9 +981,6 @@ static void InitializeFunctionTemplate( Local<ObjectTemplate> FunctionTemplate::PrototypeTemplate() { i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); - if (IsDeadCheck(isolate, "v8::FunctionTemplate::PrototypeTemplate()")) { - return Local<ObjectTemplate>(); - } ENTER_V8(isolate); i::Handle<i::Object> result(Utils::OpenHandle(this)->prototype_template(), isolate); @@ -1035,7 +994,6 @@ Local<ObjectTemplate> FunctionTemplate::PrototypeTemplate() { void FunctionTemplate::Inherit(v8::Handle<FunctionTemplate> value) { i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); - if (IsDeadCheck(isolate, "v8::FunctionTemplate::Inherit()")) return; ENTER_V8(isolate); Utils::OpenHandle(this)->set_parent_template(*Utils::OpenHandle(*value)); } @@ -1061,7 +1019,9 @@ static Local<FunctionTemplate> FunctionTemplateNew( } obj->set_serial_number(i::Smi::FromInt(next_serial_number)); if (callback != 0) { - if (data.IsEmpty()) data = v8::Undefined(); + if (data.IsEmpty()) { + data = v8::Undefined(reinterpret_cast<v8::Isolate*>(isolate)); + } Utils::ToLocal(obj)->SetCallHandler(callback, data); } obj->set_length(length); @@ -1278,7 +1238,6 @@ int TypeSwitch::match(v8::Handle<Value> value) { void FunctionTemplate::SetCallHandler(FunctionCallback callback, v8::Handle<Value> data) { i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); - if (IsDeadCheck(isolate, "v8::FunctionTemplate::SetCallHandler()")) return; ENTER_V8(isolate); i::HandleScope scope(isolate); i::Handle<i::Struct> struct_obj = @@ -1286,7 +1245,9 @@ void FunctionTemplate::SetCallHandler(FunctionCallback callback, i::Handle<i::CallHandlerInfo> obj = i::Handle<i::CallHandlerInfo>::cast(struct_obj); SET_FIELD_WRAPPED(obj, set_callback, callback); - if (data.IsEmpty()) data = v8::Undefined(); + if (data.IsEmpty()) { + data = v8::Undefined(reinterpret_cast<v8::Isolate*>(isolate)); + } obj->set_data(*Utils::OpenHandle(*data)); Utils::OpenHandle(this)->set_call_code(*obj); } @@ -1324,7 +1285,9 @@ static i::Handle<i::AccessorInfo> MakeAccessorInfo( isolate->factory()->NewExecutableAccessorInfo(); SET_FIELD_WRAPPED(obj, set_getter, getter); SET_FIELD_WRAPPED(obj, set_setter, setter); - if (data.IsEmpty()) data = v8::Undefined(); + if (data.IsEmpty()) { + data = v8::Undefined(reinterpret_cast<v8::Isolate*>(isolate)); + } obj->set_data(*Utils::OpenHandle(*data)); return 
SetAccessorInfoProperties(obj, name, settings, attributes, signature); } @@ -1349,8 +1312,7 @@ static i::Handle<i::AccessorInfo> MakeAccessorInfo( Local<ObjectTemplate> FunctionTemplate::InstanceTemplate() { i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); - if (IsDeadCheck(isolate, "v8::FunctionTemplate::InstanceTemplate()") - || EmptyCheck("v8::FunctionTemplate::InstanceTemplate()", this)) + if (EmptyCheck("v8::FunctionTemplate::InstanceTemplate()", this)) return Local<ObjectTemplate>(); ENTER_V8(isolate); i::Handle<i::FunctionTemplateInfo> handle = Utils::OpenHandle(this); @@ -1367,7 +1329,6 @@ Local<ObjectTemplate> FunctionTemplate::InstanceTemplate() { void FunctionTemplate::SetLength(int length) { i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); - if (IsDeadCheck(isolate, "v8::FunctionTemplate::SetLength()")) return; ENTER_V8(isolate); Utils::OpenHandle(this)->set_length(length); } @@ -1375,7 +1336,6 @@ void FunctionTemplate::SetLength(int length) { void FunctionTemplate::SetClassName(Handle<String> name) { i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); - if (IsDeadCheck(isolate, "v8::FunctionTemplate::SetClassName()")) return; ENTER_V8(isolate); Utils::OpenHandle(this)->set_class_name(*Utils::OpenHandle(*name)); } @@ -1383,9 +1343,6 @@ void FunctionTemplate::SetClassName(Handle<String> name) { void FunctionTemplate::SetHiddenPrototype(bool value) { i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); - if (IsDeadCheck(isolate, "v8::FunctionTemplate::SetHiddenPrototype()")) { - return; - } ENTER_V8(isolate); Utils::OpenHandle(this)->set_hidden_prototype(value); } @@ -1393,9 +1350,6 @@ void FunctionTemplate::SetHiddenPrototype(bool value) { void FunctionTemplate::ReadOnlyPrototype() { i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); - if (IsDeadCheck(isolate, "v8::FunctionTemplate::ReadOnlyPrototype()")) { - return; - } ENTER_V8(isolate); Utils::OpenHandle(this)->set_read_only_prototype(true); } @@ -1403,9 +1357,6 @@ void FunctionTemplate::ReadOnlyPrototype() { void FunctionTemplate::RemovePrototype() { i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); - if (IsDeadCheck(isolate, "v8::FunctionTemplate::RemovePrototype()")) { - return; - } ENTER_V8(isolate); Utils::OpenHandle(this)->set_remove_prototype(true); } @@ -1422,9 +1373,6 @@ Local<ObjectTemplate> ObjectTemplate::New() { Local<ObjectTemplate> ObjectTemplate::New( v8::Handle<FunctionTemplate> constructor) { i::Isolate* isolate = i::Isolate::Current(); - if (IsDeadCheck(isolate, "v8::ObjectTemplate::New()")) { - return Local<ObjectTemplate>(); - } EnsureInitializedForIsolate(isolate, "v8::ObjectTemplate::New()"); LOG_API(isolate, "ObjectTemplate::New"); ENTER_V8(isolate); @@ -1495,7 +1443,6 @@ static bool TemplateSetAccessor( PropertyAttribute attribute, v8::Local<AccessorSignature> signature) { i::Isolate* isolate = Utils::OpenHandle(template_obj)->GetIsolate(); - if (IsDeadCheck(isolate, "v8::ObjectTemplate::SetAccessor()")) return false; ENTER_V8(isolate); i::HandleScope scope(isolate); i::Handle<i::AccessorInfo> obj = MakeAccessorInfo( @@ -1551,9 +1498,6 @@ void ObjectTemplate::SetNamedPropertyHandler( NamedPropertyEnumeratorCallback enumerator, Handle<Value> data) { i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); - if (IsDeadCheck(isolate, "v8::ObjectTemplate::SetNamedPropertyHandler()")) { - return; - } ENTER_V8(isolate); i::HandleScope scope(isolate); EnsureConstructor(this); @@ -1571,7 +1515,9 @@ void ObjectTemplate::SetNamedPropertyHandler( 
if (remover != 0) SET_FIELD_WRAPPED(obj, set_deleter, remover); if (enumerator != 0) SET_FIELD_WRAPPED(obj, set_enumerator, enumerator); - if (data.IsEmpty()) data = v8::Undefined(); + if (data.IsEmpty()) { + data = v8::Undefined(reinterpret_cast<v8::Isolate*>(isolate)); + } obj->set_data(*Utils::OpenHandle(*data)); cons->set_named_property_handler(*obj); } @@ -1579,7 +1525,6 @@ void ObjectTemplate::SetNamedPropertyHandler( void ObjectTemplate::MarkAsUndetectable() { i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); - if (IsDeadCheck(isolate, "v8::ObjectTemplate::MarkAsUndetectable()")) return; ENTER_V8(isolate); i::HandleScope scope(isolate); EnsureConstructor(this); @@ -1596,9 +1541,6 @@ void ObjectTemplate::SetAccessCheckCallbacks( Handle<Value> data, bool turned_on_by_default) { i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); - if (IsDeadCheck(isolate, "v8::ObjectTemplate::SetAccessCheckCallbacks()")) { - return; - } ENTER_V8(isolate); i::HandleScope scope(isolate); EnsureConstructor(this); @@ -1611,7 +1553,9 @@ void ObjectTemplate::SetAccessCheckCallbacks( SET_FIELD_WRAPPED(info, set_named_callback, named_callback); SET_FIELD_WRAPPED(info, set_indexed_callback, indexed_callback); - if (data.IsEmpty()) data = v8::Undefined(); + if (data.IsEmpty()) { + data = v8::Undefined(reinterpret_cast<v8::Isolate*>(isolate)); + } info->set_data(*Utils::OpenHandle(*data)); i::FunctionTemplateInfo* constructor = @@ -1630,9 +1574,6 @@ void ObjectTemplate::SetIndexedPropertyHandler( IndexedPropertyEnumeratorCallback enumerator, Handle<Value> data) { i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); - if (IsDeadCheck(isolate, "v8::ObjectTemplate::SetIndexedPropertyHandler()")) { - return; - } ENTER_V8(isolate); i::HandleScope scope(isolate); EnsureConstructor(this); @@ -1650,7 +1591,9 @@ void ObjectTemplate::SetIndexedPropertyHandler( if (remover != 0) SET_FIELD_WRAPPED(obj, set_deleter, remover); if (enumerator != 0) SET_FIELD_WRAPPED(obj, set_enumerator, enumerator); - if (data.IsEmpty()) data = v8::Undefined(); + if (data.IsEmpty()) { + data = v8::Undefined(reinterpret_cast<v8::Isolate*>(isolate)); + } obj->set_data(*Utils::OpenHandle(*data)); cons->set_indexed_property_handler(*obj); } @@ -1659,10 +1602,6 @@ void ObjectTemplate::SetIndexedPropertyHandler( void ObjectTemplate::SetCallAsFunctionHandler(FunctionCallback callback, Handle<Value> data) { i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); - if (IsDeadCheck(isolate, - "v8::ObjectTemplate::SetCallAsFunctionHandler()")) { - return; - } ENTER_V8(isolate); i::HandleScope scope(isolate); EnsureConstructor(this); @@ -1674,26 +1613,21 @@ void ObjectTemplate::SetCallAsFunctionHandler(FunctionCallback callback, i::Handle<i::CallHandlerInfo> obj = i::Handle<i::CallHandlerInfo>::cast(struct_obj); SET_FIELD_WRAPPED(obj, set_callback, callback); - if (data.IsEmpty()) data = v8::Undefined(); + if (data.IsEmpty()) { + data = v8::Undefined(reinterpret_cast<v8::Isolate*>(isolate)); + } obj->set_data(*Utils::OpenHandle(*data)); cons->set_instance_call_handler(*obj); } int ObjectTemplate::InternalFieldCount() { - if (IsDeadCheck(Utils::OpenHandle(this)->GetIsolate(), - "v8::ObjectTemplate::InternalFieldCount()")) { - return 0; - } return i::Smi::cast(Utils::OpenHandle(this)->internal_field_count())->value(); } void ObjectTemplate::SetInternalFieldCount(int value) { i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); - if (IsDeadCheck(isolate, "v8::ObjectTemplate::SetInternalFieldCount()")) { - return; - } 
if (!ApiCheck(i::Smi::IsValid(value), "v8::ObjectTemplate::SetInternalFieldCount()", "Invalid internal field count")) { @@ -1713,10 +1647,13 @@ void ObjectTemplate::SetInternalFieldCount(int value) { // --- S c r i p t D a t a --- -ScriptData* ScriptData::PreCompile(const char* input, int length) { +ScriptData* ScriptData::PreCompile(v8::Isolate* isolate, + const char* input, + int length) { i::Utf8ToUtf16CharacterStream stream( reinterpret_cast<const unsigned char*>(input), length); - return i::PreParserApi::PreParse(i::Isolate::Current(), &stream); + return i::PreParserApi::PreParse( + reinterpret_cast<i::Isolate*>(isolate), &stream); } @@ -1763,13 +1700,13 @@ Local<Script> Script::New(v8::Handle<String> source, v8::ScriptOrigin* origin, v8::ScriptData* pre_data, v8::Handle<String> script_data) { - i::Isolate* isolate = i::Isolate::Current(); + i::Handle<i::String> str = Utils::OpenHandle(*source); + i::Isolate* isolate = str->GetIsolate(); ON_BAILOUT(isolate, "v8::Script::New()", return Local<Script>()); LOG_API(isolate, "Script::New"); ENTER_V8(isolate); i::SharedFunctionInfo* raw_result = NULL; { i::HandleScope scope(isolate); - i::Handle<i::String> str = Utils::OpenHandle(*source); i::Handle<i::Object> name_obj; int line_offset = 0; int column_offset = 0; @@ -1786,8 +1723,9 @@ Local<Script> Script::New(v8::Handle<String> source, static_cast<int>(origin->ResourceColumnOffset()->Value()); } if (!origin->ResourceIsSharedCrossOrigin().IsEmpty()) { + v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate); is_shared_cross_origin = - origin->ResourceIsSharedCrossOrigin() == v8::True(); + origin->ResourceIsSharedCrossOrigin() == v8::True(v8_isolate); } } EXCEPTION_PREAMBLE(isolate); @@ -1831,7 +1769,8 @@ Local<Script> Script::Compile(v8::Handle<String> source, v8::ScriptOrigin* origin, v8::ScriptData* pre_data, v8::Handle<String> script_data) { - i::Isolate* isolate = i::Isolate::Current(); + i::Handle<i::String> str = Utils::OpenHandle(*source); + i::Isolate* isolate = str->GetIsolate(); ON_BAILOUT(isolate, "v8::Script::Compile()", return Local<Script>()); LOG_API(isolate, "Script::Compile"); ENTER_V8(isolate); @@ -1858,7 +1797,11 @@ Local<Script> Script::Compile(v8::Handle<String> source, Local<Value> Script::Run() { - i::Isolate* isolate = i::Isolate::Current(); + // If execution is terminating, Compile(script)->Run() requires this check. 
+ if (this == NULL) return Local<Value>(); + i::Handle<i::HeapObject> obj = + i::Handle<i::HeapObject>::cast(Utils::OpenHandle(this)); + i::Isolate* isolate = obj->GetIsolate(); ON_BAILOUT(isolate, "v8::Script::Run()", return Local<Value>()); LOG_API(isolate, "Script::Run"); ENTER_V8(isolate); @@ -1867,7 +1810,6 @@ Local<Value> Script::Run() { i::Object* raw_result = NULL; { i::HandleScope scope(isolate); - i::Handle<i::Object> obj = Utils::OpenHandle(this); i::Handle<i::JSFunction> fun; if (obj->IsSharedFunctionInfo()) { i::Handle<i::SharedFunctionInfo> @@ -1905,7 +1847,9 @@ static i::Handle<i::SharedFunctionInfo> OpenScript(Script* script) { Local<Value> Script::Id() { - i::Isolate* isolate = i::Isolate::Current(); + i::Handle<i::HeapObject> obj = + i::Handle<i::HeapObject>::cast(Utils::OpenHandle(this)); + i::Isolate* isolate = obj->GetIsolate(); ON_BAILOUT(isolate, "v8::Script::Id()", return Local<Value>()); LOG_API(isolate, "Script::Id"); i::Object* raw_id = NULL; @@ -1922,7 +1866,9 @@ Local<Value> Script::Id() { int Script::GetId() { - i::Isolate* isolate = i::Isolate::Current(); + i::Handle<i::HeapObject> obj = + i::Handle<i::HeapObject>::cast(Utils::OpenHandle(this)); + i::Isolate* isolate = obj->GetIsolate(); ON_BAILOUT(isolate, "v8::Script::Id()", return -1); LOG_API(isolate, "Script::Id"); { @@ -1935,10 +1881,11 @@ int Script::GetId() { int Script::GetLineNumber(int code_pos) { - i::Isolate* isolate = i::Isolate::Current(); + i::Handle<i::HeapObject> obj = + i::Handle<i::HeapObject>::cast(Utils::OpenHandle(this)); + i::Isolate* isolate = obj->GetIsolate(); ON_BAILOUT(isolate, "v8::Script::GetLineNumber()", return -1); LOG_API(isolate, "Script::GetLineNumber"); - i::Handle<i::Object> obj = Utils::OpenHandle(this); if (obj->IsScript()) { i::Handle<i::Script> script = i::Handle<i::Script>(i::Script::cast(*obj)); return i::GetScriptLineNumber(script, code_pos); @@ -1949,10 +1896,11 @@ int Script::GetLineNumber(int code_pos) { Handle<Value> Script::GetScriptName() { - i::Isolate* isolate = i::Isolate::Current(); + i::Handle<i::HeapObject> obj = + i::Handle<i::HeapObject>::cast(Utils::OpenHandle(this)); + i::Isolate* isolate = obj->GetIsolate(); ON_BAILOUT(isolate, "v8::Script::GetName()", return Handle<String>()); LOG_API(isolate, "Script::GetName"); - i::Handle<i::Object> obj = Utils::OpenHandle(this); if (obj->IsScript()) { i::Object* name = i::Script::cast(*obj)->name(); return Utils::ToLocal(i::Handle<i::Object>(name, isolate)); @@ -1963,7 +1911,9 @@ Handle<Value> Script::GetScriptName() { void Script::SetData(v8::Handle<String> data) { - i::Isolate* isolate = i::Isolate::Current(); + i::Handle<i::HeapObject> obj = + i::Handle<i::HeapObject>::cast(Utils::OpenHandle(this)); + i::Isolate* isolate = obj->GetIsolate(); ON_BAILOUT(isolate, "v8::Script::SetData()", return); LOG_API(isolate, "Script::SetData"); { @@ -1995,8 +1945,9 @@ v8::TryCatch::TryCatch() v8::TryCatch::~TryCatch() { ASSERT(isolate_ == i::Isolate::Current()); if (rethrow_) { - v8::HandleScope scope(reinterpret_cast<Isolate*>(isolate_)); - v8::Local<v8::Value> exc = v8::Local<v8::Value>::New(Exception()); + v8::Isolate* isolate = reinterpret_cast<Isolate*>(isolate_); + v8::HandleScope scope(isolate); + v8::Local<v8::Value> exc = v8::Local<v8::Value>::New(isolate, Exception()); if (HasCaught() && capture_message_) { // If an exception was caught and rethrow_ is indicated, the saved // message, script, and location need to be restored to Isolate TLS @@ -2006,7 +1957,7 @@ v8::TryCatch::~TryCatch() { 
isolate_->RestorePendingMessageFromTryCatch(this); } isolate_->UnregisterTryCatchHandler(this); - v8::ThrowException(exc); + reinterpret_cast<Isolate*>(isolate_)->ThrowException(exc); ASSERT(!isolate_->thread_local_top()->rethrowing_message_); } else { isolate_->UnregisterTryCatchHandler(this); @@ -2032,7 +1983,7 @@ bool v8::TryCatch::HasTerminated() const { v8::Handle<v8::Value> v8::TryCatch::ReThrow() { if (!HasCaught()) return v8::Local<v8::Value>(); rethrow_ = true; - return v8::Undefined(); + return v8::Undefined(reinterpret_cast<v8::Isolate*>(isolate_)); } @@ -2056,7 +2007,7 @@ v8::Local<Value> v8::TryCatch::StackTrace() const { i::HandleScope scope(isolate_); i::Handle<i::JSObject> obj(i::JSObject::cast(raw_obj), isolate_); i::Handle<i::String> name = isolate_->factory()->stack_string(); - if (!obj->HasProperty(*name)) return v8::Local<Value>(); + if (!i::JSReceiver::HasProperty(obj, name)) return v8::Local<Value>(); i::Handle<i::Object> value = i::GetProperty(isolate_, obj, name); if (value.is_null()) return v8::Local<Value>(); return v8::Utils::ToLocal(scope.CloseAndEscape(value)); @@ -2116,9 +2067,6 @@ Local<String> Message::Get() const { v8::Handle<Value> Message::GetScriptResourceName() const { i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); - if (IsDeadCheck(isolate, "v8::Message::GetScriptResourceName()")) { - return Local<String>(); - } ENTER_V8(isolate); HandleScope scope(reinterpret_cast<Isolate*>(isolate)); i::Handle<i::JSMessageObject> message = @@ -2135,9 +2083,6 @@ v8::Handle<Value> Message::GetScriptResourceName() const { v8::Handle<Value> Message::GetScriptData() const { i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); - if (IsDeadCheck(isolate, "v8::Message::GetScriptResourceData()")) { - return Local<Value>(); - } ENTER_V8(isolate); HandleScope scope(reinterpret_cast<Isolate*>(isolate)); i::Handle<i::JSMessageObject> message = @@ -2153,9 +2098,6 @@ v8::Handle<Value> Message::GetScriptData() const { v8::Handle<v8::StackTrace> Message::GetStackTrace() const { i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); - if (IsDeadCheck(isolate, "v8::Message::GetStackTrace()")) { - return Local<v8::StackTrace>(); - } ENTER_V8(isolate); HandleScope scope(reinterpret_cast<Isolate*>(isolate)); i::Handle<i::JSMessageObject> message = @@ -2215,7 +2157,6 @@ int Message::GetLineNumber() const { int Message::GetStartPosition() const { i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); - if (IsDeadCheck(isolate, "v8::Message::GetStartPosition()")) return 0; ENTER_V8(isolate); i::HandleScope scope(isolate); i::Handle<i::JSMessageObject> message = @@ -2226,7 +2167,6 @@ int Message::GetStartPosition() const { int Message::GetEndPosition() const { i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); - if (IsDeadCheck(isolate, "v8::Message::GetEndPosition()")) return 0; ENTER_V8(isolate); i::HandleScope scope(isolate); i::Handle<i::JSMessageObject> message = @@ -2237,9 +2177,6 @@ int Message::GetEndPosition() const { int Message::GetStartColumn() const { i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); - if (IsDeadCheck(isolate, "v8::Message::GetStartColumn()")) { - return kNoColumnInfo; - } ENTER_V8(isolate); i::HandleScope scope(isolate); i::Handle<i::JSObject> data_obj = Utils::OpenHandle(this); @@ -2255,7 +2192,6 @@ int Message::GetStartColumn() const { int Message::GetEndColumn() const { i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); - if (IsDeadCheck(isolate, "v8::Message::GetEndColumn()")) return 
      kNoColumnInfo;
   ENTER_V8(isolate);
   i::HandleScope scope(isolate);
   i::Handle<i::JSObject> data_obj = Utils::OpenHandle(this);
@@ -2275,7 +2211,6 @@ int Message::GetEndColumn() const {
 
 bool Message::IsSharedCrossOrigin() const {
   i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
-  if (IsDeadCheck(isolate, "v8::Message::IsSharedCrossOrigin()")) return 0;
   ENTER_V8(isolate);
   i::HandleScope scope(isolate);
   i::Handle<i::JSMessageObject> message =
@@ -2307,7 +2242,6 @@ Local<String> Message::GetSourceLine() const {
 
 void Message::PrintCurrentStackTrace(FILE* out) {
   i::Isolate* isolate = i::Isolate::Current();
-  if (IsDeadCheck(isolate, "v8::Message::PrintCurrentStackTrace()")) return;
   ENTER_V8(isolate);
   isolate->PrintCurrentStackTrace(out);
 }
@@ -2317,9 +2251,6 @@ void Message::PrintCurrentStackTrace(FILE* out) {
 
 Local<StackFrame> StackTrace::GetFrame(uint32_t index) const {
   i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
-  if (IsDeadCheck(isolate, "v8::StackTrace::GetFrame()")) {
-    return Local<StackFrame>();
-  }
   ENTER_V8(isolate);
   HandleScope scope(reinterpret_cast<Isolate*>(isolate));
   i::Handle<i::JSArray> self = Utils::OpenHandle(this);
@@ -2331,7 +2262,6 @@ Local<StackFrame> StackTrace::GetFrame(uint32_t index) const {
 
 int StackTrace::GetFrameCount() const {
   i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
-  if (IsDeadCheck(isolate, "v8::StackTrace::GetFrameCount()")) return -1;
   ENTER_V8(isolate);
   return i::Smi::cast(Utils::OpenHandle(this)->length())->value();
 }
@@ -2339,7 +2269,6 @@ int StackTrace::GetFrameCount() const {
 
 Local<Array> StackTrace::AsArray() {
   i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
-  if (IsDeadCheck(isolate, "v8::StackTrace::AsArray()")) Local<Array>();
   ENTER_V8(isolate);
   return Utils::ToLocal(Utils::OpenHandle(this));
 }
@@ -2348,9 +2277,6 @@ Local<Array> StackTrace::AsArray() {
 
 Local<StackTrace> StackTrace::CurrentStackTrace(int frame_limit,
                                                 StackTraceOptions options) {
   i::Isolate* isolate = i::Isolate::Current();
-  if (IsDeadCheck(isolate, "v8::StackTrace::CurrentStackTrace()")) {
-    Local<StackTrace>();
-  }
   ENTER_V8(isolate);
   i::Handle<i::JSArray> stackTrace =
       isolate->CaptureCurrentStackTrace(frame_limit, options);
@@ -2362,9 +2288,6 @@ Local<StackTrace> StackTrace::CurrentStackTrace(int frame_limit,
 
 int StackFrame::GetLineNumber() const {
   i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
-  if (IsDeadCheck(isolate, "v8::StackFrame::GetLineNumber()")) {
-    return Message::kNoLineNumberInfo;
-  }
   ENTER_V8(isolate);
   i::HandleScope scope(isolate);
   i::Handle<i::JSObject> self = Utils::OpenHandle(this);
@@ -2378,9 +2301,6 @@ int StackFrame::GetLineNumber() const {
 
 int StackFrame::GetColumn() const {
   i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
-  if (IsDeadCheck(isolate, "v8::StackFrame::GetColumn()")) {
-    return Message::kNoColumnInfo;
-  }
   ENTER_V8(isolate);
   i::HandleScope scope(isolate);
   i::Handle<i::JSObject> self = Utils::OpenHandle(this);
@@ -2394,9 +2314,6 @@ int StackFrame::GetColumn() const {
 
 int StackFrame::GetScriptId() const {
   i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
-  if (IsDeadCheck(isolate, "v8::StackFrame::GetScriptId()")) {
-    return Message::kNoScriptIdInfo;
-  }
   ENTER_V8(isolate);
   i::HandleScope scope(isolate);
   i::Handle<i::JSObject> self = Utils::OpenHandle(this);
@@ -2410,9 +2327,6 @@ int StackFrame::GetScriptId() const {
 
 Local<String> StackFrame::GetScriptName() const {
   i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
-  if (IsDeadCheck(isolate, "v8::StackFrame::GetScriptName()")) {
-    return Local<String>();
-  }
   ENTER_V8(isolate);
   HandleScope scope(reinterpret_cast<Isolate*>(isolate));
   i::Handle<i::JSObject> self = Utils::OpenHandle(this);
@@ -2426,9 +2340,6 @@ Local<String> StackFrame::GetScriptName() const {
 
 Local<String> StackFrame::GetScriptNameOrSourceURL() const {
   i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
-  if (IsDeadCheck(isolate, "v8::StackFrame::GetScriptNameOrSourceURL()")) {
-    return Local<String>();
-  }
   ENTER_V8(isolate);
   HandleScope scope(reinterpret_cast<Isolate*>(isolate));
   i::Handle<i::JSObject> self = Utils::OpenHandle(this);
@@ -2442,9 +2353,6 @@ Local<String> StackFrame::GetScriptNameOrSourceURL() const {
 
 Local<String> StackFrame::GetFunctionName() const {
   i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
-  if (IsDeadCheck(isolate, "v8::StackFrame::GetFunctionName()")) {
-    return Local<String>();
-  }
   ENTER_V8(isolate);
   HandleScope scope(reinterpret_cast<Isolate*>(isolate));
   i::Handle<i::JSObject> self = Utils::OpenHandle(this);
@@ -2458,7 +2366,6 @@ Local<String> StackFrame::GetFunctionName() const {
 
 bool StackFrame::IsEval() const {
   i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
-  if (IsDeadCheck(isolate, "v8::StackFrame::IsEval()")) return false;
   ENTER_V8(isolate);
   i::HandleScope scope(isolate);
   i::Handle<i::JSObject> self = Utils::OpenHandle(this);
@@ -2469,7 +2376,6 @@ bool StackFrame::IsEval() const {
 
 bool StackFrame::IsConstructor() const {
   i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
-  if (IsDeadCheck(isolate, "v8::StackFrame::IsConstructor()")) return false;
   ENTER_V8(isolate);
   i::HandleScope scope(isolate);
   i::Handle<i::JSObject> self = Utils::OpenHandle(this);
@@ -2504,9 +2410,6 @@ Local<Value> JSON::Parse(Local<String> json_string) {
 // --- D a t a ---
 
 bool Value::FullIsUndefined() const {
-  if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsUndefined()")) {
-    return false;
-  }
   bool result = Utils::OpenHandle(this)->IsUndefined();
   ASSERT_EQ(result, QuickIsUndefined());
   return result;
@@ -2514,7 +2417,6 @@ bool Value::FullIsUndefined() const {
 
 bool Value::FullIsNull() const {
-  if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsNull()")) return false;
   bool result = Utils::OpenHandle(this)->IsNull();
   ASSERT_EQ(result, QuickIsNull());
   return result;
@@ -2522,27 +2424,21 @@ bool Value::FullIsNull() const {
 
 bool Value::IsTrue() const {
-  if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsTrue()")) return false;
   return Utils::OpenHandle(this)->IsTrue();
 }
 
 bool Value::IsFalse() const {
-  if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsFalse()")) return false;
   return Utils::OpenHandle(this)->IsFalse();
 }
 
 bool Value::IsFunction() const {
-  if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsFunction()")) {
-    return false;
-  }
   return Utils::OpenHandle(this)->IsJSFunction();
 }
 
 bool Value::FullIsString() const {
-  if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsString()")) return false;
   bool result = Utils::OpenHandle(this)->IsString();
   ASSERT_EQ(result, QuickIsString());
   return result;
@@ -2550,20 +2446,16 @@ bool Value::FullIsString() const {
 
 bool Value::IsSymbol() const {
-  if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsSymbol()")) return false;
   return Utils::OpenHandle(this)->IsSymbol();
 }
 
 bool Value::IsArray() const {
-  if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsArray()")) return false;
   return Utils::OpenHandle(this)->IsJSArray();
 }
 
 bool Value::IsArrayBuffer() const {
-  if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsArrayBuffer()"))
-    return false;
   return Utils::OpenHandle(this)->IsJSArrayBuffer();
 }
@@ -2574,8 +2466,6 @@ bool Value::IsArrayBufferView() const {
 
 bool Value::IsTypedArray() const {
-  if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsArrayBuffer()"))
-    return false;
   return Utils::OpenHandle(this)->IsJSTypedArray();
 }
@@ -2594,8 +2484,6 @@ F(Uint8ClampedArray, kExternalPixelArray)
 
 #define VALUE_IS_TYPED_ARRAY(TypedArray, type_const)                          \
   bool Value::Is##TypedArray() const {                                        \
-    if (IsDeadCheck(i::Isolate::Current(), "v8::Value::Is" #TypedArray "()")) \
-      return false;                                                           \
     i::Handle<i::Object> obj = Utils::OpenHandle(this);                       \
     if (!obj->IsJSTypedArray()) return false;                                 \
     return i::JSTypedArray::cast(*obj)->type() == type_const;                 \
@@ -2612,35 +2500,26 @@ bool Value::IsDataView() const {
 
 bool Value::IsObject() const {
-  if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsObject()")) return false;
   return Utils::OpenHandle(this)->IsJSObject();
 }
 
 bool Value::IsNumber() const {
-  if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsNumber()")) return false;
   return Utils::OpenHandle(this)->IsNumber();
 }
 
 bool Value::IsBoolean() const {
-  if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsBoolean()")) {
-    return false;
-  }
   return Utils::OpenHandle(this)->IsBoolean();
 }
 
 bool Value::IsExternal() const {
-  if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsExternal()")) {
-    return false;
-  }
   return Utils::OpenHandle(this)->IsExternal();
 }
 
 bool Value::IsInt32() const {
-  if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsInt32()")) return false;
   i::Handle<i::Object> obj = Utils::OpenHandle(this);
   if (obj->IsSmi()) return true;
   if (obj->IsNumber()) {
@@ -2657,7 +2536,6 @@ bool Value::IsInt32() const {
 
 bool Value::IsUint32() const {
-  if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsUint32()")) return false;
   i::Handle<i::Object> obj = Utils::OpenHandle(this);
   if (obj->IsSmi()) return i::Smi::cast(*obj)->value() >= 0;
   if (obj->IsNumber()) {
@@ -2675,7 +2553,6 @@ bool Value::IsUint32() const {
 
 bool Value::IsDate() const {
   i::Isolate* isolate = i::Isolate::Current();
-  if (IsDeadCheck(isolate, "v8::Value::IsDate()")) return false;
   i::Handle<i::Object> obj = Utils::OpenHandle(this);
   return obj->HasSpecificClassOf(isolate->heap()->Date_string());
 }
@@ -2683,7 +2560,6 @@ bool Value::IsDate() const {
 
 bool Value::IsStringObject() const {
   i::Isolate* isolate = i::Isolate::Current();
-  if (IsDeadCheck(isolate, "v8::Value::IsStringObject()")) return false;
   i::Handle<i::Object> obj = Utils::OpenHandle(this);
   return obj->HasSpecificClassOf(isolate->heap()->String_string());
 }
@@ -2693,7 +2569,6 @@ bool Value::IsSymbolObject() const {
   // TODO(svenpanne): these and other test functions should be written such
   // that they do not use Isolate::Current().
   i::Isolate* isolate = i::Isolate::Current();
-  if (IsDeadCheck(isolate, "v8::Value::IsSymbolObject()")) return false;
   i::Handle<i::Object> obj = Utils::OpenHandle(this);
   return obj->HasSpecificClassOf(isolate->heap()->Symbol_string());
 }
@@ -2701,7 +2576,6 @@ bool Value::IsSymbolObject() const {
 
 bool Value::IsNumberObject() const {
   i::Isolate* isolate = i::Isolate::Current();
-  if (IsDeadCheck(isolate, "v8::Value::IsNumberObject()")) return false;
   i::Handle<i::Object> obj = Utils::OpenHandle(this);
   return obj->HasSpecificClassOf(isolate->heap()->Number_string());
 }
@@ -2729,7 +2603,6 @@ static bool CheckConstructor(i::Isolate* isolate,
 
 bool Value::IsNativeError() const {
   i::Isolate* isolate = i::Isolate::Current();
-  if (IsDeadCheck(isolate, "v8::Value::IsNativeError()")) return false;
   i::Handle<i::Object> obj = Utils::OpenHandle(this);
   if (obj->IsJSObject()) {
     i::Handle<i::JSObject> js_obj(i::JSObject::cast(*obj));
@@ -2748,14 +2621,12 @@ bool Value::IsNativeError() const {
 
 bool Value::IsBooleanObject() const {
   i::Isolate* isolate = i::Isolate::Current();
-  if (IsDeadCheck(isolate, "v8::Value::IsBooleanObject()")) return false;
   i::Handle<i::Object> obj = Utils::OpenHandle(this);
   return obj->HasSpecificClassOf(isolate->heap()->Boolean_string());
 }
 
 bool Value::IsRegExp() const {
-  if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsRegExp()")) return false;
   i::Handle<i::Object> obj = Utils::OpenHandle(this);
   return obj->IsJSRegExp();
 }
@@ -2768,9 +2639,6 @@ Local<String> Value::ToString() const {
     str = obj;
   } else {
     i::Isolate* isolate = i::Isolate::Current();
-    if (IsDeadCheck(isolate, "v8::Value::ToString()")) {
-      return Local<String>();
-    }
     LOG_API(isolate, "ToString");
     ENTER_V8(isolate);
     EXCEPTION_PREAMBLE(isolate);
@@ -2788,9 +2656,6 @@ Local<String> Value::ToDetailString() const {
     str = obj;
   } else {
     i::Isolate* isolate = i::Isolate::Current();
-    if (IsDeadCheck(isolate, "v8::Value::ToDetailString()")) {
-      return Local<String>();
-    }
     LOG_API(isolate, "ToDetailString");
     ENTER_V8(isolate);
     EXCEPTION_PREAMBLE(isolate);
@@ -2808,9 +2673,6 @@ Local<v8::Object> Value::ToObject() const {
     val = obj;
   } else {
     i::Isolate* isolate = i::Isolate::Current();
-    if (IsDeadCheck(isolate, "v8::Value::ToObject()")) {
-      return Local<v8::Object>();
-    }
     LOG_API(isolate, "ToObject");
     ENTER_V8(isolate);
     EXCEPTION_PREAMBLE(isolate);
@@ -2827,9 +2689,6 @@ Local<Boolean> Value::ToBoolean() const {
     return ToApiHandle<Boolean>(obj);
   } else {
     i::Isolate* isolate = i::Isolate::Current();
-    if (IsDeadCheck(isolate, "v8::Value::ToBoolean()")) {
-      return Local<Boolean>();
-    }
     LOG_API(isolate, "ToBoolean");
     ENTER_V8(isolate);
     i::Handle<i::Object> val =
@@ -2846,9 +2705,6 @@ Local<Number> Value::ToNumber() const {
     num = obj;
   } else {
     i::Isolate* isolate = i::Isolate::Current();
-    if (IsDeadCheck(isolate, "v8::Value::ToNumber()")) {
-      return Local<Number>();
-    }
     LOG_API(isolate, "ToNumber");
     ENTER_V8(isolate);
     EXCEPTION_PREAMBLE(isolate);
@@ -2866,7 +2722,6 @@ Local<Integer> Value::ToInteger() const {
     num = obj;
   } else {
     i::Isolate* isolate = i::Isolate::Current();
-    if (IsDeadCheck(isolate, "v8::Value::ToInteger()")) return Local<Integer>();
     LOG_API(isolate, "ToInteger");
     ENTER_V8(isolate);
     EXCEPTION_PREAMBLE(isolate);
@@ -2886,7 +2741,6 @@ void i::Internals::CheckInitializedImpl(v8::Isolate* external_isolate) {
 
 void External::CheckCast(v8::Value* that) {
-  if (IsDeadCheck(i::Isolate::Current(), "v8::External::Cast()")) return;
   ApiCheck(Utils::OpenHandle(that)->IsExternal(),
            "v8::External::Cast()",
            "Could not convert to external");
@@ -2894,7 +2748,6 @@ void External::CheckCast(v8::Value* that) {
 
 void v8::Object::CheckCast(Value* that) {
-  if (IsDeadCheck(i::Isolate::Current(), "v8::Object::Cast()")) return;
   i::Handle<i::Object> obj = Utils::OpenHandle(that);
   ApiCheck(obj->IsJSObject(),
            "v8::Object::Cast()",
@@ -2903,7 +2756,6 @@ void v8::Object::CheckCast(Value* that) {
 
 void v8::Function::CheckCast(Value* that) {
-  if (IsDeadCheck(i::Isolate::Current(), "v8::Function::Cast()")) return;
   i::Handle<i::Object> obj = Utils::OpenHandle(that);
   ApiCheck(obj->IsJSFunction(),
            "v8::Function::Cast()",
@@ -2912,7 +2764,6 @@ void v8::Function::CheckCast(Value* that) {
 
 void v8::String::CheckCast(v8::Value* that) {
-  if (IsDeadCheck(i::Isolate::Current(), "v8::String::Cast()")) return;
   i::Handle<i::Object> obj = Utils::OpenHandle(that);
   ApiCheck(obj->IsString(),
            "v8::String::Cast()",
@@ -2921,7 +2772,6 @@ void v8::String::CheckCast(v8::Value* that) {
 
 void v8::Symbol::CheckCast(v8::Value* that) {
-  if (IsDeadCheck(i::Isolate::Current(), "v8::Symbol::Cast()")) return;
   i::Handle<i::Object> obj = Utils::OpenHandle(that);
   ApiCheck(obj->IsSymbol(),
            "v8::Symbol::Cast()",
@@ -2930,7 +2780,6 @@ void v8::Symbol::CheckCast(v8::Value* that) {
 
 void v8::Number::CheckCast(v8::Value* that) {
-  if (IsDeadCheck(i::Isolate::Current(), "v8::Number::Cast()")) return;
   i::Handle<i::Object> obj = Utils::OpenHandle(that);
   ApiCheck(obj->IsNumber(),
            "v8::Number::Cast()",
@@ -2939,7 +2788,6 @@ void v8::Number::CheckCast(v8::Value* that) {
 
 void v8::Integer::CheckCast(v8::Value* that) {
-  if (IsDeadCheck(i::Isolate::Current(), "v8::Integer::Cast()")) return;
   i::Handle<i::Object> obj = Utils::OpenHandle(that);
   ApiCheck(obj->IsNumber(),
            "v8::Integer::Cast()",
@@ -2948,7 +2796,6 @@ void v8::Integer::CheckCast(v8::Value* that) {
 
 void v8::Array::CheckCast(Value* that) {
-  if (IsDeadCheck(i::Isolate::Current(), "v8::Array::Cast()")) return;
   i::Handle<i::Object> obj = Utils::OpenHandle(that);
   ApiCheck(obj->IsJSArray(),
            "v8::Array::Cast()",
@@ -2957,7 +2804,6 @@ void v8::Array::CheckCast(Value* that) {
 
 void v8::ArrayBuffer::CheckCast(Value* that) {
-  if (IsDeadCheck(i::Isolate::Current(), "v8::ArrayBuffer::Cast()")) return;
   i::Handle<i::Object> obj = Utils::OpenHandle(that);
   ApiCheck(obj->IsJSArrayBuffer(),
            "v8::ArrayBuffer::Cast()",
@@ -2974,7 +2820,6 @@ void v8::ArrayBufferView::CheckCast(Value* that) {
 
 void v8::TypedArray::CheckCast(Value* that) {
-  if (IsDeadCheck(i::Isolate::Current(), "v8::TypedArray::Cast()")) return;
   i::Handle<i::Object> obj = Utils::OpenHandle(that);
   ApiCheck(obj->IsJSTypedArray(),
            "v8::TypedArray::Cast()",
@@ -2984,8 +2829,6 @@ void v8::TypedArray::CheckCast(Value* that) {
 
 #define CHECK_TYPED_ARRAY_CAST(ApiClass, typeConst)                         \
   void v8::ApiClass::CheckCast(Value* that) {                               \
-    if (IsDeadCheck(i::Isolate::Current(), "v8::" #ApiClass "::Cast()"))    \
-      return;                                                               \
     i::Handle<i::Object> obj = Utils::OpenHandle(that);                     \
     ApiCheck(obj->IsJSTypedArray() &&                                       \
              i::JSTypedArray::cast(*obj)->type() == typeConst,              \
@@ -3009,7 +2852,6 @@ void v8::DataView::CheckCast(Value* that) {
 
 void v8::Date::CheckCast(v8::Value* that) {
   i::Isolate* isolate = i::Isolate::Current();
-  if (IsDeadCheck(isolate, "v8::Date::Cast()")) return;
   i::Handle<i::Object> obj = Utils::OpenHandle(that);
   ApiCheck(obj->HasSpecificClassOf(isolate->heap()->Date_string()),
            "v8::Date::Cast()",
@@ -3019,7 +2861,6 @@ void v8::Date::CheckCast(v8::Value* that) {
 
 void v8::StringObject::CheckCast(v8::Value* that) {
   i::Isolate* isolate = i::Isolate::Current();
-  if (IsDeadCheck(isolate, "v8::StringObject::Cast()")) return;
   i::Handle<i::Object> obj = Utils::OpenHandle(that);
   ApiCheck(obj->HasSpecificClassOf(isolate->heap()->String_string()),
            "v8::StringObject::Cast()",
@@ -3029,7 +2870,6 @@ void v8::StringObject::CheckCast(v8::Value* that) {
 
 void v8::SymbolObject::CheckCast(v8::Value* that) {
   i::Isolate* isolate = i::Isolate::Current();
-  if (IsDeadCheck(isolate, "v8::SymbolObject::Cast()")) return;
   i::Handle<i::Object> obj = Utils::OpenHandle(that);
   ApiCheck(obj->HasSpecificClassOf(isolate->heap()->Symbol_string()),
            "v8::SymbolObject::Cast()",
@@ -3039,7 +2879,6 @@ void v8::SymbolObject::CheckCast(v8::Value* that) {
 
 void v8::NumberObject::CheckCast(v8::Value* that) {
   i::Isolate* isolate = i::Isolate::Current();
-  if (IsDeadCheck(isolate, "v8::NumberObject::Cast()")) return;
   i::Handle<i::Object> obj = Utils::OpenHandle(that);
   ApiCheck(obj->HasSpecificClassOf(isolate->heap()->Number_string()),
            "v8::NumberObject::Cast()",
@@ -3049,7 +2888,6 @@ void v8::NumberObject::CheckCast(v8::Value* that) {
 
 void v8::BooleanObject::CheckCast(v8::Value* that) {
   i::Isolate* isolate = i::Isolate::Current();
-  if (IsDeadCheck(isolate, "v8::BooleanObject::Cast()")) return;
   i::Handle<i::Object> obj = Utils::OpenHandle(that);
   ApiCheck(obj->HasSpecificClassOf(isolate->heap()->Boolean_string()),
            "v8::BooleanObject::Cast()",
@@ -3058,7 +2896,6 @@ void v8::BooleanObject::CheckCast(v8::Value* that) {
 
 void v8::RegExp::CheckCast(v8::Value* that) {
-  if (IsDeadCheck(i::Isolate::Current(), "v8::RegExp::Cast()")) return;
   i::Handle<i::Object> obj = Utils::OpenHandle(that);
   ApiCheck(obj->IsJSRegExp(),
            "v8::RegExp::Cast()",
@@ -3078,9 +2915,6 @@ double Value::NumberValue() const {
     num = obj;
   } else {
     i::Isolate* isolate = i::Isolate::Current();
-    if (IsDeadCheck(isolate, "v8::Value::NumberValue()")) {
-      return i::OS::nan_value();
-    }
     LOG_API(isolate, "NumberValue");
     ENTER_V8(isolate);
     EXCEPTION_PREAMBLE(isolate);
@@ -3098,7 +2932,6 @@ int64_t Value::IntegerValue() const {
     num = obj;
   } else {
     i::Isolate* isolate = i::Isolate::Current();
-    if (IsDeadCheck(isolate, "v8::Value::IntegerValue()")) return 0;
     LOG_API(isolate, "IntegerValue");
     ENTER_V8(isolate);
     EXCEPTION_PREAMBLE(isolate);
@@ -3120,7 +2953,6 @@ Local<Int32> Value::ToInt32() const {
     num = obj;
   } else {
     i::Isolate* isolate = i::Isolate::Current();
-    if (IsDeadCheck(isolate, "v8::Value::ToInt32()")) return Local<Int32>();
     LOG_API(isolate, "ToInt32");
     ENTER_V8(isolate);
     EXCEPTION_PREAMBLE(isolate);
@@ -3138,7 +2970,6 @@ Local<Uint32> Value::ToUint32() const {
     num = obj;
   } else {
     i::Isolate* isolate = i::Isolate::Current();
-    if (IsDeadCheck(isolate, "v8::Value::ToUint32()")) return Local<Uint32>();
     LOG_API(isolate, "ToUInt32");
     ENTER_V8(isolate);
     EXCEPTION_PREAMBLE(isolate);
@@ -3156,7 +2987,6 @@ Local<Uint32> Value::ToArrayIndex() const {
     return Local<Uint32>();
   }
   i::Isolate* isolate = i::Isolate::Current();
-  if (IsDeadCheck(isolate, "v8::Value::ToArrayIndex()")) return Local<Uint32>();
   LOG_API(isolate, "ToArrayIndex");
   ENTER_V8(isolate);
   EXCEPTION_PREAMBLE(isolate);
@@ -3184,7 +3014,6 @@ int32_t Value::Int32Value() const {
     return i::Smi::cast(*obj)->value();
   } else {
     i::Isolate* isolate = i::Isolate::Current();
-    if (IsDeadCheck(isolate, "v8::Value::Int32Value()")) return 0;
     LOG_API(isolate, "Int32Value (slow)");
     ENTER_V8(isolate);
     EXCEPTION_PREAMBLE(isolate);
@@ -3202,9 +3031,8 @@ int32_t Value::Int32Value() const {
 
 bool Value::Equals(Handle<Value> that) const {
   i::Isolate* isolate = i::Isolate::Current();
-  if (IsDeadCheck(isolate, "v8::Value::Equals()")
-      || EmptyCheck("v8::Value::Equals()", this)
-      || EmptyCheck("v8::Value::Equals()", that)) {
+  if (EmptyCheck("v8::Value::Equals()", this) ||
+      EmptyCheck("v8::Value::Equals()", that)) {
     return false;
   }
   LOG_API(isolate, "Equals");
@@ -3229,9 +3057,8 @@ bool Value::Equals(Handle<Value> that) const {
 
 bool Value::StrictEquals(Handle<Value> that) const {
   i::Isolate* isolate = i::Isolate::Current();
-  if (IsDeadCheck(isolate, "v8::Value::StrictEquals()")
-      || EmptyCheck("v8::Value::StrictEquals()", this)
-      || EmptyCheck("v8::Value::StrictEquals()", that)) {
+  if (EmptyCheck("v8::Value::StrictEquals()", this) ||
+      EmptyCheck("v8::Value::StrictEquals()", that)) {
     return false;
  }
   LOG_API(isolate, "StrictEquals");
@@ -3259,13 +3086,25 @@ bool Value::StrictEquals(Handle<Value> that) const {
 }
 
+bool Value::SameValue(Handle<Value> that) const {
+  i::Isolate* isolate = i::Isolate::Current();
+  if (EmptyCheck("v8::Value::SameValue()", this) ||
+      EmptyCheck("v8::Value::SameValue()", that)) {
+    return false;
+  }
+  LOG_API(isolate, "SameValue");
+  i::Handle<i::Object> obj = Utils::OpenHandle(this);
+  i::Handle<i::Object> other = Utils::OpenHandle(*that);
+  return obj->SameValue(*other);
+}
+
 uint32_t Value::Uint32Value() const {
   i::Handle<i::Object> obj = Utils::OpenHandle(this);
   if (obj->IsSmi()) {
     return i::Smi::cast(*obj)->value();
   } else {
     i::Isolate* isolate = i::Isolate::Current();
-    if (IsDeadCheck(isolate, "v8::Value::Uint32Value()")) return 0;
     LOG_API(isolate, "Uint32Value");
     ENTER_V8(isolate);
     EXCEPTION_PREAMBLE(isolate);
@@ -3625,7 +3464,7 @@ bool v8::Object::Has(uint32_t index) {
   i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
   ON_BAILOUT(isolate, "v8::Object::HasProperty()", return false);
   i::Handle<i::JSObject> self = Utils::OpenHandle(this);
-  return self->HasElement(index);
+  return i::JSReceiver::HasElement(self, index);
 }
@@ -3679,8 +3518,8 @@ bool v8::Object::HasOwnProperty(Handle<String> key) {
   i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
   ON_BAILOUT(isolate, "v8::Object::HasOwnProperty()",
              return false);
-  return Utils::OpenHandle(this)->HasLocalProperty(
-      *Utils::OpenHandle(*key));
+  return i::JSReceiver::HasLocalProperty(
+      Utils::OpenHandle(this), Utils::OpenHandle(*key));
 }
@@ -3688,9 +3527,8 @@ bool v8::Object::HasRealNamedProperty(Handle<String> key) {
   i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
   ON_BAILOUT(isolate, "v8::Object::HasRealNamedProperty()",
              return false);
-  return Utils::OpenHandle(this)->HasRealNamedProperty(
-      isolate,
-      *Utils::OpenHandle(*key));
+  return i::JSObject::HasRealNamedProperty(Utils::OpenHandle(this),
+                                           Utils::OpenHandle(*key));
 }
@@ -3698,7 +3536,7 @@ bool v8::Object::HasRealIndexedProperty(uint32_t index) {
   i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
   ON_BAILOUT(isolate, "v8::Object::HasRealIndexedProperty()",
              return false);
-  return Utils::OpenHandle(this)->HasRealElementProperty(isolate, index);
+  return i::JSObject::HasRealElementProperty(Utils::OpenHandle(this), index);
 }
@@ -3708,9 +3546,8 @@ bool v8::Object::HasRealNamedCallbackProperty(Handle<String> key) {
              "v8::Object::HasRealNamedCallbackProperty()",
              return false);
   ENTER_V8(isolate);
-  return Utils::OpenHandle(this)->HasRealNamedCallbackProperty(
-      isolate,
-      *Utils::OpenHandle(*key));
+  return i::JSObject::HasRealNamedCallbackProperty(Utils::OpenHandle(this),
+                                                   Utils::OpenHandle(*key));
 }
@@ -3813,7 +3650,7 @@ Local<v8::Object> v8::Object::Clone() {
   ENTER_V8(isolate);
   i::Handle<i::JSObject> self = Utils::OpenHandle(this);
   EXCEPTION_PREAMBLE(isolate);
-  i::Handle<i::JSObject> result = i::Copy(self);
+  i::Handle<i::JSObject> result = i::JSObject::Copy(self);
   has_pending_exception = result.is_null();
   EXCEPTION_BAILOUT_CHECK(isolate, Local<Object>());
   return Utils::ToLocal(result);
@@ -4113,7 +3950,7 @@ bool v8::Object::IsCallable() {
 }
 
-Local<v8::Value> Object::CallAsFunction(v8::Handle<v8::Object> recv,
+Local<v8::Value> Object::CallAsFunction(v8::Handle<v8::Value> recv,
                                         int argc,
                                         v8::Handle<v8::Value> argv[]) {
   i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
@@ -4141,7 +3978,7 @@ Local<v8::Value> Object::CallAsFunction(v8::Handle<v8::Object> recv,
   }
   EXCEPTION_PREAMBLE(isolate);
   i::Handle<i::Object> returned = i::Execution::Call(
-      isolate, fun, recv_obj, argc, args, &has_pending_exception);
+      isolate, fun, recv_obj, argc, args, &has_pending_exception, true);
   EXCEPTION_BAILOUT_CHECK_DO_CALLBACK(isolate, Local<Value>());
   return Utils::ToLocal(scope.CloseAndEscape(returned));
 }
@@ -4225,7 +4062,7 @@ Local<v8::Object> Function::NewInstance(int argc,
 }
 
-Local<v8::Value> Function::Call(v8::Handle<v8::Object> recv, int argc,
+Local<v8::Value> Function::Call(v8::Handle<v8::Value> recv, int argc,
                                 v8::Handle<v8::Value> argv[]) {
   i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
   ON_BAILOUT(isolate, "v8::Function::Call()", return Local<v8::Value>());
@@ -4242,7 +4079,7 @@ Local<v8::Value> Function::Call(v8::Handle<v8::Object> recv, int argc,
     i::Handle<i::Object>* args = reinterpret_cast<i::Handle<i::Object>*>(argv);
     EXCEPTION_PREAMBLE(isolate);
     i::Handle<i::Object> returned = i::Execution::Call(
-        isolate, fun, recv_obj, argc, args, &has_pending_exception);
+        isolate, fun, recv_obj, argc, args, &has_pending_exception, true);
     EXCEPTION_BAILOUT_CHECK_DO_CALLBACK(isolate, Local<Object>());
     raw_result = *returned;
   }
@@ -4274,6 +4111,29 @@ Handle<Value> Function::GetInferredName() const {
 }
 
+Handle<Value> Function::GetDisplayName() const {
+  i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+  ON_BAILOUT(isolate, "v8::Function::GetDisplayName()",
+             return ToApiHandle<Primitive>(
+                 isolate->factory()->undefined_value()));
+  ENTER_V8(isolate);
+  i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
+  i::Handle<i::String> property_name =
+      isolate->factory()->InternalizeOneByteString(
+          STATIC_ASCII_VECTOR("displayName"));
+  i::LookupResult lookup(isolate);
+  func->LookupRealNamedProperty(*property_name, &lookup);
+  if (lookup.IsFound()) {
+    i::Object* value = lookup.GetLazyValue();
+    if (value && value->IsString()) {
+      i::String* name = i::String::cast(value);
+      if (name->length() > 0) return Utils::ToLocal(i::Handle<i::String>(name));
+    }
+  }
+  return ToApiHandle<Primitive>(isolate->factory()->undefined_value());
+}
+
 ScriptOrigin Function::GetScriptOrigin() const {
   i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
   if (func->shared()->script()->IsScript()) {
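Function::Call() and Object::CallAsFunction() now take the receiver as Handle<Value> instead of Handle<Object>, so a primitive receiver no longer has to be boxed by the embedder. A sketch under that assumption (caller is assumed to hold an active HandleScope and entered context):

    #include <v8.h>

    v8::Handle<v8::Value> CallWithPrimitiveReceiver(v8::Handle<v8::Function> fn) {
      v8::Handle<v8::Value> recv = v8::String::New("receiver");  // a primitive
      v8::Handle<v8::Value> args[] = { v8::Integer::New(42) };
      return fn->Call(recv, 1, args);  // previously required a Handle<Object>
    }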
@@ -4312,12 +4172,20 @@ int Function::GetScriptColumnNumber() const {
 }
 
+bool Function::IsBuiltin() const {
+  i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
+  return func->IsBuiltin();
+}
+
 Handle<Value> Function::GetScriptId() const {
   i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
-  if (!func->shared()->script()->IsScript())
-    return v8::Undefined();
+  i::Isolate* isolate = func->GetIsolate();
+  if (!func->shared()->script()->IsScript()) {
+    return v8::Undefined(reinterpret_cast<v8::Isolate*>(isolate));
+  }
   i::Handle<i::Script> script(i::Script::cast(func->shared()->script()));
-  return Utils::ToLocal(i::Handle<i::Object>(script->id(), func->GetIsolate()));
+  return Utils::ToLocal(i::Handle<i::Object>(script->id(), isolate));
 }
@@ -4331,16 +4199,12 @@ int Function::ScriptId() const {
 
 int String::Length() const {
   i::Handle<i::String> str = Utils::OpenHandle(this);
-  if (IsDeadCheck(str->GetIsolate(), "v8::String::Length()")) return 0;
   return str->length();
 }
 
 bool String::IsOneByte() const {
   i::Handle<i::String> str = Utils::OpenHandle(this);
-  if (IsDeadCheck(str->GetIsolate(), "v8::String::IsOneByte()")) {
-    return false;
-  }
   return str->HasOnlyOneByteChars();
 }
@@ -4456,10 +4320,6 @@ class ContainsOnlyOneByteHelper {
 
 bool String::ContainsOnlyOneByte() const {
   i::Handle<i::String> str = Utils::OpenHandle(this);
-  if (IsDeadCheck(str->GetIsolate(),
-                  "v8::String::ContainsOnlyOneByte()")) {
-    return false;
-  }
   if (str->HasOnlyOneByteChars()) return true;
   ContainsOnlyOneByteHelper helper;
   return helper.Check(*str);
@@ -4663,7 +4523,6 @@ static int Utf8Length(i::String* str, i::Isolate* isolate) {
 
 int String::Utf8Length() const {
   i::Handle<i::String> str = Utils::OpenHandle(this);
   i::Isolate* isolate = str->GetIsolate();
-  if (IsDeadCheck(isolate, "v8::String::Utf8Length()")) return 0;
   return v8::Utf8Length(*str, isolate);
 }
@@ -4849,7 +4708,6 @@ int String::WriteUtf8(char* buffer,
                       int* nchars_ref,
                       int options) const {
   i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
-  if (IsDeadCheck(isolate, "v8::String::WriteUtf8()")) return 0;
   LOG_API(isolate, "String::WriteUtf8");
   ENTER_V8(isolate);
   i::Handle<i::String> str = Utils::OpenHandle(this);
@@ -4894,40 +4752,6 @@ int String::WriteUtf8(char* buffer,
 }
 
-int String::WriteAscii(char* buffer,
-                       int start,
-                       int length,
-                       int options) const {
-  i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
-  if (IsDeadCheck(isolate, "v8::String::WriteAscii()")) return 0;
-  LOG_API(isolate, "String::WriteAscii");
-  ENTER_V8(isolate);
-  ASSERT(start >= 0 && length >= -1);
-  i::Handle<i::String> str = Utils::OpenHandle(this);
-  isolate->string_tracker()->RecordWrite(str);
-  if (options & HINT_MANY_WRITES_EXPECTED) {
-    FlattenString(str);  // Flatten the string for efficiency.
-  }
-
-  int end = length;
-  if ((length == -1) || (length > str->length() - start)) {
-    end = str->length() - start;
-  }
-  if (end < 0) return 0;
-  i::StringCharacterStream write_stream(*str, isolate->write_iterator(), start);
-  int i;
-  for (i = 0; i < end; i++) {
-    char c = static_cast<char>(write_stream.GetNext());
-    if (c == '\0' && !(options & PRESERVE_ASCII_NULL)) c = ' ';
-    buffer[i] = c;
-  }
-  if (!(options & NO_NULL_TERMINATION) && (length == -1 || i < length)) {
-    buffer[i] = '\0';
-  }
-  return i;
-}
-
 template<typename CharType>
 static inline int WriteHelper(const String* string,
                               CharType* buffer,
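String::WriteAscii() is removed above with no in-patch replacement; WriteUtf8() and WriteOneByte() remain and cover the same use cases. A hedged migration sketch (buffer sizes are illustrative, and the availability of WriteOneByte at this exact revision is an assumption based on the surrounding WriteHelper code):

    #include <v8.h>

    // Caller is assumed to hold an active HandleScope.
    void CopyOut(v8::Handle<v8::String> str) {
      char utf8[64];
      str->WriteUtf8(utf8, sizeof(utf8));            // UTF-8 replacement
      uint8_t latin1[64];
      str->WriteOneByte(latin1, 0, sizeof(latin1));  // one-byte (Latin-1) data
    }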
@@ -4935,7 +4759,6 @@ static inline int WriteHelper(const String* string,
                               int length,
                               int options) {
   i::Isolate* isolate = Utils::OpenHandle(string)->GetIsolate();
-  if (IsDeadCheck(isolate, "v8::String::Write()")) return 0;
   LOG_API(isolate, "String::Write");
   ENTER_V8(isolate);
   ASSERT(start >= 0 && length >= -1);
@@ -4977,9 +4800,6 @@ int String::Write(uint16_t* buffer,
 
 bool v8::String::IsExternal() const {
   i::Handle<i::String> str = Utils::OpenHandle(this);
-  if (IsDeadCheck(str->GetIsolate(), "v8::String::IsExternal()")) {
-    return false;
-  }
   EnsureInitializedForIsolate(str->GetIsolate(), "v8::String::IsExternal()");
   return i::StringShape(*str).IsExternalTwoByte();
 }
@@ -4987,9 +4807,6 @@ bool v8::String::IsExternal() const {
 
 bool v8::String::IsExternalAscii() const {
   i::Handle<i::String> str = Utils::OpenHandle(this);
-  if (IsDeadCheck(str->GetIsolate(), "v8::String::IsExternalAscii()")) {
-    return false;
-  }
   return i::StringShape(*str).IsExternalAscii();
 }
@@ -5035,10 +4852,6 @@ void v8::String::VerifyExternalStringResourceBase(
 
 const v8::String::ExternalAsciiStringResource*
 v8::String::GetExternalAsciiStringResource() const {
   i::Handle<i::String> str = Utils::OpenHandle(this);
-  if (IsDeadCheck(str->GetIsolate(),
-                  "v8::String::GetExternalAsciiStringResource()")) {
-    return NULL;
-  }
   if (i::StringShape(*str).IsExternalAscii()) {
     const void* resource =
         i::Handle<i::ExternalAsciiString>::cast(str)->resource();
@@ -5050,8 +4863,6 @@ const v8::String::ExternalAsciiStringResource*
 
 Local<Value> Symbol::Name() const {
-  if (IsDeadCheck(i::Isolate::Current(), "v8::Symbol::Name()"))
-    return Local<Value>();
   i::Handle<i::Symbol> sym = Utils::OpenHandle(this);
   i::Handle<i::Object> name(sym->name(), sym->GetIsolate());
   return Utils::ToLocal(name);
@@ -5059,21 +4870,18 @@ Local<Value> Symbol::Name() const {
 
 double Number::Value() const {
-  if (IsDeadCheck(i::Isolate::Current(), "v8::Number::Value()")) return 0;
   i::Handle<i::Object> obj = Utils::OpenHandle(this);
   return obj->Number();
 }
 
 bool Boolean::Value() const {
-  if (IsDeadCheck(i::Isolate::Current(), "v8::Boolean::Value()")) return false;
   i::Handle<i::Object> obj = Utils::OpenHandle(this);
   return obj->IsTrue();
 }
 
 int64_t Integer::Value() const {
-  if (IsDeadCheck(i::Isolate::Current(), "v8::Integer::Value()")) return 0;
   i::Handle<i::Object> obj = Utils::OpenHandle(this);
   if (obj->IsSmi()) {
     return i::Smi::cast(*obj)->value();
@@ -5084,7 +4892,6 @@ int64_t Integer::Value() const {
 
 int32_t Int32::Value() const {
-  if (IsDeadCheck(i::Isolate::Current(), "v8::Int32::Value()")) return 0;
   i::Handle<i::Object> obj = Utils::OpenHandle(this);
   if (obj->IsSmi()) {
     return i::Smi::cast(*obj)->value();
@@ -5095,7 +4902,6 @@ int32_t Int32::Value() const {
 
 uint32_t Uint32::Value() const {
-  if (IsDeadCheck(i::Isolate::Current(), "v8::Uint32::Value()")) return 0;
   i::Handle<i::Object> obj = Utils::OpenHandle(this);
   if (obj->IsSmi()) {
     return i::Smi::cast(*obj)->value();
@@ -5107,9 +4913,6 @@ uint32_t Uint32::Value() const {
 
 int v8::Object::InternalFieldCount() {
   i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
-  if (IsDeadCheck(obj->GetIsolate(), "v8::Object::InternalFieldCount()")) {
-    return 0;
-  }
   return obj->GetInternalFieldCount();
 }
@@ -5117,10 +4920,9 @@ int v8::Object::InternalFieldCount() {
 
 static bool InternalFieldOK(i::Handle<i::JSObject> obj,
                             int index,
                             const char* location) {
-  return !IsDeadCheck(obj->GetIsolate(), location) &&
-      ApiCheck(index < obj->GetInternalFieldCount(),
-               location,
-               "Internal field out of bounds");
+  return ApiCheck(index < obj->GetInternalFieldCount(),
+                  location,
+                  "Internal field out of bounds");
 }
@@ -5191,11 +4993,6 @@ void v8::V8::SetReturnAddressLocationResolver(
 }
 
-bool v8::V8::SetFunctionEntryHook(FunctionEntryHook entry_hook) {
-  return SetFunctionEntryHook(Isolate::GetCurrent(), entry_hook);
-}
-
 bool v8::V8::SetFunctionEntryHook(Isolate* ext_isolate,
                                   FunctionEntryHook entry_hook) {
   ASSERT(ext_isolate != NULL);
@@ -5256,25 +5053,8 @@ HeapStatistics::HeapStatistics(): total_heap_size_(0),
                                   heap_size_limit_(0) { }
 
-void v8::V8::GetHeapStatistics(HeapStatistics* heap_statistics) {
-  i::Isolate* isolate = i::Isolate::UncheckedCurrent();
-  if (isolate == NULL || !isolate->IsInitialized()) {
-    // Isolate is unitialized thus heap is not configured yet.
-    heap_statistics->total_heap_size_ = 0;
-    heap_statistics->total_heap_size_executable_ = 0;
-    heap_statistics->total_physical_size_ = 0;
-    heap_statistics->used_heap_size_ = 0;
-    heap_statistics->heap_size_limit_ = 0;
-    return;
-  }
-  Isolate* ext_isolate = reinterpret_cast<Isolate*>(isolate);
-  return ext_isolate->GetHeapStatistics(heap_statistics);
-}
-
 void v8::V8::VisitExternalResources(ExternalResourceVisitor* visitor) {
   i::Isolate* isolate = i::Isolate::Current();
-  IsDeadCheck(isolate, "v8::V8::VisitExternalResources");
   isolate->heap()->VisitExternalResources(visitor);
 }
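The static v8::V8::GetHeapStatistics() is removed above; statistics are now queried on a specific isolate via Isolate::GetHeapStatistics(), which remains in api.cc (it appears as hunk context further down). A minimal sketch:

    #include <cstdio>
    #include <v8.h>

    void PrintHeapUsage(v8::Isolate* isolate) {
      v8::HeapStatistics stats;
      isolate->GetHeapStatistics(&stats);  // no global fallback any more
      std::printf("heap: %zu / %zu bytes\n",
                  stats.used_heap_size(), stats.total_heap_size());
    }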
@@ -5298,8 +5078,6 @@ class VisitorAdapter : public i::ObjectVisitor {
 
 void v8::V8::VisitHandlesWithClassIds(PersistentHandleVisitor* visitor) {
   i::Isolate* isolate = i::Isolate::Current();
-  IsDeadCheck(isolate, "v8::V8::VisitHandlesWithClassId");
-
   i::DisallowHeapAllocation no_allocation;
   VisitorAdapter visitor_adapter(visitor);
@@ -5311,8 +5089,6 @@ void v8::V8::VisitHandlesForPartialDependence(
     Isolate* exported_isolate, PersistentHandleVisitor* visitor) {
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(exported_isolate);
   ASSERT(isolate == i::Isolate::Current());
-  IsDeadCheck(isolate, "v8::V8::VisitHandlesForPartialDependence");
-
   i::DisallowHeapAllocation no_allocation;
   VisitorAdapter visitor_adapter(visitor);
@@ -5423,7 +5199,6 @@ Local<Context> v8::Context::New(
     v8::ExtensionConfiguration* extensions,
     v8::Handle<ObjectTemplate> global_template,
     v8::Handle<Value> global_object) {
-  i::Isolate::EnsureDefaultIsolate();
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(external_isolate);
   EnsureInitializedForIsolate(isolate, "v8::Context::New()");
   LOG_API(isolate, "Context::New");
@@ -5438,9 +5213,6 @@ Local<Context> v8::Context::New(
 
 void v8::Context::SetSecurityToken(Handle<Value> token) {
   i::Isolate* isolate = i::Isolate::Current();
-  if (IsDeadCheck(isolate, "v8::Context::SetSecurityToken()")) {
-    return;
-  }
   ENTER_V8(isolate);
   i::Handle<i::Context> env = Utils::OpenHandle(this);
   i::Handle<i::Object> token_handle = Utils::OpenHandle(*token);
@@ -5450,10 +5222,6 @@ void v8::Context::SetSecurityToken(Handle<Value> token) {
 
 void v8::Context::UseDefaultSecurityToken() {
   i::Isolate* isolate = i::Isolate::Current();
-  if (IsDeadCheck(isolate,
-                  "v8::Context::UseDefaultSecurityToken()")) {
-    return;
-  }
   ENTER_V8(isolate);
   i::Handle<i::Context> env = Utils::OpenHandle(this);
   env->set_security_token(env->global_object());
@@ -5462,9 +5230,6 @@ void v8::Context::UseDefaultSecurityToken() {
 
 Handle<Value> v8::Context::GetSecurityToken() {
   i::Isolate* isolate = i::Isolate::Current();
-  if (IsDeadCheck(isolate, "v8::Context::GetSecurityToken()")) {
-    return Handle<Value>();
-  }
   i::Handle<i::Context> env = Utils::OpenHandle(this);
   i::Object* security_token = env->security_token();
   i::Handle<i::Object> token_handle(security_token, isolate);
@@ -5494,67 +5259,42 @@ v8::Local<v8::Context> Context::GetEntered() {
   if (!EnsureInitializedForIsolate(isolate, "v8::Context::GetEntered()")) {
     return Local<Context>();
   }
-  i::Handle<i::Object> last =
-      isolate->handle_scope_implementer()->LastEnteredContext();
-  if (last.is_null()) return Local<Context>();
-  i::Handle<i::Context> context = i::Handle<i::Context>::cast(last);
-  return Utils::ToLocal(context);
+  return reinterpret_cast<Isolate*>(isolate)->GetEnteredContext();
 }
 
 v8::Local<v8::Context> Context::GetCurrent() {
   i::Isolate* isolate = i::Isolate::Current();
-  if (IsDeadCheck(isolate, "v8::Context::GetCurrent()")) {
-    return Local<Context>();
-  }
   return reinterpret_cast<Isolate*>(isolate)->GetCurrentContext();
 }
 
 v8::Local<v8::Context> Context::GetCalling() {
   i::Isolate* isolate = i::Isolate::Current();
-  if (IsDeadCheck(isolate, "v8::Context::GetCalling()")) {
-    return Local<Context>();
-  }
-  i::Handle<i::Object> calling =
-      isolate->GetCallingNativeContext();
-  if (calling.is_null()) return Local<Context>();
-  i::Handle<i::Context> context = i::Handle<i::Context>::cast(calling);
-  return Utils::ToLocal(context);
+  return reinterpret_cast<Isolate*>(isolate)->GetCallingContext();
 }
 
 v8::Local<v8::Object> Context::Global() {
-  i::Isolate* isolate = i::Isolate::Current();
-  if (IsDeadCheck(isolate, "v8::Context::Global()")) {
-    return Local<v8::Object>();
-  }
-  i::Object** ctx = reinterpret_cast<i::Object**>(this);
-  i::Handle<i::Context> context =
-      i::Handle<i::Context>::cast(i::Handle<i::Object>(ctx));
+  i::Handle<i::Context> context = Utils::OpenHandle(this);
+  i::Isolate* isolate = context->GetIsolate();
   i::Handle<i::Object> global(context->global_proxy(), isolate);
   return Utils::ToLocal(i::Handle<i::JSObject>::cast(global));
 }
 
 void Context::DetachGlobal() {
-  i::Isolate* isolate = i::Isolate::Current();
-  if (IsDeadCheck(isolate, "v8::Context::DetachGlobal()")) return;
+  i::Handle<i::Context> context = Utils::OpenHandle(this);
+  i::Isolate* isolate = context->GetIsolate();
   ENTER_V8(isolate);
-  i::Object** ctx = reinterpret_cast<i::Object**>(this);
-  i::Handle<i::Context> context =
-      i::Handle<i::Context>::cast(i::Handle<i::Object>(ctx));
   isolate->bootstrapper()->DetachGlobal(context);
 }
 
 void Context::ReattachGlobal(Handle<Object> global_object) {
-  i::Isolate* isolate = i::Isolate::Current();
-  if (IsDeadCheck(isolate, "v8::Context::ReattachGlobal()")) return;
+  i::Handle<i::Context> context = Utils::OpenHandle(this);
+  i::Isolate* isolate = context->GetIsolate();
  ENTER_V8(isolate);
-  i::Object** ctx = reinterpret_cast<i::Object**>(this);
-  i::Handle<i::Context> context =
-      i::Handle<i::Context>::cast(i::Handle<i::Object>(ctx));
   i::Handle<i::JSGlobalProxy> global_proxy =
       i::Handle<i::JSGlobalProxy>::cast(Utils::OpenHandle(*global_object));
   isolate->bootstrapper()->ReattachGlobal(context, global_proxy);
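Context::GetEntered()/GetCurrent()/GetCalling() now delegate to per-isolate accessors. A sketch of the new spellings (any result may be an empty handle when no context is active on the isolate):

    #include <v8.h>

    void InspectContexts(v8::Isolate* isolate) {
      v8::Local<v8::Context> current = isolate->GetCurrentContext();
      v8::Local<v8::Context> entered = isolate->GetEnteredContext();
      v8::Local<v8::Context> calling = isolate->GetCallingContext();
      if (!current.IsEmpty() && current == entered) {
        // running directly inside the most recently entered context
      }
      (void)calling;
    }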
@@ -5562,44 +5302,23 @@ void Context::ReattachGlobal(Handle<Object> global_object) {
 
 void Context::AllowCodeGenerationFromStrings(bool allow) {
-  i::Isolate* isolate = i::Isolate::Current();
-  if (IsDeadCheck(isolate, "v8::Context::AllowCodeGenerationFromStrings()")) {
-    return;
-  }
+  i::Handle<i::Context> context = Utils::OpenHandle(this);
+  i::Isolate* isolate = context->GetIsolate();
   ENTER_V8(isolate);
-  i::Object** ctx = reinterpret_cast<i::Object**>(this);
-  i::Handle<i::Context> context =
-      i::Handle<i::Context>::cast(i::Handle<i::Object>(ctx));
   context->set_allow_code_gen_from_strings(
       allow ? isolate->heap()->true_value() : isolate->heap()->false_value());
 }
 
 bool Context::IsCodeGenerationFromStringsAllowed() {
-  i::Isolate* isolate = i::Isolate::Current();
-  if (IsDeadCheck(isolate,
-                  "v8::Context::IsCodeGenerationFromStringsAllowed()")) {
-    return false;
-  }
-  ENTER_V8(isolate);
-  i::Object** ctx = reinterpret_cast<i::Object**>(this);
-  i::Handle<i::Context> context =
-      i::Handle<i::Context>::cast(i::Handle<i::Object>(ctx));
+  i::Handle<i::Context> context = Utils::OpenHandle(this);
   return !context->allow_code_gen_from_strings()->IsFalse();
 }
 
 void Context::SetErrorMessageForCodeGenerationFromStrings(
     Handle<String> error) {
-  i::Isolate* isolate = i::Isolate::Current();
-  if (IsDeadCheck(isolate,
-      "v8::Context::SetErrorMessageForCodeGenerationFromStrings()")) {
-    return;
-  }
-  ENTER_V8(isolate);
-  i::Object** ctx = reinterpret_cast<i::Object**>(this);
-  i::Handle<i::Context> context =
-      i::Handle<i::Context>::cast(i::Handle<i::Object>(ctx));
+  i::Handle<i::Context> context = Utils::OpenHandle(this);
   i::Handle<i::String> error_handle = Utils::OpenHandle(*error);
   context->set_error_message_for_code_gen_from_strings(*error_handle);
 }
@@ -5655,7 +5374,6 @@ Local<External> v8::External::New(void* value) {
 
 void* External::Value() const {
-  if (IsDeadCheck(i::Isolate::Current(), "v8::External::Value()")) return NULL;
   return ExternalValue(*Utils::OpenHandle(this));
 }
@@ -5848,7 +5566,6 @@ Local<String> v8::String::NewExternal(
 
 bool v8::String::MakeExternal(v8::String::ExternalStringResource* resource) {
   i::Handle<i::String> obj = Utils::OpenHandle(this);
   i::Isolate* isolate = obj->GetIsolate();
-  if (IsDeadCheck(isolate, "v8::String::MakeExternal()")) return false;
   if (i::StringShape(*obj).IsExternalTwoByte()) {
     return false;  // Already an external string.
   }
@@ -5899,7 +5616,6 @@ bool v8::String::MakeExternal(
     v8::String::ExternalAsciiStringResource* resource) {
   i::Handle<i::String> obj = Utils::OpenHandle(this);
   i::Isolate* isolate = obj->GetIsolate();
-  if (IsDeadCheck(isolate, "v8::String::MakeExternal()")) return false;
   if (i::StringShape(*obj).IsExternalTwoByte()) {
     return false;  // Already an external string.
   }
@@ -5937,7 +5653,12 @@ bool v8::String::CanMakeExternal() {
   if (!internal::FLAG_clever_optimizations) return false;
   i::Handle<i::String> obj = Utils::OpenHandle(this);
   i::Isolate* isolate = obj->GetIsolate();
-  if (IsDeadCheck(isolate, "v8::String::CanMakeExternal()")) return false;
+
+  // TODO(yangguo): Externalizing sliced/cons strings allocates.
+  //                This rule can be removed when all code that can
+  //                trigger an access check is handlified and therefore
+  //                GC safe.
+  if (isolate->heap()->old_pointer_space()->Contains(*obj)) return false;
+
   if (isolate->string_tracker()->IsFreshUnusedString(obj)) return false;
   int size = obj->Size();  // Byte size of the original string.
   if (size < i::ExternalString::kShortSize) return false;
@@ -5970,7 +5691,6 @@ Local<v8::Value> v8::NumberObject::New(double value) {
 
 double v8::NumberObject::ValueOf() const {
   i::Isolate* isolate = i::Isolate::Current();
-  if (IsDeadCheck(isolate, "v8::NumberObject::NumberValue()")) return 0;
   LOG_API(isolate, "NumberObject::NumberValue");
   i::Handle<i::Object> obj = Utils::OpenHandle(this);
   i::Handle<i::JSValue> jsvalue = i::Handle<i::JSValue>::cast(obj);
@@ -5994,7 +5714,6 @@ Local<v8::Value> v8::BooleanObject::New(bool value) {
 
 bool v8::BooleanObject::ValueOf() const {
   i::Isolate* isolate = i::Isolate::Current();
-  if (IsDeadCheck(isolate, "v8::BooleanObject::BooleanValue()")) return 0;
   LOG_API(isolate, "BooleanObject::BooleanValue");
   i::Handle<i::Object> obj = Utils::OpenHandle(this);
   i::Handle<i::JSValue> jsvalue = i::Handle<i::JSValue>::cast(obj);
@@ -6015,9 +5734,6 @@ Local<v8::Value> v8::StringObject::New(Handle<String> value) {
 
 Local<v8::String> v8::StringObject::ValueOf() const {
   i::Isolate* isolate = i::Isolate::Current();
-  if (IsDeadCheck(isolate, "v8::StringObject::StringValue()")) {
-    return Local<v8::String>();
-  }
   LOG_API(isolate, "StringObject::StringValue");
   i::Handle<i::Object> obj = Utils::OpenHandle(this);
   i::Handle<i::JSValue> jsvalue = i::Handle<i::JSValue>::cast(obj);
@@ -6039,8 +5755,6 @@ Local<v8::Value> v8::SymbolObject::New(Isolate* isolate, Handle<Symbol> value) {
 
 Local<v8::Symbol> v8::SymbolObject::ValueOf() const {
   i::Isolate* isolate = i::Isolate::Current();
-  if (IsDeadCheck(isolate, "v8::SymbolObject::SymbolValue()"))
-    return Local<v8::Symbol>();
   LOG_API(isolate, "SymbolObject::SymbolValue");
   i::Handle<i::Object> obj = Utils::OpenHandle(this);
   i::Handle<i::JSValue> jsvalue = i::Handle<i::JSValue>::cast(obj);
@@ -6068,7 +5782,6 @@ Local<v8::Value> v8::Date::New(double time) {
 
 double v8::Date::ValueOf() const {
   i::Isolate* isolate = i::Isolate::Current();
-  if (IsDeadCheck(isolate, "v8::Date::NumberValue()")) return 0;
   LOG_API(isolate, "Date::NumberValue");
   i::Handle<i::Object> obj = Utils::OpenHandle(this);
   i::Handle<i::JSDate> jsdate = i::Handle<i::JSDate>::cast(obj);
@@ -6142,10 +5855,6 @@ Local<v8::RegExp> v8::RegExp::New(Handle<String> pattern,
 
 Local<v8::String> v8::RegExp::GetSource() const {
-  i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
-  if (IsDeadCheck(isolate, "v8::RegExp::GetSource()")) {
-    return Local<v8::String>();
-  }
   i::Handle<i::JSRegExp> obj = Utils::OpenHandle(this);
   return Utils::ToLocal(i::Handle<i::String>(obj->Pattern()));
 }
@@ -6162,9 +5871,6 @@ REGEXP_FLAG_ASSERT_EQ(kMultiline, MULTILINE);
 #undef REGEXP_FLAG_ASSERT_EQ
 
 v8::RegExp::Flags v8::RegExp::GetFlags() const {
-  if (IsDeadCheck(i::Isolate::Current(), "v8::RegExp::GetFlags()")) {
-    return v8::RegExp::kNone;
-  }
   i::Handle<i::JSRegExp> obj = Utils::OpenHandle(this);
   return static_cast<RegExp::Flags>(obj->GetFlags().value());
 }
@@ -6185,8 +5891,6 @@ Local<v8::Array> v8::Array::New(int length) {
 
 uint32_t v8::Array::Length() const {
-  i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
-  if (IsDeadCheck(isolate, "v8::Array::Length()")) return 0;
   i::Handle<i::JSArray> obj = Utils::OpenHandle(this);
   i::Object* length = obj->length();
   if (length->IsSmi()) {
@@ -6212,7 +5916,7 @@ Local<Object> Array::CloneElementAt(uint32_t index) {
   i::Handle<i::JSObject> paragon_handle(i::JSObject::cast(paragon));
   EXCEPTION_PREAMBLE(isolate);
   ENTER_V8(isolate);
-  i::Handle<i::JSObject> result = i::Copy(paragon_handle);
+  i::Handle<i::JSObject> result = i::JSObject::Copy(paragon_handle);
   has_pending_exception = result.is_null();
   EXCEPTION_BAILOUT_CHECK(isolate, Local<Object>());
   return Utils::ToLocal(result);
@@ -6264,8 +5968,6 @@ void v8::ArrayBuffer::Neuter() {
 
 size_t v8::ArrayBuffer::ByteLength() const {
-  i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
-  if (IsDeadCheck(isolate, "v8::ArrayBuffer::ByteLength()")) return 0;
   i::Handle<i::JSArrayBuffer> obj = Utils::OpenHandle(this);
   return static_cast<size_t>(obj->byte_length()->Number());
 }
@@ -6315,18 +6017,7 @@ size_t v8::ArrayBufferView::ByteLength() {
 }
 
-void* v8::ArrayBufferView::BaseAddress() {
-  i::Handle<i::JSArrayBufferView> obj = Utils::OpenHandle(this);
-  i::Handle<i::JSArrayBuffer> buffer(i::JSArrayBuffer::cast(obj->buffer()));
-  void* buffer_data = buffer->backing_store();
-  size_t byte_offset = static_cast<size_t>(obj->byte_offset()->Number());
-  return static_cast<uint8_t*>(buffer_data) + byte_offset;
-}
-
 size_t v8::TypedArray::Length() {
-  i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
-  if (IsDeadCheck(isolate, "v8::TypedArray::Length()")) return 0;
   i::Handle<i::JSTypedArray> obj = Utils::OpenHandle(this);
   return static_cast<size_t>(obj->length()->Number());
 }
@@ -6584,14 +6275,12 @@ void V8::SetCaptureStackTraceForUncaughtExceptions(
 
 void V8::SetCounterFunction(CounterLookupCallback callback) {
   i::Isolate* isolate = EnterIsolateIfNeeded();
-  if (IsDeadCheck(isolate, "v8::V8::SetCounterFunction()")) return;
   isolate->stats_table()->SetCounterFunction(callback);
 }
 
 void V8::SetCreateHistogramFunction(CreateHistogramCallback callback) {
   i::Isolate* isolate = EnterIsolateIfNeeded();
-  if (IsDeadCheck(isolate, "v8::V8::SetCreateHistogramFunction()")) return;
   isolate->stats_table()->SetCreateHistogramFunction(callback);
   isolate->InitializeLoggingAndCounters();
   isolate->counters()->ResetHistograms();
@@ -6600,7 +6289,6 @@ void V8::SetCreateHistogramFunction(CreateHistogramCallback callback) {
 
 void V8::SetAddHistogramSampleFunction(AddHistogramSampleCallback callback) {
   i::Isolate* isolate = EnterIsolateIfNeeded();
-  if (IsDeadCheck(isolate, "v8::V8::SetAddHistogramSampleFunction()")) return;
   isolate->stats_table()->
       SetAddHistogramSampleFunction(callback);
 }
@@ -6608,9 +6296,6 @@ void V8::SetAddHistogramSampleFunction(AddHistogramSampleCallback callback) {
 
 void V8::SetFailedAccessCheckCallbackFunction(
     FailedAccessCheckCallback callback) {
   i::Isolate* isolate = i::Isolate::Current();
-  if (IsDeadCheck(isolate, "v8::V8::SetFailedAccessCheckCallbackFunction()")) {
-    return;
-  }
   isolate->SetFailedAccessCheckCallback(callback);
 }
@@ -6624,8 +6309,7 @@ intptr_t Isolate::AdjustAmountOfExternalAllocatedMemory(
 
 intptr_t V8::AdjustAmountOfExternalAllocatedMemory(intptr_t change_in_bytes) {
   i::Isolate* isolate = i::Isolate::UncheckedCurrent();
-  if (isolate == NULL || !isolate->IsInitialized() ||
-      IsDeadCheck(isolate, "v8::V8::AdjustAmountOfExternalAllocatedMemory()")) {
+  if (isolate == NULL || !isolate->IsInitialized()) {
     return 0;
   }
   Isolate* isolate_ext = reinterpret_cast<Isolate*>(isolate);
@@ -6647,9 +6331,15 @@ CpuProfiler* Isolate::GetCpuProfiler() {
 }
 
+bool Isolate::InContext() {
+  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+  return isolate->context() != NULL;
+}
+
 v8::Local<v8::Context> Isolate::GetCurrentContext() {
-  i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(this);
-  i::Context* context = internal_isolate->context();
+  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+  i::Context* context = isolate->context();
   if (context == NULL) return Local<Context>();
   i::Context* native_context = context->global_object()->native_context();
   if (native_context == NULL) return Local<Context>();
@@ -6657,73 +6347,119 @@ v8::Local<v8::Context> Isolate::GetCurrentContext() {
 }
 
-void Isolate::SetObjectGroupId(const Persistent<Value>& object,
-                               UniqueId id) {
+v8::Local<v8::Context> Isolate::GetCallingContext() {
+  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+  i::Handle<i::Object> calling = isolate->GetCallingNativeContext();
+  if (calling.is_null()) return Local<Context>();
+  return Utils::ToLocal(i::Handle<i::Context>::cast(calling));
+}
+
+v8::Local<v8::Context> Isolate::GetEnteredContext() {
+  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+  i::Handle<i::Object> last =
+      isolate->handle_scope_implementer()->LastEnteredContext();
+  if (last.is_null()) return Local<Context>();
+  return Utils::ToLocal(i::Handle<i::Context>::cast(last));
+}
+
+v8::Local<Value> Isolate::ThrowException(v8::Local<v8::Value> value) {
+  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+  ENTER_V8(isolate);
+  // If we're passed an empty handle, we throw an undefined exception
+  // to deal more gracefully with out of memory situations.
+  if (value.IsEmpty()) {
+    isolate->ScheduleThrow(isolate->heap()->undefined_value());
+  } else {
+    isolate->ScheduleThrow(*Utils::OpenHandle(*value));
+  }
+  return v8::Undefined(reinterpret_cast<v8::Isolate*>(isolate));
+}
+
+void Isolate::SetObjectGroupId(internal::Object** object, UniqueId id) {
   i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(this);
   internal_isolate->global_handles()->SetObjectGroupId(
-      Utils::OpenPersistent(object).location(),
+      v8::internal::Handle<v8::internal::Object>(object).location(),
      id);
 }
 
-void Isolate::SetReferenceFromGroup(UniqueId id,
-                                    const Persistent<Value>& object) {
+void Isolate::SetReferenceFromGroup(UniqueId id, internal::Object** object) {
   i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(this);
   internal_isolate->global_handles()->SetReferenceFromGroup(
       id,
-      Utils::OpenPersistent(object).location());
+      v8::internal::Handle<v8::internal::Object>(object).location());
 }
 
-void Isolate::SetReference(const Persistent<Object>& parent,
-                           const Persistent<Value>& child) {
+void Isolate::SetReference(internal::Object** parent,
+                           internal::Object** child) {
   i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(this);
-  i::Object** parent_location = Utils::OpenPersistent(parent).location();
+  i::Object** parent_location =
+      v8::internal::Handle<v8::internal::Object>(parent).location();
   internal_isolate->global_handles()->SetReference(
       reinterpret_cast<i::HeapObject**>(parent_location),
-      Utils::OpenPersistent(child).location());
+      v8::internal::Handle<v8::internal::Object>(child).location());
 }
 
-void V8::SetGlobalGCPrologueCallback(GCCallback callback) {
-  i::Isolate* isolate = i::Isolate::Current();
-  if (IsDeadCheck(isolate, "v8::V8::SetGlobalGCPrologueCallback()")) return;
-  isolate->heap()->SetGlobalGCPrologueCallback(callback);
+void Isolate::AddGCPrologueCallback(GCPrologueCallback callback,
+                                    GCType gc_type) {
+  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+  isolate->heap()->AddGCPrologueCallback(callback, gc_type);
 }
 
-void V8::SetGlobalGCEpilogueCallback(GCCallback callback) {
-  i::Isolate* isolate = i::Isolate::Current();
-  if (IsDeadCheck(isolate, "v8::V8::SetGlobalGCEpilogueCallback()")) return;
-  isolate->heap()->SetGlobalGCEpilogueCallback(callback);
+void Isolate::RemoveGCPrologueCallback(GCPrologueCallback callback) {
+  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+  isolate->heap()->RemoveGCPrologueCallback(callback);
+}
+
+void Isolate::AddGCEpilogueCallback(GCEpilogueCallback callback,
+                                    GCType gc_type) {
+  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+  isolate->heap()->AddGCEpilogueCallback(callback, gc_type);
+}
+
+void Isolate::RemoveGCEpilogueCallback(GCEpilogueCallback callback) {
+  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+  isolate->heap()->RemoveGCEpilogueCallback(callback);
 }
 
 void V8::AddGCPrologueCallback(GCPrologueCallback callback, GCType gc_type) {
   i::Isolate* isolate = i::Isolate::Current();
-  if (IsDeadCheck(isolate, "v8::V8::AddGCPrologueCallback()")) return;
-  isolate->heap()->AddGCPrologueCallback(callback, gc_type);
+  isolate->heap()->AddGCPrologueCallback(
      reinterpret_cast<v8::Isolate::GCPrologueCallback>(callback),
+      gc_type,
+      false);
 }
 
 void V8::RemoveGCPrologueCallback(GCPrologueCallback callback) {
   i::Isolate* isolate = i::Isolate::Current();
-  if (IsDeadCheck(isolate, "v8::V8::RemoveGCPrologueCallback()")) return;
-  isolate->heap()->RemoveGCPrologueCallback(callback);
+  isolate->heap()->RemoveGCPrologueCallback(
+      reinterpret_cast<v8::Isolate::GCPrologueCallback>(callback));
 }
 
 void V8::AddGCEpilogueCallback(GCEpilogueCallback callback, GCType gc_type) {
   i::Isolate* isolate = i::Isolate::Current();
-  if (IsDeadCheck(isolate, "v8::V8::AddGCEpilogueCallback()")) return;
-  isolate->heap()->AddGCEpilogueCallback(callback, gc_type);
+  isolate->heap()->AddGCEpilogueCallback(
+      reinterpret_cast<v8::Isolate::GCEpilogueCallback>(callback),
+      gc_type,
+      false);
 }
 
 void V8::RemoveGCEpilogueCallback(GCEpilogueCallback callback) {
   i::Isolate* isolate = i::Isolate::Current();
-  if (IsDeadCheck(isolate, "v8::V8::RemoveGCEpilogueCallback()")) return;
-  isolate->heap()->RemoveGCEpilogueCallback(callback);
+  isolate->heap()->RemoveGCEpilogueCallback(
      reinterpret_cast<v8::Isolate::GCEpilogueCallback>(callback));
 }
@@ -6731,7 +6467,6 @@ void V8::AddMemoryAllocationCallback(MemoryAllocationCallback callback,
                                      ObjectSpace space,
                                      AllocationAction action) {
   i::Isolate* isolate = i::Isolate::Current();
-  if (IsDeadCheck(isolate, "v8::V8::AddMemoryAllocationCallback()")) return;
   isolate->memory_allocator()->AddMemoryAllocationCallback(
       callback, space, action);
 }
@@ -6739,7 +6474,6 @@ void V8::AddMemoryAllocationCallback(MemoryAllocationCallback callback,
 
 void V8::RemoveMemoryAllocationCallback(MemoryAllocationCallback callback) {
   i::Isolate* isolate = i::Isolate::Current();
-  if (IsDeadCheck(isolate, "v8::V8::RemoveMemoryAllocationCallback()")) return;
   isolate->memory_allocator()->RemoveMemoryAllocationCallback(
       callback);
 }
@@ -6747,17 +6481,11 @@ void V8::RemoveMemoryAllocationCallback(MemoryAllocationCallback callback) {
 
 void V8::AddCallCompletedCallback(CallCompletedCallback callback) {
   if (callback == NULL) return;
-  i::Isolate::EnsureDefaultIsolate();
-  i::Isolate* isolate = i::Isolate::Current();
-  if (IsDeadCheck(isolate, "v8::V8::AddLeaveScriptCallback()")) return;
   i::V8::AddCallCompletedCallback(callback);
 }
 
 void V8::RemoveCallCompletedCallback(CallCompletedCallback callback) {
-  i::Isolate::EnsureDefaultIsolate();
-  i::Isolate* isolate = i::Isolate::Current();
-  if (IsDeadCheck(isolate, "v8::V8::RemoveLeaveScriptCallback()")) return;
   i::V8::RemoveCallCompletedCallback(callback);
 }
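GC callbacks are likewise now registered per isolate, with the isolate passed to the callback; the old V8::* entry points above forward via a cast. A sketch against the new Isolate-level API (the callback body is a placeholder):

    #include <v8.h>

    static void OnGCPrologue(v8::Isolate* isolate, v8::GCType type,
                             v8::GCCallbackFlags flags) {
      // e.g. sample embedder counters before each mark-sweep
      (void)isolate; (void)type; (void)flags;
    }

    void InstallGCHooks(v8::Isolate* isolate) {
      isolate->AddGCPrologueCallback(OnGCPrologue, v8::kGCTypeMarkSweepCompact);
      // ... later ...
      isolate->RemoveGCPrologueCallback(OnGCPrologue);
    }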
@@ -6843,7 +6571,6 @@ void Isolate::GetHeapStatistics(HeapStatistics* heap_statistics) {
 
 String::Utf8Value::Utf8Value(v8::Handle<v8::Value> obj)
     : str_(NULL), length_(0) {
   i::Isolate* isolate = i::Isolate::Current();
-  if (IsDeadCheck(isolate, "v8::String::Utf8Value::Utf8Value()")) return;
   if (obj.IsEmpty()) return;
   ENTER_V8(isolate);
   i::HandleScope scope(isolate);
@@ -6865,7 +6592,6 @@ String::Utf8Value::~Utf8Value() {
 
 String::AsciiValue::AsciiValue(v8::Handle<v8::Value> obj)
     : str_(NULL), length_(0) {
   i::Isolate* isolate = i::Isolate::Current();
-  if (IsDeadCheck(isolate, "v8::String::AsciiValue::AsciiValue()")) return;
   if (obj.IsEmpty()) return;
   ENTER_V8(isolate);
   i::HandleScope scope(isolate);
@@ -6887,7 +6613,6 @@ String::AsciiValue::~AsciiValue() {
 
 String::Value::Value(v8::Handle<v8::Value> obj)
     : str_(NULL), length_(0) {
   i::Isolate* isolate = i::Isolate::Current();
-  if (IsDeadCheck(isolate, "v8::String::Value::Value()")) return;
   if (obj.IsEmpty()) return;
   ENTER_V8(isolate);
   i::HandleScope scope(isolate);
@@ -7062,6 +6787,16 @@ void Debug::SetMessageHandler2(v8::Debug::MessageHandler2 handler) {
 }
 
+void Debug::SendCommand(Isolate* isolate,
+                        const uint16_t* command,
+                        int length,
+                        ClientData* client_data) {
+  i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
+  internal_isolate->debugger()->ProcessCommand(
+      i::Vector<const uint16_t>(command, length), client_data);
+}
+
 void Debug::SendCommand(const uint16_t* command, int length,
                         ClientData* client_data,
                         Isolate* isolate) {
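The Debug::SendCommand() overload added above takes the target isolate first; the trailing-isolate overload below it is kept for source compatibility. A minimal forwarding sketch:

    #include <v8-debug.h>

    void ForwardDebugCommand(v8::Isolate* isolate,
                             const uint16_t* command, int length) {
      v8::Debug::SendCommand(isolate, command, length, NULL);
    }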
@@ -7187,7 +6922,6 @@ void Debug::SetLiveEditEnabled(bool enable, Isolate* isolate) {
 
 Handle<String> CpuProfileNode::GetFunctionName() const {
   i::Isolate* isolate = i::Isolate::Current();
-  IsDeadCheck(isolate, "v8::CpuProfileNode::GetFunctionName");
   const i::ProfileNode* node = reinterpret_cast<const i::ProfileNode*>(this);
   const i::CodeEntry* entry = node->entry();
   if (!entry->has_name_prefix()) {
@@ -7210,7 +6944,6 @@ int CpuProfileNode::GetScriptId() const {
 
 Handle<String> CpuProfileNode::GetScriptResourceName() const {
   i::Isolate* isolate = i::Isolate::Current();
-  IsDeadCheck(isolate, "v8::CpuProfileNode::GetScriptResourceName");
   const i::ProfileNode* node = reinterpret_cast<const i::ProfileNode*>(this);
   return ToApiHandle<String>(isolate->factory()->InternalizeUtf8String(
       node->entry()->resource_name()));
@@ -7222,16 +6955,15 @@ int CpuProfileNode::GetLineNumber() const {
 }
 
-const char* CpuProfileNode::GetBailoutReason() const {
-  const i::ProfileNode* node = reinterpret_cast<const i::ProfileNode*>(this);
-  return node->entry()->bailout_reason();
+int CpuProfileNode::GetColumnNumber() const {
+  return reinterpret_cast<const i::ProfileNode*>(this)->
+      entry()->column_number();
 }
 
-double CpuProfileNode::GetSelfSamplesCount() const {
-  i::Isolate* isolate = i::Isolate::Current();
-  IsDeadCheck(isolate, "v8::CpuProfileNode::GetSelfSamplesCount");
-  return reinterpret_cast<const i::ProfileNode*>(this)->self_ticks();
+const char* CpuProfileNode::GetBailoutReason() const {
+  const i::ProfileNode* node = reinterpret_cast<const i::ProfileNode*>(this);
+  return node->entry()->bailout_reason();
 }
@@ -7264,7 +6996,6 @@ const CpuProfileNode* CpuProfileNode::GetChild(int index) const {
 
 void CpuProfile::Delete() {
   i::Isolate* isolate = i::Isolate::Current();
-  IsDeadCheck(isolate, "v8::CpuProfile::Delete");
   i::CpuProfiler* profiler = isolate->cpu_profiler();
   ASSERT(profiler != NULL);
   profiler->DeleteProfile(reinterpret_cast<i::CpuProfile*>(this));
@@ -7282,7 +7013,6 @@ unsigned CpuProfile::GetUid() const {
 
 Handle<String> CpuProfile::GetTitle() const {
   i::Isolate* isolate = i::Isolate::Current();
-  IsDeadCheck(isolate, "v8::CpuProfile::GetTitle");
   const i::CpuProfile* profile = reinterpret_cast<const i::CpuProfile*>(this);
   return ToApiHandle<String>(isolate->factory()->InternalizeUtf8String(
       profile->title()));
@@ -7374,15 +7104,12 @@ static i::HeapGraphEdge* ToInternal(const HeapGraphEdge* edge) {
 
 HeapGraphEdge::Type HeapGraphEdge::GetType() const {
-  i::Isolate* isolate = i::Isolate::Current();
-  IsDeadCheck(isolate, "v8::HeapGraphEdge::GetType");
   return static_cast<HeapGraphEdge::Type>(ToInternal(this)->type());
 }
 
 Handle<Value> HeapGraphEdge::GetName() const {
   i::Isolate* isolate = i::Isolate::Current();
-  IsDeadCheck(isolate, "v8::HeapGraphEdge::GetName");
   i::HeapGraphEdge* edge = ToInternal(this);
   switch (edge->type()) {
     case i::HeapGraphEdge::kContextVariable:
@@ -7398,21 +7125,17 @@ Handle<Value> HeapGraphEdge::GetName() const {
           isolate->factory()->NewNumberFromInt(edge->index()));
     default: UNREACHABLE();
   }
-  return v8::Undefined();
+  return v8::Undefined(reinterpret_cast<v8::Isolate*>(isolate));
 }
 
 const HeapGraphNode* HeapGraphEdge::GetFromNode() const {
-  i::Isolate* isolate = i::Isolate::Current();
-  IsDeadCheck(isolate, "v8::HeapGraphEdge::GetFromNode");
   const i::HeapEntry* from = ToInternal(this)->from();
   return reinterpret_cast<const HeapGraphNode*>(from);
 }
 
 const HeapGraphNode* HeapGraphEdge::GetToNode() const {
-  i::Isolate* isolate = i::Isolate::Current();
-  IsDeadCheck(isolate, "v8::HeapGraphEdge::GetToNode");
   const i::HeapEntry* to = ToInternal(this)->to();
   return reinterpret_cast<const HeapGraphNode*>(to);
 }
@@ -7425,44 +7148,33 @@ static i::HeapEntry* ToInternal(const HeapGraphNode* entry) {
 
 HeapGraphNode::Type HeapGraphNode::GetType() const {
-  i::Isolate* isolate = i::Isolate::Current();
-  IsDeadCheck(isolate, "v8::HeapGraphNode::GetType");
   return static_cast<HeapGraphNode::Type>(ToInternal(this)->type());
 }
 
 Handle<String> HeapGraphNode::GetName() const {
   i::Isolate* isolate = i::Isolate::Current();
-  IsDeadCheck(isolate, "v8::HeapGraphNode::GetName");
   return ToApiHandle<String>(
       isolate->factory()->InternalizeUtf8String(ToInternal(this)->name()));
 }
 
 SnapshotObjectId HeapGraphNode::GetId() const {
-  i::Isolate* isolate = i::Isolate::Current();
-  IsDeadCheck(isolate, "v8::HeapGraphNode::GetId");
   return ToInternal(this)->id();
 }
 
 int HeapGraphNode::GetSelfSize() const {
-  i::Isolate* isolate = i::Isolate::Current();
-  IsDeadCheck(isolate, "v8::HeapGraphNode::GetSelfSize");
   return ToInternal(this)->self_size();
 }
 
 int HeapGraphNode::GetChildrenCount() const {
-  i::Isolate* isolate = i::Isolate::Current();
-  IsDeadCheck(isolate, "v8::HeapSnapshot::GetChildrenCount");
   return ToInternal(this)->children().length();
 }
 
 const HeapGraphEdge* HeapGraphNode::GetChild(int index) const {
-  i::Isolate* isolate = i::Isolate::Current();
-  IsDeadCheck(isolate, "v8::HeapSnapshot::GetChild");
   return reinterpret_cast<const HeapGraphEdge*>(
       ToInternal(this)->children()[index]);
 }
@@ -7470,7 +7182,6 @@ const HeapGraphEdge* HeapGraphNode::GetChild(int index) const {
 
 v8::Handle<v8::Value> HeapGraphNode::GetHeapValue() const {
   i::Isolate* isolate = i::Isolate::Current();
-  IsDeadCheck(isolate, "v8::HeapGraphNode::GetHeapValue");
   i::Handle<i::HeapObject> object = ToInternal(this)->GetHeapObject();
   return !object.is_null() ?
       ToApiHandle<Value>(object) :
@@ -7486,7 +7197,6 @@ static i::HeapSnapshot* ToInternal(const HeapSnapshot* snapshot) {
 
 void HeapSnapshot::Delete() {
   i::Isolate* isolate = i::Isolate::Current();
-  IsDeadCheck(isolate, "v8::HeapSnapshot::Delete");
   if (isolate->heap_profiler()->GetSnapshotsCount() > 1) {
     ToInternal(this)->Delete();
   } else {
@@ -7497,61 +7207,46 @@ void HeapSnapshot::Delete() {
 
 unsigned HeapSnapshot::GetUid() const {
-  i::Isolate* isolate = i::Isolate::Current();
-  IsDeadCheck(isolate, "v8::HeapSnapshot::GetUid");
   return ToInternal(this)->uid();
 }
 
 Handle<String> HeapSnapshot::GetTitle() const {
   i::Isolate* isolate = i::Isolate::Current();
-  IsDeadCheck(isolate, "v8::HeapSnapshot::GetTitle");
   return ToApiHandle<String>(
       isolate->factory()->InternalizeUtf8String(ToInternal(this)->title()));
 }
 
 const HeapGraphNode* HeapSnapshot::GetRoot() const {
-  i::Isolate* isolate = i::Isolate::Current();
-  IsDeadCheck(isolate, "v8::HeapSnapshot::GetHead");
   return reinterpret_cast<const HeapGraphNode*>(ToInternal(this)->root());
 }
 
 const HeapGraphNode* HeapSnapshot::GetNodeById(SnapshotObjectId id) const {
-  i::Isolate* isolate = i::Isolate::Current();
-  IsDeadCheck(isolate, "v8::HeapSnapshot::GetNodeById");
   return reinterpret_cast<const HeapGraphNode*>(
       ToInternal(this)->GetEntryById(id));
 }
 
 int HeapSnapshot::GetNodesCount() const {
-  i::Isolate* isolate = i::Isolate::Current();
-  IsDeadCheck(isolate, "v8::HeapSnapshot::GetNodesCount");
   return ToInternal(this)->entries().length();
 }
 
 const HeapGraphNode* HeapSnapshot::GetNode(int index) const {
-  i::Isolate* isolate = i::Isolate::Current();
-  IsDeadCheck(isolate, "v8::HeapSnapshot::GetNode");
   return reinterpret_cast<const HeapGraphNode*>(
       &ToInternal(this)->entries().at(index));
 }
 
 SnapshotObjectId HeapSnapshot::GetMaxSnapshotJSObjectId() const {
-  i::Isolate* isolate = i::Isolate::Current();
-  IsDeadCheck(isolate, "v8::HeapSnapshot::GetMaxSnapshotJSObjectId");
   return ToInternal(this)->max_snapshot_js_object_id();
 }
 
 void HeapSnapshot::Serialize(OutputStream* stream,
                              HeapSnapshot::SerializationFormat format) const {
-  i::Isolate* isolate = i::Isolate::Current();
-  IsDeadCheck(isolate, "v8::HeapSnapshot::Serialize");
   ApiCheck(format == kJSON,
            "v8::HeapSnapshot::Serialize",
           "Unknown serialization format");
@@ -7632,6 +7327,16 @@ void HeapProfiler::SetRetainedObjectInfo(UniqueId id,
 }
 
+void HeapProfiler::StartRecordingHeapAllocations() {
+  reinterpret_cast<i::HeapProfiler*>(this)->StartHeapAllocationsRecording();
+}
+
+void HeapProfiler::StopRecordingHeapAllocations() {
+  reinterpret_cast<i::HeapProfiler*>(this)->StopHeapAllocationsRecording();
+}
+
 v8::Testing::StressType internal::Testing::stress_type_ =
     v8::Testing::kStressTypeOpt;
ENABLE_HANDLE_ZAPPING HandleScope::ZapRange(blocks_[i], &blocks_[i][kHandleBlockSize]); #endif isolate_->handle_scope_implementer()->ReturnBlock(blocks_[i]); diff --git a/deps/v8/src/api.h b/deps/v8/src/api.h index 51bc4942b2..9197bafbc5 100644 --- a/deps/v8/src/api.h +++ b/deps/v8/src/api.h @@ -542,12 +542,12 @@ class HandleScopeImplementer { inline void DecrementCallDepth() {call_depth_--;} inline bool CallDepthIsZero() { return call_depth_ == 0; } - inline void EnterContext(Handle<Object> context); - inline bool LeaveLastContext(); + inline void EnterContext(Handle<Context> context); + inline bool LeaveContext(Handle<Context> context); // Returns the last entered context or an empty handle if no // contexts have been entered. - inline Handle<Object> LastEnteredContext(); + inline Handle<Context> LastEnteredContext(); inline void SaveContext(Context* context); inline Context* RestoreContext(); @@ -592,7 +592,7 @@ class HandleScopeImplementer { Isolate* isolate_; List<internal::Object**> blocks_; // Used as a stack to keep track of entered contexts. - List<Handle<Object> > entered_contexts_; + List<Context*> entered_contexts_; // Used as a stack to keep track of saved contexts. List<Context*> saved_contexts_; Object** spare_; @@ -630,21 +630,23 @@ bool HandleScopeImplementer::HasSavedContexts() { } -void HandleScopeImplementer::EnterContext(Handle<Object> context) { - entered_contexts_.Add(context); +void HandleScopeImplementer::EnterContext(Handle<Context> context) { + entered_contexts_.Add(*context); } -bool HandleScopeImplementer::LeaveLastContext() { +bool HandleScopeImplementer::LeaveContext(Handle<Context> context) { if (entered_contexts_.is_empty()) return false; + // TODO(dcarney): figure out what's wrong here + // if (entered_contexts_.last() != *context) return false; entered_contexts_.RemoveLast(); return true; } -Handle<Object> HandleScopeImplementer::LastEnteredContext() { - if (entered_contexts_.is_empty()) return Handle<Object>::null(); - return entered_contexts_.last(); +Handle<Context> HandleScopeImplementer::LastEnteredContext() { + if (entered_contexts_.is_empty()) return Handle<Context>::null(); + return Handle<Context>(entered_contexts_.last()); } @@ -665,7 +667,7 @@ void HandleScopeImplementer::DeleteExtensions(internal::Object** prev_limit) { #ifdef DEBUG // SealHandleScope may make the prev_limit to point inside the block. 
if (block_start <= prev_limit && prev_limit <= block_limit) { -#ifdef ENABLE_EXTRA_CHECKS +#ifdef ENABLE_HANDLE_ZAPPING internal::HandleScope::ZapRange(prev_limit, block_limit); #endif break; @@ -675,7 +677,7 @@ void HandleScopeImplementer::DeleteExtensions(internal::Object** prev_limit) { #endif blocks_.RemoveLast(); -#ifdef ENABLE_EXTRA_CHECKS +#ifdef ENABLE_HANDLE_ZAPPING internal::HandleScope::ZapRange(block_start, block_limit); #endif if (spare_ != NULL) { diff --git a/deps/v8/src/apinatives.js b/deps/v8/src/apinatives.js index 5fb36c09e7..6431901bf2 100644 --- a/deps/v8/src/apinatives.js +++ b/deps/v8/src/apinatives.js @@ -71,7 +71,6 @@ function InstantiateFunction(data, name) { (serialNumber in cache) && (cache[serialNumber] != kUninitialized); if (!isFunctionCached) { try { - cache[serialNumber] = null; var fun = %CreateApiFunction(data); if (name) %FunctionSetName(fun, name); var flags = %GetTemplateField(data, kApiFlagOffset); diff --git a/deps/v8/src/arguments.cc b/deps/v8/src/arguments.cc index 287805717e..3a4d733152 100644 --- a/deps/v8/src/arguments.cc +++ b/deps/v8/src/arguments.cc @@ -38,7 +38,7 @@ template<typename T> template<typename V> v8::Handle<V> CustomArguments<T>::GetReturnValue(Isolate* isolate) { // Check the ReturnValue. - Object** handle = &this->end()[kReturnValueOffset]; + Object** handle = &this->begin()[kReturnValueOffset]; // Nothing was set, return empty handle as per previous behaviour. if ((*handle)->IsTheHole()) return v8::Handle<V>(); return Utils::Convert<Object, V>(Handle<Object>(handle)); @@ -49,7 +49,7 @@ v8::Handle<v8::Value> FunctionCallbackArguments::Call(FunctionCallback f) { Isolate* isolate = this->isolate(); VMState<EXTERNAL> state(isolate); ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f)); - FunctionCallbackInfo<v8::Value> info(end(), + FunctionCallbackInfo<v8::Value> info(begin(), argv_, argc_, is_construct_call_); @@ -63,7 +63,7 @@ v8::Handle<ReturnValue> PropertyCallbackArguments::Call(Function f) { \ Isolate* isolate = this->isolate(); \ VMState<EXTERNAL> state(isolate); \ ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f)); \ - PropertyCallbackInfo<ReturnValue> info(end()); \ + PropertyCallbackInfo<ReturnValue> info(begin()); \ f(info); \ return GetReturnValue<ReturnValue>(isolate); \ } @@ -75,7 +75,7 @@ v8::Handle<ReturnValue> PropertyCallbackArguments::Call(Function f, \ Isolate* isolate = this->isolate(); \ VMState<EXTERNAL> state(isolate); \ ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f)); \ - PropertyCallbackInfo<ReturnValue> info(end()); \ + PropertyCallbackInfo<ReturnValue> info(begin()); \ f(arg1, info); \ return GetReturnValue<ReturnValue>(isolate); \ } @@ -88,7 +88,7 @@ v8::Handle<ReturnValue> PropertyCallbackArguments::Call(Function f, \ Isolate* isolate = this->isolate(); \ VMState<EXTERNAL> state(isolate); \ ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f)); \ - PropertyCallbackInfo<ReturnValue> info(end()); \ + PropertyCallbackInfo<ReturnValue> info(begin()); \ f(arg1, arg2, info); \ return GetReturnValue<ReturnValue>(isolate); \ } @@ -101,7 +101,7 @@ void PropertyCallbackArguments::Call(Function f, \ Isolate* isolate = this->isolate(); \ VMState<EXTERNAL> state(isolate); \ ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f)); \ - PropertyCallbackInfo<ReturnValue> info(end()); \ + PropertyCallbackInfo<ReturnValue> info(begin()); \ f(arg1, arg2, info); \ } @@ -118,4 +118,3 @@ FOR_EACH_CALLBACK_TABLE_MAPPING_2_VOID_RETURN(WRITE_CALL_2_VOID) } } // namespace v8::internal - 
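The arguments.cc hunk above, together with the arguments.h hunk that follows, moves the callback-argument helpers from end()-relative addressing (slot constants as offsets from one past the last element) to begin()-relative addressing (plain array indices). A minimal standalone sketch of why the two schemes can reach the same slots; the types and names below are stand-ins, not V8's:

#include <cassert>
#include <cstddef>

template <std::size_t kArrayLength>
struct SlotArray {
  void* values_[kArrayLength] = {};
  // Old scheme: a pointer to the *last* slot; constants index backwards.
  void** end() { return values_ + kArrayLength - 1; }
  // New scheme: a pointer to the first slot; constants index forwards.
  void** begin() { return values_; }
};

int main() {
  SlotArray<4> a;
  int marker = 0;
  a.begin()[2] = &marker;          // forward index 2
  assert(a.end()[-1] == &marker);  // same slot, backward index -1
  return 0;
}

Presumably the slot constants such as kReturnValueOffset were re-based from negative to non-negative values in the same change, which is why every use in these hunks flips mechanically from end() to begin().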
diff --git a/deps/v8/src/arguments.h b/deps/v8/src/arguments.h index c1db98b53d..92e57401f2 100644 --- a/deps/v8/src/arguments.h +++ b/deps/v8/src/arguments.h @@ -137,7 +137,7 @@ class CustomArgumentsBase : public Relocatable { v->VisitPointers(values_, values_ + kArrayLength); } protected: - inline Object** end() { return values_ + kArrayLength - 1; } + inline Object** begin() { return values_; } explicit inline CustomArgumentsBase(Isolate* isolate) : Relocatable(isolate) {} Object* values_[kArrayLength]; @@ -151,7 +151,7 @@ class CustomArguments : public CustomArgumentsBase<T::kArgsLength> { typedef CustomArgumentsBase<T::kArgsLength> Super; ~CustomArguments() { - this->end()[kReturnValueOffset] = + this->begin()[kReturnValueOffset] = reinterpret_cast<Object*>(kHandleZapValue); } @@ -162,7 +162,7 @@ class CustomArguments : public CustomArgumentsBase<T::kArgsLength> { v8::Handle<V> GetReturnValue(Isolate* isolate); inline Isolate* isolate() { - return reinterpret_cast<Isolate*>(this->end()[T::kIsolateIndex]); + return reinterpret_cast<Isolate*>(this->begin()[T::kIsolateIndex]); } }; @@ -185,7 +185,7 @@ class PropertyCallbackArguments Object* self, JSObject* holder) : Super(isolate) { - Object** values = this->end(); + Object** values = this->begin(); values[T::kThisIndex] = self; values[T::kHolderIndex] = holder; values[T::kDataIndex] = data; @@ -237,6 +237,13 @@ class FunctionCallbackArguments typedef FunctionCallbackInfo<Value> T; typedef CustomArguments<T> Super; static const int kArgsLength = T::kArgsLength; + static const int kHolderIndex = T::kHolderIndex; + static const int kDataIndex = T::kDataIndex; + static const int kReturnValueDefaultValueIndex = + T::kReturnValueDefaultValueIndex; + static const int kIsolateIndex = T::kIsolateIndex; + static const int kCalleeIndex = T::kCalleeIndex; + static const int kContextSaveIndex = T::kContextSaveIndex; FunctionCallbackArguments(internal::Isolate* isolate, internal::Object* data, @@ -249,10 +256,11 @@ class FunctionCallbackArguments argv_(argv), argc_(argc), is_construct_call_(is_construct_call) { - Object** values = end(); + Object** values = begin(); values[T::kDataIndex] = data; values[T::kCalleeIndex] = callee; values[T::kHolderIndex] = holder; + values[T::kContextSaveIndex] = isolate->heap()->the_hole_value(); values[T::kIsolateIndex] = reinterpret_cast<internal::Object*>(isolate); // Here the hole is set as default value. // It cannot escape into js as it's remove in Call below. diff --git a/deps/v8/src/arm/assembler-arm-inl.h b/deps/v8/src/arm/assembler-arm-inl.h index a1d1e1b567..e3b39f407c 100644 --- a/deps/v8/src/arm/assembler-arm-inl.h +++ b/deps/v8/src/arm/assembler-arm-inl.h @@ -208,6 +208,13 @@ void RelocInfo::set_target_cell(Cell* cell, WriteBarrierMode mode) { static const int kNoCodeAgeSequenceLength = 3; + +Handle<Object> RelocInfo::code_age_stub_handle(Assembler* origin) { + UNREACHABLE(); // This should never be reached on Arm. 
+ return Handle<Object>(); +} + + Code* RelocInfo::code_age_stub() { ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE); return Code::GetCodeFromTargetAddress( diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc index bd8b0613eb..05b25ae2d7 100644 --- a/deps/v8/src/arm/assembler-arm.cc +++ b/deps/v8/src/arm/assembler-arm.cc @@ -50,6 +50,7 @@ bool CpuFeatures::initialized_ = false; #endif unsigned CpuFeatures::supported_ = 0; unsigned CpuFeatures::found_by_runtime_probing_only_ = 0; +unsigned CpuFeatures::cross_compile_ = 0; unsigned CpuFeatures::cache_line_size_ = 64; diff --git a/deps/v8/src/arm/assembler-arm.h b/deps/v8/src/arm/assembler-arm.h index 866b1c9024..8caa64df34 100644 --- a/deps/v8/src/arm/assembler-arm.h +++ b/deps/v8/src/arm/assembler-arm.h @@ -64,23 +64,41 @@ class CpuFeatures : public AllStatic { // Check whether a feature is supported by the target CPU. static bool IsSupported(CpuFeature f) { ASSERT(initialized_); - return (supported_ & (1u << f)) != 0; + return Check(f, supported_); } static bool IsFoundByRuntimeProbingOnly(CpuFeature f) { ASSERT(initialized_); - return (found_by_runtime_probing_only_ & - (static_cast<uint64_t>(1) << f)) != 0; + return Check(f, found_by_runtime_probing_only_); } static bool IsSafeForSnapshot(CpuFeature f) { - return (IsSupported(f) && + return Check(f, cross_compile_) || + (IsSupported(f) && (!Serializer::enabled() || !IsFoundByRuntimeProbingOnly(f))); } static unsigned cache_line_size() { return cache_line_size_; } + static bool VerifyCrossCompiling() { + return cross_compile_ == 0; + } + + static bool VerifyCrossCompiling(CpuFeature f) { + unsigned mask = flag2set(f); + return cross_compile_ == 0 || + (cross_compile_ & mask) == mask; + } + private: + static bool Check(CpuFeature f, unsigned set) { + return (set & flag2set(f)) != 0; + } + + static unsigned flag2set(CpuFeature f) { + return 1u << f; + } + #ifdef DEBUG static bool initialized_; #endif @@ -88,7 +106,10 @@ class CpuFeatures : public AllStatic { static unsigned found_by_runtime_probing_only_; static unsigned cache_line_size_; + static unsigned cross_compile_; + friend class ExternalReference; + friend class PlatformFeatureScope; DISALLOW_COPY_AND_ASSIGN(CpuFeatures); }; @@ -114,21 +135,47 @@ class CpuFeatures : public AllStatic { // mode. This way we get the compile-time error checking in debug mode // and best performance in optimized code. +// These constants are used in several locations, including static initializers +const int kRegister_no_reg_Code = -1; +const int kRegister_r0_Code = 0; +const int kRegister_r1_Code = 1; +const int kRegister_r2_Code = 2; +const int kRegister_r3_Code = 3; +const int kRegister_r4_Code = 4; +const int kRegister_r5_Code = 5; +const int kRegister_r6_Code = 6; +const int kRegister_r7_Code = 7; +const int kRegister_r8_Code = 8; +const int kRegister_r9_Code = 9; +const int kRegister_r10_Code = 10; +const int kRegister_fp_Code = 11; +const int kRegister_ip_Code = 12; +const int kRegister_sp_Code = 13; +const int kRegister_lr_Code = 14; +const int kRegister_pc_Code = 15; + // Core register struct Register { static const int kNumRegisters = 16; - static const int kMaxNumAllocatableRegisters = 8; + static const int kMaxNumAllocatableRegisters = + FLAG_enable_ool_constant_pool ? 
8 : 9; static const int kSizeInBytes = 4; inline static int NumAllocatableRegisters(); static int ToAllocationIndex(Register reg) { + if (FLAG_enable_ool_constant_pool && (reg.code() >= kRegister_r8_Code)) { + return reg.code() - 1; + } ASSERT(reg.code() < kMaxNumAllocatableRegisters); return reg.code(); } static Register FromAllocationIndex(int index) { ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters); + if (FLAG_enable_ool_constant_pool && (index >= 7)) { + return from_code(index + 1); + } return from_code(index); } @@ -143,7 +190,11 @@ struct Register { "r5", "r6", "r7", + "r8", }; + if (FLAG_enable_ool_constant_pool && (index >= 7)) { + return names[index + 1]; + } return names[index]; } @@ -172,25 +223,6 @@ struct Register { int code_; }; -// These constants are used in several locations, including static initializers -const int kRegister_no_reg_Code = -1; -const int kRegister_r0_Code = 0; -const int kRegister_r1_Code = 1; -const int kRegister_r2_Code = 2; -const int kRegister_r3_Code = 3; -const int kRegister_r4_Code = 4; -const int kRegister_r5_Code = 5; -const int kRegister_r6_Code = 6; -const int kRegister_r7_Code = 7; -const int kRegister_r8_Code = 8; -const int kRegister_r9_Code = 9; -const int kRegister_r10_Code = 10; -const int kRegister_fp_Code = 11; -const int kRegister_ip_Code = 12; -const int kRegister_sp_Code = 13; -const int kRegister_lr_Code = 14; -const int kRegister_pc_Code = 15; - const Register no_reg = { kRegister_no_reg_Code }; const Register r0 = { kRegister_r0_Code }; @@ -200,6 +232,7 @@ const Register r3 = { kRegister_r3_Code }; const Register r4 = { kRegister_r4_Code }; const Register r5 = { kRegister_r5_Code }; const Register r6 = { kRegister_r6_Code }; +// Used as constant pool pointer register if FLAG_enable_ool_constant_pool. const Register r7 = { kRegister_r7_Code }; // Used as context register. const Register r8 = { kRegister_r8_Code }; diff --git a/deps/v8/src/arm/builtins-arm.cc b/deps/v8/src/arm/builtins-arm.cc index f60e1f8671..60f5290030 100644 --- a/deps/v8/src/arm/builtins-arm.cc +++ b/deps/v8/src/arm/builtins-arm.cc @@ -193,14 +193,12 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) { Register argument = r2; Label not_cached, argument_is_string; - NumberToStringStub::GenerateLookupNumberStringCache( - masm, - r0, // Input. - argument, // Result. - r3, // Scratch. - r4, // Scratch. - r5, // Scratch. - ¬_cached); + __ LookupNumberStringCache(r0, // Input. + argument, // Result. + r3, // Scratch. + r4, // Scratch. + r5, // Scratch. + ¬_cached); __ IncrementCounter(counters->string_ctor_cached_number(), 1, r3, r4); __ bind(&argument_is_string); @@ -447,9 +445,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm, // r3: object size (in words) // r4: JSObject (not tagged) // r5: First in-object property of JSObject (not tagged) - __ add(r6, r4, Operand(r3, LSL, kPointerSizeLog2)); // End of object. ASSERT_EQ(3 * kPointerSize, JSObject::kHeaderSize); - __ LoadRoot(r7, Heap::kUndefinedValueRootIndex); + __ LoadRoot(r6, Heap::kUndefinedValueRootIndex); if (count_constructions) { __ ldr(r0, FieldMemOperand(r2, Map::kInstanceSizesOffset)); __ Ubfx(r0, r0, Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte, @@ -457,14 +454,16 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm, __ add(r0, r5, Operand(r0, LSL, kPointerSizeLog2)); // r0: offset of first field after pre-allocated fields if (FLAG_debug_code) { - __ cmp(r0, r6); + __ add(ip, r4, Operand(r3, LSL, kPointerSizeLog2)); // End of object. 
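Stepping back to the assembler-arm.h hunk above: when FLAG_enable_ool_constant_pool is set, r7 is reserved as the constant pool pointer, so only eight registers are allocatable and allocation indices skip register code 7, letting r8 take index 7. A sketch of that mapping; the flag and free functions here are local stand-ins for the Register statics, not V8's API:

#include <cassert>
#include <initializer_list>

static bool enable_ool_constant_pool = true;  // stand-in for the V8 flag

// r8 has register code 8; with the out-of-line constant pool enabled,
// code 7 (r7) is reserved, so codes above it shift down by one.
int ToAllocationIndex(int reg_code) {
  if (enable_ool_constant_pool && reg_code >= 8) return reg_code - 1;
  return reg_code;
}

int FromAllocationIndex(int index) {
  if (enable_ool_constant_pool && index >= 7) return index + 1;
  return index;
}

int main() {
  assert(ToAllocationIndex(8) == 7);    // r8 compacts into index 7
  assert(FromAllocationIndex(7) == 8);  // and maps back
  for (int code : {0, 1, 2, 3, 4, 5, 6, 8})
    assert(FromAllocationIndex(ToAllocationIndex(code)) == code);
  return 0;
}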
+ __ cmp(r0, ip); __ Assert(le, kUnexpectedNumberOfPreAllocatedPropertyFields); } - __ InitializeFieldsWithFiller(r5, r0, r7); + __ InitializeFieldsWithFiller(r5, r0, r6); // To allow for truncation. - __ LoadRoot(r7, Heap::kOnePointerFillerMapRootIndex); + __ LoadRoot(r6, Heap::kOnePointerFillerMapRootIndex); } - __ InitializeFieldsWithFiller(r5, r6, r7); + __ add(r0, r4, Operand(r3, LSL, kPointerSizeLog2)); // End of object. + __ InitializeFieldsWithFiller(r5, r0, r6); // Add the object tag to make the JSObject real, so that we can continue // and jump into the continuation code at any time from now on. Any @@ -529,16 +528,10 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm, __ add(r6, r2, Operand(r3, LSL, kPointerSizeLog2)); // End of object. ASSERT_EQ(2 * kPointerSize, FixedArray::kHeaderSize); { Label loop, entry; - if (count_constructions) { - __ LoadRoot(r7, Heap::kUndefinedValueRootIndex); - } else if (FLAG_debug_code) { - __ LoadRoot(r8, Heap::kUndefinedValueRootIndex); - __ cmp(r7, r8); - __ Assert(eq, kUndefinedValueNotLoaded); - } + __ LoadRoot(r0, Heap::kUndefinedValueRootIndex); __ b(&entry); __ bind(&loop); - __ str(r7, MemOperand(r2, kPointerSize, PostIndex)); + __ str(r0, MemOperand(r2, kPointerSize, PostIndex)); __ bind(&entry); __ cmp(r2, r6); __ b(lt, &loop); @@ -702,7 +695,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm, // r2: receiver // r3: argc // r4: argv - // r5-r7, cp may be clobbered + // r5-r6, r7 (if not FLAG_enable_ool_constant_pool) and cp may be clobbered ProfileEntryHookStub::MaybeCallEntryHook(masm); // Clear the context before we push it when entering the internal frame. @@ -742,7 +735,9 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm, __ LoadRoot(r4, Heap::kUndefinedValueRootIndex); __ mov(r5, Operand(r4)); __ mov(r6, Operand(r4)); - __ mov(r7, Operand(r4)); + if (!FLAG_enable_ool_constant_pool) { + __ mov(r7, Operand(r4)); + } if (kR9Available == 1) { __ mov(r9, Operand(r4)); } @@ -807,12 +802,13 @@ static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) { // The following registers must be saved and restored when calling through to // the runtime: // r0 - contains return address (beginning of patch sequence) - // r1 - function object + // r1 - isolate FrameScope scope(masm, StackFrame::MANUAL); __ stm(db_w, sp, r0.bit() | r1.bit() | fp.bit() | lr.bit()); - __ PrepareCallCFunction(1, 0, r1); + __ PrepareCallCFunction(1, 0, r2); + __ mov(r1, Operand(ExternalReference::isolate_address(masm->isolate()))); __ CallCFunction( - ExternalReference::get_make_code_young_function(masm->isolate()), 1); + ExternalReference::get_make_code_young_function(masm->isolate()), 2); __ ldm(ia_w, sp, r0.bit() | r1.bit() | fp.bit() | lr.bit()); __ mov(pc, r0); } @@ -830,6 +826,39 @@ CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR) #undef DEFINE_CODE_AGE_BUILTIN_GENERATOR +void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) { + // For now, as in GenerateMakeCodeYoungAgainCommon, we are relying on the fact + // that make_code_young doesn't do any garbage collection which allows us to + // save/restore the registers without worrying about which of them contain + // pointers. 
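As the comment continuing below spells out, r0 carries the address of the patchable prologue sequence and r1 is loaded with the isolate when these code-age builtins call into C; the hunk above likewise switches GenerateMakeCodeYoungAgainCommon from passing a function object to passing the isolate, with two C arguments instead of one. A hypothetical C-side counterpart matching that contract; the name and exact signature are assumptions, not taken from the diff:

struct Isolate;  // opaque stand-in for v8::internal::Isolate

// Hypothetical target of get_make_code_young_function: re-patches the
// code-age sequence at sequence_start back to its "young" form.
extern "C" void MakeCodeYoung(unsigned char* sequence_start,
                              Isolate* isolate) {
  (void)sequence_start;  // patching elided in this sketch
  (void)isolate;
}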
+ + // The following registers must be saved and restored when calling through to + // the runtime: + // r0 - contains return address (beginning of patch sequence) + // r1 - isolate + FrameScope scope(masm, StackFrame::MANUAL); + __ stm(db_w, sp, r0.bit() | r1.bit() | fp.bit() | lr.bit()); + __ PrepareCallCFunction(1, 0, r2); + __ mov(r1, Operand(ExternalReference::isolate_address(masm->isolate()))); + __ CallCFunction(ExternalReference::get_mark_code_as_executed_function( + masm->isolate()), 2); + __ ldm(ia_w, sp, r0.bit() | r1.bit() | fp.bit() | lr.bit()); + + // Perform prologue operations usually performed by the young code stub. + __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit()); + __ add(fp, sp, Operand(2 * kPointerSize)); + + // Jump to point after the code-age stub. + __ add(r0, r0, Operand(kNoCodeAgeSequenceLength * Assembler::kInstrSize)); + __ mov(pc, r0); +} + + +void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) { + GenerateMakeCodeYoungAgainCommon(masm); +} + + void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) { { FrameScope scope(masm, StackFrame::INTERNAL); @@ -895,21 +924,6 @@ void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) { } -void Builtins::Generate_NotifyOSR(MacroAssembler* masm) { - // For now, we are relying on the fact that Runtime::NotifyOSR - // doesn't do any garbage collection which allows us to save/restore - // the registers without worrying about which of them contain - // pointers. This seems a bit fragile. - __ stm(db_w, sp, kJSCallerSaved | kCalleeSaved | lr.bit() | fp.bit()); - { - FrameScope scope(masm, StackFrame::INTERNAL); - __ CallRuntime(Runtime::kNotifyOSR, 0); - } - __ ldm(ia_w, sp, kJSCallerSaved | kCalleeSaved | lr.bit() | fp.bit()); - __ Ret(); -} - - void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) { // Lookup the function in the JavaScript frame. __ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); @@ -956,6 +970,24 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) { } +void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) { + // We check the stack limit as indicator that recompilation might be done. + Label ok; + __ LoadRoot(ip, Heap::kStackLimitRootIndex); + __ cmp(sp, Operand(ip)); + __ b(hs, &ok); + { + FrameScope scope(masm, StackFrame::INTERNAL); + __ CallRuntime(Runtime::kStackGuard, 0); + } + __ Jump(masm->isolate()->builtins()->OnStackReplacement(), + RelocInfo::CODE_TARGET); + + __ bind(&ok); + __ Ret(); +} + + void Builtins::Generate_FunctionCall(MacroAssembler* masm) { // 1. Make sure we have at least one argument. 
// r0: actual number of arguments diff --git a/deps/v8/src/arm/code-stubs-arm.cc b/deps/v8/src/arm/code-stubs-arm.cc index cd1809fb2a..9330eb1411 100644 --- a/deps/v8/src/arm/code-stubs-arm.cc +++ b/deps/v8/src/arm/code-stubs-arm.cc @@ -59,6 +59,17 @@ void ToNumberStub::InitializeInterfaceDescriptor( } +void NumberToStringStub::InitializeInterfaceDescriptor( + Isolate* isolate, + CodeStubInterfaceDescriptor* descriptor) { + static Register registers[] = { r0 }; + descriptor->register_param_count_ = 1; + descriptor->register_params_ = registers; + descriptor->deoptimization_handler_ = + Runtime::FunctionForId(Runtime::kNumberToString)->entry; +} + + void FastCloneShallowArrayStub::InitializeInterfaceDescriptor( Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { @@ -77,7 +88,7 @@ void FastCloneShallowObjectStub::InitializeInterfaceDescriptor( descriptor->register_param_count_ = 4; descriptor->register_params_ = registers; descriptor->deoptimization_handler_ = - Runtime::FunctionForId(Runtime::kCreateObjectLiteralShallow)->entry; + Runtime::FunctionForId(Runtime::kCreateObjectLiteral)->entry; } @@ -158,6 +169,18 @@ void CompareNilICStub::InitializeInterfaceDescriptor( } +void BinaryOpStub::InitializeInterfaceDescriptor( + Isolate* isolate, + CodeStubInterfaceDescriptor* descriptor) { + static Register registers[] = { r1, r0 }; + descriptor->register_param_count_ = 2; + descriptor->register_params_ = registers; + descriptor->deoptimization_handler_ = FUNCTION_ADDR(BinaryOpIC_Miss); + descriptor->SetMissHandler( + ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate)); +} + + static void InitializeArrayConstructorDescriptor( Isolate* isolate, CodeStubInterfaceDescriptor* descriptor, @@ -170,7 +193,7 @@ static void InitializeArrayConstructorDescriptor( descriptor->register_param_count_ = 2; if (constant_stack_parameter_count != 0) { // stack param count needs (constructor pointer, and single argument) - descriptor->stack_parameter_count_ = &r0; + descriptor->stack_parameter_count_ = r0; } descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count; descriptor->register_params_ = registers; @@ -192,7 +215,7 @@ static void InitializeInternalArrayConstructorDescriptor( if (constant_stack_parameter_count != 0) { // stack param count needs (constructor pointer, and single argument) - descriptor->stack_parameter_count_ = &r0; + descriptor->stack_parameter_count_ = r0; } descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count; descriptor->register_params_ = registers; @@ -825,8 +848,7 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm, // Convert lhs to a double in d7. __ SmiToDouble(d7, lhs); // Load the double from rhs, tagged HeapNumber r0, to d6. - __ sub(r7, rhs, Operand(kHeapObjectTag)); - __ vldr(d6, r7, HeapNumber::kValueOffset); + __ vldr(d6, rhs, HeapNumber::kValueOffset - kHeapObjectTag); // We now have both loaded as doubles but we can skip the lhs nan check // since it's a smi. @@ -851,8 +873,7 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm, // Rhs is a smi, lhs is a heap number. // Load the double from lhs, tagged HeapNumber r1, to d7. - __ sub(r7, lhs, Operand(kHeapObjectTag)); - __ vldr(d7, r7, HeapNumber::kValueOffset); + __ vldr(d7, lhs, HeapNumber::kValueOffset - kHeapObjectTag); // Convert rhs to a double in d6 . __ SmiToDouble(d6, rhs); // Fall through to both_loaded_as_doubles. @@ -920,10 +941,8 @@ static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm, // Both are heap numbers. 
Load them up then jump to the code we have // for that. - __ sub(r7, rhs, Operand(kHeapObjectTag)); - __ vldr(d6, r7, HeapNumber::kValueOffset); - __ sub(r7, lhs, Operand(kHeapObjectTag)); - __ vldr(d7, r7, HeapNumber::kValueOffset); + __ vldr(d6, rhs, HeapNumber::kValueOffset - kHeapObjectTag); + __ vldr(d7, lhs, HeapNumber::kValueOffset - kHeapObjectTag); __ jmp(both_loaded_as_doubles); } @@ -972,108 +991,6 @@ static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm, } -void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm, - Register object, - Register result, - Register scratch1, - Register scratch2, - Register scratch3, - Label* not_found) { - // Use of registers. Register result is used as a temporary. - Register number_string_cache = result; - Register mask = scratch3; - - // Load the number string cache. - __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex); - - // Make the hash mask from the length of the number string cache. It - // contains two elements (number and string) for each cache entry. - __ ldr(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset)); - // Divide length by two (length is a smi). - __ mov(mask, Operand(mask, ASR, kSmiTagSize + 1)); - __ sub(mask, mask, Operand(1)); // Make mask. - - // Calculate the entry in the number string cache. The hash value in the - // number string cache for smis is just the smi value, and the hash for - // doubles is the xor of the upper and lower words. See - // Heap::GetNumberStringCache. - Isolate* isolate = masm->isolate(); - Label is_smi; - Label load_result_from_cache; - __ JumpIfSmi(object, &is_smi); - __ CheckMap(object, - scratch1, - Heap::kHeapNumberMapRootIndex, - not_found, - DONT_DO_SMI_CHECK); - - STATIC_ASSERT(8 == kDoubleSize); - __ add(scratch1, - object, - Operand(HeapNumber::kValueOffset - kHeapObjectTag)); - __ ldm(ia, scratch1, scratch1.bit() | scratch2.bit()); - __ eor(scratch1, scratch1, Operand(scratch2)); - __ and_(scratch1, scratch1, Operand(mask)); - - // Calculate address of entry in string cache: each entry consists - // of two pointer sized fields. - __ add(scratch1, - number_string_cache, - Operand(scratch1, LSL, kPointerSizeLog2 + 1)); - - Register probe = mask; - __ ldr(probe, - FieldMemOperand(scratch1, FixedArray::kHeaderSize)); - __ JumpIfSmi(probe, not_found); - __ sub(scratch2, object, Operand(kHeapObjectTag)); - __ vldr(d0, scratch2, HeapNumber::kValueOffset); - __ sub(probe, probe, Operand(kHeapObjectTag)); - __ vldr(d1, probe, HeapNumber::kValueOffset); - __ VFPCompareAndSetFlags(d0, d1); - __ b(ne, not_found); // The cache did not contain this value. - __ b(&load_result_from_cache); - - __ bind(&is_smi); - Register scratch = scratch1; - __ and_(scratch, mask, Operand(object, ASR, 1)); - // Calculate address of entry in string cache: each entry consists - // of two pointer sized fields. - __ add(scratch, - number_string_cache, - Operand(scratch, LSL, kPointerSizeLog2 + 1)); - - // Check if the entry is the smi we are looking for. - __ ldr(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize)); - __ cmp(object, probe); - __ b(ne, not_found); - - // Get the result from the cache. 
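The lookup routine being deleted here (its body continues just below) was folded into MacroAssembler::LookupNumberStringCache, as the builtins-arm.cc hunk earlier shows. Its hashing scheme: smis hash by their untagged value, doubles by XOR-ing the two 32-bit halves of their bit pattern, both masked by half the cache length minus one; each entry then occupies a number/string slot pair. A self-contained sketch of that index computation, under those assumptions:

#include <cstdint>
#include <cstring>

// mask == (cache_length / 2) - 1, where the cache is a FixedArray of
// [number, string] pairs; slot 2*i holds the key, slot 2*i + 1 the string.
uint32_t SmiCacheIndex(int32_t value, uint32_t mask) {
  return static_cast<uint32_t>(value) & mask;
}

uint32_t DoubleCacheIndex(double value, uint32_t mask) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof bits);
  uint32_t lo = static_cast<uint32_t>(bits);
  uint32_t hi = static_cast<uint32_t>(bits >> 32);
  return (lo ^ hi) & mask;  // the stub's eor of the two loaded words
}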
- __ bind(&load_result_from_cache); - __ ldr(result, - FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize)); - __ IncrementCounter(isolate->counters()->number_to_string_native(), - 1, - scratch1, - scratch2); -} - - -void NumberToStringStub::Generate(MacroAssembler* masm) { - Label runtime; - - __ ldr(r1, MemOperand(sp, 0)); - - // Generate code to lookup number in the number string cache. - GenerateLookupNumberStringCache(masm, r1, r0, r2, r3, r4, &runtime); - __ add(sp, sp, Operand(1 * kPointerSize)); - __ Ret(); - - __ bind(&runtime); - // Handle number to string in the runtime system if not found in the cache. - __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1); -} - - static void ICCompareStub_CheckInputType(MacroAssembler* masm, Register input, Register scratch, @@ -1281,994 +1198,6 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) { } -// Generates code to call a C function to do a double operation. -// This code never falls through, but returns with a heap number containing -// the result in r0. -// Register heapnumber_result must be a heap number in which the -// result of the operation will be stored. -// Requires the following layout on entry: -// d0: Left value. -// d1: Right value. -// If soft float ABI, use also r0, r1, r2, r3. -static void CallCCodeForDoubleOperation(MacroAssembler* masm, - Token::Value op, - Register heap_number_result, - Register scratch) { - // Assert that heap_number_result is callee-saved. - // We currently always use r5 to pass it. - ASSERT(heap_number_result.is(r5)); - - // Push the current return address before the C call. Return will be - // through pop(pc) below. - __ push(lr); - __ PrepareCallCFunction(0, 2, scratch); - if (!masm->use_eabi_hardfloat()) { - __ vmov(r0, r1, d0); - __ vmov(r2, r3, d1); - } - { - AllowExternalCallThatCantCauseGC scope(masm); - __ CallCFunction( - ExternalReference::double_fp_operation(op, masm->isolate()), 0, 2); - } - // Store answer in the overwritable heap number. Double returned in - // registers r0 and r1 or in d0. - if (masm->use_eabi_hardfloat()) { - __ vstr(d0, FieldMemOperand(heap_number_result, HeapNumber::kValueOffset)); - } else { - __ Strd(r0, r1, - FieldMemOperand(heap_number_result, HeapNumber::kValueOffset)); - } - // Place heap_number_result in r0 and return to the pushed return address. - __ mov(r0, Operand(heap_number_result)); - __ pop(pc); -} - - -void BinaryOpStub::Initialize() { - platform_specific_bit_ = true; // VFP2 is a base requirement for V8 -} - - -void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) { - Label get_result; - - __ Push(r1, r0); - - __ mov(r2, Operand(Smi::FromInt(MinorKey()))); - __ push(r2); - - __ TailCallExternalReference( - ExternalReference(IC_Utility(IC::kBinaryOp_Patch), - masm->isolate()), - 3, - 1); -} - - -void BinaryOpStub::GenerateTypeTransitionWithSavedArgs( - MacroAssembler* masm) { - UNIMPLEMENTED(); -} - - -void BinaryOpStub_GenerateSmiSmiOperation(MacroAssembler* masm, - Token::Value op) { - Register left = r1; - Register right = r0; - Register scratch1 = r7; - Register scratch2 = r9; - - ASSERT(right.is(r0)); - STATIC_ASSERT(kSmiTag == 0); - - Label not_smi_result; - switch (op) { - case Token::ADD: - __ add(right, left, Operand(right), SetCC); // Add optimistically. - __ Ret(vc); - __ sub(right, right, Operand(left)); // Revert optimistic add. - break; - case Token::SUB: - __ sub(right, left, Operand(right), SetCC); // Subtract optimistically. 
- __ Ret(vc); - __ sub(right, left, Operand(right)); // Revert optimistic subtract. - break; - case Token::MUL: - // Remove tag from one of the operands. This way the multiplication result - // will be a smi if it fits the smi range. - __ SmiUntag(ip, right); - // Do multiplication - // scratch1 = lower 32 bits of ip * left. - // scratch2 = higher 32 bits of ip * left. - __ smull(scratch1, scratch2, left, ip); - // Check for overflowing the smi range - no overflow if higher 33 bits of - // the result are identical. - __ mov(ip, Operand(scratch1, ASR, 31)); - __ cmp(ip, Operand(scratch2)); - __ b(ne, ¬_smi_result); - // Go slow on zero result to handle -0. - __ cmp(scratch1, Operand::Zero()); - __ mov(right, Operand(scratch1), LeaveCC, ne); - __ Ret(ne); - // We need -0 if we were multiplying a negative number with 0 to get 0. - // We know one of them was zero. - __ add(scratch2, right, Operand(left), SetCC); - __ mov(right, Operand(Smi::FromInt(0)), LeaveCC, pl); - __ Ret(pl); // Return smi 0 if the non-zero one was positive. - // We fall through here if we multiplied a negative number with 0, because - // that would mean we should produce -0. - break; - case Token::DIV: { - Label div_with_sdiv; - - // Check for 0 divisor. - __ cmp(right, Operand::Zero()); - __ b(eq, ¬_smi_result); - - // Check for power of two on the right hand side. - __ sub(scratch1, right, Operand(1)); - __ tst(scratch1, right); - if (CpuFeatures::IsSupported(SUDIV)) { - __ b(ne, &div_with_sdiv); - // Check for no remainder. - __ tst(left, scratch1); - __ b(ne, ¬_smi_result); - // Check for positive left hand side. - __ cmp(left, Operand::Zero()); - __ b(mi, &div_with_sdiv); - } else { - __ b(ne, ¬_smi_result); - // Check for positive and no remainder. - __ orr(scratch2, scratch1, Operand(0x80000000u)); - __ tst(left, scratch2); - __ b(ne, ¬_smi_result); - } - - // Perform division by shifting. - __ clz(scratch1, scratch1); - __ rsb(scratch1, scratch1, Operand(31)); - __ mov(right, Operand(left, LSR, scratch1)); - __ Ret(); - - if (CpuFeatures::IsSupported(SUDIV)) { - CpuFeatureScope scope(masm, SUDIV); - Label result_not_zero; - - __ bind(&div_with_sdiv); - // Do division. - __ sdiv(scratch1, left, right); - // Check that the remainder is zero. - __ mls(scratch2, scratch1, right, left); - __ cmp(scratch2, Operand::Zero()); - __ b(ne, ¬_smi_result); - // Check for negative zero result. - __ cmp(scratch1, Operand::Zero()); - __ b(ne, &result_not_zero); - __ cmp(right, Operand::Zero()); - __ b(lt, ¬_smi_result); - __ bind(&result_not_zero); - // Check for the corner case of dividing the most negative smi by -1. - __ cmp(scratch1, Operand(0x40000000)); - __ b(eq, ¬_smi_result); - // Tag and return the result. - __ SmiTag(right, scratch1); - __ Ret(); - } - break; - } - case Token::MOD: { - Label modulo_with_sdiv; - - if (CpuFeatures::IsSupported(SUDIV)) { - // Check for x % 0. - __ cmp(right, Operand::Zero()); - __ b(eq, ¬_smi_result); - - // Check for two positive smis. - __ orr(scratch1, left, Operand(right)); - __ tst(scratch1, Operand(0x80000000u)); - __ b(ne, &modulo_with_sdiv); - - // Check for power of two on the right hand side. - __ sub(scratch1, right, Operand(1)); - __ tst(scratch1, right); - __ b(ne, &modulo_with_sdiv); - } else { - // Check for two positive smis. - __ orr(scratch1, left, Operand(right)); - __ tst(scratch1, Operand(0x80000000u)); - __ b(ne, ¬_smi_result); - - // Check for power of two on the right hand side. 
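Both the DIV and MOD smi fast paths in this deleted stub special-case a power-of-two right operand, as the check continuing just below does: an exact division turns into a logical shift and a modulus into a mask of the low bits. The underlying arithmetic on untagged, non-negative values as a sketch (__builtin_clz is a GCC/Clang builtin; the stub instead derives the shift from the tagged smi with clz and rsb):

#include <cassert>
#include <cstdint>

bool IsPowerOfTwo(uint32_t x) { return x != 0 && (x & (x - 1)) == 0; }

uint32_t FastDiv(uint32_t left, uint32_t right) {
  // Exact divisions only: the stub bails to the slow path unless
  // (left & (right - 1)) == 0, i.e. there is no remainder.
  assert(IsPowerOfTwo(right) && (left & (right - 1)) == 0);
  int shift = 31 - __builtin_clz(right);  // log2(right) for a power of two
  return left >> shift;
}

uint32_t FastMod(uint32_t left, uint32_t right) {
  assert(IsPowerOfTwo(right));
  return left & (right - 1);  // "modulus by masking" from the stub
}

int main() {
  assert(FastDiv(48, 16) == 3);
  assert(FastMod(29, 8) == 5);
  return 0;
}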
- __ JumpIfNotPowerOfTwoOrZero(right, scratch1, ¬_smi_result); - } - - // Perform modulus by masking (scratch1 contains right - 1). - __ and_(right, left, Operand(scratch1)); - __ Ret(); - - if (CpuFeatures::IsSupported(SUDIV)) { - CpuFeatureScope scope(masm, SUDIV); - __ bind(&modulo_with_sdiv); - __ mov(scratch2, right); - // Perform modulus with sdiv and mls. - __ sdiv(scratch1, left, right); - __ mls(right, scratch1, right, left); - // Return if the result is not 0. - __ cmp(right, Operand::Zero()); - __ Ret(ne); - // The result is 0, check for -0 case. - __ cmp(left, Operand::Zero()); - __ Ret(pl); - // This is a -0 case, restore the value of right. - __ mov(right, scratch2); - // We fall through here to not_smi_result to produce -0. - } - break; - } - case Token::BIT_OR: - __ orr(right, left, Operand(right)); - __ Ret(); - break; - case Token::BIT_AND: - __ and_(right, left, Operand(right)); - __ Ret(); - break; - case Token::BIT_XOR: - __ eor(right, left, Operand(right)); - __ Ret(); - break; - case Token::SAR: - // Remove tags from right operand. - __ GetLeastBitsFromSmi(scratch1, right, 5); - __ mov(right, Operand(left, ASR, scratch1)); - // Smi tag result. - __ bic(right, right, Operand(kSmiTagMask)); - __ Ret(); - break; - case Token::SHR: - // Remove tags from operands. We can't do this on a 31 bit number - // because then the 0s get shifted into bit 30 instead of bit 31. - __ SmiUntag(scratch1, left); - __ GetLeastBitsFromSmi(scratch2, right, 5); - __ mov(scratch1, Operand(scratch1, LSR, scratch2)); - // Unsigned shift is not allowed to produce a negative number, so - // check the sign bit and the sign bit after Smi tagging. - __ tst(scratch1, Operand(0xc0000000)); - __ b(ne, ¬_smi_result); - // Smi tag result. - __ SmiTag(right, scratch1); - __ Ret(); - break; - case Token::SHL: - // Remove tags from operands. - __ SmiUntag(scratch1, left); - __ GetLeastBitsFromSmi(scratch2, right, 5); - __ mov(scratch1, Operand(scratch1, LSL, scratch2)); - // Check that the signed result fits in a Smi. - __ TrySmiTag(right, scratch1, ¬_smi_result); - __ Ret(); - break; - default: - UNREACHABLE(); - } - __ bind(¬_smi_result); -} - - -void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm, - Register result, - Register heap_number_map, - Register scratch1, - Register scratch2, - Label* gc_required, - OverwriteMode mode); - - -void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm, - BinaryOpIC::TypeInfo left_type, - BinaryOpIC::TypeInfo right_type, - bool smi_operands, - Label* not_numbers, - Label* gc_required, - Label* miss, - Token::Value op, - OverwriteMode mode) { - Register left = r1; - Register right = r0; - Register scratch1 = r6; - Register scratch2 = r7; - - ASSERT(smi_operands || (not_numbers != NULL)); - if (smi_operands) { - __ AssertSmi(left); - __ AssertSmi(right); - } - if (left_type == BinaryOpIC::SMI) { - __ JumpIfNotSmi(left, miss); - } - if (right_type == BinaryOpIC::SMI) { - __ JumpIfNotSmi(right, miss); - } - - Register heap_number_map = r9; - __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); - - switch (op) { - case Token::ADD: - case Token::SUB: - case Token::MUL: - case Token::DIV: - case Token::MOD: { - // Allocate new heap number for result. - Register result = r5; - BinaryOpStub_GenerateHeapResultAllocation( - masm, result, heap_number_map, scratch1, scratch2, gc_required, mode); - - // Load left and right operands into d0 and d1. 
- if (smi_operands) { - __ SmiToDouble(d1, right); - __ SmiToDouble(d0, left); - } else { - // Load right operand into d1. - if (right_type == BinaryOpIC::INT32) { - __ LoadNumberAsInt32Double( - right, d1, heap_number_map, scratch1, d8, miss); - } else { - Label* fail = (right_type == BinaryOpIC::NUMBER) ? miss : not_numbers; - __ LoadNumber(right, d1, heap_number_map, scratch1, fail); - } - // Load left operand into d0. - if (left_type == BinaryOpIC::INT32) { - __ LoadNumberAsInt32Double( - left, d0, heap_number_map, scratch1, d8, miss); - } else { - Label* fail = (left_type == BinaryOpIC::NUMBER) ? miss : not_numbers; - __ LoadNumber( - left, d0, heap_number_map, scratch1, fail); - } - } - - // Calculate the result. - if (op != Token::MOD) { - // Using VFP registers: - // d0: Left value - // d1: Right value - switch (op) { - case Token::ADD: - __ vadd(d5, d0, d1); - break; - case Token::SUB: - __ vsub(d5, d0, d1); - break; - case Token::MUL: - __ vmul(d5, d0, d1); - break; - case Token::DIV: - __ vdiv(d5, d0, d1); - break; - default: - UNREACHABLE(); - } - - __ sub(r0, result, Operand(kHeapObjectTag)); - __ vstr(d5, r0, HeapNumber::kValueOffset); - __ add(r0, r0, Operand(kHeapObjectTag)); - __ Ret(); - } else { - // Call the C function to handle the double operation. - CallCCodeForDoubleOperation(masm, op, result, scratch1); - if (FLAG_debug_code) { - __ stop("Unreachable code."); - } - } - break; - } - case Token::BIT_OR: - case Token::BIT_XOR: - case Token::BIT_AND: - case Token::SAR: - case Token::SHR: - case Token::SHL: { - if (smi_operands) { - __ SmiUntag(r3, left); - __ SmiUntag(r2, right); - } else { - // Convert operands to 32-bit integers. Right in r2 and left in r3. - __ TruncateNumberToI(left, r3, heap_number_map, scratch1, not_numbers); - __ TruncateNumberToI(right, r2, heap_number_map, scratch1, not_numbers); - } - - Label result_not_a_smi; - switch (op) { - case Token::BIT_OR: - __ orr(r2, r3, Operand(r2)); - break; - case Token::BIT_XOR: - __ eor(r2, r3, Operand(r2)); - break; - case Token::BIT_AND: - __ and_(r2, r3, Operand(r2)); - break; - case Token::SAR: - // Use only the 5 least significant bits of the shift count. - __ GetLeastBitsFromInt32(r2, r2, 5); - __ mov(r2, Operand(r3, ASR, r2)); - break; - case Token::SHR: - // Use only the 5 least significant bits of the shift count. - __ GetLeastBitsFromInt32(r2, r2, 5); - __ mov(r2, Operand(r3, LSR, r2), SetCC); - // SHR is special because it is required to produce a positive answer. - // The code below for writing into heap numbers isn't capable of - // writing the register as an unsigned int so we go to slow case if we - // hit this case. - __ b(mi, &result_not_a_smi); - break; - case Token::SHL: - // Use only the 5 least significant bits of the shift count. - __ GetLeastBitsFromInt32(r2, r2, 5); - __ mov(r2, Operand(r3, LSL, r2)); - break; - default: - UNREACHABLE(); - } - - // Check that the *signed* result fits in a smi. - __ TrySmiTag(r0, r2, &result_not_a_smi); - __ Ret(); - - // Allocate new heap number for result. - __ bind(&result_not_a_smi); - Register result = r5; - if (smi_operands) { - __ AllocateHeapNumber( - result, scratch1, scratch2, heap_number_map, gc_required); - } else { - BinaryOpStub_GenerateHeapResultAllocation( - masm, result, heap_number_map, scratch1, scratch2, gc_required, - mode); - } - - // r2: Answer as signed int32. - // r5: Heap number to write answer into. - - // Nothing can go wrong now, so move the heap number to r0, which is the - // result. 
- __ mov(r0, Operand(r5)); - - // Convert the int32 in r2 to the heap number in r0. r3 is corrupted. As - // mentioned above SHR needs to always produce a positive result. - __ vmov(s0, r2); - if (op == Token::SHR) { - __ vcvt_f64_u32(d0, s0); - } else { - __ vcvt_f64_s32(d0, s0); - } - __ sub(r3, r0, Operand(kHeapObjectTag)); - __ vstr(d0, r3, HeapNumber::kValueOffset); - __ Ret(); - break; - } - default: - UNREACHABLE(); - } -} - - -// Generate the smi code. If the operation on smis are successful this return is -// generated. If the result is not a smi and heap number allocation is not -// requested the code falls through. If number allocation is requested but a -// heap number cannot be allocated the code jumps to the label gc_required. -void BinaryOpStub_GenerateSmiCode( - MacroAssembler* masm, - Label* use_runtime, - Label* gc_required, - Token::Value op, - BinaryOpStub::SmiCodeGenerateHeapNumberResults allow_heapnumber_results, - OverwriteMode mode) { - Label not_smis; - - Register left = r1; - Register right = r0; - Register scratch1 = r7; - - // Perform combined smi check on both operands. - __ orr(scratch1, left, Operand(right)); - __ JumpIfNotSmi(scratch1, ¬_smis); - - // If the smi-smi operation results in a smi return is generated. - BinaryOpStub_GenerateSmiSmiOperation(masm, op); - - // If heap number results are possible generate the result in an allocated - // heap number. - if (allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS) { - BinaryOpStub_GenerateFPOperation( - masm, BinaryOpIC::UNINITIALIZED, BinaryOpIC::UNINITIALIZED, true, - use_runtime, gc_required, ¬_smis, op, mode); - } - __ bind(¬_smis); -} - - -void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) { - Label right_arg_changed, call_runtime; - - if (op_ == Token::MOD && encoded_right_arg_.has_value) { - // It is guaranteed that the value will fit into a Smi, because if it - // didn't, we wouldn't be here, see BinaryOp_Patch. - __ cmp(r0, Operand(Smi::FromInt(fixed_right_arg_value()))); - __ b(ne, &right_arg_changed); - } - - if (result_type_ == BinaryOpIC::UNINITIALIZED || - result_type_ == BinaryOpIC::SMI) { - // Only allow smi results. - BinaryOpStub_GenerateSmiCode( - masm, &call_runtime, NULL, op_, NO_HEAPNUMBER_RESULTS, mode_); - } else { - // Allow heap number result and don't make a transition if a heap number - // cannot be allocated. - BinaryOpStub_GenerateSmiCode( - masm, &call_runtime, &call_runtime, op_, ALLOW_HEAPNUMBER_RESULTS, - mode_); - } - - // Code falls through if the result is not returned as either a smi or heap - // number. - __ bind(&right_arg_changed); - GenerateTypeTransition(masm); - - __ bind(&call_runtime); - { - FrameScope scope(masm, StackFrame::INTERNAL); - GenerateRegisterArgsPush(masm); - GenerateCallRuntime(masm); - } - __ Ret(); -} - - -void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) { - Label call_runtime; - ASSERT(left_type_ == BinaryOpIC::STRING && right_type_ == BinaryOpIC::STRING); - ASSERT(op_ == Token::ADD); - // If both arguments are strings, call the string add stub. - // Otherwise, do a transition. - - // Registers containing left and right operands respectively. - Register left = r1; - Register right = r0; - - // Test if left operand is a string. - __ JumpIfSmi(left, &call_runtime); - __ CompareObjectType(left, r2, r2, FIRST_NONSTRING_TYPE); - __ b(ge, &call_runtime); - - // Test if right operand is a string. 
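The string tests here, with the right-operand test continuing below, use a standard V8 pattern: a value is a string iff it is not a smi and its map's instance type compares below FIRST_NONSTRING_TYPE, because string types are enumerated first. Restated over stand-in types; the boundary constant is illustrative, not V8's actual value:

#include <cstdint>

constexpr int kFirstNonstringType = 0x80;  // illustrative boundary value

struct HeapObject { int instance_type; };

// V8 tagging at the time: smis have low bit 0, heap-object pointers have
// low bit 1, so untagging subtracts the tag before dereferencing.
bool IsString(uintptr_t tagged) {
  if ((tagged & 1) == 0) return false;  // a smi is never a string
  const HeapObject* obj = reinterpret_cast<const HeapObject*>(tagged - 1);
  return obj->instance_type < kFirstNonstringType;
}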
- __ JumpIfSmi(right, &call_runtime); - __ CompareObjectType(right, r2, r2, FIRST_NONSTRING_TYPE); - __ b(ge, &call_runtime); - - StringAddStub string_add_stub( - (StringAddFlags)(STRING_ADD_CHECK_NONE | STRING_ADD_ERECT_FRAME)); - GenerateRegisterArgsPush(masm); - __ TailCallStub(&string_add_stub); - - __ bind(&call_runtime); - GenerateTypeTransition(masm); -} - - -void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { - ASSERT(Max(left_type_, right_type_) == BinaryOpIC::INT32); - - Register left = r1; - Register right = r0; - Register scratch1 = r7; - Register scratch2 = r9; - LowDwVfpRegister double_scratch = d0; - - Register heap_number_result = no_reg; - Register heap_number_map = r6; - __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); - - Label call_runtime; - // Labels for type transition, used for wrong input or output types. - // Both label are currently actually bound to the same position. We use two - // different label to differentiate the cause leading to type transition. - Label transition; - - // Smi-smi fast case. - Label skip; - __ orr(scratch1, left, right); - __ JumpIfNotSmi(scratch1, &skip); - BinaryOpStub_GenerateSmiSmiOperation(masm, op_); - // Fall through if the result is not a smi. - __ bind(&skip); - - switch (op_) { - case Token::ADD: - case Token::SUB: - case Token::MUL: - case Token::DIV: - case Token::MOD: { - // It could be that only SMIs have been seen at either the left - // or the right operand. For precise type feedback, patch the IC - // again if this changes. - if (left_type_ == BinaryOpIC::SMI) { - __ JumpIfNotSmi(left, &transition); - } - if (right_type_ == BinaryOpIC::SMI) { - __ JumpIfNotSmi(right, &transition); - } - // Load both operands and check that they are 32-bit integer. - // Jump to type transition if they are not. The registers r0 and r1 (right - // and left) are preserved for the runtime call. - __ LoadNumberAsInt32Double( - right, d1, heap_number_map, scratch1, d8, &transition); - __ LoadNumberAsInt32Double( - left, d0, heap_number_map, scratch1, d8, &transition); - - if (op_ != Token::MOD) { - Label return_heap_number; - switch (op_) { - case Token::ADD: - __ vadd(d5, d0, d1); - break; - case Token::SUB: - __ vsub(d5, d0, d1); - break; - case Token::MUL: - __ vmul(d5, d0, d1); - break; - case Token::DIV: - __ vdiv(d5, d0, d1); - break; - default: - UNREACHABLE(); - } - - if (result_type_ <= BinaryOpIC::INT32) { - __ TryDoubleToInt32Exact(scratch1, d5, d8); - // If the ne condition is set, result does - // not fit in a 32-bit integer. - __ b(ne, &transition); - // Try to tag the result as a Smi, return heap number on overflow. - __ SmiTag(scratch1, SetCC); - __ b(vs, &return_heap_number); - // Check for minus zero, transition in that case (because we need - // to return a heap number). - Label not_zero; - ASSERT(kSmiTag == 0); - __ b(ne, ¬_zero); - __ VmovHigh(scratch2, d5); - __ tst(scratch2, Operand(HeapNumber::kSignMask)); - __ b(ne, &transition); - __ bind(¬_zero); - __ mov(r0, scratch1); - __ Ret(); - } - - __ bind(&return_heap_number); - // Return a heap number, or fall through to type transition or runtime - // call if we can't. - // We are using vfp registers so r5 is available. 
- heap_number_result = r5; - BinaryOpStub_GenerateHeapResultAllocation(masm, - heap_number_result, - heap_number_map, - scratch1, - scratch2, - &call_runtime, - mode_); - __ sub(r0, heap_number_result, Operand(kHeapObjectTag)); - __ vstr(d5, r0, HeapNumber::kValueOffset); - __ mov(r0, heap_number_result); - __ Ret(); - - // A DIV operation expecting an integer result falls through - // to type transition. - - } else { - if (encoded_right_arg_.has_value) { - __ Vmov(d8, fixed_right_arg_value(), scratch1); - __ VFPCompareAndSetFlags(d1, d8); - __ b(ne, &transition); - } - - // We preserved r0 and r1 to be able to call runtime. - // Save the left value on the stack. - __ Push(r5, r4); - - Label pop_and_call_runtime; - - // Allocate a heap number to store the result. - heap_number_result = r5; - BinaryOpStub_GenerateHeapResultAllocation(masm, - heap_number_result, - heap_number_map, - scratch1, - scratch2, - &pop_and_call_runtime, - mode_); - - // Load the left value from the value saved on the stack. - __ Pop(r1, r0); - - // Call the C function to handle the double operation. - CallCCodeForDoubleOperation(masm, op_, heap_number_result, scratch1); - if (FLAG_debug_code) { - __ stop("Unreachable code."); - } - - __ bind(&pop_and_call_runtime); - __ Drop(2); - __ b(&call_runtime); - } - - break; - } - - case Token::BIT_OR: - case Token::BIT_XOR: - case Token::BIT_AND: - case Token::SAR: - case Token::SHR: - case Token::SHL: { - Label return_heap_number; - // Convert operands to 32-bit integers. Right in r2 and left in r3. The - // registers r0 and r1 (right and left) are preserved for the runtime - // call. - __ LoadNumberAsInt32(left, r3, heap_number_map, - scratch1, d0, d1, &transition); - __ LoadNumberAsInt32(right, r2, heap_number_map, - scratch1, d0, d1, &transition); - - // The ECMA-262 standard specifies that, for shift operations, only the - // 5 least significant bits of the shift value should be used. - switch (op_) { - case Token::BIT_OR: - __ orr(r2, r3, Operand(r2)); - break; - case Token::BIT_XOR: - __ eor(r2, r3, Operand(r2)); - break; - case Token::BIT_AND: - __ and_(r2, r3, Operand(r2)); - break; - case Token::SAR: - __ and_(r2, r2, Operand(0x1f)); - __ mov(r2, Operand(r3, ASR, r2)); - break; - case Token::SHR: - __ and_(r2, r2, Operand(0x1f)); - __ mov(r2, Operand(r3, LSR, r2), SetCC); - // SHR is special because it is required to produce a positive answer. - // We only get a negative result if the shift value (r2) is 0. - // This result cannot be respresented as a signed 32-bit integer, try - // to return a heap number if we can. - __ b(mi, (result_type_ <= BinaryOpIC::INT32) - ? &transition - : &return_heap_number); - break; - case Token::SHL: - __ and_(r2, r2, Operand(0x1f)); - __ mov(r2, Operand(r3, LSL, r2)); - break; - default: - UNREACHABLE(); - } - - // Check if the result fits in a smi. If not try to return a heap number. - // (We know the result is an int32). - __ TrySmiTag(r0, r2, &return_heap_number); - __ Ret(); - - __ bind(&return_heap_number); - heap_number_result = r5; - BinaryOpStub_GenerateHeapResultAllocation(masm, - heap_number_result, - heap_number_map, - scratch1, - scratch2, - &call_runtime, - mode_); - - if (op_ != Token::SHR) { - // Convert the result to a floating point value. - __ vmov(double_scratch.low(), r2); - __ vcvt_f64_s32(double_scratch, double_scratch.low()); - } else { - // The result must be interpreted as an unsigned 32-bit integer. 
- __ vmov(double_scratch.low(), r2); - __ vcvt_f64_u32(double_scratch, double_scratch.low()); - } - - // Store the result. - __ sub(r0, heap_number_result, Operand(kHeapObjectTag)); - __ vstr(double_scratch, r0, HeapNumber::kValueOffset); - __ mov(r0, heap_number_result); - __ Ret(); - - break; - } - - default: - UNREACHABLE(); - } - - // We never expect DIV to yield an integer result, so we always generate - // type transition code for DIV operations expecting an integer result: the - // code will fall through to this type transition. - if (transition.is_linked() || - ((op_ == Token::DIV) && (result_type_ <= BinaryOpIC::INT32))) { - __ bind(&transition); - GenerateTypeTransition(masm); - } - - __ bind(&call_runtime); - { - FrameScope scope(masm, StackFrame::INTERNAL); - GenerateRegisterArgsPush(masm); - GenerateCallRuntime(masm); - } - __ Ret(); -} - - -void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) { - Label call_runtime; - - if (op_ == Token::ADD) { - // Handle string addition here, because it is the only operation - // that does not do a ToNumber conversion on the operands. - GenerateAddStrings(masm); - } - - // Convert oddball arguments to numbers. - Label check, done; - __ CompareRoot(r1, Heap::kUndefinedValueRootIndex); - __ b(ne, &check); - if (Token::IsBitOp(op_)) { - __ mov(r1, Operand(Smi::FromInt(0))); - } else { - __ LoadRoot(r1, Heap::kNanValueRootIndex); - } - __ jmp(&done); - __ bind(&check); - __ CompareRoot(r0, Heap::kUndefinedValueRootIndex); - __ b(ne, &done); - if (Token::IsBitOp(op_)) { - __ mov(r0, Operand(Smi::FromInt(0))); - } else { - __ LoadRoot(r0, Heap::kNanValueRootIndex); - } - __ bind(&done); - - GenerateNumberStub(masm); -} - - -void BinaryOpStub::GenerateNumberStub(MacroAssembler* masm) { - Label call_runtime, transition; - BinaryOpStub_GenerateFPOperation( - masm, left_type_, right_type_, false, - &transition, &call_runtime, &transition, op_, mode_); - - __ bind(&transition); - GenerateTypeTransition(masm); - - __ bind(&call_runtime); - { - FrameScope scope(masm, StackFrame::INTERNAL); - GenerateRegisterArgsPush(masm); - GenerateCallRuntime(masm); - } - __ Ret(); -} - - -void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) { - Label call_runtime, call_string_add_or_runtime, transition; - - BinaryOpStub_GenerateSmiCode( - masm, &call_runtime, &call_runtime, op_, ALLOW_HEAPNUMBER_RESULTS, mode_); - - BinaryOpStub_GenerateFPOperation( - masm, left_type_, right_type_, false, - &call_string_add_or_runtime, &call_runtime, &transition, op_, mode_); - - __ bind(&transition); - GenerateTypeTransition(masm); - - __ bind(&call_string_add_or_runtime); - if (op_ == Token::ADD) { - GenerateAddStrings(masm); - } - - __ bind(&call_runtime); - { - FrameScope scope(masm, StackFrame::INTERNAL); - GenerateRegisterArgsPush(masm); - GenerateCallRuntime(masm); - } - __ Ret(); -} - - -void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) { - ASSERT(op_ == Token::ADD); - Label left_not_string, call_runtime; - - Register left = r1; - Register right = r0; - - // Check if left argument is a string. - __ JumpIfSmi(left, &left_not_string); - __ CompareObjectType(left, r2, r2, FIRST_NONSTRING_TYPE); - __ b(ge, &left_not_string); - - StringAddStub string_add_left_stub( - (StringAddFlags)(STRING_ADD_CHECK_RIGHT | STRING_ADD_ERECT_FRAME)); - GenerateRegisterArgsPush(masm); - __ TailCallStub(&string_add_left_stub); - - // Left operand is not a string, test right. 
- __ bind(&left_not_string); - __ JumpIfSmi(right, &call_runtime); - __ CompareObjectType(right, r2, r2, FIRST_NONSTRING_TYPE); - __ b(ge, &call_runtime); - - StringAddStub string_add_right_stub( - (StringAddFlags)(STRING_ADD_CHECK_LEFT | STRING_ADD_ERECT_FRAME)); - GenerateRegisterArgsPush(masm); - __ TailCallStub(&string_add_right_stub); - - // At least one argument is not a string. - __ bind(&call_runtime); -} - - -void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm, - Register result, - Register heap_number_map, - Register scratch1, - Register scratch2, - Label* gc_required, - OverwriteMode mode) { - // Code below will scratch result if allocation fails. To keep both arguments - // intact for the runtime call result cannot be one of these. - ASSERT(!result.is(r0) && !result.is(r1)); - - if (mode == OVERWRITE_LEFT || mode == OVERWRITE_RIGHT) { - Label skip_allocation, allocated; - Register overwritable_operand = mode == OVERWRITE_LEFT ? r1 : r0; - // If the overwritable operand is already an object, we skip the - // allocation of a heap number. - __ JumpIfNotSmi(overwritable_operand, &skip_allocation); - // Allocate a heap number for the result. - __ AllocateHeapNumber( - result, scratch1, scratch2, heap_number_map, gc_required); - __ b(&allocated); - __ bind(&skip_allocation); - // Use object holding the overwritable operand for result. - __ mov(result, Operand(overwritable_operand)); - __ bind(&allocated); - } else { - ASSERT(mode == NO_OVERWRITE); - __ AllocateHeapNumber( - result, scratch1, scratch2, heap_number_map, gc_required); - } -} - - -void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) { - __ Push(r1, r0); -} - - void TranscendentalCacheStub::Generate(MacroAssembler* masm) { // Untagged case: double input in d2, double result goes // into d2. @@ -2280,7 +1209,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) { Label calculate; Label invalid_cache; const Register scratch0 = r9; - const Register scratch1 = r7; + Register scratch1 = no_reg; // will be r4 const Register cache_entry = r0; const bool tagged = (argument_type_ == TAGGED); @@ -2360,6 +1289,9 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) { __ cmp(r2, r4); __ cmp(r3, r5, eq); __ b(ne, &calculate); + + scratch1 = r4; // Start of scratch1 range. + // Cache hit. Load result, cleanup and return. Counters* counters = masm->isolate()->counters(); __ IncrementCounter( @@ -2502,7 +1434,7 @@ void MathPowStub::Generate(MacroAssembler* masm) { const DwVfpRegister double_scratch = d0; const SwVfpRegister single_scratch = s0; const Register scratch = r9; - const Register scratch2 = r7; + const Register scratch2 = r4; Label call_runtime, done, int_exponent; if (exponent_type_ == ON_STACK) { @@ -2708,6 +1640,7 @@ void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) { RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate); ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate); CreateAllocationSiteStub::GenerateAheadOfTime(isolate); + BinaryOpStub::GenerateAheadOfTime(isolate); } @@ -2765,9 +1698,10 @@ void CEntryStub::GenerateCore(MacroAssembler* masm, if (do_gc) { // Passing r0. 
- __ PrepareCallCFunction(1, 0, r1); + __ PrepareCallCFunction(2, 0, r1); + __ mov(r1, Operand(ExternalReference::isolate_address(masm->isolate()))); __ CallCFunction(ExternalReference::perform_gc_function(isolate), - 1, 0); + 2, 0); } ExternalReference scope_depth = @@ -2841,7 +1775,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm, // sp: stack pointer // fp: frame pointer // Callee-saved register r4 still holds argc. - __ LeaveExitFrame(save_doubles_, r4); + __ LeaveExitFrame(save_doubles_, r4, true); __ mov(pc, lr); // check if we should retry or throw exception @@ -3011,14 +1945,14 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { // r3: argc // r4: argv Isolate* isolate = masm->isolate(); - __ mov(r8, Operand(-1)); // Push a bad frame pointer to fail if it is used. int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY; - __ mov(r7, Operand(Smi::FromInt(marker))); + __ mov(r8, Operand(Smi::FromInt(marker))); __ mov(r6, Operand(Smi::FromInt(marker))); __ mov(r5, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate))); __ ldr(r5, MemOperand(r5)); - __ Push(r8, r7, r6, r5); + __ mov(ip, Operand(-1)); // Push a bad frame pointer to fail if it is used. + __ Push(ip, r8, r6, r5); // Set up frame pointer for the frame to be pushed. __ add(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset)); @@ -3064,7 +1998,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { // Invoke: Link this frame into the handler chain. There's only one // handler block in this code object, so its index is 0. __ bind(&invoke); - // Must preserve r0-r4, r5-r7 are available. + // Must preserve r0-r4, r5-r6 are available. __ PushTryHandler(StackHandler::JS_ENTRY, 0); // If an exception not caught by another handler occurs, this handler // returns control to the code after the bl(&invoke) above, which @@ -3375,8 +2309,7 @@ void StringLengthStub::Generate(MacroAssembler* masm) { receiver = r0; } - StubCompiler::GenerateLoadStringLength(masm, receiver, r3, r4, &miss, - support_wrapper_); + StubCompiler::GenerateLoadStringLength(masm, receiver, r3, r4, &miss); __ bind(&miss); StubCompiler::TailCallBuiltin( @@ -3672,31 +2605,36 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) { __ ldr(r9, MemOperand(sp, 0 * kPointerSize)); __ add(r9, r9, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS))); __ sub(r9, r9, Operand(r1)); - __ LoadRoot(r7, Heap::kTheHoleValueRootIndex); + __ LoadRoot(r5, Heap::kTheHoleValueRootIndex); __ add(r3, r4, Operand(r6, LSL, 1)); __ add(r3, r3, Operand(kParameterMapHeaderSize)); // r6 = loop variable (tagged) // r1 = mapping index (tagged) // r3 = address of backing store (tagged) - // r4 = address of parameter map (tagged) - // r5 = temporary scratch (a.o., for address calculation) - // r7 = the hole value + // r4 = address of parameter map (tagged), which is also the address of new + // object + Heap::kArgumentsObjectSize (tagged) + // r0 = temporary scratch (a.o., for address calculation) + // r5 = the hole value __ jmp(¶meters_test); __ bind(¶meters_loop); __ sub(r6, r6, Operand(Smi::FromInt(1))); - __ mov(r5, Operand(r6, LSL, 1)); - __ add(r5, r5, Operand(kParameterMapHeaderSize - kHeapObjectTag)); - __ str(r9, MemOperand(r4, r5)); - __ sub(r5, r5, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize)); - __ str(r7, MemOperand(r3, r5)); + __ mov(r0, Operand(r6, LSL, 1)); + __ add(r0, r0, Operand(kParameterMapHeaderSize - kHeapObjectTag)); + __ str(r9, MemOperand(r4, r0)); + __ 
sub(r0, r0, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize)); + __ str(r5, MemOperand(r3, r0)); __ add(r9, r9, Operand(Smi::FromInt(1))); __ bind(¶meters_test); __ cmp(r6, Operand(Smi::FromInt(0))); __ b(ne, ¶meters_loop); + // Restore r0 = new object (tagged) + __ sub(r0, r4, Operand(Heap::kArgumentsObjectSize)); + __ bind(&skip_parameter_map); + // r0 = address of new object (tagged) // r2 = argument count (tagged) // r3 = address of backing store (tagged) // r5 = scratch @@ -3727,6 +2665,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) { __ Ret(); // Do the runtime call to allocate the arguments object. + // r0 = address of new object (tagged) // r2 = argument count (tagged) __ bind(&runtime); __ str(r2, MemOperand(sp, 0 * kPointerSize)); // Patch argument count. @@ -3855,7 +2794,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { // therefore the content of these registers are safe to use after the call. Register subject = r4; Register regexp_data = r5; - Register last_match_info_elements = r6; + Register last_match_info_elements = no_reg; // will be r6; // Ensure that a RegExp stack is allocated. Isolate* isolate = masm->isolate(); @@ -3988,19 +2927,19 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { STATIC_ASSERT(kTwoByteStringTag == 0); __ and_(r0, r0, Operand(kStringEncodingMask)); __ mov(r3, Operand(r0, ASR, 2), SetCC); - __ ldr(r7, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset), ne); - __ ldr(r7, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset), eq); + __ ldr(r6, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset), ne); + __ ldr(r6, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset), eq); // (E) Carry on. String handling is done. - // r7: irregexp code + // r6: irregexp code // Check that the irregexp code has been generated for the actual string // encoding. If it has, the field contains a code object otherwise it contains // a smi (code flushing support). - __ JumpIfSmi(r7, &runtime); + __ JumpIfSmi(r6, &runtime); // r1: previous index // r3: encoding of subject string (1 if ASCII, 0 if two_byte); - // r7: code + // r6: code // subject: Subject string // regexp_data: RegExp data (FixedArray) // All checks done. Now push arguments for native regexp code. @@ -4067,11 +3006,13 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { __ mov(r0, subject); // Locate the code entry and call it. - __ add(r7, r7, Operand(Code::kHeaderSize - kHeapObjectTag)); + __ add(r6, r6, Operand(Code::kHeaderSize - kHeapObjectTag)); DirectCEntryStub stub; - stub.GenerateCall(masm, r7); + stub.GenerateCall(masm, r6); - __ LeaveExitFrame(false, no_reg); + __ LeaveExitFrame(false, no_reg, true); + + last_match_info_elements = r6; // r0: result // subject: subject string (callee saved) @@ -4161,7 +3102,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { __ RecordWriteField(last_match_info_elements, RegExpImpl::kLastSubjectOffset, subject, - r7, + r3, kLRHasNotBeenSaved, kDontSaveFPRegs); __ mov(subject, r2); @@ -4171,7 +3112,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { __ RecordWriteField(last_match_info_elements, RegExpImpl::kLastInputOffset, subject, - r7, + r3, kLRHasNotBeenSaved, kDontSaveFPRegs); @@ -4343,6 +3284,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) { // Cache the called function in a global property cell. Cache states // are uninitialized, monomorphic (indicated by a JSFunction), and // megamorphic. 
+ // r0 : number of arguments to the construct function // r1 : the function to call // r2 : cache cell for call target Label initialize, done, miss, megamorphic, not_array_function; @@ -4364,9 +3306,6 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) { // If we didn't have a matching function, and we didn't find the megamorph // sentinel, then we have in the cell either some other function or an // AllocationSite. Do a map check on the object in r3. - Handle<Map> allocation_site_map( - masm->isolate()->heap()->allocation_site_map(), - masm->isolate()); __ ldr(r5, FieldMemOperand(r3, 0)); __ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex); __ b(ne, &miss); @@ -4403,6 +3342,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) { { FrameScope scope(masm, StackFrame::INTERNAL); + // Arguments register must be smi-tagged to call out. __ SmiTag(r0); __ push(r0); __ push(r1); @@ -4739,7 +3679,6 @@ void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm, Register scratch2, Register scratch3, Register scratch4, - Register scratch5, int flags) { bool ascii = (flags & COPY_ASCII) != 0; bool dest_always_aligned = (flags & DEST_ALWAYS_ALIGNED) != 0; @@ -4814,30 +3753,29 @@ void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm, __ bind(&loop); __ ldr(scratch3, MemOperand(src, 4, PostIndex)); - __ sub(scratch5, limit, Operand(dest)); __ orr(scratch1, scratch1, Operand(scratch3, LSL, left_shift)); __ str(scratch1, MemOperand(dest, 4, PostIndex)); __ mov(scratch1, Operand(scratch3, LSR, right_shift)); // Loop if four or more bytes left to copy. - // Compare to eight, because we did the subtract before increasing dst. - __ sub(scratch5, scratch5, Operand(8), SetCC); + __ sub(scratch3, limit, Operand(dest)); + __ sub(scratch3, scratch3, Operand(4), SetCC); __ b(ge, &loop); } // There is now between zero and three bytes left to copy (negative that - // number is in scratch5), and between one and three bytes already read into + // number is in scratch3), and between one and three bytes already read into // scratch1 (eight times that number in scratch4). We may have read past // the end of the string, but because objects are aligned, we have not read // past the end of the object. // Find the minimum of remaining characters to move and preloaded characters // and write those as bytes. - __ add(scratch5, scratch5, Operand(4), SetCC); + __ add(scratch3, scratch3, Operand(4), SetCC); __ b(eq, &done); - __ cmp(scratch4, Operand(scratch5, LSL, 3), ne); + __ cmp(scratch4, Operand(scratch3, LSL, 3), ne); // Move minimum of bytes read and bytes left to copy to scratch4. - __ mov(scratch5, Operand(scratch4, LSR, 3), LeaveCC, lt); - // Between one and three (value in scratch5) characters already read into + __ mov(scratch3, Operand(scratch4, LSR, 3), LeaveCC, lt); + // Between one and three (value in scratch3) characters already read into // scratch ready to write.
- __ cmp(scratch5, Operand(2)); + __ cmp(scratch3, Operand(2)); __ strb(scratch1, MemOperand(dest, 1, PostIndex)); __ mov(scratch1, Operand(scratch1, LSR, 8), LeaveCC, ge); __ strb(scratch1, MemOperand(dest, 1, PostIndex), ge); @@ -5177,10 +4115,10 @@ void SubStringStub::Generate(MacroAssembler* masm) { STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0); __ tst(r1, Operand(kStringEncodingMask)); __ b(eq, &two_byte_slice); - __ AllocateAsciiSlicedString(r0, r2, r6, r7, &runtime); + __ AllocateAsciiSlicedString(r0, r2, r6, r4, &runtime); __ jmp(&set_slice_header); __ bind(&two_byte_slice); - __ AllocateTwoByteSlicedString(r0, r2, r6, r7, &runtime); + __ AllocateTwoByteSlicedString(r0, r2, r6, r4, &runtime); __ bind(&set_slice_header); __ mov(r3, Operand(r3, LSL, 1)); __ str(r5, FieldMemOperand(r0, SlicedString::kParentOffset)); @@ -5221,7 +4159,7 @@ void SubStringStub::Generate(MacroAssembler* masm) { __ b(eq, &two_byte_sequential); // Allocate and copy the resulting ASCII string. - __ AllocateAsciiString(r0, r2, r4, r6, r7, &runtime); + __ AllocateAsciiString(r0, r2, r4, r6, r1, &runtime); // Locate first character of substring to copy. __ add(r5, r5, r3); @@ -5233,13 +4171,13 @@ void SubStringStub::Generate(MacroAssembler* masm) { // r2: result string length // r5: first character of substring to copy STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0); - StringHelper::GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r7, r9, + StringHelper::GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r9, COPY_ASCII | DEST_ALWAYS_ALIGNED); __ jmp(&return_r0); // Allocate and copy the resulting two-byte string. __ bind(&two_byte_sequential); - __ AllocateTwoByteString(r0, r2, r4, r6, r7, &runtime); + __ AllocateTwoByteString(r0, r2, r4, r6, r1, &runtime); // Locate first character of substring to copy. STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0); @@ -5253,7 +4191,7 @@ void SubStringStub::Generate(MacroAssembler* masm) { // r5: first character of substring to copy. STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0); StringHelper::GenerateCopyCharactersLong( - masm, r1, r5, r2, r3, r4, r6, r7, r9, DEST_ALWAYS_ALIGNED); + masm, r1, r5, r2, r3, r4, r6, r9, DEST_ALWAYS_ALIGNED); __ bind(&return_r0); Counters* counters = masm->isolate()->counters(); @@ -5519,7 +4457,7 @@ void StringAddStub::Generate(MacroAssembler* masm) { __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset)); __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset)); } - __ JumpIfBothInstanceTypesAreNotSequentialAscii(r4, r5, r6, r7, + __ JumpIfBothInstanceTypesAreNotSequentialAscii(r4, r5, r6, r3, &call_runtime); // Get the two characters forming the sub string. @@ -5530,7 +4468,7 @@ void StringAddStub::Generate(MacroAssembler* masm) { // just allocate a new one. Label make_two_character_string; StringHelper::GenerateTwoCharacterStringTableProbe( - masm, r2, r3, r6, r7, r4, r5, r9, &make_two_character_string); + masm, r2, r3, r6, r0, r4, r5, r9, &make_two_character_string); __ IncrementCounter(counters->string_add_native(), 1, r2, r3); __ add(sp, sp, Operand(2 * kPointerSize)); __ Ret(); @@ -5575,7 +4513,7 @@ void StringAddStub::Generate(MacroAssembler* masm) { // Allocate an ASCII cons string. __ bind(&ascii_data); - __ AllocateAsciiConsString(r7, r6, r4, r5, &call_runtime); + __ AllocateAsciiConsString(r3, r6, r4, r5, &call_runtime); __ bind(&allocated); // Fill the fields of the cons string. 
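// (Illustrative aside, not part of this patch: a write barrier is only
// required when the cons string may have been allocated in old space; a
// freshly allocated new-space object needs no remembered-set entry, which
// is what the skip_write_barrier fast path below relies on.)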
Label skip_write_barrier, after_writing; @@ -5586,15 +4524,15 @@ void StringAddStub::Generate(MacroAssembler* masm) { __ cmp(r4, Operand::Zero()); __ b(eq, &skip_write_barrier); - __ str(r0, FieldMemOperand(r7, ConsString::kFirstOffset)); - __ RecordWriteField(r7, + __ str(r0, FieldMemOperand(r3, ConsString::kFirstOffset)); + __ RecordWriteField(r3, ConsString::kFirstOffset, r0, r4, kLRHasNotBeenSaved, kDontSaveFPRegs); - __ str(r1, FieldMemOperand(r7, ConsString::kSecondOffset)); - __ RecordWriteField(r7, + __ str(r1, FieldMemOperand(r3, ConsString::kSecondOffset)); + __ RecordWriteField(r3, ConsString::kSecondOffset, r1, r4, @@ -5603,12 +4541,12 @@ void StringAddStub::Generate(MacroAssembler* masm) { __ jmp(&after_writing); __ bind(&skip_write_barrier); - __ str(r0, FieldMemOperand(r7, ConsString::kFirstOffset)); - __ str(r1, FieldMemOperand(r7, ConsString::kSecondOffset)); + __ str(r0, FieldMemOperand(r3, ConsString::kFirstOffset)); + __ str(r1, FieldMemOperand(r3, ConsString::kSecondOffset)); __ bind(&after_writing); - __ mov(r0, Operand(r7)); + __ mov(r0, Operand(r3)); __ IncrementCounter(counters->string_add_native(), 1, r2, r3); __ add(sp, sp, Operand(2 * kPointerSize)); __ Ret(); @@ -5628,7 +4566,7 @@ void StringAddStub::Generate(MacroAssembler* masm) { __ b(eq, &ascii_data); // Allocate a two byte cons string. - __ AllocateTwoByteConsString(r7, r6, r4, r5, &call_runtime); + __ AllocateTwoByteConsString(r3, r6, r4, r5, &call_runtime); __ jmp(&allocated); // We cannot encounter sliced strings or cons strings here since: @@ -5652,14 +4590,15 @@ void StringAddStub::Generate(MacroAssembler* masm) { } // Check whether both strings have same encoding - __ eor(r7, r4, Operand(r5)); - __ tst(r7, Operand(kStringEncodingMask)); + __ eor(ip, r4, Operand(r5)); + ASSERT(__ ImmediateFitsAddrMode1Instruction(kStringEncodingMask)); + __ tst(ip, Operand(kStringEncodingMask)); __ b(ne, &call_runtime); STATIC_ASSERT(kSeqStringTag == 0); __ tst(r4, Operand(kStringRepresentationMask)); STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize); - __ add(r7, + __ add(r6, r0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag), LeaveCC, @@ -5669,7 +4608,7 @@ void StringAddStub::Generate(MacroAssembler* masm) { STATIC_ASSERT(kShortExternalStringTag != 0); __ tst(r4, Operand(kShortExternalStringMask)); __ b(ne, &call_runtime); - __ ldr(r7, FieldMemOperand(r0, ExternalString::kResourceDataOffset)); + __ ldr(r6, FieldMemOperand(r0, ExternalString::kResourceDataOffset)); __ bind(&first_prepared); STATIC_ASSERT(kSeqStringTag == 0); @@ -5689,76 +4628,57 @@ void StringAddStub::Generate(MacroAssembler* masm) { __ bind(&second_prepared); Label non_ascii_string_add_flat_result; - // r7: first character of first string + // r6: first character of first string // r1: first character of second string // r2: length of first string. // r3: length of second string. - // r6: sum of lengths. // Both strings have the same encoding. STATIC_ASSERT(kTwoByteStringTag == 0); __ tst(r5, Operand(kStringEncodingMask)); __ b(eq, &non_ascii_string_add_flat_result); - __ AllocateAsciiString(r0, r6, r4, r5, r9, &call_runtime); - __ add(r6, r0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag)); + __ add(r2, r2, Operand(r3)); + __ AllocateAsciiString(r0, r2, r4, r5, r9, &call_runtime); + __ sub(r2, r2, Operand(r3)); + __ add(r5, r0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag)); // r0: result string. - // r7: first character of first string. + // r6: first character of first string. 
// r1: first character of second string. // r2: length of first string. // r3: length of second string. - // r6: first character of result. - StringHelper::GenerateCopyCharacters(masm, r6, r7, r2, r4, true); - // r6: next character of result. - StringHelper::GenerateCopyCharacters(masm, r6, r1, r3, r4, true); + // r5: first character of result. + StringHelper::GenerateCopyCharacters(masm, r5, r6, r2, r4, true); + // r5: next character of result. + StringHelper::GenerateCopyCharacters(masm, r5, r1, r3, r4, true); __ IncrementCounter(counters->string_add_native(), 1, r2, r3); __ add(sp, sp, Operand(2 * kPointerSize)); __ Ret(); __ bind(&non_ascii_string_add_flat_result); - __ AllocateTwoByteString(r0, r6, r4, r5, r9, &call_runtime); - __ add(r6, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); + __ add(r2, r2, Operand(r3)); + __ AllocateTwoByteString(r0, r2, r4, r5, r9, &call_runtime); + __ sub(r2, r2, Operand(r3)); + __ add(r5, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); // r0: result string. - // r7: first character of first string. + // r6: first character of first string. // r1: first character of second string. // r2: length of first string. // r3: length of second string. - // r6: first character of result. - StringHelper::GenerateCopyCharacters(masm, r6, r7, r2, r4, false); - // r6: next character of result. - StringHelper::GenerateCopyCharacters(masm, r6, r1, r3, r4, false); + // r5: first character of result. + StringHelper::GenerateCopyCharacters(masm, r5, r6, r2, r4, false); + // r5: next character of result. + StringHelper::GenerateCopyCharacters(masm, r5, r1, r3, r4, false); __ IncrementCounter(counters->string_add_native(), 1, r2, r3); __ add(sp, sp, Operand(2 * kPointerSize)); __ Ret(); // Just jump to runtime to add the two strings. __ bind(&call_runtime); - if ((flags_ & STRING_ADD_ERECT_FRAME) != 0) { - GenerateRegisterArgsPop(masm); - // Build a frame - { - FrameScope scope(masm, StackFrame::INTERNAL); - GenerateRegisterArgsPush(masm); - __ CallRuntime(Runtime::kStringAdd, 2); - } - __ Ret(); - } else { - __ TailCallRuntime(Runtime::kStringAdd, 2, 1); - } + __ TailCallRuntime(Runtime::kStringAdd, 2, 1); if (call_builtin.is_linked()) { __ bind(&call_builtin); - if ((flags_ & STRING_ADD_ERECT_FRAME) != 0) { - GenerateRegisterArgsPop(masm); - // Build a frame - { - FrameScope scope(masm, StackFrame::INTERNAL); - GenerateRegisterArgsPush(masm); - __ InvokeBuiltin(builtin_id, CALL_FUNCTION); - } - __ Ret(); - } else { - __ InvokeBuiltin(builtin_id, JUMP_FUNCTION); - } + __ InvokeBuiltin(builtin_id, JUMP_FUNCTION); } } @@ -5792,13 +4712,7 @@ void StringAddStub::GenerateConvertArgument(MacroAssembler* masm, // Check the number to string cache. __ bind(¬_string); // Puts the cached result into scratch1. - NumberToStringStub::GenerateLookupNumberStringCache(masm, - arg, - scratch1, - scratch2, - scratch3, - scratch4, - slow); + __ LookupNumberStringCache(arg, scratch1, scratch2, scratch3, scratch4, slow); __ mov(arg, scratch1); __ str(arg, MemOperand(sp, stack_offset)); __ bind(&done); @@ -6401,7 +5315,7 @@ struct AheadOfTimeWriteBarrierStubList { static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = { // Used in RegExpExecStub. - { REG(r6), REG(r4), REG(r7), EMIT_REMEMBERED_SET }, + { REG(r6), REG(r4), REG(r3), EMIT_REMEMBERED_SET }, // Used in CompileArrayPushCall. // Also used in StoreIC::GenerateNormal via GenerateDictionaryStore. // Also used in KeyedStoreIC::GenerateGeneric. 
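The table above pre-declares every (object, value, address) register triple for which a RecordWriteStub must exist ahead of time; the edits in this file consistently swap r7 out of those triples because r7 becomes the pp register in JavaScript code (see the frames-arm.h hunk below). A minimal sketch of what one entry encodes, using illustrative names rather than V8's actual types:

struct AheadOfTimeWriteBarrierEntry {
  const char* object_reg;    // register holding the object written into
  const char* value_reg;     // register holding the stored value
  const char* address_reg;   // register holding the slot address
  bool emit_remembered_set;  // EMIT_REMEMBERED_SET vs. OMIT_REMEMBERED_SET
};

// RegExpExecStub's entry after this change: the address register moves
// from r7 to r3, matching the RecordWriteField calls patched above.
static const AheadOfTimeWriteBarrierEntry kRegExpExecEntry =
    {"r6", "r4", "r3", true};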
@@ -6428,8 +5342,8 @@ static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = { // FastNewClosureStub::Generate { REG(r2), REG(r4), REG(r1), EMIT_REMEMBERED_SET }, // StringAddStub::Generate - { REG(r7), REG(r1), REG(r4), EMIT_REMEMBERED_SET }, - { REG(r7), REG(r0), REG(r4), EMIT_REMEMBERED_SET }, + { REG(r3), REG(r1), REG(r4), EMIT_REMEMBERED_SET }, + { REG(r3), REG(r0), REG(r4), EMIT_REMEMBERED_SET }, // Null termination. { REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET} }; diff --git a/deps/v8/src/arm/code-stubs-arm.h b/deps/v8/src/arm/code-stubs-arm.h index d05e9a1d84..c03d8f27ec 100644 --- a/deps/v8/src/arm/code-stubs-arm.h +++ b/deps/v8/src/arm/code-stubs-arm.h @@ -106,7 +106,6 @@ class StringHelper : public AllStatic { Register scratch2, Register scratch3, Register scratch4, - Register scratch5, int flags); @@ -257,31 +256,6 @@ class WriteInt32ToHeapNumberStub : public PlatformCodeStub { }; -class NumberToStringStub: public PlatformCodeStub { - public: - NumberToStringStub() { } - - // Generate code to do a lookup in the number string cache. If the number in - // the register object is found in the cache the generated code falls through - // with the result in the result register. The object and the result register - // can be the same. If the number is not found in the cache the code jumps to - // the label not_found with only the content of register object unchanged. - static void GenerateLookupNumberStringCache(MacroAssembler* masm, - Register object, - Register result, - Register scratch1, - Register scratch2, - Register scratch3, - Label* not_found); - - private: - Major MajorKey() { return NumberToString; } - int MinorKey() { return 0; } - - void Generate(MacroAssembler* masm); -}; - - class RecordWriteStub: public PlatformCodeStub { public: RecordWriteStub(Register object, diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc index 1bcf3e3a60..44c331b75f 100644 --- a/deps/v8/src/arm/codegen-arm.cc +++ b/deps/v8/src/arm/codegen-arm.cc @@ -55,7 +55,7 @@ UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) { #if defined(USE_SIMULATOR) byte* fast_exp_arm_machine_code = NULL; double fast_exp_simulator(double x) { - return Simulator::current(Isolate::Current())->CallFP( + return Simulator::current(Isolate::Current())->CallFPReturnsDouble( fast_exp_arm_machine_code, x, 0); } #endif @@ -402,8 +402,7 @@ void ElementsTransitionGenerator::GenerateMapChangeElementsTransition( // ----------------------------------- if (mode == TRACK_ALLOCATION_SITE) { ASSERT(allocation_memento_found != NULL); - __ TestJSArrayForAllocationMemento(r2, r4); - __ b(eq, allocation_memento_found); + __ JumpIfJSArrayHasAllocationMemento(r2, r4, allocation_memento_found); } // Set transitioned map. @@ -432,8 +431,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble( Label loop, entry, convert_hole, gc_required, only_change_map, done; if (mode == TRACK_ALLOCATION_SITE) { - __ TestJSArrayForAllocationMemento(r2, r4); - __ b(eq, fail); + __ JumpIfJSArrayHasAllocationMemento(r2, r4, fail); } // Check for empty arrays, which only require a map transition and no changes @@ -444,15 +442,16 @@ void ElementsTransitionGenerator::GenerateSmiToDouble( __ push(lr); __ ldr(r5, FieldMemOperand(r4, FixedArray::kLengthOffset)); - // r4: source FixedArray // r5: number of elements (smi-tagged) // Allocate new FixedDoubleArray. // Use lr as a temporary register. 
__ mov(lr, Operand(r5, LSL, 2)); __ add(lr, lr, Operand(FixedDoubleArray::kHeaderSize)); - __ Allocate(lr, r6, r7, r9, &gc_required, DOUBLE_ALIGNMENT); + __ Allocate(lr, r6, r4, r9, &gc_required, DOUBLE_ALIGNMENT); // r6: destination FixedDoubleArray, not tagged as heap object. + __ ldr(r4, FieldMemOperand(r2, JSObject::kElementsOffset)); + // r4: source FixedArray. // Set destination FixedDoubleArray's length and map. __ LoadRoot(r9, Heap::kFixedDoubleArrayMapRootIndex); @@ -483,15 +482,15 @@ void ElementsTransitionGenerator::GenerateSmiToDouble( // Prepare for conversion loop. __ add(r3, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); - __ add(r7, r6, Operand(FixedDoubleArray::kHeaderSize)); - __ add(r6, r7, Operand(r5, LSL, 2)); + __ add(r9, r6, Operand(FixedDoubleArray::kHeaderSize)); + __ add(r6, r9, Operand(r5, LSL, 2)); __ mov(r4, Operand(kHoleNanLower32)); __ mov(r5, Operand(kHoleNanUpper32)); // r3: begin of source FixedArray element fields, not tagged // r4: kHoleNanLower32 // r5: kHoleNanUpper32 // r6: end of destination FixedDoubleArray, not tagged - // r7: begin of FixedDoubleArray element fields, not tagged + // r9: begin of FixedDoubleArray element fields, not tagged __ b(&entry); @@ -514,30 +513,30 @@ void ElementsTransitionGenerator::GenerateSmiToDouble( // Convert and copy elements. __ bind(&loop); - __ ldr(r9, MemOperand(r3, 4, PostIndex)); - // r9: current element - __ UntagAndJumpIfNotSmi(r9, r9, &convert_hole); + __ ldr(lr, MemOperand(r3, 4, PostIndex)); + // lr: current element + __ UntagAndJumpIfNotSmi(lr, lr, &convert_hole); // Normal smi, convert to double and store. - __ vmov(s0, r9); + __ vmov(s0, lr); __ vcvt_f64_s32(d0, s0); - __ vstr(d0, r7, 0); - __ add(r7, r7, Operand(8)); + __ vstr(d0, r9, 0); + __ add(r9, r9, Operand(8)); __ b(&entry); // Hole found, store the-hole NaN. __ bind(&convert_hole); if (FLAG_debug_code) { // Restore a "smi-untagged" heap object. - __ SmiTag(r9); - __ orr(r9, r9, Operand(1)); - __ CompareRoot(r9, Heap::kTheHoleValueRootIndex); + __ SmiTag(lr); + __ orr(lr, lr, Operand(1)); + __ CompareRoot(lr, Heap::kTheHoleValueRootIndex); __ Assert(eq, kObjectFoundInSmiOnlyArray); } - __ Strd(r4, r5, MemOperand(r7, 8, PostIndex)); + __ Strd(r4, r5, MemOperand(r9, 8, PostIndex)); __ bind(&entry); - __ cmp(r7, r6); + __ cmp(r9, r6); __ b(lt, &loop); __ pop(lr); @@ -558,8 +557,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject( Label entry, loop, convert_hole, gc_required, only_change_map; if (mode == TRACK_ALLOCATION_SITE) { - __ TestJSArrayForAllocationMemento(r2, r4); - __ b(eq, fail); + __ JumpIfJSArrayHasAllocationMemento(r2, r4, fail); } // Check for empty arrays, which only require a map transition and no changes @@ -577,7 +575,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject( // Allocate new FixedArray. __ mov(r0, Operand(FixedDoubleArray::kHeaderSize)); __ add(r0, r0, Operand(r5, LSL, 1)); - __ Allocate(r0, r6, r7, r9, &gc_required, NO_ALLOCATION_FLAGS); + __ Allocate(r0, r6, r3, r9, &gc_required, NO_ALLOCATION_FLAGS); // r6: destination FixedArray, not tagged as heap object // Set destination FixedDoubleArray's length and map. 
__ LoadRoot(r9, Heap::kFixedArrayMapRootIndex); @@ -589,14 +587,12 @@ void ElementsTransitionGenerator::GenerateDoubleToObject( __ add(r3, r6, Operand(FixedArray::kHeaderSize)); __ add(r6, r6, Operand(kHeapObjectTag)); __ add(r5, r3, Operand(r5, LSL, 1)); - __ LoadRoot(r7, Heap::kTheHoleValueRootIndex); __ LoadRoot(r9, Heap::kHeapNumberMapRootIndex); // Using offsetted addresses in r4 to fully take advantage of post-indexing. // r3: begin of destination FixedArray element fields, not tagged // r4: begin of source FixedDoubleArray element fields, not tagged, +4 // r5: end of destination FixedArray, not tagged // r6: destination FixedArray - // r7: the-hole pointer // r9: heap number map __ b(&entry); @@ -608,7 +604,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject( __ bind(&loop); __ ldr(r1, MemOperand(r4, 8, PostIndex)); - // lr: current element's upper 32 bit + // r1: current element's upper 32 bit // r4: address of next element's upper 32 bit __ cmp(r1, Operand(kHoleNanUpper32)); __ b(eq, &convert_hole); @@ -631,7 +627,8 @@ void ElementsTransitionGenerator::GenerateDoubleToObject( // Replace the-hole NaN with the-hole pointer. __ bind(&convert_hole); - __ str(r7, MemOperand(r3, 4, PostIndex)); + __ LoadRoot(r0, Heap::kTheHoleValueRootIndex); + __ str(r0, MemOperand(r3, 4, PostIndex)); __ bind(&entry); __ cmp(r3, r5); @@ -775,50 +772,65 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm, ASSERT(!temp2.is(temp3)); ASSERT(ExternalReference::math_exp_constants(0).address() != NULL); - Label done; + Label zero, infinity, done; __ mov(temp3, Operand(ExternalReference::math_exp_constants(0))); __ vldr(double_scratch1, ExpConstant(0, temp3)); - __ vmov(result, kDoubleRegZero); __ VFPCompareAndSetFlags(double_scratch1, input); - __ b(ge, &done); + __ b(ge, &zero); + __ vldr(double_scratch2, ExpConstant(1, temp3)); __ VFPCompareAndSetFlags(input, double_scratch2); - __ vldr(result, ExpConstant(2, temp3)); - __ b(ge, &done); + __ b(ge, &infinity); + __ vldr(double_scratch1, ExpConstant(3, temp3)); __ vldr(result, ExpConstant(4, temp3)); __ vmul(double_scratch1, double_scratch1, input); __ vadd(double_scratch1, double_scratch1, result); - __ vmov(temp2, temp1, double_scratch1); + __ VmovLow(temp2, double_scratch1); __ vsub(double_scratch1, double_scratch1, result); __ vldr(result, ExpConstant(6, temp3)); __ vldr(double_scratch2, ExpConstant(5, temp3)); __ vmul(double_scratch1, double_scratch1, double_scratch2); __ vsub(double_scratch1, double_scratch1, input); __ vsub(result, result, double_scratch1); - __ vmul(input, double_scratch1, double_scratch1); - __ vmul(result, result, input); - __ mov(temp1, Operand(temp2, LSR, 11)); + __ vmul(double_scratch2, double_scratch1, double_scratch1); + __ vmul(result, result, double_scratch2); __ vldr(double_scratch2, ExpConstant(7, temp3)); __ vmul(result, result, double_scratch2); __ vsub(result, result, double_scratch1); - __ vldr(double_scratch2, ExpConstant(8, temp3)); + // Mov 1 in double_scratch2 as math_exp_constants_array[8] == 1. + ASSERT(*reinterpret_cast<double*> + (ExternalReference::math_exp_constants(8).address()) == 1); + __ vmov(double_scratch2, 1); __ vadd(result, result, double_scratch2); - __ movw(ip, 0x7ff); - __ and_(temp2, temp2, Operand(ip)); + __ mov(temp1, Operand(temp2, LSR, 11)); + __ Ubfx(temp2, temp2, 0, 11); __ add(temp1, temp1, Operand(0x3ff)); - __ mov(temp1, Operand(temp1, LSL, 20)); // Must not call ExpConstant() after overwriting temp3! 
__ mov(temp3, Operand(ExternalReference::math_exp_log_table())); - __ ldr(ip, MemOperand(temp3, temp2, LSL, 3)); - __ add(temp3, temp3, Operand(kPointerSize)); - __ ldr(temp2, MemOperand(temp3, temp2, LSL, 3)); - __ orr(temp1, temp1, temp2); - __ vmov(input, ip, temp1); - __ vmul(result, result, input); + __ add(temp3, temp3, Operand(temp2, LSL, 3)); + __ ldm(ia, temp3, temp2.bit() | temp3.bit()); + // The first word loaded goes into the lower-numbered register. + if (temp2.code() < temp3.code()) { + __ orr(temp1, temp3, Operand(temp1, LSL, 20)); + __ vmov(double_scratch1, temp2, temp1); + } else { + __ orr(temp1, temp2, Operand(temp1, LSL, 20)); + __ vmov(double_scratch1, temp3, temp1); + } + __ vmul(result, result, double_scratch1); + __ b(&done); + + __ bind(&zero); + __ vmov(result, kDoubleRegZero); + __ b(&done); + + __ bind(&infinity); + __ vldr(result, ExpConstant(2, temp3)); + __ bind(&done); } @@ -859,7 +871,7 @@ bool Code::IsYoungSequence(byte* sequence) { void Code::GetCodeAgeAndParity(byte* sequence, Age* age, MarkingParity* parity) { if (IsYoungSequence(sequence)) { - *age = kNoAge; + *age = kNoAgeCodeAge; *parity = NO_MARKING_PARITY; } else { Address target_address = Memory::Address_at( @@ -870,16 +882,17 @@ void Code::GetCodeAgeAndParity(byte* sequence, Age* age, } -void Code::PatchPlatformCodeAge(byte* sequence, +void Code::PatchPlatformCodeAge(Isolate* isolate, + byte* sequence, Code::Age age, MarkingParity parity) { uint32_t young_length; byte* young_sequence = GetNoCodeAgeSequence(&young_length); - if (age == kNoAge) { + if (age == kNoAgeCodeAge) { CopyBytes(sequence, young_sequence, young_length); CPU::FlushICache(sequence, young_length); } else { - Code* stub = GetCodeAgeStub(age, parity); + Code* stub = GetCodeAgeStub(isolate, age, parity); CodePatcher patcher(sequence, young_length / Assembler::kInstrSize); patcher.masm()->add(r0, pc, Operand(-8)); patcher.masm()->ldr(pc, MemOperand(pc, -4)); diff --git a/deps/v8/src/arm/codegen-arm.h b/deps/v8/src/arm/codegen-arm.h index 54530d8726..ecbe64cbad 100644 --- a/deps/v8/src/arm/codegen-arm.h +++ b/deps/v8/src/arm/codegen-arm.h @@ -97,6 +97,7 @@ class StringCharLoadGenerator : public AllStatic { class MathExpGenerator : public AllStatic { public: + // Register input isn't modified. All other registers are clobbered. static void EmitMathExp(MacroAssembler* masm, DwVfpRegister input, DwVfpRegister result, diff --git a/deps/v8/src/arm/deoptimizer-arm.cc b/deps/v8/src/arm/deoptimizer-arm.cc index 3c57b64395..9339c5fade 100644 --- a/deps/v8/src/arm/deoptimizer-arm.cc +++ b/deps/v8/src/arm/deoptimizer-arm.cc @@ -81,100 +81,6 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) { } -static const int32_t kBranchBeforeInterrupt = 0x5a000004; - -// The back edge bookkeeping code matches the pattern: -// -// <decrement profiling counter> -// 2a 00 00 01 bpl ok -// e5 9f c? ?? ldr ip, [pc, <interrupt stub address>] -// e1 2f ff 3c blx ip -// ok-label -// -// We patch the code to the following form: -// -// <decrement profiling counter> -// e1 a0 00 00 mov r0, r0 (NOP) -// e5 9f c? ?? ldr ip, [pc, <on-stack replacement address>] -// e1 2f ff 3c blx ip -// ok-label - -void Deoptimizer::PatchInterruptCodeAt(Code* unoptimized_code, - Address pc_after, - Code* replacement_code) { - static const int kInstrSize = Assembler::kInstrSize; - // Turn the jump into nops. - CodePatcher patcher(pc_after - 3 * kInstrSize, 1); - patcher.masm()->nop(); - // Replace the call address.
- uint32_t interrupt_address_offset = Memory::uint16_at(pc_after - - 2 * kInstrSize) & 0xfff; - Address interrupt_address_pointer = pc_after + interrupt_address_offset; - Memory::uint32_at(interrupt_address_pointer) = - reinterpret_cast<uint32_t>(replacement_code->entry()); - - unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch( - unoptimized_code, pc_after - 2 * kInstrSize, replacement_code); -} - - -void Deoptimizer::RevertInterruptCodeAt(Code* unoptimized_code, - Address pc_after, - Code* interrupt_code) { - static const int kInstrSize = Assembler::kInstrSize; - // Restore the original jump. - CodePatcher patcher(pc_after - 3 * kInstrSize, 1); - patcher.masm()->b(4 * kInstrSize, pl); // ok-label is 4 instructions later. - ASSERT_EQ(kBranchBeforeInterrupt, - Memory::int32_at(pc_after - 3 * kInstrSize)); - // Restore the original call address. - uint32_t interrupt_address_offset = Memory::uint16_at(pc_after - - 2 * kInstrSize) & 0xfff; - Address interrupt_address_pointer = pc_after + interrupt_address_offset; - Memory::uint32_at(interrupt_address_pointer) = - reinterpret_cast<uint32_t>(interrupt_code->entry()); - - interrupt_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch( - unoptimized_code, pc_after - 2 * kInstrSize, interrupt_code); -} - - -#ifdef DEBUG -Deoptimizer::InterruptPatchState Deoptimizer::GetInterruptPatchState( - Isolate* isolate, - Code* unoptimized_code, - Address pc_after) { - static const int kInstrSize = Assembler::kInstrSize; - ASSERT(Memory::int32_at(pc_after - kInstrSize) == kBlxIp); - - uint32_t interrupt_address_offset = - Memory::uint16_at(pc_after - 2 * kInstrSize) & 0xfff; - Address interrupt_address_pointer = pc_after + interrupt_address_offset; - - if (Assembler::IsNop(Assembler::instr_at(pc_after - 3 * kInstrSize))) { - ASSERT(Assembler::IsLdrPcImmediateOffset( - Assembler::instr_at(pc_after - 2 * kInstrSize))); - Code* osr_builtin = - isolate->builtins()->builtin(Builtins::kOnStackReplacement); - ASSERT(reinterpret_cast<uint32_t>(osr_builtin->entry()) == - Memory::uint32_at(interrupt_address_pointer)); - return PATCHED_FOR_OSR; - } else { - // Get the interrupt stub code object to match against from cache. - Code* interrupt_builtin = - isolate->builtins()->builtin(Builtins::kInterruptCheck); - ASSERT(Assembler::IsLdrPcImmediateOffset( - Assembler::instr_at(pc_after - 2 * kInstrSize))); - ASSERT_EQ(kBranchBeforeInterrupt, - Memory::int32_at(pc_after - 3 * kInstrSize)); - ASSERT(reinterpret_cast<uint32_t>(interrupt_builtin->entry()) == - Memory::uint32_at(interrupt_address_pointer)); - return NOT_PATCHED; - } -} -#endif // DEBUG - - void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) { // Set the register values. 
The values are not important as there are no // callee saved registers in JavaScript frames, so all registers are @@ -201,10 +107,7 @@ void Deoptimizer::SetPlatformCompiledStubRegisters( ApiFunction function(descriptor->deoptimization_handler_); ExternalReference xref(&function, ExternalReference::BUILTIN_CALL, isolate_); intptr_t handler = reinterpret_cast<intptr_t>(xref.address()); - int params = descriptor->register_param_count_; - if (descriptor->stack_parameter_count_ != NULL) { - params++; - } + int params = descriptor->environment_length(); output_frame->SetRegister(r0.code(), params); output_frame->SetRegister(r1.code(), handler); } @@ -362,8 +265,8 @@ void Deoptimizer::EntryGenerator::Generate() { __ bind(&inner_push_loop); __ sub(r3, r3, Operand(sizeof(uint32_t))); __ add(r6, r2, Operand(r3)); - __ ldr(r7, MemOperand(r6, FrameDescription::frame_content_offset())); - __ push(r7); + __ ldr(r6, MemOperand(r6, FrameDescription::frame_content_offset())); + __ push(r6); __ bind(&inner_loop_header); __ cmp(r3, Operand::Zero()); __ b(ne, &inner_push_loop); // test for gt? @@ -409,9 +312,9 @@ void Deoptimizer::EntryGenerator::Generate() { __ InitializeRootRegister(); __ pop(ip); // remove pc - __ pop(r7); // get continuation, leave pc on stack + __ pop(ip); // get continuation, leave pc on stack __ pop(lr); - __ Jump(r7); + __ Jump(ip); __ stop("Unreachable."); } diff --git a/deps/v8/src/arm/frames-arm.h b/deps/v8/src/arm/frames-arm.h index d022b414b4..64a718e89f 100644 --- a/deps/v8/src/arm/frames-arm.h +++ b/deps/v8/src/arm/frames-arm.h @@ -64,7 +64,7 @@ const RegList kCalleeSaved = 1 << 4 | // r4 v1 1 << 5 | // r5 v2 1 << 6 | // r6 v3 - 1 << 7 | // r7 v4 + 1 << 7 | // r7 v4 (pp in JavaScript code) 1 << 8 | // r8 v5 (cp in JavaScript code) kR9Available << 9 | // r9 v6 1 << 10 | // r10 v7 diff --git a/deps/v8/src/arm/full-codegen-arm.cc b/deps/v8/src/arm/full-codegen-arm.cc index b6fb70b5df..c57c785598 100644 --- a/deps/v8/src/arm/full-codegen-arm.cc +++ b/deps/v8/src/arm/full-codegen-arm.cc @@ -148,13 +148,10 @@ void FullCodeGenerator::Generate() { // receiver object). r5 is zero for method calls and non-zero for // function calls. if (!info->is_classic_mode() || info->is_native()) { - Label ok; __ cmp(r5, Operand::Zero()); - __ b(eq, &ok); int receiver_offset = info->scope()->num_parameters() * kPointerSize; __ LoadRoot(r2, Heap::kUndefinedValueRootIndex); - __ str(r2, MemOperand(sp, receiver_offset)); - __ bind(&ok); + __ str(r2, MemOperand(sp, receiver_offset), ne); } // Open a frame scope to indicate that there is a frame on the stack. The @@ -163,16 +160,7 @@ void FullCodeGenerator::Generate() { FrameScope frame_scope(masm_, StackFrame::MANUAL); info->set_prologue_offset(masm_->pc_offset()); - { - PredictableCodeSizeScope predictible_code_size_scope( - masm_, kNoCodeAgeSequenceLength * Assembler::kInstrSize); - // The following three instructions must remain together and unmodified - // for code aging to work properly. - __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit()); - __ nop(ip.code()); - // Adjust FP to point to saved FP. 
- __ add(fp, sp, Operand(2 * kPointerSize)); - } + __ Prologue(BUILD_FUNCTION_FRAME); info->AddNoFrameRange(0, masm_->pc_offset()); { Comment cmnt(masm_, "[ Allocate locals"); @@ -1167,7 +1155,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) { Handle<Object>(Smi::FromInt(TypeFeedbackCells::kForInFastCaseMarker), isolate())); RecordTypeFeedbackCell(stmt->ForInFeedbackId(), cell); - __ LoadHeapObject(r1, cell); + __ Move(r1, cell); __ mov(r2, Operand(Smi::FromInt(TypeFeedbackCells::kForInSlowCaseMarker))); __ str(r2, FieldMemOperand(r1, Cell::kValueOffset)); @@ -1651,13 +1639,11 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) { __ mov(r0, Operand(Smi::FromInt(flags))); int properties_count = constant_properties->length() / 2; if ((FLAG_track_double_fields && expr->may_store_doubles()) || - expr->depth() > 1) { - __ Push(r3, r2, r1, r0); - __ CallRuntime(Runtime::kCreateObjectLiteral, 4); - } else if (Serializer::enabled() || flags != ObjectLiteral::kFastElements || + expr->depth() > 1 || Serializer::enabled() || + flags != ObjectLiteral::kFastElements || properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) { __ Push(r3, r2, r1, r0); - __ CallRuntime(Runtime::kCreateObjectLiteralShallow, 4); + __ CallRuntime(Runtime::kCreateObjectLiteral, 4); } else { FastCloneShallowObjectStub stub(properties_count); __ CallStub(&stub); @@ -3592,8 +3578,8 @@ void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) { void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); ASSERT_EQ(args->length(), 1); - // Load the argument on the stack and call the stub. - VisitForStackValue(args->at(0)); + // Load the argument into r0 and call the stub. + VisitForAccumulatorValue(args->at(0)); NumberToStringStub stub; __ CallStub(&stub); @@ -3964,9 +3950,8 @@ void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) { void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) { - Label bailout, done, one_char_separator, long_separator, - non_trivial_array, not_size_one_array, loop, - empty_separator_loop, one_char_separator_loop, + Label bailout, done, one_char_separator, long_separator, non_trivial_array, + not_size_one_array, loop, empty_separator_loop, one_char_separator_loop, one_char_separator_loop_entry, long_separator_loop; ZoneList<Expression*>* args = expr->arguments(); ASSERT(args->length() == 2); @@ -3984,19 +3969,18 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) { Register string = r4; Register element = r5; Register elements_end = r6; - Register scratch1 = r7; - Register scratch2 = r9; + Register scratch = r9; // Separator operand is on the stack. __ pop(separator); // Check that the array is a JSArray. __ JumpIfSmi(array, &bailout); - __ CompareObjectType(array, scratch1, scratch2, JS_ARRAY_TYPE); + __ CompareObjectType(array, scratch, array_length, JS_ARRAY_TYPE); __ b(ne, &bailout); // Check that the array has fast elements. - __ CheckFastElements(scratch1, scratch2, &bailout); + __ CheckFastElements(scratch, array_length, &bailout); // If the array has length zero, return the empty string. 
__ ldr(array_length, FieldMemOperand(array, JSArray::kLengthOffset)); @@ -4033,11 +4017,11 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) { __ bind(&loop); __ ldr(string, MemOperand(element, kPointerSize, PostIndex)); __ JumpIfSmi(string, &bailout); - __ ldr(scratch1, FieldMemOperand(string, HeapObject::kMapOffset)); - __ ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset)); - __ JumpIfInstanceTypeIsNotSequentialAscii(scratch1, scratch2, &bailout); - __ ldr(scratch1, FieldMemOperand(string, SeqOneByteString::kLengthOffset)); - __ add(string_length, string_length, Operand(scratch1), SetCC); + __ ldr(scratch, FieldMemOperand(string, HeapObject::kMapOffset)); + __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); + __ JumpIfInstanceTypeIsNotSequentialAscii(scratch, scratch, &bailout); + __ ldr(scratch, FieldMemOperand(string, SeqOneByteString::kLengthOffset)); + __ add(string_length, string_length, Operand(scratch), SetCC); __ b(vs, &bailout); __ cmp(element, elements_end); __ b(lt, &loop); @@ -4058,23 +4042,23 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) { // Check that the separator is a flat ASCII string. __ JumpIfSmi(separator, &bailout); - __ ldr(scratch1, FieldMemOperand(separator, HeapObject::kMapOffset)); - __ ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset)); - __ JumpIfInstanceTypeIsNotSequentialAscii(scratch1, scratch2, &bailout); + __ ldr(scratch, FieldMemOperand(separator, HeapObject::kMapOffset)); + __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); + __ JumpIfInstanceTypeIsNotSequentialAscii(scratch, scratch, &bailout); // Add (separator length times array_length) - separator length to the // string_length to get the length of the result string. array_length is not // smi but the other values are, so the result is a smi - __ ldr(scratch1, FieldMemOperand(separator, SeqOneByteString::kLengthOffset)); - __ sub(string_length, string_length, Operand(scratch1)); - __ smull(scratch2, ip, array_length, scratch1); + __ ldr(scratch, FieldMemOperand(separator, SeqOneByteString::kLengthOffset)); + __ sub(string_length, string_length, Operand(scratch)); + __ smull(scratch, ip, array_length, scratch); // Check for smi overflow. No overflow if higher 33 bits of 64-bit result are // zero. __ cmp(ip, Operand::Zero()); __ b(ne, &bailout); - __ tst(scratch2, Operand(0x80000000)); + __ tst(scratch, Operand(0x80000000)); __ b(ne, &bailout); - __ add(string_length, string_length, Operand(scratch2), SetCC); + __ add(string_length, string_length, Operand(scratch), SetCC); __ b(vs, &bailout); __ SmiUntag(string_length); @@ -4091,9 +4075,9 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) { // array_length: Length of the array. __ AllocateAsciiString(result, string_length, - scratch1, - scratch2, - elements_end, + scratch, + string, // used as scratch + elements_end, // used as scratch &bailout); // Prepare for looping. Set up elements_end to end of the array. Set // result_pos to the position of the result where to write the first @@ -4106,8 +4090,8 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) { Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag)); // Check the length of the separator. 
- __ ldr(scratch1, FieldMemOperand(separator, SeqOneByteString::kLengthOffset)); - __ cmp(scratch1, Operand(Smi::FromInt(1))); + __ ldr(scratch, FieldMemOperand(separator, SeqOneByteString::kLengthOffset)); + __ cmp(scratch, Operand(Smi::FromInt(1))); __ b(eq, &one_char_separator); __ b(gt, &long_separator); @@ -4125,7 +4109,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) { __ add(string, string, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag)); - __ CopyBytes(string, result_pos, string_length, scratch1); + __ CopyBytes(string, result_pos, string_length, scratch); __ cmp(element, elements_end); __ b(lt, &empty_separator_loop); // End while (element < elements_end). ASSERT(result.is(r0)); @@ -4157,7 +4141,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) { __ add(string, string, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag)); - __ CopyBytes(string, result_pos, string_length, scratch1); + __ CopyBytes(string, result_pos, string_length, scratch); __ cmp(element, elements_end); __ b(lt, &one_char_separator_loop); // End while (element < elements_end). ASSERT(result.is(r0)); @@ -4178,7 +4162,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) { __ add(string, separator, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag)); - __ CopyBytes(string, result_pos, string_length, scratch1); + __ CopyBytes(string, result_pos, string_length, scratch); __ bind(&long_separator); __ ldr(string, MemOperand(element, kPointerSize, PostIndex)); @@ -4187,7 +4171,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) { __ add(string, string, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag)); - __ CopyBytes(string, result_pos, string_length, scratch1); + __ CopyBytes(string, result_pos, string_length, scratch); __ cmp(element, elements_end); __ b(lt, &long_separator_loop); // End while (element < elements_end). ASSERT(result.is(r0)); @@ -4894,6 +4878,91 @@ FullCodeGenerator::NestedStatement* FullCodeGenerator::TryFinally::Exit( #undef __ + +static const int32_t kBranchBeforeInterrupt = 0x5a000004; + + +void BackEdgeTable::PatchAt(Code* unoptimized_code, + Address pc, + BackEdgeState target_state, + Code* replacement_code) { + static const int kInstrSize = Assembler::kInstrSize; + Address branch_address = pc - 3 * kInstrSize; + CodePatcher patcher(branch_address, 1); + + switch (target_state) { + case INTERRUPT: + // <decrement profiling counter> + // 2a 00 00 01 bpl ok + // e5 9f c? ?? ldr ip, [pc, <interrupt stub address>] + // e1 2f ff 3c blx ip + // ok-label + patcher.masm()->b(4 * kInstrSize, pl); // Jump offset is 4 instructions. + ASSERT_EQ(kBranchBeforeInterrupt, Memory::int32_at(branch_address)); + break; + case ON_STACK_REPLACEMENT: + case OSR_AFTER_STACK_CHECK: + // <decrement profiling counter> + // e1 a0 00 00 mov r0, r0 (NOP) + // e5 9f c? ?? ldr ip, [pc, <on-stack replacement address>] + // e1 2f ff 3c blx ip + // ok-label + patcher.masm()->nop(); + break; + } + + Address pc_immediate_load_address = pc - 2 * kInstrSize; + // Replace the call address. 
+ uint32_t interrupt_address_offset = + Memory::uint16_at(pc_immediate_load_address) & 0xfff; + Address interrupt_address_pointer = pc + interrupt_address_offset; + Memory::uint32_at(interrupt_address_pointer) = + reinterpret_cast<uint32_t>(replacement_code->entry()); + + unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch( + unoptimized_code, pc_immediate_load_address, replacement_code); +} + + +BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState( + Isolate* isolate, + Code* unoptimized_code, + Address pc) { + static const int kInstrSize = Assembler::kInstrSize; + ASSERT(Memory::int32_at(pc - kInstrSize) == kBlxIp); + + Address branch_address = pc - 3 * kInstrSize; + Address pc_immediate_load_address = pc - 2 * kInstrSize; + uint32_t interrupt_address_offset = + Memory::uint16_at(pc_immediate_load_address) & 0xfff; + Address interrupt_address_pointer = pc + interrupt_address_offset; + + if (Memory::int32_at(branch_address) == kBranchBeforeInterrupt) { + ASSERT(Memory::uint32_at(interrupt_address_pointer) == + reinterpret_cast<uint32_t>( + isolate->builtins()->InterruptCheck()->entry())); + ASSERT(Assembler::IsLdrPcImmediateOffset( + Assembler::instr_at(pc_immediate_load_address))); + return INTERRUPT; + } + + ASSERT(Assembler::IsNop(Assembler::instr_at(branch_address))); + ASSERT(Assembler::IsLdrPcImmediateOffset( + Assembler::instr_at(pc_immediate_load_address))); + + if (Memory::uint32_at(interrupt_address_pointer) == + reinterpret_cast<uint32_t>( + isolate->builtins()->OnStackReplacement()->entry())) { + return ON_STACK_REPLACEMENT; + } + + ASSERT(Memory::uint32_at(interrupt_address_pointer) == + reinterpret_cast<uint32_t>( + isolate->builtins()->OsrAfterStackCheck()->entry())); + return OSR_AFTER_STACK_CHECK; +} + + } } // namespace v8::internal #endif // V8_TARGET_ARCH_ARM diff --git a/deps/v8/src/arm/ic-arm.cc b/deps/v8/src/arm/ic-arm.cc index f15d4b11f8..aded4c1dd8 100644 --- a/deps/v8/src/arm/ic-arm.cc +++ b/deps/v8/src/arm/ic-arm.cc @@ -656,7 +656,7 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm) { // Probe the stub cache. Code::Flags flags = Code::ComputeFlags( - Code::STUB, MONOMORPHIC, Code::kNoExtraICState, + Code::HANDLER, MONOMORPHIC, Code::kNoExtraICState, Code::NORMAL, Code::LOAD_IC); masm->isolate()->stub_cache()->GenerateProbe( masm, flags, r0, r2, r3, r4, r5, r6); @@ -1394,7 +1394,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm, Register receiver = r2; Register receiver_map = r3; Register elements_map = r6; - Register elements = r7; // Elements array of the receiver. + Register elements = r9; // Elements array of the receiver. // r4 and r5 are used as general scratch registers. // Check that the key is a smi. @@ -1487,7 +1487,7 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm, // Get the receiver from the stack and probe the stub cache. Code::Flags flags = Code::ComputeFlags( - Code::STUB, MONOMORPHIC, strict_mode, + Code::HANDLER, MONOMORPHIC, strict_mode, Code::NORMAL, Code::STORE_IC); masm->isolate()->stub_cache()->GenerateProbe( diff --git a/deps/v8/src/arm/lithium-arm.cc b/deps/v8/src/arm/lithium-arm.cc index 59a8818ac6..86d5d2b329 100644 --- a/deps/v8/src/arm/lithium-arm.cc +++ b/deps/v8/src/arm/lithium-arm.cc @@ -412,18 +412,19 @@ void LTransitionElementsKind::PrintDataTo(StringStream* stream) { } -int LPlatformChunk::GetNextSpillIndex(bool is_double) { +int LPlatformChunk::GetNextSpillIndex(RegisterKind kind) { // Skip a slot if for a double-width slot. 
- if (is_double) spill_slot_count_++; + if (kind == DOUBLE_REGISTERS) spill_slot_count_++; return spill_slot_count_++; } -LOperand* LPlatformChunk::GetNextSpillSlot(bool is_double) { - int index = GetNextSpillIndex(is_double); - if (is_double) { +LOperand* LPlatformChunk::GetNextSpillSlot(RegisterKind kind) { + int index = GetNextSpillIndex(kind); + if (kind == DOUBLE_REGISTERS) { return LDoubleStackSlot::Create(index, zone()); } else { + ASSERT(kind == GENERAL_REGISTERS); return LStackSlot::Create(index, zone()); } } @@ -439,7 +440,7 @@ LPlatformChunk* LChunkBuilder::Build() { // which will be subsumed into this frame. if (graph()->has_osr()) { for (int i = graph()->osr()->UnoptimizedFrameSlots(); i > 0; i--) { - chunk_->GetNextSpillIndex(false); + chunk_->GetNextSpillIndex(GENERAL_REGISTERS); } } @@ -655,7 +656,7 @@ LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr, LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) { ASSERT(!instr->HasPointerMap()); - instr->set_pointer_map(new(zone()) LPointerMap(position_, zone())); + instr->set_pointer_map(new(zone()) LPointerMap(zone())); return instr; } @@ -710,51 +711,44 @@ LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) { LInstruction* LChunkBuilder::DoShift(Token::Value op, HBitwiseBinaryOperation* instr) { - if (instr->representation().IsTagged()) { - ASSERT(instr->left()->representation().IsTagged()); - ASSERT(instr->right()->representation().IsTagged()); - - LOperand* left = UseFixed(instr->left(), r1); - LOperand* right = UseFixed(instr->right(), r0); - LArithmeticT* result = new(zone()) LArithmeticT(op, left, right); - return MarkAsCall(DefineFixed(result, r0), instr); - } - - ASSERT(instr->representation().IsSmiOrInteger32()); - ASSERT(instr->left()->representation().Equals(instr->representation())); - ASSERT(instr->right()->representation().Equals(instr->representation())); - LOperand* left = UseRegisterAtStart(instr->left()); + if (instr->representation().IsSmiOrInteger32()) { + ASSERT(instr->left()->representation().Equals(instr->representation())); + ASSERT(instr->right()->representation().Equals(instr->representation())); + LOperand* left = UseRegisterAtStart(instr->left()); - HValue* right_value = instr->right(); - LOperand* right = NULL; - int constant_value = 0; - bool does_deopt = false; - if (right_value->IsConstant()) { - HConstant* constant = HConstant::cast(right_value); - right = chunk_->DefineConstantOperand(constant); - constant_value = constant->Integer32Value() & 0x1f; - // Left shifts can deoptimize if we shift by > 0 and the result cannot be - // truncated to smi. - if (instr->representation().IsSmi() && constant_value > 0) { - does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToSmi); + HValue* right_value = instr->right(); + LOperand* right = NULL; + int constant_value = 0; + bool does_deopt = false; + if (right_value->IsConstant()) { + HConstant* constant = HConstant::cast(right_value); + right = chunk_->DefineConstantOperand(constant); + constant_value = constant->Integer32Value() & 0x1f; + // Left shifts can deoptimize if we shift by > 0 and the result cannot be + // truncated to smi. + if (instr->representation().IsSmi() && constant_value > 0) { + does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToSmi); + } + } else { + right = UseRegisterAtStart(right_value); } - } else { - right = UseRegisterAtStart(right_value); - } - // Shift operations can only deoptimize if we do a logical shift - // by 0 and the result cannot be truncated to int32. 
- if (op == Token::SHR && constant_value == 0) { - if (FLAG_opt_safe_uint32_operations) { - does_deopt = !instr->CheckFlag(HInstruction::kUint32); - } else { - does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToInt32); + // Shift operations can only deoptimize if we do a logical shift + // by 0 and the result cannot be truncated to int32. + if (op == Token::SHR && constant_value == 0) { + if (FLAG_opt_safe_uint32_operations) { + does_deopt = !instr->CheckFlag(HInstruction::kUint32); + } else { + does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToInt32); + } } - } - LInstruction* result = - DefineAsRegister(new(zone()) LShiftI(op, left, right, does_deopt)); - return does_deopt ? AssignEnvironment(result) : result; + LInstruction* result = + DefineAsRegister(new(zone()) LShiftI(op, left, right, does_deopt)); + return does_deopt ? AssignEnvironment(result) : result; + } else { + return DoArithmeticT(op, instr); + } } @@ -763,29 +757,34 @@ LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op, ASSERT(instr->representation().IsDouble()); ASSERT(instr->left()->representation().IsDouble()); ASSERT(instr->right()->representation().IsDouble()); - ASSERT(op != Token::MOD); - LOperand* left = UseRegisterAtStart(instr->left()); - LOperand* right = UseRegisterAtStart(instr->right()); - LArithmeticD* result = new(zone()) LArithmeticD(op, left, right); - return DefineAsRegister(result); + if (op == Token::MOD) { + LOperand* left = UseFixedDouble(instr->left(), d1); + LOperand* right = UseFixedDouble(instr->right(), d2); + LArithmeticD* result = new(zone()) LArithmeticD(op, left, right); + // We call a C function for double modulo. It can't trigger a GC. We need + // to use fixed result register for the call. + // TODO(fschneider): Allow any register as input registers. + return MarkAsCall(DefineFixedDouble(result, d1), instr); + } else { + LOperand* left = UseRegisterAtStart(instr->left()); + LOperand* right = UseRegisterAtStart(instr->right()); + LArithmeticD* result = new(zone()) LArithmeticD(op, left, right); + return DefineAsRegister(result); + } } LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op, - HArithmeticBinaryOperation* instr) { - ASSERT(op == Token::ADD || - op == Token::DIV || - op == Token::MOD || - op == Token::MUL || - op == Token::SUB); + HBinaryOperation* instr) { HValue* left = instr->left(); HValue* right = instr->right(); ASSERT(left->representation().IsTagged()); ASSERT(right->representation().IsTagged()); + LOperand* context = UseFixed(instr->context(), cp); LOperand* left_operand = UseFixed(left, r1); LOperand* right_operand = UseFixed(right, r0); LArithmeticT* result = - new(zone()) LArithmeticT(op, left_operand, right_operand); + new(zone()) LArithmeticT(op, context, left_operand, right_operand); return MarkAsCall(DefineFixed(result, r0), instr); } @@ -861,9 +860,31 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) { HInstruction* old_current = current_instruction_; current_instruction_ = current; if (current->has_position()) position_ = current->position(); - LInstruction* instr = current->CompileToLithium(this); + + LInstruction* instr = NULL; + if (current->CanReplaceWithDummyUses()) { + HValue* first_operand = current->OperandCount() == 0 + ? 
graph()->GetConstant1() + : current->OperandAt(0); + instr = DefineAsRegister(new(zone()) LDummyUse(UseAny(first_operand))); + for (int i = 1; i < current->OperandCount(); ++i) { + LInstruction* dummy = + new(zone()) LDummyUse(UseAny(current->OperandAt(i))); + dummy->set_hydrogen_value(current); + chunk_->AddInstruction(dummy, current_block_); + } + } else { + instr = current->CompileToLithium(this); + } + + argument_count_ += current->argument_delta(); + ASSERT(argument_count_ >= 0); if (instr != NULL) { + // Associate the hydrogen instruction first, since we may need it for + // the ClobbersRegisters() or ClobbersDoubleRegisters() calls below. + instr->set_hydrogen_value(current); + #if DEBUG // Make sure that the lithium instruction has either no fixed register // constraints in temps or the result OR no uses that are only used at @@ -893,14 +914,12 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) { } #endif - instr->set_position(position_); if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) { instr = AssignPointerMap(instr); } if (FLAG_stress_environments && !instr->HasEnvironment()) { instr = AssignEnvironment(instr); } - instr->set_hydrogen_value(current); chunk_->AddInstruction(instr, current_block_); } current_instruction_ = old_current; @@ -992,19 +1011,15 @@ LEnvironment* LChunkBuilder::CreateEnvironment( LInstruction* LChunkBuilder::DoGoto(HGoto* instr) { - return new(zone()) LGoto(instr->FirstSuccessor()->block_id()); + return new(zone()) LGoto(instr->FirstSuccessor()); } LInstruction* LChunkBuilder::DoBranch(HBranch* instr) { - HValue* value = instr->value(); - if (value->EmitAtUses()) { - HBasicBlock* successor = HConstant::cast(value)->BooleanValue() - ? instr->FirstSuccessor() - : instr->SecondSuccessor(); - return new(zone()) LGoto(successor->block_id()); - } + LInstruction* goto_instr = CheckElideControlInstruction(instr); + if (goto_instr != NULL) return goto_instr; + HValue* value = instr->value(); LBranch* result = new(zone()) LBranch(UseRegister(value)); // Tagged values that are not known smis or booleans require a // deoptimization environment. 
If the instruction is generic no @@ -1047,9 +1062,10 @@ LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) { LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) { + LOperand* context = UseFixed(instr->context(), cp); LInstanceOf* result = - new(zone()) LInstanceOf(UseFixed(instr->left(), r0), - UseFixed(instr->right(), r1)); + new(zone()) LInstanceOf(context, UseFixed(instr->left(), r0), + UseFixed(instr->right(), r1)); return MarkAsCall(DefineFixed(result, r0), instr); } @@ -1057,18 +1073,14 @@ LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) { LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal( HInstanceOfKnownGlobal* instr) { LInstanceOfKnownGlobal* result = - new(zone()) LInstanceOfKnownGlobal(UseFixed(instr->left(), r0), - FixedTemp(r4)); + new(zone()) LInstanceOfKnownGlobal( + UseFixed(instr->context(), cp), + UseFixed(instr->left(), r0), + FixedTemp(r4)); return MarkAsCall(DefineFixed(result, r0), instr); } -LInstruction* LChunkBuilder::DoInstanceSize(HInstanceSize* instr) { - LOperand* object = UseRegisterAtStart(instr->object()); - return DefineAsRegister(new(zone()) LInstanceSize(object)); -} - - LInstruction* LChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) { LOperand* receiver = UseRegisterAtStart(instr->receiver()); LOperand* function = UseRegisterAtStart(instr->function()); @@ -1091,7 +1103,6 @@ LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) { LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) { - ++argument_count_; LOperand* argument = Use(instr->argument()); return new(zone()) LPushArgument(argument); } @@ -1122,14 +1133,13 @@ LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) { LInstruction* LChunkBuilder::DoContext(HContext* instr) { - // If there is a non-return use, the context must be allocated in a register. 
- for (HUseIterator it(instr->uses()); !it.Done(); it.Advance()) { - if (!it.value()->IsReturn()) { - return DefineAsRegister(new(zone()) LContext); - } + if (instr->HasNoUses()) return NULL; + + if (info()->IsStub()) { + return DefineFixed(new(zone()) LContext, cp); } - return NULL; + return DefineAsRegister(new(zone()) LContext); } @@ -1140,7 +1150,8 @@ LInstruction* LChunkBuilder::DoOuterContext(HOuterContext* instr) { LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) { - return MarkAsCall(new(zone()) LDeclareGlobals, instr); + LOperand* context = UseFixed(instr->context(), cp); + return MarkAsCall(new(zone()) LDeclareGlobals(context), instr); } @@ -1158,15 +1169,14 @@ LInstruction* LChunkBuilder::DoGlobalReceiver(HGlobalReceiver* instr) { LInstruction* LChunkBuilder::DoCallConstantFunction( HCallConstantFunction* instr) { - argument_count_ -= instr->argument_count(); return MarkAsCall(DefineFixed(new(zone()) LCallConstantFunction, r0), instr); } LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) { + LOperand* context = UseFixed(instr->context(), cp); LOperand* function = UseFixed(instr->function(), r1); - argument_count_ -= instr->argument_count(); - LInvokeFunction* result = new(zone()) LInvokeFunction(function); + LInvokeFunction* result = new(zone()) LInvokeFunction(context, function); return MarkAsCall(DefineFixed(result, r0), instr, CANNOT_DEOPTIMIZE_EAGERLY); } @@ -1206,8 +1216,12 @@ LInstruction* LChunkBuilder::DoMathRound(HUnaryMathOperation* instr) { LInstruction* LChunkBuilder::DoMathAbs(HUnaryMathOperation* instr) { + Representation r = instr->value()->representation(); + LOperand* context = (r.IsDouble() || r.IsSmiOrInteger32()) + ? NULL + : UseFixed(instr->context(), cp); LOperand* input = UseRegister(instr->value()); - LMathAbs* result = new(zone()) LMathAbs(input); + LMathAbs* result = new(zone()) LMathAbs(context, input); return AssignEnvironment(AssignPointerMap(DefineAsRegister(result))); } @@ -1243,7 +1257,7 @@ LInstruction* LChunkBuilder::DoMathTan(HUnaryMathOperation* instr) { LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) { ASSERT(instr->representation().IsDouble()); ASSERT(instr->value()->representation().IsDouble()); - LOperand* input = UseTempRegister(instr->value()); + LOperand* input = UseRegister(instr->value()); LOperand* temp1 = TempRegister(); LOperand* temp2 = TempRegister(); LOperand* double_temp = FixedTemp(d3); // Chosen by fair dice roll. 
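The DoBranch change in the hunk above (and the matching DoCompareObjectEqAndBranch change further down) routes control instructions through the new CheckElideControlInstruction helper declared near the bottom of lithium-arm.h: when the branch condition folds to a compile-time constant, the builder emits an unconditional LGoto to the statically known successor instead of materializing a compare-and-branch. A minimal stand-alone sketch of that elision, using illustrative stand-in types rather than the real Hydrogen/Lithium classes:

#include <cassert>
#include <memory>

struct Block { int id; };

// Stand-in for a Hydrogen control instruction whose condition may have
// been folded to a known constant (the EmitAtUses/HConstant case that
// DoBranch used to special-case inline).
struct Branch {
  bool condition_is_constant;
  bool constant_value;
  Block* true_successor;
  Block* false_successor;
};

struct Goto { Block* target; };

// Same contract as LChunkBuilder::CheckElideControlInstruction: return a
// goto when the branch can be elided, null when real code is needed.
std::unique_ptr<Goto> TryElideControlInstruction(const Branch& b) {
  if (!b.condition_is_constant) return nullptr;
  Block* target = b.constant_value ? b.true_successor : b.false_successor;
  return std::make_unique<Goto>(Goto{target});
}

int main() {
  Block t{1}, f{2};
  Branch folded{true, false, &t, &f};
  assert(TryElideControlInstruction(folded)->target == &f);  // elided
  Branch dynamic{false, true, &t, &f};
  assert(TryElideControlInstruction(dynamic) == nullptr);    // real branch
  return 0;
}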
@@ -1269,57 +1283,57 @@ LInstruction* LChunkBuilder::DoMathPowHalf(HUnaryMathOperation* instr) { LInstruction* LChunkBuilder::DoCallKeyed(HCallKeyed* instr) { ASSERT(instr->key()->representation().IsTagged()); - argument_count_ -= instr->argument_count(); + LOperand* context = UseFixed(instr->context(), cp); LOperand* key = UseFixed(instr->key(), r2); - return MarkAsCall(DefineFixed(new(zone()) LCallKeyed(key), r0), instr); + return MarkAsCall( + DefineFixed(new(zone()) LCallKeyed(context, key), r0), instr); } LInstruction* LChunkBuilder::DoCallNamed(HCallNamed* instr) { - argument_count_ -= instr->argument_count(); - return MarkAsCall(DefineFixed(new(zone()) LCallNamed, r0), instr); + LOperand* context = UseFixed(instr->context(), cp); + return MarkAsCall(DefineFixed(new(zone()) LCallNamed(context), r0), instr); } LInstruction* LChunkBuilder::DoCallGlobal(HCallGlobal* instr) { - argument_count_ -= instr->argument_count(); - return MarkAsCall(DefineFixed(new(zone()) LCallGlobal, r0), instr); + LOperand* context = UseFixed(instr->context(), cp); + return MarkAsCall(DefineFixed(new(zone()) LCallGlobal(context), r0), instr); } LInstruction* LChunkBuilder::DoCallKnownGlobal(HCallKnownGlobal* instr) { - argument_count_ -= instr->argument_count(); return MarkAsCall(DefineFixed(new(zone()) LCallKnownGlobal, r0), instr); } LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) { + LOperand* context = UseFixed(instr->context(), cp); LOperand* constructor = UseFixed(instr->constructor(), r1); - argument_count_ -= instr->argument_count(); - LCallNew* result = new(zone()) LCallNew(constructor); + LCallNew* result = new(zone()) LCallNew(context, constructor); return MarkAsCall(DefineFixed(result, r0), instr); } LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) { + LOperand* context = UseFixed(instr->context(), cp); LOperand* constructor = UseFixed(instr->constructor(), r1); - argument_count_ -= instr->argument_count(); - LCallNewArray* result = new(zone()) LCallNewArray(constructor); + LCallNewArray* result = new(zone()) LCallNewArray(context, constructor); return MarkAsCall(DefineFixed(result, r0), instr); } LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) { + LOperand* context = UseFixed(instr->context(), cp); LOperand* function = UseFixed(instr->function(), r1); - argument_count_ -= instr->argument_count(); - return MarkAsCall(DefineFixed(new(zone()) LCallFunction(function), r0), - instr); + return MarkAsCall( + DefineFixed(new(zone()) LCallFunction(context, function), r0), instr); } LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) { - argument_count_ -= instr->argument_count(); - return MarkAsCall(DefineFixed(new(zone()) LCallRuntime, r0), instr); + LOperand* context = UseFixed(instr->context(), cp); + return MarkAsCall(DefineFixed(new(zone()) LCallRuntime(context), r0), instr); } @@ -1347,41 +1361,34 @@ LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) { if (instr->representation().IsSmiOrInteger32()) { ASSERT(instr->left()->representation().Equals(instr->representation())); ASSERT(instr->right()->representation().Equals(instr->representation())); + ASSERT(instr->CheckFlag(HValue::kTruncatingToInt32)); LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand()); LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand()); return DefineAsRegister(new(zone()) LBitI(left, right)); } else { - ASSERT(instr->representation().IsTagged()); - ASSERT(instr->left()->representation().IsTagged()); - 
ASSERT(instr->right()->representation().IsTagged()); - - LOperand* left = UseFixed(instr->left(), r1); - LOperand* right = UseFixed(instr->right(), r0); - LArithmeticT* result = new(zone()) LArithmeticT(instr->op(), left, right); - return MarkAsCall(DefineFixed(result, r0), instr); + return DoArithmeticT(instr->op(), instr); } } LInstruction* LChunkBuilder::DoDiv(HDiv* instr) { - if (instr->representation().IsDouble()) { - return DoArithmeticD(Token::DIV, instr); - } else if (instr->representation().IsSmiOrInteger32()) { + if (instr->representation().IsSmiOrInteger32()) { ASSERT(instr->left()->representation().Equals(instr->representation())); ASSERT(instr->right()->representation().Equals(instr->representation())); if (instr->HasPowerOf2Divisor()) { ASSERT(!instr->CheckFlag(HValue::kCanBeDivByZero)); LOperand* value = UseRegisterAtStart(instr->left()); - LDivI* div = - new(zone()) LDivI(value, UseOrConstant(instr->right()), NULL); - return AssignEnvironment(DefineSameAsFirst(div)); + LDivI* div = new(zone()) LDivI(value, UseConstant(instr->right()), NULL); + return AssignEnvironment(DefineAsRegister(div)); } LOperand* dividend = UseRegister(instr->left()); LOperand* divisor = UseRegister(instr->right()); LOperand* temp = CpuFeatures::IsSupported(SUDIV) ? NULL : FixedTemp(d4); LDivI* div = new(zone()) LDivI(dividend, divisor, temp); return AssignEnvironment(DefineAsRegister(div)); + } else if (instr->representation().IsDouble()) { + return DoArithmeticD(Token::DIV, instr); } else { return DoArithmeticT(Token::DIV, instr); } @@ -1502,17 +1509,10 @@ LInstruction* LChunkBuilder::DoMod(HMod* instr) { ? AssignEnvironment(result) : result; } - } else if (instr->representation().IsTagged()) { - return DoArithmeticT(Token::MOD, instr); + } else if (instr->representation().IsDouble()) { + return DoArithmeticD(Token::MOD, instr); } else { - ASSERT(instr->representation().IsDouble()); - // We call a C function for double modulo. It can't trigger a GC. We need - // to use fixed result register for the call. - // TODO(fschneider): Allow any register as input registers. 
- LArithmeticD* mod = new(zone()) LArithmeticD(Token::MOD, - UseFixedDouble(left, d1), - UseFixedDouble(right, d2)); - return MarkAsCall(DefineFixedDouble(mod, d1), instr); + return DoArithmeticT(Token::MOD, instr); } } @@ -1679,7 +1679,6 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) { return DoArithmeticD(Token::ADD, instr); } else { - ASSERT(instr->representation().IsTagged()); return DoArithmeticT(Token::ADD, instr); } } @@ -1737,9 +1736,10 @@ LInstruction* LChunkBuilder::DoRandom(HRandom* instr) { LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) { ASSERT(instr->left()->representation().IsTagged()); ASSERT(instr->right()->representation().IsTagged()); + LOperand* context = UseFixed(instr->context(), cp); LOperand* left = UseFixed(instr->left(), r1); LOperand* right = UseFixed(instr->right(), r0); - LCmpT* result = new(zone()) LCmpT(left, right); + LCmpT* result = new(zone()) LCmpT(context, left, right); return MarkAsCall(DefineFixed(result, r0), instr); } @@ -1766,6 +1766,8 @@ LInstruction* LChunkBuilder::DoCompareNumericAndBranch( LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch( HCompareObjectEqAndBranch* instr) { + LInstruction* goto_instr = CheckElideControlInstruction(instr); + if (goto_instr != NULL) return goto_instr; LOperand* left = UseRegisterAtStart(instr->left()); LOperand* right = UseRegisterAtStart(instr->right()); return new(zone()) LCmpObjectEqAndBranch(left, right); @@ -1774,8 +1776,8 @@ LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch( LInstruction* LChunkBuilder::DoCompareHoleAndBranch( HCompareHoleAndBranch* instr) { - LOperand* object = UseRegisterAtStart(instr->object()); - return new(zone()) LCmpHoleAndBranch(object); + LOperand* value = UseRegisterAtStart(instr->value()); + return new(zone()) LCmpHoleAndBranch(value); } @@ -1813,10 +1815,11 @@ LInstruction* LChunkBuilder::DoStringCompareAndBranch( HStringCompareAndBranch* instr) { ASSERT(instr->left()->representation().IsTagged()); ASSERT(instr->right()->representation().IsTagged()); + LOperand* context = UseFixed(instr->context(), cp); LOperand* left = UseFixed(instr->left(), r1); LOperand* right = UseFixed(instr->right(), r0); LStringCompareAndBranch* result = - new(zone()) LStringCompareAndBranch(left, right); + new(zone()) LStringCompareAndBranch(context, left, right); return MarkAsCall(result, instr); } @@ -1883,11 +1886,9 @@ LInstruction* LChunkBuilder::DoDateField(HDateField* instr) { LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) { LOperand* string = UseRegister(instr->string()); - LOperand* index = UseRegister(instr->index()); - LOperand* value = UseTempRegister(instr->value()); - LSeqStringSetChar* result = - new(zone()) LSeqStringSetChar(instr->encoding(), string, index, value); - return DefineAsRegister(result); + LOperand* index = UseRegisterOrConstant(instr->index()); + LOperand* value = UseRegister(instr->value()); + return new(zone()) LSeqStringSetChar(instr->encoding(), string, index, value); } @@ -1905,9 +1906,17 @@ LInstruction* LChunkBuilder::DoBoundsCheckBaseIndexInformation( } +LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) { + // The control instruction marking the end of a block that completed + // abruptly (e.g., threw an exception). There is nothing specific to do. 
+ return NULL; +} + + LInstruction* LChunkBuilder::DoThrow(HThrow* instr) { + LOperand* context = UseFixed(instr->context(), cp); LOperand* value = UseFixed(instr->value(), r0); - return MarkAsCall(new(zone()) LThrow(value), instr); + return MarkAsCall(new(zone()) LThrow(context, value), instr); } @@ -1936,7 +1945,6 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) { } if (from.IsTagged()) { if (to.IsDouble()) { - info()->MarkAsDeferredCalling(); LOperand* value = UseRegister(instr->value()); LNumberUntagD* res = new(zone()) LNumberUntagD(value); return AssignEnvironment(DefineAsRegister(res)); @@ -2006,8 +2014,9 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) { } else if (to.IsSmi()) { HValue* val = instr->value(); LOperand* value = UseRegister(val); - LInstruction* result = - DefineSameAsFirst(new(zone()) LInteger32ToSmi(value)); + LInstruction* result = val->CheckFlag(HInstruction::kUint32) + ? DefineSameAsFirst(new(zone()) LUint32ToSmi(value)) + : DefineSameAsFirst(new(zone()) LInteger32ToSmi(value)); if (val->HasRange() && val->range()->IsInSmiRange()) { return result; } @@ -2040,12 +2049,6 @@ LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) { } -LInstruction* LChunkBuilder::DoIsNumberAndBranch(HIsNumberAndBranch* instr) { - return new(zone()) - LIsNumberAndBranch(UseRegisterOrConstantAtStart(instr->value())); -} - - LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) { LOperand* value = UseRegisterAtStart(instr->value()); LInstruction* result = new(zone()) LCheckInstanceType(value); @@ -2093,8 +2096,11 @@ LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) { LInstruction* LChunkBuilder::DoReturn(HReturn* instr) { + LOperand* context = info()->IsStub() + ? UseFixed(instr->context(), cp) + : NULL; LOperand* parameter_count = UseRegisterOrConstant(instr->parameter_count()); - return new(zone()) LReturn(UseFixed(instr->value(), r0), + return new(zone()) LReturn(UseFixed(instr->value(), r0), context, parameter_count); } @@ -2127,8 +2133,10 @@ LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) { LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) { + LOperand* context = UseFixed(instr->context(), cp); LOperand* global_object = UseFixed(instr->global_object(), r0); - LLoadGlobalGeneric* result = new(zone()) LLoadGlobalGeneric(global_object); + LLoadGlobalGeneric* result = + new(zone()) LLoadGlobalGeneric(context, global_object); return MarkAsCall(DefineFixed(result, r0), instr); } @@ -2144,10 +2152,11 @@ LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) { LInstruction* LChunkBuilder::DoStoreGlobalGeneric(HStoreGlobalGeneric* instr) { + LOperand* context = UseFixed(instr->context(), cp); LOperand* global_object = UseFixed(instr->global_object(), r1); LOperand* value = UseFixed(instr->value(), r0); LStoreGlobalGeneric* result = - new(zone()) LStoreGlobalGeneric(global_object, value); + new(zone()) LStoreGlobalGeneric(context, global_object, value); return MarkAsCall(result, instr); } @@ -2182,8 +2191,10 @@ LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) { LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) { + LOperand* context = UseFixed(instr->context(), cp); LOperand* object = UseFixed(instr->object(), r0); - LInstruction* result = DefineFixed(new(zone()) LLoadNamedGeneric(object), r0); + LInstruction* result = + DefineFixed(new(zone()) LLoadNamedGeneric(context, object), r0); return MarkAsCall(result, instr); } @@ 
-2195,6 +2206,11 @@ LInstruction* LChunkBuilder::DoLoadFunctionPrototype( } +LInstruction* LChunkBuilder::DoLoadRoot(HLoadRoot* instr) { + return DefineAsRegister(new(zone()) LLoadRoot); +} + + LInstruction* LChunkBuilder::DoLoadExternalArrayPointer( HLoadExternalArrayPointer* instr) { LOperand* input = UseRegisterAtStart(instr->value()); @@ -2211,7 +2227,7 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) { if (!instr->is_external()) { LOperand* obj = NULL; if (instr->representation().IsDouble()) { - obj = UseTempRegister(instr->elements()); + obj = UseRegister(instr->elements()); } else { ASSERT(instr->representation().IsSmiOrTagged()); obj = UseRegisterAtStart(instr->elements()); @@ -2239,18 +2255,17 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) { LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) { + LOperand* context = UseFixed(instr->context(), cp); LOperand* object = UseFixed(instr->object(), r1); LOperand* key = UseFixed(instr->key(), r0); LInstruction* result = - DefineFixed(new(zone()) LLoadKeyedGeneric(object, key), r0); + DefineFixed(new(zone()) LLoadKeyedGeneric(context, object, key), r0); return MarkAsCall(result, instr); } LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) { - ElementsKind elements_kind = instr->elements_kind(); - if (!instr->is_external()) { ASSERT(instr->elements()->representation().IsTagged()); bool needs_write_barrier = instr->NeedsWriteBarrier(); @@ -2260,15 +2275,19 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) { if (instr->value()->representation().IsDouble()) { object = UseRegisterAtStart(instr->elements()); - val = UseTempRegister(instr->value()); + val = UseRegister(instr->value()); key = UseRegisterOrConstantAtStart(instr->key()); } else { ASSERT(instr->value()->representation().IsSmiOrTagged()); - object = UseTempRegister(instr->elements()); - val = needs_write_barrier ? UseTempRegister(instr->value()) - : UseRegisterAtStart(instr->value()); - key = needs_write_barrier ? UseTempRegister(instr->key()) - : UseRegisterOrConstantAtStart(instr->key()); + if (needs_write_barrier) { + object = UseTempRegister(instr->elements()); + val = UseTempRegister(instr->value()); + key = UseTempRegister(instr->key()); + } else { + object = UseRegisterAtStart(instr->elements()); + val = UseRegisterAtStart(instr->value()); + key = UseRegisterOrConstantAtStart(instr->key()); + } } return new(zone()) LStoreKeyed(object, key, val); @@ -2276,17 +2295,13 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) { ASSERT( (instr->value()->representation().IsInteger32() && - (elements_kind != EXTERNAL_FLOAT_ELEMENTS) && - (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) || + (instr->elements_kind() != EXTERNAL_FLOAT_ELEMENTS) && + (instr->elements_kind() != EXTERNAL_DOUBLE_ELEMENTS)) || (instr->value()->representation().IsDouble() && - ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) || - (elements_kind == EXTERNAL_DOUBLE_ELEMENTS)))); + ((instr->elements_kind() == EXTERNAL_FLOAT_ELEMENTS) || + (instr->elements_kind() == EXTERNAL_DOUBLE_ELEMENTS)))); ASSERT(instr->elements()->representation().IsExternal()); - bool val_is_temp_register = - elements_kind == EXTERNAL_PIXEL_ELEMENTS || - elements_kind == EXTERNAL_FLOAT_ELEMENTS; - LOperand* val = val_is_temp_register ? 
UseTempRegister(instr->value()) - : UseRegister(instr->value()); + LOperand* val = UseRegister(instr->value()); LOperand* key = UseRegisterOrConstantAtStart(instr->key()); LOperand* external_pointer = UseRegister(instr->elements()); return new(zone()) LStoreKeyed(external_pointer, key, val); @@ -2294,6 +2309,7 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) { LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) { + LOperand* context = UseFixed(instr->context(), cp); LOperand* obj = UseFixed(instr->object(), r2); LOperand* key = UseFixed(instr->key(), r1); LOperand* val = UseFixed(instr->value(), r0); @@ -2302,7 +2318,8 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) { ASSERT(instr->key()->representation().IsTagged()); ASSERT(instr->value()->representation().IsTagged()); - return MarkAsCall(new(zone()) LStoreKeyedGeneric(obj, key, val), instr); + return MarkAsCall( + new(zone()) LStoreKeyedGeneric(context, obj, key, val), instr); } @@ -2312,11 +2329,12 @@ LInstruction* LChunkBuilder::DoTransitionElementsKind( if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) { LOperand* new_map_reg = TempRegister(); LTransitionElementsKind* result = - new(zone()) LTransitionElementsKind(object, new_map_reg); + new(zone()) LTransitionElementsKind(object, NULL, new_map_reg); return result; } else { + LOperand* context = UseFixed(instr->context(), cp); LTransitionElementsKind* result = - new(zone()) LTransitionElementsKind(object, NULL); + new(zone()) LTransitionElementsKind(object, context, NULL); return AssignPointerMap(result); } } @@ -2375,56 +2393,68 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) { LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) { + LOperand* context = UseFixed(instr->context(), cp); LOperand* obj = UseFixed(instr->object(), r1); LOperand* val = UseFixed(instr->value(), r0); - LInstruction* result = new(zone()) LStoreNamedGeneric(obj, val); + LInstruction* result = new(zone()) LStoreNamedGeneric(context, obj, val); return MarkAsCall(result, instr); } LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) { + LOperand* context = UseFixed(instr->context(), cp); LOperand* left = UseRegisterAtStart(instr->left()); LOperand* right = UseRegisterAtStart(instr->right()); - return MarkAsCall(DefineFixed(new(zone()) LStringAdd(left, right), r0), - instr); + return MarkAsCall( + DefineFixed(new(zone()) LStringAdd(context, left, right), r0), + instr); } LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) { LOperand* string = UseTempRegister(instr->string()); LOperand* index = UseTempRegister(instr->index()); - LStringCharCodeAt* result = new(zone()) LStringCharCodeAt(string, index); + LOperand* context = UseAny(instr->context()); + LStringCharCodeAt* result = + new(zone()) LStringCharCodeAt(context, string, index); return AssignEnvironment(AssignPointerMap(DefineAsRegister(result))); } LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) { LOperand* char_code = UseRegister(instr->value()); - LStringCharFromCode* result = new(zone()) LStringCharFromCode(char_code); + LOperand* context = UseAny(instr->context()); + LStringCharFromCode* result = + new(zone()) LStringCharFromCode(context, char_code); return AssignPointerMap(DefineAsRegister(result)); } LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) { info()->MarkAsDeferredCalling(); + LOperand* context = UseAny(instr->context()); LOperand* size = 
instr->size()->IsConstant() ? UseConstant(instr->size()) : UseTempRegister(instr->size()); LOperand* temp1 = TempRegister(); LOperand* temp2 = TempRegister(); - LAllocate* result = new(zone()) LAllocate(size, temp1, temp2); + LAllocate* result = new(zone()) LAllocate(context, size, temp1, temp2); return AssignPointerMap(DefineAsRegister(result)); } LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) { - return MarkAsCall(DefineFixed(new(zone()) LRegExpLiteral, r0), instr); + LOperand* context = UseFixed(instr->context(), cp); + return MarkAsCall( + DefineFixed(new(zone()) LRegExpLiteral(context), r0), instr); } LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) { - return MarkAsCall(DefineFixed(new(zone()) LFunctionLiteral, r0), instr); + LOperand* context = UseFixed(instr->context(), cp); + return MarkAsCall( + DefineFixed(new(zone()) LFunctionLiteral(context), r0), instr); } @@ -2471,8 +2501,8 @@ LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) { LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) { - argument_count_ -= instr->argument_count(); - return MarkAsCall(DefineFixed(new(zone()) LCallStub, r0), instr); + LOperand* context = UseFixed(instr->context(), cp); + return MarkAsCall(DefineFixed(new(zone()) LCallStub(context), r0), instr); } @@ -2517,7 +2547,8 @@ LInstruction* LChunkBuilder::DoToFastProperties(HToFastProperties* instr) { LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) { - LTypeof* result = new(zone()) LTypeof(UseFixed(instr->value(), r0)); + LOperand* context = UseFixed(instr->context(), cp); + LTypeof* result = new(zone()) LTypeof(context, UseFixed(instr->value(), r0)); return MarkAsCall(DefineFixed(result, r0), instr); } @@ -2556,10 +2587,13 @@ LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) { LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) { if (instr->is_function_entry()) { - return MarkAsCall(new(zone()) LStackCheck, instr); + LOperand* context = UseFixed(instr->context(), cp); + return MarkAsCall(new(zone()) LStackCheck(context), instr); } else { ASSERT(instr->is_backwards_branch()); - return AssignEnvironment(AssignPointerMap(new(zone()) LStackCheck)); + LOperand* context = UseAny(instr->context()); + return AssignEnvironment( + AssignPointerMap(new(zone()) LStackCheck(context))); } } @@ -2592,7 +2626,7 @@ LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) { if (env->entry()->arguments_pushed()) { int argument_count = env->arguments_environment()->parameter_count(); pop = new(zone()) LDrop(argument_count); - argument_count_ -= argument_count; + ASSERT(instr->argument_delta() == -argument_count); } HEnvironment* outer = current_block_->last_environment()-> @@ -2604,8 +2638,9 @@ LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) { LInstruction* LChunkBuilder::DoForInPrepareMap(HForInPrepareMap* instr) { + LOperand* context = UseFixed(instr->context(), cp); LOperand* object = UseFixed(instr->enumerable(), r0); - LForInPrepareMap* result = new(zone()) LForInPrepareMap(object); + LForInPrepareMap* result = new(zone()) LForInPrepareMap(context, object); return MarkAsCall(DefineFixed(result, r0), instr, CAN_DEOPTIMIZE_EAGERLY); } diff --git a/deps/v8/src/arm/lithium-arm.h b/deps/v8/src/arm/lithium-arm.h index 98cacacae1..ed07229e17 100644 --- a/deps/v8/src/arm/lithium-arm.h +++ b/deps/v8/src/arm/lithium-arm.h @@ -105,7 +105,6 @@ class LCodeGen; V(InnerAllocatedObject) \ V(InstanceOf) \ V(InstanceOfKnownGlobal) \ - V(InstanceSize) \ 
V(InstructionGap) \ V(Integer32ToDouble) \ V(Integer32ToSmi) \ @@ -113,13 +112,13 @@ class LCodeGen; V(IsConstructCallAndBranch) \ V(IsObjectAndBranch) \ V(IsStringAndBranch) \ - V(IsNumberAndBranch) \ V(IsSmiAndBranch) \ V(IsUndetectableAndBranch) \ V(Label) \ V(LazyBailout) \ V(LoadContextSlot) \ V(LoadExternalArrayPointer) \ + V(LoadRoot) \ V(LoadFieldByIndex) \ V(LoadFunctionPrototype) \ V(LoadGlobalCell) \ @@ -185,6 +184,7 @@ class LCodeGen; V(Typeof) \ V(TypeofIsAndBranch) \ V(Uint32ToDouble) \ + V(Uint32ToSmi) \ V(UnknownOSRValue) \ V(ValueOf) \ V(WrapReceiver) @@ -216,7 +216,6 @@ class LInstruction : public ZoneObject { : environment_(NULL), hydrogen_value_(NULL), bit_field_(IsCallBits::encode(false)) { - set_position(RelocInfo::kNoPosition); } virtual ~LInstruction() {} @@ -257,15 +256,6 @@ class LInstruction : public ZoneObject { LPointerMap* pointer_map() const { return pointer_map_.get(); } bool HasPointerMap() const { return pointer_map_.is_set(); } - // The 31 bits PositionBits is used to store the int position value. And the - // position value may be RelocInfo::kNoPosition (-1). The accessor always - // +1/-1 so that the encoded value of position in bit_field_ is always >= 0 - // and can fit into the 31 bits PositionBits. - void set_position(int pos) { - bit_field_ = PositionBits::update(bit_field_, pos + 1); - } - int position() { return PositionBits::decode(bit_field_) - 1; } - void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; } HValue* hydrogen_value() const { return hydrogen_value_; } @@ -277,7 +267,7 @@ class LInstruction : public ZoneObject { // Interface to the register allocator and iterators. bool ClobbersTemps() const { return IsCall(); } bool ClobbersRegisters() const { return IsCall(); } - bool ClobbersDoubleRegisters() const { return IsCall(); } + virtual bool ClobbersDoubleRegisters() const { return IsCall(); } // Interface to the register allocator and iterators. 
bool IsMarkedAsCall() const { return IsCall(); } @@ -305,7 +295,6 @@ class LInstruction : public ZoneObject { virtual LOperand* TempAt(int i) = 0; class IsCallBits: public BitField<bool, 0, 1> {}; - class PositionBits: public BitField<int, 1, 31> {}; LEnvironment* environment_; SetOncePointer<LPointerMap> pointer_map_; @@ -404,17 +393,17 @@ class LInstructionGap V8_FINAL : public LGap { class LGoto V8_FINAL : public LTemplateInstruction<0, 0, 0> { public: - explicit LGoto(int block_id) : block_id_(block_id) { } + explicit LGoto(HBasicBlock* block) : block_(block) { } virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE; DECLARE_CONCRETE_INSTRUCTION(Goto, "goto") virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; virtual bool IsControl() const V8_OVERRIDE { return true; } - int block_id() const { return block_id_; } + int block_id() const { return block_->block_id(); } private: - int block_id_; + HBasicBlock* block_; }; @@ -483,8 +472,14 @@ class LParameter V8_FINAL : public LTemplateInstruction<1, 0, 0> { }; -class LCallStub V8_FINAL : public LTemplateInstruction<1, 0, 0> { +class LCallStub V8_FINAL : public LTemplateInstruction<1, 1, 0> { public: + explicit LCallStub(LOperand* context) { + inputs_[0] = context; + } + + LOperand* context() { return inputs_[0]; } + DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub") DECLARE_HYDROGEN_ACCESSOR(CallStub) @@ -785,12 +780,14 @@ class LMathRound V8_FINAL : public LTemplateInstruction<1, 1, 1> { }; -class LMathAbs V8_FINAL : public LTemplateInstruction<1, 1, 0> { +class LMathAbs V8_FINAL : public LTemplateInstruction<1, 2, 0> { public: - explicit LMathAbs(LOperand* value) { + LMathAbs(LOperand* context, LOperand* value) { + inputs_[1] = context; inputs_[0] = value; } + LOperand* context() { return inputs_[1]; } LOperand* value() { return inputs_[0]; } DECLARE_CONCRETE_INSTRUCTION(MathAbs, "math-abs") @@ -939,19 +936,6 @@ class LIsObjectAndBranch V8_FINAL : public LControlInstruction<1, 1> { }; -class LIsNumberAndBranch V8_FINAL : public LControlInstruction<1, 0> { - public: - explicit LIsNumberAndBranch(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(IsNumberAndBranch, "is-number-and-branch") - DECLARE_HYDROGEN_ACCESSOR(IsNumberAndBranch) -}; - - class LIsStringAndBranch V8_FINAL : public LControlInstruction<1, 1> { public: LIsStringAndBranch(LOperand* value, LOperand* temp) { @@ -1002,15 +986,17 @@ class LIsUndetectableAndBranch V8_FINAL : public LControlInstruction<1, 1> { }; -class LStringCompareAndBranch V8_FINAL : public LControlInstruction<2, 0> { +class LStringCompareAndBranch V8_FINAL : public LControlInstruction<3, 0> { public: - LStringCompareAndBranch(LOperand* left, LOperand* right) { - inputs_[0] = left; - inputs_[1] = right; + LStringCompareAndBranch(LOperand* context, LOperand* left, LOperand* right) { + inputs_[0] = context; + inputs_[1] = left; + inputs_[2] = right; } - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } + LOperand* context() { return inputs_[0]; } + LOperand* left() { return inputs_[1]; } + LOperand* right() { return inputs_[2]; } DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch, "string-compare-and-branch") @@ -1086,15 +1072,17 @@ class LClassOfTestAndBranch V8_FINAL : public LControlInstruction<1, 1> { }; -class LCmpT V8_FINAL : public LTemplateInstruction<1, 2, 0> { +class LCmpT V8_FINAL : public LTemplateInstruction<1, 3, 0> { public: - LCmpT(LOperand* left, LOperand* right) { - 
inputs_[0] = left; - inputs_[1] = right; + LCmpT(LOperand* context, LOperand* left, LOperand* right) { + inputs_[0] = context; + inputs_[1] = left; + inputs_[2] = right; } - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } + LOperand* context() { return inputs_[0]; } + LOperand* left() { return inputs_[1]; } + LOperand* right() { return inputs_[2]; } DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t") DECLARE_HYDROGEN_ACCESSOR(CompareGeneric) @@ -1103,28 +1091,32 @@ class LCmpT V8_FINAL : public LTemplateInstruction<1, 2, 0> { }; -class LInstanceOf V8_FINAL : public LTemplateInstruction<1, 2, 0> { +class LInstanceOf V8_FINAL : public LTemplateInstruction<1, 3, 0> { public: - LInstanceOf(LOperand* left, LOperand* right) { - inputs_[0] = left; - inputs_[1] = right; + LInstanceOf(LOperand* context, LOperand* left, LOperand* right) { + inputs_[0] = context; + inputs_[1] = left; + inputs_[2] = right; } - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } + LOperand* context() { return inputs_[0]; } + LOperand* left() { return inputs_[1]; } + LOperand* right() { return inputs_[2]; } DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of") }; -class LInstanceOfKnownGlobal V8_FINAL : public LTemplateInstruction<1, 1, 1> { +class LInstanceOfKnownGlobal V8_FINAL : public LTemplateInstruction<1, 2, 1> { public: - LInstanceOfKnownGlobal(LOperand* value, LOperand* temp) { - inputs_[0] = value; + LInstanceOfKnownGlobal(LOperand* context, LOperand* value, LOperand* temp) { + inputs_[0] = context; + inputs_[1] = value; temps_[0] = temp; } - LOperand* value() { return inputs_[0]; } + LOperand* context() { return inputs_[0]; } + LOperand* value() { return inputs_[1]; } LOperand* temp() { return temps_[0]; } DECLARE_CONCRETE_INSTRUCTION(InstanceOfKnownGlobal, @@ -1145,19 +1137,6 @@ class LInstanceOfKnownGlobal V8_FINAL : public LTemplateInstruction<1, 1, 1> { }; -class LInstanceSize V8_FINAL : public LTemplateInstruction<1, 1, 0> { - public: - explicit LInstanceSize(LOperand* object) { - inputs_[0] = object; - } - - LOperand* object() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(InstanceSize, "instance-size") - DECLARE_HYDROGEN_ACCESSOR(InstanceSize) -}; - - class LBoundsCheck V8_FINAL : public LTemplateInstruction<0, 2, 0> { public: LBoundsCheck(LOperand* index, LOperand* length) { @@ -1318,7 +1297,7 @@ class LCmpMapAndBranch V8_FINAL : public LControlInstruction<1, 1> { DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch") DECLARE_HYDROGEN_ACCESSOR(CompareMap) - Handle<Map> map() const { return hydrogen()->map(); } + Handle<Map> map() const { return hydrogen()->map().handle(); } }; @@ -1373,8 +1352,8 @@ class LDateField V8_FINAL : public LTemplateInstruction<1, 1, 1> { LOperand* temp() { return temps_[0]; } Smi* index() const { return index_; } - DECLARE_CONCRETE_INSTRUCTION(ValueOf, "date-field") - DECLARE_HYDROGEN_ACCESSOR(ValueOf) + DECLARE_CONCRETE_INSTRUCTION(DateField, "date-field") + DECLARE_HYDROGEN_ACCESSOR(DateField) private: Smi* index_; @@ -1405,13 +1384,15 @@ class LSeqStringSetChar V8_FINAL : public LTemplateInstruction<1, 3, 0> { }; -class LThrow V8_FINAL : public LTemplateInstruction<0, 1, 0> { +class LThrow V8_FINAL : public LTemplateInstruction<0, 2, 0> { public: - explicit LThrow(LOperand* value) { - inputs_[0] = value; + LThrow(LOperand* context, LOperand* value) { + inputs_[0] = context; + inputs_[1] = value; } - LOperand* value() { return inputs_[0]; } + LOperand* context() { return inputs_[0]; } + 
LOperand* value() { return inputs_[1]; } DECLARE_CONCRETE_INSTRUCTION(Throw, "throw") }; @@ -1507,16 +1488,21 @@ class LArithmeticD V8_FINAL : public LTemplateInstruction<1, 2, 0> { }; -class LArithmeticT V8_FINAL : public LTemplateInstruction<1, 2, 0> { +class LArithmeticT V8_FINAL : public LTemplateInstruction<1, 3, 0> { public: - LArithmeticT(Token::Value op, LOperand* left, LOperand* right) + LArithmeticT(Token::Value op, + LOperand* context, + LOperand* left, + LOperand* right) : op_(op) { - inputs_[0] = left; - inputs_[1] = right; + inputs_[0] = context; + inputs_[1] = left; + inputs_[2] = right; } - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } + LOperand* context() { return inputs_[0]; } + LOperand* left() { return inputs_[1]; } + LOperand* right() { return inputs_[2]; } Token::Value op() const { return op_; } virtual Opcode opcode() const V8_OVERRIDE { @@ -1530,11 +1516,12 @@ class LArithmeticT V8_FINAL : public LTemplateInstruction<1, 2, 0> { }; -class LReturn V8_FINAL : public LTemplateInstruction<0, 2, 0> { +class LReturn V8_FINAL : public LTemplateInstruction<0, 3, 0> { public: - explicit LReturn(LOperand* value, LOperand* parameter_count) { + LReturn(LOperand* value, LOperand* context, LOperand* parameter_count) { inputs_[0] = value; - inputs_[1] = parameter_count; + inputs_[1] = context; + inputs_[2] = parameter_count; } LOperand* value() { return inputs_[0]; } @@ -1546,7 +1533,7 @@ class LReturn V8_FINAL : public LTemplateInstruction<0, 2, 0> { ASSERT(has_constant_parameter_count()); return LConstantOperand::cast(parameter_count()); } - LOperand* parameter_count() { return inputs_[1]; } + LOperand* parameter_count() { return inputs_[2]; } DECLARE_CONCRETE_INSTRUCTION(Return, "return") }; @@ -1565,13 +1552,15 @@ class LLoadNamedField V8_FINAL : public LTemplateInstruction<1, 1, 0> { }; -class LLoadNamedGeneric V8_FINAL : public LTemplateInstruction<1, 1, 0> { +class LLoadNamedGeneric V8_FINAL : public LTemplateInstruction<1, 2, 0> { public: - explicit LLoadNamedGeneric(LOperand* object) { - inputs_[0] = object; + LLoadNamedGeneric(LOperand* context, LOperand* object) { + inputs_[0] = context; + inputs_[1] = object; } - LOperand* object() { return inputs_[0]; } + LOperand* context() { return inputs_[0]; } + LOperand* object() { return inputs_[1]; } DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic") DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric) @@ -1593,6 +1582,15 @@ class LLoadFunctionPrototype V8_FINAL : public LTemplateInstruction<1, 1, 0> { }; +class LLoadRoot V8_FINAL : public LTemplateInstruction<1, 0, 0> { + public: + DECLARE_CONCRETE_INSTRUCTION(LoadRoot, "load-root") + DECLARE_HYDROGEN_ACCESSOR(LoadRoot) + + Heap::RootListIndex index() const { return hydrogen()->index(); } +}; + + class LLoadExternalArrayPointer V8_FINAL : public LTemplateInstruction<1, 1, 0> { public: @@ -1631,15 +1629,17 @@ class LLoadKeyed V8_FINAL : public LTemplateInstruction<1, 2, 0> { }; -class LLoadKeyedGeneric V8_FINAL : public LTemplateInstruction<1, 2, 0> { +class LLoadKeyedGeneric V8_FINAL : public LTemplateInstruction<1, 3, 0> { public: - LLoadKeyedGeneric(LOperand* object, LOperand* key) { - inputs_[0] = object; - inputs_[1] = key; + LLoadKeyedGeneric(LOperand* context, LOperand* object, LOperand* key) { + inputs_[0] = context; + inputs_[1] = object; + inputs_[2] = key; } - LOperand* object() { return inputs_[0]; } - LOperand* key() { return inputs_[1]; } + LOperand* context() { return inputs_[0]; } + LOperand* object() { return 
inputs_[1]; } + LOperand* key() { return inputs_[2]; } DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic") }; @@ -1652,13 +1652,15 @@ class LLoadGlobalCell V8_FINAL : public LTemplateInstruction<1, 0, 0> { }; -class LLoadGlobalGeneric V8_FINAL : public LTemplateInstruction<1, 1, 0> { +class LLoadGlobalGeneric V8_FINAL : public LTemplateInstruction<1, 2, 0> { public: - explicit LLoadGlobalGeneric(LOperand* global_object) { - inputs_[0] = global_object; + LLoadGlobalGeneric(LOperand* context, LOperand* global_object) { + inputs_[0] = context; + inputs_[1] = global_object; } - LOperand* global_object() { return inputs_[0]; } + LOperand* context() { return inputs_[0]; } + LOperand* global_object() { return inputs_[1]; } DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic") DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric) @@ -1683,16 +1685,19 @@ class LStoreGlobalCell V8_FINAL : public LTemplateInstruction<0, 1, 1> { }; -class LStoreGlobalGeneric V8_FINAL : public LTemplateInstruction<0, 2, 0> { +class LStoreGlobalGeneric V8_FINAL : public LTemplateInstruction<0, 3, 0> { public: - explicit LStoreGlobalGeneric(LOperand* global_object, - LOperand* value) { - inputs_[0] = global_object; - inputs_[1] = value; + LStoreGlobalGeneric(LOperand* context, + LOperand* global_object, + LOperand* value) { + inputs_[0] = context; + inputs_[1] = global_object; + inputs_[2] = value; } - LOperand* global_object() { return inputs_[0]; } - LOperand* value() { return inputs_[1]; } + LOperand* context() { return inputs_[0]; } + LOperand* global_object() { return inputs_[1]; } + LOperand* value() { return inputs_[2]; } DECLARE_CONCRETE_INSTRUCTION(StoreGlobalGeneric, "store-global-generic") DECLARE_HYDROGEN_ACCESSOR(StoreGlobalGeneric) @@ -1822,8 +1827,14 @@ class LOuterContext V8_FINAL : public LTemplateInstruction<1, 1, 0> { }; -class LDeclareGlobals V8_FINAL : public LTemplateInstruction<0, 0, 0> { +class LDeclareGlobals V8_FINAL : public LTemplateInstruction<0, 1, 0> { public: + explicit LDeclareGlobals(LOperand* context) { + inputs_[0] = context; + } + + LOperand* context() { return inputs_[0]; } + DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals, "declare-globals") DECLARE_HYDROGEN_ACCESSOR(DeclareGlobals) }; @@ -1865,13 +1876,15 @@ class LCallConstantFunction V8_FINAL : public LTemplateInstruction<1, 0, 0> { }; -class LInvokeFunction V8_FINAL : public LTemplateInstruction<1, 1, 0> { +class LInvokeFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> { public: - explicit LInvokeFunction(LOperand* function) { - inputs_[0] = function; + LInvokeFunction(LOperand* context, LOperand* function) { + inputs_[0] = context; + inputs_[1] = function; } - LOperand* function() { return inputs_[0]; } + LOperand* context() { return inputs_[0]; } + LOperand* function() { return inputs_[1]; } DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function") DECLARE_HYDROGEN_ACCESSOR(InvokeFunction) @@ -1882,13 +1895,15 @@ class LInvokeFunction V8_FINAL : public LTemplateInstruction<1, 1, 0> { }; -class LCallKeyed V8_FINAL : public LTemplateInstruction<1, 1, 0> { +class LCallKeyed V8_FINAL : public LTemplateInstruction<1, 2, 0> { public: - explicit LCallKeyed(LOperand* key) { - inputs_[0] = key; + LCallKeyed(LOperand* context, LOperand* key) { + inputs_[0] = context; + inputs_[1] = key; } - LOperand* key() { return inputs_[0]; } + LOperand* context() { return inputs_[0]; } + LOperand* key() { return inputs_[1]; } DECLARE_CONCRETE_INSTRUCTION(CallKeyed, "call-keyed") DECLARE_HYDROGEN_ACCESSOR(CallKeyed) 
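Most of the header changes in this hunk and the surrounding ones follow one mechanical recipe: a call-like instruction gains one input, the context, which occupies slot 0 of the fixed-size inputs_ array while the existing accessors shift up by one, and the LTemplateInstruction parameter list (result count, input count, temp count) is bumped accordingly, e.g. LCmpT going from <1, 2, 0> to <1, 3, 0>. A toy analogue of that slot layout, illustrative only and not the V8 classes:

#include <cassert>

struct LOperand {};  // stand-in for the real Lithium operand class

// Inputs live in a fixed-size array sized by a template parameter, as in
// LTemplateInstruction<Results, Inputs, Temps>; named accessors map onto
// array slots.
template <int kInputs>
struct TemplateInstruction {
  LOperand* inputs_[kInputs] = {};
};

// Widened form with the context prepended at slot 0, mirroring the new
// LCmpT above.
struct CmpT : TemplateInstruction<3> {
  CmpT(LOperand* context, LOperand* left, LOperand* right) {
    inputs_[0] = context;
    inputs_[1] = left;
    inputs_[2] = right;
  }
  LOperand* context() { return inputs_[0]; }
  LOperand* left() { return inputs_[1]; }
  LOperand* right() { return inputs_[2]; }
};

int main() {
  LOperand cp, lhs, rhs;
  CmpT cmp(&cp, &lhs, &rhs);
  assert(cmp.context() == &cp);
  assert(cmp.left() == &lhs && cmp.right() == &rhs);
  return 0;
}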
@@ -1900,8 +1915,14 @@ class LCallKeyed V8_FINAL : public LTemplateInstruction<1, 1, 0> { -class LCallNamed V8_FINAL : public LTemplateInstruction<1, 0, 0> { +class LCallNamed V8_FINAL : public LTemplateInstruction<1, 1, 0> { public: + explicit LCallNamed(LOperand* context) { + inputs_[0] = context; + } + + LOperand* context() { return inputs_[0]; } + DECLARE_CONCRETE_INSTRUCTION(CallNamed, "call-named") DECLARE_HYDROGEN_ACCESSOR(CallNamed) @@ -1912,13 +1933,15 @@ class LCallNamed V8_FINAL : public LTemplateInstruction<1, 0, 0> { }; -class LCallFunction V8_FINAL : public LTemplateInstruction<1, 1, 0> { +class LCallFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> { public: - explicit LCallFunction(LOperand* function) { - inputs_[0] = function; + LCallFunction(LOperand* context, LOperand* function) { + inputs_[0] = context; + inputs_[1] = function; } - LOperand* function() { return inputs_[0]; } + LOperand* context() { return inputs_[0]; } + LOperand* function() { return inputs_[1]; } DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function") DECLARE_HYDROGEN_ACCESSOR(CallFunction) @@ -1927,8 +1950,14 @@ class LCallFunction V8_FINAL : public LTemplateInstruction<1, 1, 0> { }; -class LCallGlobal V8_FINAL : public LTemplateInstruction<1, 0, 0> { +class LCallGlobal V8_FINAL : public LTemplateInstruction<1, 1, 0> { public: + explicit LCallGlobal(LOperand* context) { + inputs_[0] = context; + } + + LOperand* context() { return inputs_[0]; } + DECLARE_CONCRETE_INSTRUCTION(CallGlobal, "call-global") DECLARE_HYDROGEN_ACCESSOR(CallGlobal) @@ -1950,13 +1979,15 @@ class LCallKnownGlobal V8_FINAL : public LTemplateInstruction<1, 0, 0> { }; -class LCallNew V8_FINAL : public LTemplateInstruction<1, 1, 0> { +class LCallNew V8_FINAL : public LTemplateInstruction<1, 2, 0> { public: - explicit LCallNew(LOperand* constructor) { - inputs_[0] = constructor; + LCallNew(LOperand* context, LOperand* constructor) { + inputs_[0] = context; + inputs_[1] = constructor; } - LOperand* constructor() { return inputs_[0]; } + LOperand* context() { return inputs_[0]; } + LOperand* constructor() { return inputs_[1]; } DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new") DECLARE_HYDROGEN_ACCESSOR(CallNew) @@ -1967,13 +1998,15 @@ class LCallNew V8_FINAL : public LTemplateInstruction<1, 1, 0> { }; -class LCallNewArray V8_FINAL : public LTemplateInstruction<1, 1, 0> { +class LCallNewArray V8_FINAL : public LTemplateInstruction<1, 2, 0> { public: - explicit LCallNewArray(LOperand* constructor) { - inputs_[0] = constructor; + LCallNewArray(LOperand* context, LOperand* constructor) { + inputs_[0] = context; + inputs_[1] = constructor; } - LOperand* constructor() { return inputs_[0]; } + LOperand* context() { return inputs_[0]; } + LOperand* constructor() { return inputs_[1]; } DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array") DECLARE_HYDROGEN_ACCESSOR(CallNewArray) @@ -1984,13 +2017,24 @@ class LCallNewArray V8_FINAL : public LTemplateInstruction<1, 1, 0> { }; -class LCallRuntime V8_FINAL : public LTemplateInstruction<1, 0, 0> { +class LCallRuntime V8_FINAL : public LTemplateInstruction<1, 1, 0> { public: + explicit LCallRuntime(LOperand* context) { + inputs_[0] = context; + } + + LOperand* context() { return inputs_[0]; } + DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime") DECLARE_HYDROGEN_ACCESSOR(CallRuntime) + virtual bool ClobbersDoubleRegisters() const V8_OVERRIDE { + return save_doubles() == kDontSaveFPRegs; + } + const Runtime::Function* function() const { return hydrogen()->function(); } int arity() 
const { return hydrogen()->argument_count(); } + SaveFPRegsMode save_doubles() const { return hydrogen()->save_doubles(); } }; @@ -2031,6 +2075,19 @@ class LUint32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> { }; +class LUint32ToSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> { + public: + explicit LUint32ToSmi(LOperand* value) { + inputs_[0] = value; + } + + LOperand* value() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(Uint32ToSmi, "uint32-to-smi") + DECLARE_HYDROGEN_ACCESSOR(Change) +}; + + class LNumberTagI V8_FINAL : public LTemplateInstruction<1, 1, 0> { public: explicit LNumberTagI(LOperand* value) { @@ -2119,7 +2176,7 @@ class LTaggedToI V8_FINAL : public LTemplateInstruction<1, 1, 2> { LOperand* temp2() { return temps_[1]; } DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i") - DECLARE_HYDROGEN_ACCESSOR(UnaryOperation) + DECLARE_HYDROGEN_ACCESSOR(Change) bool truncating() { return hydrogen()->CanTruncateToInt32(); } }; @@ -2191,15 +2248,17 @@ class LStoreNamedField V8_FINAL : public LTemplateInstruction<0, 2, 1> { }; -class LStoreNamedGeneric V8_FINAL : public LTemplateInstruction<0, 2, 0> { +class LStoreNamedGeneric V8_FINAL : public LTemplateInstruction<0, 3, 0> { public: - LStoreNamedGeneric(LOperand* object, LOperand* value) { - inputs_[0] = object; - inputs_[1] = value; + LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value) { + inputs_[0] = context; + inputs_[1] = object; + inputs_[2] = value; } - LOperand* object() { return inputs_[0]; } - LOperand* value() { return inputs_[1]; } + LOperand* context() { return inputs_[0]; } + LOperand* object() { return inputs_[1]; } + LOperand* value() { return inputs_[2]; } DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic") DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric) @@ -2242,17 +2301,22 @@ class LStoreKeyed V8_FINAL : public LTemplateInstruction<0, 3, 0> { }; -class LStoreKeyedGeneric V8_FINAL : public LTemplateInstruction<0, 3, 0> { +class LStoreKeyedGeneric V8_FINAL : public LTemplateInstruction<0, 4, 0> { public: - LStoreKeyedGeneric(LOperand* obj, LOperand* key, LOperand* value) { - inputs_[0] = obj; - inputs_[1] = key; - inputs_[2] = value; + LStoreKeyedGeneric(LOperand* context, + LOperand* obj, + LOperand* key, + LOperand* value) { + inputs_[0] = context; + inputs_[1] = obj; + inputs_[2] = key; + inputs_[3] = value; } - LOperand* object() { return inputs_[0]; } - LOperand* key() { return inputs_[1]; } - LOperand* value() { return inputs_[2]; } + LOperand* context() { return inputs_[0]; } + LOperand* object() { return inputs_[1]; } + LOperand* key() { return inputs_[2]; } + LOperand* value() { return inputs_[3]; } DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic") DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric) @@ -2263,14 +2327,17 @@ class LStoreKeyedGeneric V8_FINAL : public LTemplateInstruction<0, 3, 0> { }; -class LTransitionElementsKind V8_FINAL : public LTemplateInstruction<0, 1, 1> { +class LTransitionElementsKind V8_FINAL : public LTemplateInstruction<0, 2, 1> { public: LTransitionElementsKind(LOperand* object, + LOperand* context, LOperand* new_map_temp) { inputs_[0] = object; + inputs_[1] = context; temps_[0] = new_map_temp; } + LOperand* context() { return inputs_[1]; } LOperand* object() { return inputs_[0]; } LOperand* new_map_temp() { return temps_[0]; } @@ -2280,8 +2347,10 @@ class LTransitionElementsKind V8_FINAL : public LTemplateInstruction<0, 1, 1> { virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; - Handle<Map> 
original_map() { return hydrogen()->original_map(); } - Handle<Map> transitioned_map() { return hydrogen()->transitioned_map(); } + Handle<Map> original_map() { return hydrogen()->original_map().handle(); } + Handle<Map> transitioned_map() { + return hydrogen()->transitioned_map().handle(); + } ElementsKind from_kind() { return hydrogen()->from_kind(); } ElementsKind to_kind() { return hydrogen()->to_kind(); } }; @@ -2303,15 +2372,17 @@ class LTrapAllocationMemento V8_FINAL : public LTemplateInstruction<0, 1, 1> { }; -class LStringAdd V8_FINAL : public LTemplateInstruction<1, 2, 0> { +class LStringAdd V8_FINAL : public LTemplateInstruction<1, 3, 0> { public: - LStringAdd(LOperand* left, LOperand* right) { - inputs_[0] = left; - inputs_[1] = right; + LStringAdd(LOperand* context, LOperand* left, LOperand* right) { + inputs_[0] = context; + inputs_[1] = left; + inputs_[2] = right; } - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } + LOperand* context() { return inputs_[0]; } + LOperand* left() { return inputs_[1]; } + LOperand* right() { return inputs_[2]; } DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add") DECLARE_HYDROGEN_ACCESSOR(StringAdd) @@ -2319,28 +2390,32 @@ class LStringAdd V8_FINAL : public LTemplateInstruction<1, 2, 0> { -class LStringCharCodeAt V8_FINAL : public LTemplateInstruction<1, 2, 0> { +class LStringCharCodeAt V8_FINAL : public LTemplateInstruction<1, 3, 0> { public: - LStringCharCodeAt(LOperand* string, LOperand* index) { - inputs_[0] = string; - inputs_[1] = index; + LStringCharCodeAt(LOperand* context, LOperand* string, LOperand* index) { + inputs_[0] = context; + inputs_[1] = string; + inputs_[2] = index; } - LOperand* string() { return inputs_[0]; } - LOperand* index() { return inputs_[1]; } + LOperand* context() { return inputs_[0]; } + LOperand* string() { return inputs_[1]; } + LOperand* index() { return inputs_[2]; } DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string-char-code-at") DECLARE_HYDROGEN_ACCESSOR(StringCharCodeAt) }; -class LStringCharFromCode V8_FINAL : public LTemplateInstruction<1, 1, 0> { +class LStringCharFromCode V8_FINAL : public LTemplateInstruction<1, 2, 0> { public: - explicit LStringCharFromCode(LOperand* char_code) { - inputs_[0] = char_code; + explicit LStringCharFromCode(LOperand* context, LOperand* char_code) { + inputs_[0] = context; + inputs_[1] = char_code; } - LOperand* char_code() { return inputs_[0]; } + LOperand* context() { return inputs_[0]; } + LOperand* char_code() { return inputs_[1]; } DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode, "string-char-from-code") DECLARE_HYDROGEN_ACCESSOR(StringCharFromCode) @@ -2451,12 +2526,17 @@ class LClampTToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 1> { class LAllocate V8_FINAL : public LTemplateInstruction<1, 2, 2> { public: - LAllocate(LOperand* size, LOperand* temp1, LOperand* temp2) { + LAllocate(LOperand* context, + LOperand* size, + LOperand* temp1, + LOperand* temp2) { + inputs_[0] = context; inputs_[1] = size; temps_[0] = temp1; temps_[1] = temp2; } + LOperand* context() { return inputs_[0]; } LOperand* size() { return inputs_[1]; } LOperand* temp1() { return temps_[0]; } LOperand* temp2() { return temps_[1]; } @@ -2466,15 +2546,27 @@ class LAllocate V8_FINAL : public LTemplateInstruction<1, 2, 2> { }; -class LRegExpLiteral V8_FINAL : public LTemplateInstruction<1, 0, 0> { +class LRegExpLiteral V8_FINAL : public LTemplateInstruction<1, 1, 0> { public: + explicit LRegExpLiteral(LOperand* context) { + inputs_[0] = context; + } + + 
LOperand* context() { return inputs_[0]; } + DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral, "regexp-literal") DECLARE_HYDROGEN_ACCESSOR(RegExpLiteral) }; -class LFunctionLiteral V8_FINAL : public LTemplateInstruction<1, 0, 0> { +class LFunctionLiteral V8_FINAL : public LTemplateInstruction<1, 1, 0> { public: + explicit LFunctionLiteral(LOperand* context) { + inputs_[0] = context; + } + + LOperand* context() { return inputs_[0]; } + DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral, "function-literal") DECLARE_HYDROGEN_ACCESSOR(FunctionLiteral) }; @@ -2493,13 +2585,15 @@ class LToFastProperties V8_FINAL : public LTemplateInstruction<1, 1, 0> { }; -class LTypeof V8_FINAL : public LTemplateInstruction<1, 1, 0> { +class LTypeof V8_FINAL : public LTemplateInstruction<1, 2, 0> { public: - explicit LTypeof(LOperand* value) { - inputs_[0] = value; + LTypeof(LOperand* context, LOperand* value) { + inputs_[0] = context; + inputs_[1] = value; } - LOperand* value() { return inputs_[0]; } + LOperand* context() { return inputs_[0]; } + LOperand* value() { return inputs_[1]; } DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof") }; @@ -2546,8 +2640,14 @@ class LOsrEntry V8_FINAL : public LTemplateInstruction<0, 0, 0> { }; -class LStackCheck V8_FINAL : public LTemplateInstruction<0, 0, 0> { +class LStackCheck V8_FINAL : public LTemplateInstruction<0, 1, 0> { public: + explicit LStackCheck(LOperand* context) { + inputs_[0] = context; + } + + LOperand* context() { return inputs_[0]; } + DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check") DECLARE_HYDROGEN_ACCESSOR(StackCheck) @@ -2558,13 +2658,15 @@ class LStackCheck V8_FINAL : public LTemplateInstruction<0, 0, 0> { }; -class LForInPrepareMap V8_FINAL : public LTemplateInstruction<1, 1, 0> { +class LForInPrepareMap V8_FINAL : public LTemplateInstruction<1, 2, 0> { public: - explicit LForInPrepareMap(LOperand* object) { - inputs_[0] = object; + LForInPrepareMap(LOperand* context, LOperand* object) { + inputs_[0] = context; + inputs_[1] = object; } - LOperand* object() { return inputs_[0]; } + LOperand* context() { return inputs_[0]; } + LOperand* object() { return inputs_[1]; } DECLARE_CONCRETE_INSTRUCTION(ForInPrepareMap, "for-in-prepare-map") }; @@ -2620,8 +2722,8 @@ class LPlatformChunk V8_FINAL : public LChunk { LPlatformChunk(CompilationInfo* info, HGraph* graph) : LChunk(info, graph) { } - int GetNextSpillIndex(bool is_double); - LOperand* GetNextSpillSlot(bool is_double); + int GetNextSpillIndex(RegisterKind kind); + LOperand* GetNextSpillSlot(RegisterKind kind); }; @@ -2645,6 +2747,8 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED { // Build the sequence for the graph. LPlatformChunk* Build(); + LInstruction* CheckElideControlInstruction(HControlInstruction* instr); + // Declare methods that deal with the individual node types. 
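  // Each DECLARE_DO(type) use below expands to a declaration such as
  //   LInstruction* DoStringAdd(HStringAdd* node);
  // one Do-method per concrete hydrogen instruction.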
#define DECLARE_DO(type) LInstruction* Do##type(H##type* node); HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO) @@ -2778,7 +2882,7 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED { LInstruction* DoArithmeticD(Token::Value op, HArithmeticBinaryOperation* instr); LInstruction* DoArithmeticT(Token::Value op, - HArithmeticBinaryOperation* instr); + HBinaryOperation* instr); LPlatformChunk* chunk_; CompilationInfo* info_; diff --git a/deps/v8/src/arm/lithium-codegen-arm.cc b/deps/v8/src/arm/lithium-codegen-arm.cc index 7f65023ed0..fbe8e171fa 100644 --- a/deps/v8/src/arm/lithium-codegen-arm.cc +++ b/deps/v8/src/arm/lithium-codegen-arm.cc @@ -98,24 +98,6 @@ void LCodeGen::Abort(BailoutReason reason) { } -void LCodeGen::Comment(const char* format, ...) { - if (!FLAG_code_comments) return; - char buffer[4 * KB]; - StringBuilder builder(buffer, ARRAY_SIZE(buffer)); - va_list arguments; - va_start(arguments, format); - builder.AddFormattedList(format, arguments); - va_end(arguments); - - // Copy the string before recording it in the assembler to avoid - // issues when the stack allocated buffer goes out of scope. - size_t length = builder.position(); - Vector<char> copy = Vector<char>::New(length + 1); - OS::MemCopy(copy.start(), builder.Finalize(), copy.length()); - masm()->RecordComment(copy.start()); -} - - bool LCodeGen::GeneratePrologue() { ASSERT(is_generating()); @@ -139,33 +121,16 @@ bool LCodeGen::GeneratePrologue() { // receiver object). r5 is zero for method calls and non-zero for // function calls. if (!info_->is_classic_mode() || info_->is_native()) { - Label ok; __ cmp(r5, Operand::Zero()); - __ b(eq, &ok); int receiver_offset = scope()->num_parameters() * kPointerSize; __ LoadRoot(r2, Heap::kUndefinedValueRootIndex); - __ str(r2, MemOperand(sp, receiver_offset)); - __ bind(&ok); + __ str(r2, MemOperand(sp, receiver_offset), ne); } } info()->set_prologue_offset(masm_->pc_offset()); if (NeedsEagerFrame()) { - if (info()->IsStub()) { - __ stm(db_w, sp, cp.bit() | fp.bit() | lr.bit()); - __ Push(Smi::FromInt(StackFrame::STUB)); - // Adjust FP to point to saved FP. - __ add(fp, sp, Operand(2 * kPointerSize)); - } else { - PredictableCodeSizeScope predictible_code_size_scope( - masm_, kNoCodeAgeSequenceLength * Assembler::kInstrSize); - // The following three instructions must remain together and unmodified - // for code aging to work properly. - __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit()); - __ nop(ip.code()); - // Adjust FP to point to saved FP. - __ add(fp, sp, Operand(2 * kPointerSize)); - } + __ Prologue(info()->IsStub() ? BUILD_STUB_FRAME : BUILD_FUNCTION_FRAME); frame_is_built_ = true; info_->AddNoFrameRange(0, masm_->pc_offset()); } @@ -248,6 +213,8 @@ bool LCodeGen::GeneratePrologue() { // Trace the call. if (FLAG_trace && info()->IsOptimizing()) { + // We have not executed any compiled code yet, so cp still holds the + // incoming context. __ CallRuntime(Runtime::kTraceEnter, 0); } return !is_aborted(); @@ -269,45 +236,15 @@ void LCodeGen::GenerateOsrPrologue() { } -bool LCodeGen::GenerateBody() { - ASSERT(is_generating()); - bool emit_instructions = true; - for (current_instruction_ = 0; - !is_aborted() && current_instruction_ < instructions_->length(); - current_instruction_++) { - LInstruction* instr = instructions_->at(current_instruction_); - - // Don't emit code for basic blocks with a replacement. 
- if (instr->IsLabel()) { - emit_instructions = !LLabel::cast(instr)->HasReplacement(); - } - if (!emit_instructions) continue; - - if (FLAG_code_comments && instr->HasInterestingComment(this)) { - Comment(";;; <@%d,#%d> %s", - current_instruction_, - instr->hydrogen_value()->id(), - instr->Mnemonic()); - } - - RecordAndUpdatePosition(instr->position()); - - instr->CompileToNative(this); - } - EnsureSpaceForLazyDeopt(); - last_lazy_deopt_pc_ = masm()->pc_offset(); - return !is_aborted(); -} - - bool LCodeGen::GenerateDeferredCode() { ASSERT(is_generating()); if (deferred_.length() > 0) { for (int i = 0; !is_aborted() && i < deferred_.length(); i++) { LDeferredCode* code = deferred_[i]; - int pos = instructions_->at(code->instruction_index())->position(); - RecordAndUpdatePosition(pos); + HValue* value = + instructions_->at(code->instruction_index())->hydrogen_value(); + RecordAndWritePosition(value->position()); Comment(";;; <@%d,#%d> " "-------------------- Deferred %s --------------------", @@ -448,7 +385,7 @@ Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) { Abort(kEmitLoadRegisterUnsupportedDoubleImmediate); } else { ASSERT(r.IsSmiOrTagged()); - __ LoadObject(scratch, literal); + __ Move(scratch, literal); } return scratch; } else if (op->IsStackSlot() || op->IsArgument()) { @@ -727,13 +664,11 @@ void LCodeGen::CallCodeGeneric(Handle<Code> code, LInstruction* instr, SafepointMode safepoint_mode, TargetAddressStorageMode storage_mode) { - EnsureSpaceForLazyDeopt(); + EnsureSpaceForLazyDeopt(Deoptimizer::patch_size()); ASSERT(instr != NULL); // Block literal pool emission to ensure nop indicating no inlined smi code // is in the correct position. Assembler::BlockConstPoolScope block_const_pool(masm()); - LPointerMap* pointers = instr->pointer_map(); - RecordPosition(pointers->position()); __ Call(code, mode, TypeFeedbackId::None(), al, storage_mode); RecordSafepointWithLazyDeopt(instr, safepoint_mode); @@ -748,20 +683,36 @@ void LCodeGen::CallCodeGeneric(Handle<Code> code, void LCodeGen::CallRuntime(const Runtime::Function* function, int num_arguments, - LInstruction* instr) { + LInstruction* instr, + SaveFPRegsMode save_doubles) { ASSERT(instr != NULL); - LPointerMap* pointers = instr->pointer_map(); - ASSERT(pointers != NULL); - RecordPosition(pointers->position()); - __ CallRuntime(function, num_arguments); + __ CallRuntime(function, num_arguments, save_doubles); + RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT); } +void LCodeGen::LoadContextFromDeferred(LOperand* context) { + if (context->IsRegister()) { + __ Move(cp, ToRegister(context)); + } else if (context->IsStackSlot()) { + __ ldr(cp, ToMemOperand(context)); + } else if (context->IsConstantOperand()) { + HConstant* constant = + chunk_->LookupConstant(LConstantOperand::cast(context)); + __ Move(cp, Handle<Object>::cast(constant->handle(isolate()))); + } else { + UNREACHABLE(); + } +} + + void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id, int argc, - LInstruction* instr) { + LInstruction* instr, + LOperand* context) { + LoadContextFromDeferred(context); __ CallRuntimeSaveDoubles(id); RecordSafepointWithRegisters( instr->pointer_map(), argc, Safepoint::kNoLazyDeopt); @@ -862,26 +813,31 @@ void LCodeGen::DeoptimizeIf(Condition condition, void LCodeGen::RegisterDependentCodeForEmbeddedMaps(Handle<Code> code) { ZoneList<Handle<Map> > maps(1, zone()); + ZoneList<Handle<JSObject> > objects(1, zone()); int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT); for (RelocIterator 
it(*code, mode_mask); !it.done(); it.next()) { - RelocInfo::Mode mode = it.rinfo()->rmode(); - if (mode == RelocInfo::EMBEDDED_OBJECT && - it.rinfo()->target_object()->IsMap()) { - Handle<Map> map(Map::cast(it.rinfo()->target_object())); - if (map->CanTransition()) { + if (Code::IsWeakEmbeddedObject(code->kind(), it.rinfo()->target_object())) { + if (it.rinfo()->target_object()->IsMap()) { + Handle<Map> map(Map::cast(it.rinfo()->target_object())); maps.Add(map, zone()); + } else if (it.rinfo()->target_object()->IsJSObject()) { + Handle<JSObject> object(JSObject::cast(it.rinfo()->target_object())); + objects.Add(object, zone()); } } } #ifdef VERIFY_HEAP - // This disables verification of weak embedded maps after full GC. + // This disables verification of weak embedded objects after full GC. // AddDependentCode can cause a GC, which would observe the state where // this code is not yet in the depended code lists of the embedded maps. - NoWeakEmbeddedMapsVerificationScope disable_verification_of_embedded_maps; + NoWeakObjectVerificationScope disable_verification_of_embedded_objects; #endif for (int i = 0; i < maps.length(); i++) { maps.at(i)->AddDependentCode(DependentCode::kWeaklyEmbeddedGroup, code); } + for (int i = 0; i < objects.length(); i++) { + AddWeakObjectToCodeDependency(isolate()->heap(), objects.at(i), code); + } } @@ -977,10 +933,6 @@ void LCodeGen::RecordSafepoint( safepoint.DefinePointerRegister(ToRegister(pointer), zone()); } } - if (kind & Safepoint::kWithRegisters) { - // Register cp always contains a pointer to the context. - safepoint.DefinePointerRegister(cp, zone()); - } } @@ -991,7 +943,7 @@ void LCodeGen::RecordSafepoint(LPointerMap* pointers, void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) { - LPointerMap empty_pointers(RelocInfo::kNoPosition, zone()); + LPointerMap empty_pointers(zone()); RecordSafepoint(&empty_pointers, deopt_mode); } @@ -1013,17 +965,10 @@ void LCodeGen::RecordSafepointWithRegistersAndDoubles( } -void LCodeGen::RecordPosition(int position) { +void LCodeGen::RecordAndWritePosition(int position) { if (position == RelocInfo::kNoPosition) return; masm()->positions_recorder()->RecordPosition(position); -} - - -void LCodeGen::RecordAndUpdatePosition(int position) { - if (position >= 0 && position != old_position_) { - masm()->positions_recorder()->RecordPosition(position); - old_position_ = position; - } + masm()->positions_recorder()->WriteRecordedPositions(); } @@ -1073,6 +1018,7 @@ void LCodeGen::DoParameter(LParameter* instr) { void LCodeGen::DoCallStub(LCallStub* instr) { + ASSERT(ToRegister(instr->context()).is(cp)); ASSERT(ToRegister(instr->result()).is(r0)); switch (instr->hydrogen()->major_key()) { case CodeStub::RegExpConstructResult: { @@ -1090,11 +1036,6 @@ void LCodeGen::DoCallStub(LCallStub* instr) { CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); break; } - case CodeStub::NumberToString: { - NumberToStringStub stub; - CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); - break; - } case CodeStub::StringCompare: { StringCompareStub stub; CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); @@ -1383,7 +1324,8 @@ void LCodeGen::EmitSignedIntegerDivisionByConstant( void LCodeGen::DoDivI(LDivI* instr) { if (instr->hydrogen()->HasPowerOf2Divisor()) { - Register dividend = ToRegister(instr->left()); + const Register dividend = ToRegister(instr->left()); + const Register result = ToRegister(instr->result()); int32_t divisor = instr->hydrogen()->right()->GetInteger32Constant(); int32_t 
test_value = 0; int32_t power = 0; @@ -1394,7 +1336,7 @@ void LCodeGen::DoDivI(LDivI* instr) { } else { // Check for (0 / -x) that will produce negative zero. if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { - __ tst(dividend, Operand(dividend)); + __ cmp(dividend, Operand::Zero()); DeoptimizeIf(eq, instr->environment()); } // Check for (kMinInt / -1). @@ -1409,20 +1351,26 @@ void LCodeGen::DoDivI(LDivI* instr) { if (test_value != 0) { if (instr->hydrogen()->CheckFlag( HInstruction::kAllUsesTruncatingToInt32)) { - __ cmp(dividend, Operand(0)); - __ rsb(dividend, dividend, Operand(0), LeaveCC, lt); - __ mov(dividend, Operand(dividend, ASR, power)); - if (divisor > 0) __ rsb(dividend, dividend, Operand(0), LeaveCC, lt); - if (divisor < 0) __ rsb(dividend, dividend, Operand(0), LeaveCC, gt); + __ sub(result, dividend, Operand::Zero(), SetCC); + __ rsb(result, result, Operand::Zero(), LeaveCC, lt); + __ mov(result, Operand(result, ASR, power)); + if (divisor > 0) __ rsb(result, result, Operand::Zero(), LeaveCC, lt); + if (divisor < 0) __ rsb(result, result, Operand::Zero(), LeaveCC, gt); + return; // Don't fall through to "__ rsb" below. } else { // Deoptimize if remainder is not 0. __ tst(dividend, Operand(test_value)); DeoptimizeIf(ne, instr->environment()); - __ mov(dividend, Operand(dividend, ASR, power)); + __ mov(result, Operand(dividend, ASR, power)); + if (divisor < 0) __ rsb(result, result, Operand(0)); + } + } else { + if (divisor < 0) { + __ rsb(result, dividend, Operand(0)); + } else { + __ Move(result, dividend); } } - if (divisor < 0) __ rsb(dividend, dividend, Operand(0)); return; } @@ -1439,12 +1387,15 @@ void LCodeGen::DoDivI(LDivI* instr) { // Check for (0 / -x) that will produce negative zero. if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { - Label left_not_zero; + Label positive; + if (!instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) { + // Do the test only if it hadn't been done above. + __ cmp(right, Operand::Zero()); + } + __ b(pl, &positive); __ cmp(left, Operand::Zero()); - __ b(ne, &left_not_zero); - __ cmp(right, Operand::Zero()); - DeoptimizeIf(mi, instr->environment()); - __ bind(&left_not_zero); + DeoptimizeIf(eq, instr->environment()); + __ bind(&positive); } // Check for (kMinInt / -1).
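
The truncating power-of-two path above implements round-toward-zero division without a divide instruction: take the dividend's magnitude, shift it right arithmetically by the exponent, then negate the quotient when dividend and divisor differ in sign. A minimal standalone C++ sketch of the same arithmetic (illustrative only, not V8 code; kMinInt is left aside because negating it overflows):

#include <cstdint>

// Truncating (round-toward-zero) division by +/-2^power.
// Assumes dividend != INT32_MIN.
std::int32_t DivByPowerOfTwo(std::int32_t dividend, int power,
                             bool divisor_negative) {
  std::int32_t magnitude = dividend < 0 ? -dividend : dividend;
  std::int32_t quotient = magnitude >> power;  // shift of a non-negative value
  bool negate = (dividend < 0) != divisor_negative;
  return negate ? -quotient : quotient;
}

For dividend = -7, power = 1 and a positive divisor this yields -3, matching a truncating divide, whereas a bare arithmetic shift would give -4; that is why the generated code negates before shifting.
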
@@ -1886,7 +1837,7 @@ void LCodeGen::DoConstantE(LConstantE* instr) { void LCodeGen::DoConstantT(LConstantT* instr) { Handle<Object> value = instr->value(isolate()); AllowDeferredHandleDereference smi_check; - __ LoadObject(ToRegister(instr->result()), value); + __ Move(ToRegister(instr->result()), value); } @@ -1975,32 +1926,42 @@ void LCodeGen::DoDateField(LDateField* instr) { void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) { Register string = ToRegister(instr->string()); - Register index = ToRegister(instr->index()); + LOperand* index_op = instr->index(); Register value = ToRegister(instr->value()); + Register scratch = scratch0(); String::Encoding encoding = instr->encoding(); if (FLAG_debug_code) { - __ ldr(ip, FieldMemOperand(string, HeapObject::kMapOffset)); - __ ldrb(ip, FieldMemOperand(ip, Map::kInstanceTypeOffset)); + __ ldr(scratch, FieldMemOperand(string, HeapObject::kMapOffset)); + __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); - __ and_(ip, ip, Operand(kStringRepresentationMask | kStringEncodingMask)); + __ and_(scratch, scratch, + Operand(kStringRepresentationMask | kStringEncodingMask)); static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag; static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag; - __ cmp(ip, Operand(encoding == String::ONE_BYTE_ENCODING - ? one_byte_seq_type : two_byte_seq_type)); + __ cmp(scratch, Operand(encoding == String::ONE_BYTE_ENCODING + ? one_byte_seq_type : two_byte_seq_type)); __ Check(eq, kUnexpectedStringType); } - __ add(ip, - string, - Operand(SeqString::kHeaderSize - kHeapObjectTag)); - if (encoding == String::ONE_BYTE_ENCODING) { - __ strb(value, MemOperand(ip, index)); + if (index_op->IsConstantOperand()) { + int constant_index = ToInteger32(LConstantOperand::cast(index_op)); + if (encoding == String::ONE_BYTE_ENCODING) { + __ strb(value, + FieldMemOperand(string, SeqString::kHeaderSize + constant_index)); + } else { + __ strh(value, + FieldMemOperand(string, SeqString::kHeaderSize + constant_index * 2)); + } } else { - // MemOperand with ip as the base register is not allowed for strh, so - // we do the address calculation explicitly. 
- __ add(ip, ip, Operand(index, LSL, 1)); - __ strh(value, MemOperand(ip)); + Register index = ToRegister(index_op); + if (encoding == String::ONE_BYTE_ENCODING) { + __ add(scratch, string, Operand(index)); + __ strb(value, FieldMemOperand(scratch, SeqString::kHeaderSize)); + } else { + __ add(scratch, string, Operand(index, LSL, 1)); + __ strh(value, FieldMemOperand(scratch, SeqString::kHeaderSize)); + } } } @@ -2008,6 +1969,7 @@ void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) { void LCodeGen::DoThrow(LThrow* instr) { Register input_reg = EmitLoadRegister(instr->value(), ip); __ push(input_reg); + ASSERT(ToRegister(instr->context()).is(cp)); CallRuntime(Runtime::kThrow, 1, instr); if (FLAG_debug_code) { @@ -2145,6 +2107,7 @@ void LCodeGen::DoArithmeticD(LArithmeticD* instr) { void LCodeGen::DoArithmeticT(LArithmeticT* instr) { + ASSERT(ToRegister(instr->context()).is(cp)); ASSERT(ToRegister(instr->left()).is(r1)); ASSERT(ToRegister(instr->right()).is(r0)); ASSERT(ToRegister(instr->result()).is(r0)); @@ -2158,13 +2121,6 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) { } -int LCodeGen::GetNextEmittedBlock() const { - for (int i = current_block_ + 1; i < graph()->blocks()->length(); ++i) { - if (!chunk_->GetLabel(i)->HasReplacement()) return i; - } - return -1; -} - template<class InstrType> void LCodeGen::EmitBranch(InstrType instr, Condition condition) { int left_block = instr->TrueDestination(chunk_); @@ -2197,25 +2153,6 @@ void LCodeGen::DoDebugBreak(LDebugBreak* instr) { } -void LCodeGen::DoIsNumberAndBranch(LIsNumberAndBranch* instr) { - Representation r = instr->hydrogen()->value()->representation(); - if (r.IsSmiOrInteger32() || r.IsDouble()) { - EmitBranch(instr, al); - } else { - ASSERT(r.IsTagged()); - Register reg = ToRegister(instr->value()); - HType type = instr->hydrogen()->value()->type(); - if (type.IsTaggedNumber()) { - EmitBranch(instr, al); - } - __ JumpIfSmi(reg, instr->TrueLabel(chunk_)); - __ ldr(scratch0(), FieldMemOperand(reg, HeapObject::kMapOffset)); - __ CompareRoot(scratch0(), Heap::kHeapNumberMapRootIndex); - EmitBranch(instr, eq); - } -} - - void LCodeGen::DoBranch(LBranch* instr) { Representation r = instr->hydrogen()->value()->representation(); if (r.IsInteger32() || r.IsSmi()) { @@ -2371,6 +2308,10 @@ Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) { case Token::EQ_STRICT: cond = eq; break; + case Token::NE: + case Token::NE_STRICT: + cond = ne; + break; case Token::LT: cond = is_unsigned ? lo : lt; break; @@ -2575,6 +2516,7 @@ static Condition ComputeCompareCondition(Token::Value op) { void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) { + ASSERT(ToRegister(instr->context()).is(cp)); Token::Value op = instr->op(); Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op); @@ -2735,6 +2677,7 @@ void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) { void LCodeGen::DoInstanceOf(LInstanceOf* instr) { + ASSERT(ToRegister(instr->context()).is(cp)); ASSERT(ToRegister(instr->left()).is(r0)); // Object is in r0. ASSERT(ToRegister(instr->right()).is(r1)); // Function is in r1. @@ -2844,13 +2787,14 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr, InstanceofStub stub(flags); PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); + LoadContextFromDeferred(instr->context()); // Get the temp register reserved by the instruction. 
This needs to be r4 as // its slot in the block of pushed safepoint registers is used to communicate the // offset to the location of the map check. Register temp = ToRegister(instr->temp()); ASSERT(temp.is(r4)); - __ LoadHeapObject(InstanceofStub::right(), instr->function()); + __ Move(InstanceofStub::right(), instr->function()); static const int kAdditionalDelta = 5; // Make sure that code size is predictable, since we use specific constant // offsets in the code to find embedded values. @@ -2879,15 +2823,8 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr, } -void LCodeGen::DoInstanceSize(LInstanceSize* instr) { - Register object = ToRegister(instr->object()); - Register result = ToRegister(instr->result()); - __ ldr(result, FieldMemOperand(object, HeapObject::kMapOffset)); - __ ldrb(result, FieldMemOperand(result, Map::kInstanceSizeOffset)); -} - - void LCodeGen::DoCmpT(LCmpT* instr) { + ASSERT(ToRegister(instr->context()).is(cp)); Token::Value op = instr->op(); Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op); @@ -2908,8 +2845,11 @@ void LCodeGen::DoCmpT(LCmpT* instr) { void LCodeGen::DoReturn(LReturn* instr) { if (FLAG_trace && info()->IsOptimizing()) { // Push the return value on the stack as the parameter. - // Runtime::TraceExit returns its parameter in r0. + // Runtime::TraceExit returns its parameter in r0. We're leaving the code + // managed by the register allocator and tearing down the frame, so it's + // safe to write to the context register. __ push(r0); + __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); __ CallRuntime(Runtime::kTraceExit, 1); } if (info()->saves_caller_doubles()) { @@ -2953,7 +2893,7 @@ void LCodeGen::DoReturn(LReturn* instr) { void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) { Register result = ToRegister(instr->result()); - __ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell()))); + __ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell().handle()))); __ ldr(result, FieldMemOperand(ip, Cell::kValueOffset)); if (instr->hydrogen()->RequiresHoleCheck()) { __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); @@ -2964,6 +2904,7 @@ void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) { void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) { + ASSERT(ToRegister(instr->context()).is(cp)); ASSERT(ToRegister(instr->global_object()).is(r0)); ASSERT(ToRegister(instr->result()).is(r0)); @@ -2980,7 +2921,7 @@ void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) { Register cell = scratch0(); // Load the cell. - __ mov(cell, Operand(instr->hydrogen()->cell())); + __ mov(cell, Operand(instr->hydrogen()->cell().handle())); // If the cell we are storing to contains the hole, it could have // been deleted from the property dictionary.
In that case, we need @@ -3001,6 +2942,7 @@ void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) { void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) { + ASSERT(ToRegister(instr->context()).is(cp)); ASSERT(ToRegister(instr->global_object()).is(r1)); ASSERT(ToRegister(instr->value()).is(r0)); @@ -3073,7 +3015,12 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) { if (access.IsExternalMemory()) { Register result = ToRegister(instr->result()); - __ ldr(result, MemOperand(object, offset)); + MemOperand operand = MemOperand(object, offset); + if (access.representation().IsByte()) { + __ ldrb(result, operand); + } else { + __ ldr(result, operand); + } return; } @@ -3084,16 +3031,21 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) { } Register result = ToRegister(instr->result()); - if (access.IsInobject()) { - __ ldr(result, FieldMemOperand(object, offset)); - } else { + if (!access.IsInobject()) { __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset)); - __ ldr(result, FieldMemOperand(result, offset)); + object = result; + } + MemOperand operand = FieldMemOperand(object, offset); + if (access.representation().IsByte()) { + __ ldrb(result, operand); + } else { + __ ldr(result, operand); } } void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) { + ASSERT(ToRegister(instr->context()).is(cp)); ASSERT(ToRegister(instr->object()).is(r0)); ASSERT(ToRegister(instr->result()).is(r0)); @@ -3148,6 +3100,12 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) { } +void LCodeGen::DoLoadRoot(LLoadRoot* instr) { + Register result = ToRegister(instr->result()); + __ LoadRoot(result, instr->index()); +} + + void LCodeGen::DoLoadExternalArrayPointer( LLoadExternalArrayPointer* instr) { Register to_reg = ToRegister(instr->result()); @@ -3265,27 +3223,30 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) { Register scratch = scratch0(); int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS); - int shift_size = (instr->hydrogen()->key()->representation().IsSmi()) - ? (element_size_shift - kSmiTagSize) : element_size_shift; - int constant_key = 0; + + int base_offset = + FixedDoubleArray::kHeaderSize - kHeapObjectTag + + (instr->additional_index() << element_size_shift); if (key_is_constant) { - constant_key = ToInteger32(LConstantOperand::cast(instr->key())); + int constant_key = ToInteger32(LConstantOperand::cast(instr->key())); if (constant_key & 0xF0000000) { Abort(kArrayIndexConstantValueTooBig); } - } else { - key = ToRegister(instr->key()); + base_offset += constant_key << element_size_shift; } + __ add(scratch, elements, Operand(base_offset)); - int base_offset = (FixedDoubleArray::kHeaderSize - kHeapObjectTag) + - ((constant_key + instr->additional_index()) << element_size_shift); if (!key_is_constant) { - __ add(elements, elements, Operand(key, LSL, shift_size)); + key = ToRegister(instr->key()); + int shift_size = (instr->hydrogen()->key()->representation().IsSmi()) + ? 
(element_size_shift - kSmiTagSize) : element_size_shift; + __ add(scratch, scratch, Operand(key, LSL, shift_size)); } - __ add(elements, elements, Operand(base_offset)); - __ vldr(result, elements, 0); + + __ vldr(result, scratch, 0); + if (instr->hydrogen()->RequiresHoleCheck()) { - __ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32))); + __ ldr(scratch, MemOperand(scratch, sizeof(kHoleNanLower32))); __ cmp(scratch, Operand(kHoleNanUpper32)); DeoptimizeIf(eq, instr->environment()); } @@ -3305,7 +3266,7 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) { instr->additional_index()); store_base = elements; } else { - Register key = EmitLoadRegister(instr->key(), scratch0()); + Register key = ToRegister(instr->key()); // Even though the HLoadKeyed instruction forces the input // representation for the key to be an integer, the input gets replaced // during bound check elimination with the index argument to the bounds @@ -3381,6 +3342,7 @@ MemOperand LCodeGen::PrepareKeyedOperand(Register key, void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) { + ASSERT(ToRegister(instr->context()).is(cp)); ASSERT(ToRegister(instr->object()).is(r1)); ASSERT(ToRegister(instr->key()).is(r0)); @@ -3517,7 +3479,6 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) { __ bind(&invoke); ASSERT(instr->HasPointerMap()); LPointerMap* pointers = instr->pointer_map(); - RecordPosition(pointers->position()); SafepointGenerator safepoint_generator( this, pointers, Safepoint::kLazyDeopt); // The number of arguments is stored in receiver which is r0, as expected @@ -3525,7 +3486,6 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) { ParameterCount actual(receiver); __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator, CALL_AS_METHOD); - __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); } @@ -3554,11 +3514,11 @@ void LCodeGen::DoThisFunction(LThisFunction* instr) { void LCodeGen::DoContext(LContext* instr) { // If there is a non-return use, the context must be moved to a register. Register result = ToRegister(instr->result()); - for (HUseIterator it(instr->hydrogen()->uses()); !it.Done(); it.Advance()) { - if (!it.value()->IsReturn()) { - __ mov(result, cp); - return; - } + if (info()->IsOptimizing()) { + __ ldr(result, MemOperand(fp, StandardFrameConstants::kContextOffset)); + } else { + // If there is no frame, the context must be in cp. + ASSERT(result.is(cp)); } } @@ -3572,8 +3532,9 @@ void LCodeGen::DoOuterContext(LOuterContext* instr) { void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) { + ASSERT(ToRegister(instr->context()).is(cp)); __ push(cp); // The context is the first argument. 
- __ LoadHeapObject(scratch0(), instr->hydrogen()->pairs()); + __ Move(scratch0(), instr->hydrogen()->pairs()); __ push(scratch0()); __ mov(scratch0(), Operand(Smi::FromInt(instr->hydrogen()->flags()))); __ push(scratch0()); @@ -3582,8 +3543,9 @@ void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) { void LCodeGen::DoGlobalObject(LGlobalObject* instr) { + Register context = ToRegister(instr->context()); Register result = ToRegister(instr->result()); - __ ldr(result, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX)); + __ ldr(result, ContextOperand(context, Context::GLOBAL_OBJECT_INDEX)); } @@ -3606,11 +3568,10 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function, dont_adapt_arguments || formal_parameter_count == arity; LPointerMap* pointers = instr->pointer_map(); - RecordPosition(pointers->position()); if (can_invoke_directly) { if (r1_state == R1_UNINITIALIZED) { - __ LoadHeapObject(r1, function); + __ Move(r1, function); } // Change context. @@ -3636,9 +3597,6 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function, __ InvokeFunction( function, expected, count, CALL_FUNCTION, generator, call_kind); } - - // Restore context. - __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); } @@ -3654,6 +3612,8 @@ void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) { void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) { + ASSERT(instr->context() != NULL); + ASSERT(ToRegister(instr->context()).is(cp)); Register input = ToRegister(instr->value()); Register result = ToRegister(instr->result()); Register scratch = scratch0(); @@ -3697,7 +3657,8 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) { // Slow case: Call the runtime system to do the number allocation. __ bind(&slow); - CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr); + CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr, + instr->context()); // Set the pointer to the new heap number in tmp. if (!tmp1.is(r0)) __ mov(tmp1, Operand(r0)); // Restore input_reg after call to runtime. @@ -3872,9 +3833,9 @@ void LCodeGen::DoPower(LPower* instr) { } else if (exponent_type.IsTagged()) { Label no_deopt; __ JumpIfSmi(r2, &no_deopt); - __ ldr(r7, FieldMemOperand(r2, HeapObject::kMapOffset)); + __ ldr(r6, FieldMemOperand(r2, HeapObject::kMapOffset)); __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); - __ cmp(r7, Operand(ip)); + __ cmp(r6, Operand(ip)); DeoptimizeIf(ne, instr->environment()); __ bind(&no_deopt); MathPowStub stub(MathPowStub::TAGGED); @@ -3968,6 +3929,9 @@ void LCodeGen::DoMathExp(LMathExp* instr) { void LCodeGen::DoMathLog(LMathLog* instr) { ASSERT(ToDoubleRegister(instr->result()).is(d2)); + // Set the context register to a GC-safe fake value. Clobbering it is + // OK because this instruction is marked as a call. + __ mov(cp, Operand::Zero()); TranscendentalCacheStub stub(TranscendentalCache::LOG, TranscendentalCacheStub::UNTAGGED); CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); @@ -3976,6 +3940,9 @@ void LCodeGen::DoMathLog(LMathLog* instr) { void LCodeGen::DoMathTan(LMathTan* instr) { ASSERT(ToDoubleRegister(instr->result()).is(d2)); + // Set the context register to a GC-safe fake value. Clobbering it is + // OK because this instruction is marked as a call. 
+ __ mov(cp, Operand::Zero()); TranscendentalCacheStub stub(TranscendentalCache::TAN, TranscendentalCacheStub::UNTAGGED); CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); @@ -3984,6 +3951,9 @@ void LCodeGen::DoMathTan(LMathTan* instr) { void LCodeGen::DoMathCos(LMathCos* instr) { ASSERT(ToDoubleRegister(instr->result()).is(d2)); + // Set the context register to a GC-safe fake value. Clobbering it is + // OK because this instruction is marked as a call. + __ mov(cp, Operand::Zero()); TranscendentalCacheStub stub(TranscendentalCache::COS, TranscendentalCacheStub::UNTAGGED); CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); @@ -3992,6 +3962,9 @@ void LCodeGen::DoMathCos(LMathCos* instr) { void LCodeGen::DoMathSin(LMathSin* instr) { ASSERT(ToDoubleRegister(instr->result()).is(d2)); + // Set the context register to a GC-safe fake value. Clobbering it is + // OK because this instruction is marked as a call. + __ mov(cp, Operand::Zero()); TranscendentalCacheStub stub(TranscendentalCache::SIN, TranscendentalCacheStub::UNTAGGED); CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); @@ -3999,17 +3972,16 @@ void LCodeGen::DoMathSin(LMathSin* instr) { void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) { + ASSERT(ToRegister(instr->context()).is(cp)); ASSERT(ToRegister(instr->function()).is(r1)); ASSERT(instr->HasPointerMap()); Handle<JSFunction> known_function = instr->hydrogen()->known_function(); if (known_function.is_null()) { LPointerMap* pointers = instr->pointer_map(); - RecordPosition(pointers->position()); SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt); ParameterCount count(instr->arity()); __ InvokeFunction(r1, count, CALL_FUNCTION, generator, CALL_AS_METHOD); - __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); } else { CallKnownFunction(known_function, instr->hydrogen()->formal_parameter_count(), @@ -4022,17 +3994,18 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) { void LCodeGen::DoCallKeyed(LCallKeyed* instr) { + ASSERT(ToRegister(instr->context()).is(cp)); ASSERT(ToRegister(instr->result()).is(r0)); int arity = instr->arity(); Handle<Code> ic = isolate()->stub_cache()->ComputeKeyedCallInitialize(arity); CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS); - __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); } void LCodeGen::DoCallNamed(LCallNamed* instr) { + ASSERT(ToRegister(instr->context()).is(cp)); ASSERT(ToRegister(instr->result()).is(r0)); int arity = instr->arity(); @@ -4041,23 +4014,22 @@ void LCodeGen::DoCallNamed(LCallNamed* instr) { isolate()->stub_cache()->ComputeCallInitialize(arity, mode); __ mov(r2, Operand(instr->name())); CallCode(ic, mode, instr, NEVER_INLINE_TARGET_ADDRESS); - // Restore context register. 
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); } void LCodeGen::DoCallFunction(LCallFunction* instr) { + ASSERT(ToRegister(instr->context()).is(cp)); ASSERT(ToRegister(instr->function()).is(r1)); ASSERT(ToRegister(instr->result()).is(r0)); int arity = instr->arity(); CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS); CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); - __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); } void LCodeGen::DoCallGlobal(LCallGlobal* instr) { + ASSERT(ToRegister(instr->context()).is(cp)); ASSERT(ToRegister(instr->result()).is(r0)); int arity = instr->arity(); @@ -4066,7 +4038,6 @@ void LCodeGen::DoCallGlobal(LCallGlobal* instr) { isolate()->stub_cache()->ComputeCallInitialize(arity, mode); __ mov(r2, Operand(instr->name())); CallCode(ic, mode, instr, NEVER_INLINE_TARGET_ADDRESS); - __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); } @@ -4082,6 +4053,7 @@ void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) { void LCodeGen::DoCallNew(LCallNew* instr) { + ASSERT(ToRegister(instr->context()).is(cp)); ASSERT(ToRegister(instr->constructor()).is(r1)); ASSERT(ToRegister(instr->result()).is(r0)); @@ -4095,6 +4067,7 @@ void LCodeGen::DoCallNew(LCallNew* instr) { void LCodeGen::DoCallNewArray(LCallNewArray* instr) { + ASSERT(ToRegister(instr->context()).is(cp)); ASSERT(ToRegister(instr->constructor()).is(r1)); ASSERT(ToRegister(instr->result()).is(r0)); @@ -4169,7 +4142,12 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) { if (access.IsExternalMemory()) { Register value = ToRegister(instr->value()); - __ str(value, MemOperand(object, offset)); + MemOperand operand = MemOperand(object, offset); + if (representation.IsByte()) { + __ strb(value, operand); + } else { + __ str(value, operand); + } return; } @@ -4214,7 +4192,12 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) { instr->hydrogen()->value()->IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; if (access.IsInobject()) { - __ str(value, FieldMemOperand(object, offset)); + MemOperand operand = FieldMemOperand(object, offset); + if (representation.IsByte()) { + __ strb(value, operand); + } else { + __ str(value, operand); + } if (instr->hydrogen()->NeedsWriteBarrier()) { // Update the write barrier for the object for in-object properties. __ RecordWriteField(object, @@ -4228,7 +4211,12 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) { } } else { __ ldr(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset)); - __ str(value, FieldMemOperand(scratch, offset)); + MemOperand operand = FieldMemOperand(scratch, offset); + if (representation.IsByte()) { + __ strb(value, operand); + } else { + __ str(value, operand); + } if (instr->hydrogen()->NeedsWriteBarrier()) { // Update the write barrier for the properties array. // object is used as a scratch register. @@ -4246,6 +4234,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) { void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) { + ASSERT(ToRegister(instr->context()).is(cp)); ASSERT(ToRegister(instr->object()).is(r1)); ASSERT(ToRegister(instr->value()).is(r0)); @@ -4311,16 +4300,23 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) { if (elements_kind == EXTERNAL_FLOAT_ELEMENTS || elements_kind == EXTERNAL_DOUBLE_ELEMENTS) { + Register address = scratch0(); DwVfpRegister value(ToDoubleRegister(instr->value())); - Operand operand(key_is_constant - ? 
Operand(constant_key << element_size_shift) - : Operand(key, LSL, shift_size)); - __ add(scratch0(), external_pointer, operand); + if (key_is_constant) { + if (constant_key != 0) { + __ add(address, external_pointer, + Operand(constant_key << element_size_shift)); + } else { + address = external_pointer; + } + } else { + __ add(address, external_pointer, Operand(key, LSL, shift_size)); + } if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { __ vcvt_f32_f64(double_scratch0().low(), value); - __ vstr(double_scratch0().low(), scratch0(), additional_offset); + __ vstr(double_scratch0().low(), address, additional_offset); } else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS - __ vstr(value, scratch0(), additional_offset); + __ vstr(value, address, additional_offset); } } else { Register value(ToRegister(instr->value())); @@ -4362,32 +4358,28 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) { void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) { DwVfpRegister value = ToDoubleRegister(instr->value()); Register elements = ToRegister(instr->elements()); - Register key = no_reg; Register scratch = scratch0(); + DwVfpRegister double_scratch = double_scratch0(); bool key_is_constant = instr->key()->IsConstantOperand(); - int constant_key = 0; // Calculate the effective address of the slot in the array to store the // double value. + int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS); if (key_is_constant) { - constant_key = ToInteger32(LConstantOperand::cast(instr->key())); + int constant_key = ToInteger32(LConstantOperand::cast(instr->key())); if (constant_key & 0xF0000000) { Abort(kArrayIndexConstantValueTooBig); } + __ add(scratch, elements, + Operand((constant_key << element_size_shift) + + FixedDoubleArray::kHeaderSize - kHeapObjectTag)); } else { - key = ToRegister(instr->key()); - } - int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS); - int shift_size = (instr->hydrogen()->key()->representation().IsSmi()) - ? (element_size_shift - kSmiTagSize) : element_size_shift; - Operand operand = key_is_constant - ? Operand((constant_key << element_size_shift) + - FixedDoubleArray::kHeaderSize - kHeapObjectTag) - : Operand(key, LSL, shift_size); - __ add(scratch, elements, operand); - if (!key_is_constant) { - __ add(scratch, scratch, + int shift_size = (instr->hydrogen()->key()->representation().IsSmi()) + ? 
(element_size_shift - kSmiTagSize) : element_size_shift; + __ add(scratch, elements, Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag)); + __ add(scratch, scratch, + Operand(ToRegister(instr->key()), LSL, shift_size)); } if (instr->NeedsCanonicalization()) { @@ -4397,9 +4389,12 @@ void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) { __ tst(ip, Operand(kVFPDefaultNaNModeControlBit)); __ Assert(ne, kDefaultNaNModeNotSet); } - __ VFPCanonicalizeNaN(value); + __ VFPCanonicalizeNaN(double_scratch, value); + __ vstr(double_scratch, scratch, + instr->additional_index() << element_size_shift); + } else { + __ vstr(value, scratch, instr->additional_index() << element_size_shift); } - __ vstr(value, scratch, instr->additional_index() << element_size_shift); } @@ -4463,6 +4458,7 @@ void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) { void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) { + ASSERT(ToRegister(instr->context()).is(cp)); ASSERT(ToRegister(instr->object()).is(r2)); ASSERT(ToRegister(instr->key()).is(r1)); ASSERT(ToRegister(instr->value()).is(r0)); @@ -4496,6 +4492,7 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) { __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg, scratch, GetLinkRegisterState(), kDontSaveFPRegs); } else { + ASSERT(ToRegister(instr->context()).is(cp)); PushSafepointRegistersScope scope( this, Safepoint::kWithRegistersAndDoubles); __ Move(r0, object_reg); @@ -4512,12 +4509,15 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) { void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) { Register object = ToRegister(instr->object()); Register temp = ToRegister(instr->temp()); - __ TestJSArrayForAllocationMemento(object, temp); + Label no_memento_found; + __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found); DeoptimizeIf(eq, instr->environment()); + __ bind(&no_memento_found); } void LCodeGen::DoStringAdd(LStringAdd* instr) { + ASSERT(ToRegister(instr->context()).is(cp)); __ push(ToRegister(instr->left())); __ push(ToRegister(instr->right())); StringAddStub stub(instr->hydrogen()->flags()); @@ -4573,7 +4573,8 @@ void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) { __ SmiTag(index); __ push(index); } - CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr); + CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr, + instr->context()); __ AssertSmi(r0); __ SmiUntag(r0); __ StoreToSafepointRegisterSlot(r0, result); @@ -4625,7 +4626,7 @@ void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) { PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); __ SmiTag(char_code); __ push(char_code); - CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr); + CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context()); __ StoreToSafepointRegisterSlot(r0, result); } @@ -4649,9 +4650,7 @@ void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) { void LCodeGen::DoInteger32ToSmi(LInteger32ToSmi* instr) { LOperand* input = instr->value(); - ASSERT(input->IsRegister()); LOperand* output = instr->result(); - ASSERT(output->IsRegister()); __ SmiTag(ToRegister(output), ToRegister(input), SetCC); if (!instr->hydrogen()->value()->HasRange() || !instr->hydrogen()->value()->range()->IsInSmiRange()) { @@ -4670,6 +4669,18 @@ void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) { } +void LCodeGen::DoUint32ToSmi(LUint32ToSmi* instr) { + LOperand* input = instr->value(); + LOperand* output = 
instr->result(); + if (!instr->hydrogen()->value()->HasRange() || + !instr->hydrogen()->value()->range()->IsInSmiRange()) { + __ tst(ToRegister(input), Operand(0xc0000000)); + DeoptimizeIf(ne, instr->environment()); + } + __ SmiTag(ToRegister(output), ToRegister(input)); +} + + void LCodeGen::DoNumberTagI(LNumberTagI* instr) { class DeferredNumberTagI V8_FINAL : public LDeferredCode { public: @@ -4764,7 +4775,15 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr, // integer value. __ mov(ip, Operand::Zero()); __ StoreToSafepointRegisterSlot(ip, dst); - CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr); + // NumberTagI and NumberTagD use the context from the frame, rather than + // the environment's HContext or HInlinedContext value. + // They only call Runtime::kAllocateHeapNumber. + // The corresponding HChange instructions are added in a phase that does + // not have easy access to the local context. + __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); + __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber); + RecordSafepointWithRegisters( + instr->pointer_map(), 0, Safepoint::kNoLazyDeopt); __ Move(dst, r0); __ sub(dst, dst, Operand(kHeapObjectTag)); @@ -4820,7 +4839,15 @@ void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) { __ mov(reg, Operand::Zero()); PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); - CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr); + // NumberTagI and NumberTagD use the context from the frame, rather than + // the environment's HContext or HInlinedContext value. + // They only call Runtime::kAllocateHeapNumber. + // The corresponding HChange instructions are added in a phase that does + // not have easy access to the local context. + __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); + __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber); + RecordSafepointWithRegisters( + instr->pointer_map(), 0, Safepoint::kNoLazyDeopt); __ sub(r0, r0, Operand(kHeapObjectTag)); __ StoreToSafepointRegisterSlot(r0, reg); } @@ -4855,36 +4882,20 @@ void LCodeGen::EmitNumberUntagD(Register input_reg, Register scratch = scratch0(); SwVfpRegister flt_scratch = double_scratch0().low(); ASSERT(!result_reg.is(double_scratch0())); - - Label load_smi, heap_number, done; - + Label convert, load_smi, done; if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) { // Smi check. __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi); - // Heap number map check. __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); __ cmp(scratch, Operand(ip)); - if (!can_convert_undefined_to_nan) { - DeoptimizeIf(ne, env); + if (can_convert_undefined_to_nan) { + __ b(ne, &convert); } else { - Label heap_number, convert; - __ b(eq, &heap_number); - - // Convert undefined (and hole) to NaN. - __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); - __ cmp(input_reg, Operand(ip)); DeoptimizeIf(ne, env); - - __ bind(&convert); - __ LoadRoot(scratch, Heap::kNanValueRootIndex); - __ vldr(result_reg, scratch, HeapNumber::kValueOffset - kHeapObjectTag); - __ jmp(&done); - - __ bind(&heap_number); } - // Heap number to double register conversion. 
+ // load heap number __ vldr(result_reg, input_reg, HeapNumber::kValueOffset - kHeapObjectTag); if (deoptimize_on_minus_zero) { __ VmovLow(scratch, result_reg); @@ -4895,11 +4906,20 @@ void LCodeGen::EmitNumberUntagD(Register input_reg, DeoptimizeIf(eq, env); } __ jmp(&done); + if (can_convert_undefined_to_nan) { + __ bind(&convert); + // Convert undefined (and hole) to NaN. + __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); + __ cmp(input_reg, Operand(ip)); + DeoptimizeIf(ne, env); + __ LoadRoot(scratch, Heap::kNanValueRootIndex); + __ vldr(result_reg, scratch, HeapNumber::kValueOffset - kHeapObjectTag); + __ jmp(&done); + } } else { __ SmiUntag(scratch, input_reg); ASSERT(mode == NUMBER_CANDIDATE_IS_SMI); } - // Smi to double register conversion __ bind(&load_smi); // scratch: untagged value of input_reg @@ -4935,18 +4955,33 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) { if (instr->truncating()) { // Performs a truncating conversion of a floating point number as used by // the JS bitwise operations. - Label heap_number; - __ b(eq, &heap_number); - // Check for undefined. Undefined is converted to zero for truncating - // conversions. + Label no_heap_number, check_bools, check_false; + __ b(ne, &no_heap_number); + __ TruncateHeapNumberToI(input_reg, scratch2); + __ b(&done); + + // Check for Oddballs. Undefined/False is converted to zero and True to one + // for truncating conversions. + __ bind(&no_heap_number); __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); __ cmp(scratch2, Operand(ip)); - DeoptimizeIf(ne, instr->environment()); + __ b(ne, &check_bools); __ mov(input_reg, Operand::Zero()); __ b(&done); - __ bind(&heap_number); - __ TruncateHeapNumberToI(input_reg, scratch2); + __ bind(&check_bools); + __ LoadRoot(ip, Heap::kTrueValueRootIndex); + __ cmp(scratch2, Operand(ip)); + __ b(ne, &check_false); + __ mov(input_reg, Operand(1)); + __ b(&done); + + __ bind(&check_false); + __ LoadRoot(ip, Heap::kFalseValueRootIndex); + __ cmp(scratch2, Operand(ip)); + DeoptimizeIf(ne, instr->environment()); + __ mov(input_reg, Operand::Zero()); + __ b(&done); } else { // Deoptimize if we don't have a heap number. DeoptimizeIf(ne, instr->environment()); @@ -4987,15 +5022,19 @@ void LCodeGen::DoTaggedToI(LTaggedToI* instr) { Register input_reg = ToRegister(input); - DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr); + if (instr->hydrogen()->value()->representation().IsSmi()) { + __ SmiUntag(input_reg); + } else { + DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr); - // Optimistically untag the input. - // If the input is a HeapObject, SmiUntag will set the carry flag. - __ SmiUntag(input_reg, SetCC); - // Branch to deferred code if the input was tagged. - // The deferred code will take care of restoring the tag. - __ b(cs, deferred->entry()); - __ bind(deferred->exit()); + // Optimistically untag the input. + // If the input is a HeapObject, SmiUntag will set the carry flag. + __ SmiUntag(input_reg, SetCC); + // Branch to deferred code if the input was tagged. + // The deferred code will take care of restoring the tag. 
+ __ b(cs, deferred->entry()); + __ bind(deferred->exit()); + } } @@ -5133,7 +5172,7 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) { void LCodeGen::DoCheckValue(LCheckValue* instr) { Register reg = ToRegister(instr->value()); - Handle<HeapObject> object = instr->hydrogen()->object(); + Handle<HeapObject> object = instr->hydrogen()->object().handle(); AllowDeferredHandleDereference smi_check; if (isolate()->heap()->InNewSpace(*object)) { Register reg = ToRegister(instr->value()); @@ -5152,7 +5191,10 @@ void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) { { PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); __ push(object); - CallRuntimeFromDeferred(Runtime::kMigrateInstance, 1, instr); + __ mov(cp, Operand::Zero()); + __ CallRuntimeSaveDoubles(Runtime::kMigrateInstance); + RecordSafepointWithRegisters( + instr->pointer_map(), 1, Safepoint::kNoLazyDeopt); __ StoreToSafepointRegisterSlot(r0, scratch0()); } __ tst(scratch0(), Operand(kSmiTagMask)); @@ -5185,7 +5227,6 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) { ASSERT(input->IsRegister()); Register reg = ToRegister(input); - SmallMapList* map_set = instr->hydrogen()->map_set(); __ ldr(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset)); DeferredCheckMaps* deferred = NULL; @@ -5194,14 +5235,15 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) { __ bind(deferred->check_maps()); } + UniqueSet<Map> map_set = instr->hydrogen()->map_set(); Label success; - for (int i = 0; i < map_set->length() - 1; i++) { - Handle<Map> map = map_set->at(i); + for (int i = 0; i < map_set.size() - 1; i++) { + Handle<Map> map = map_set.at(i).handle(); __ CompareMap(map_reg, map, &success); __ b(eq, &success); } - Handle<Map> map = map_set->last(); + Handle<Map> map = map_set.at(map_set.size() - 1).handle(); __ CompareMap(map_reg, map, &success); if (instr->hydrogen()->has_migration_target()) { __ b(ne, deferred->entry()); @@ -5355,12 +5397,15 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) { if (instr->hydrogen()->IsOldPointerSpaceAllocation()) { ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation()); ASSERT(!instr->hydrogen()->IsNewSpaceAllocation()); - CallRuntimeFromDeferred(Runtime::kAllocateInOldPointerSpace, 1, instr); + CallRuntimeFromDeferred(Runtime::kAllocateInOldPointerSpace, 1, instr, + instr->context()); } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) { ASSERT(!instr->hydrogen()->IsNewSpaceAllocation()); - CallRuntimeFromDeferred(Runtime::kAllocateInOldDataSpace, 1, instr); + CallRuntimeFromDeferred(Runtime::kAllocateInOldDataSpace, 1, instr, + instr->context()); } else { - CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr); + CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr, + instr->context()); } __ StoreToSafepointRegisterSlot(r0, result); } @@ -5374,26 +5419,27 @@ void LCodeGen::DoToFastProperties(LToFastProperties* instr) { void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) { + ASSERT(ToRegister(instr->context()).is(cp)); Label materialized; // Registers will be used as follows: - // r7 = literals array. + // r6 = literals array. // r1 = regexp literal. // r0 = regexp literal clone. - // r2 and r4-r6 are used as temporaries. + // r2-r5 are used as temporaries. 
int literal_offset = FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index()); - __ LoadHeapObject(r7, instr->hydrogen()->literals()); - __ ldr(r1, FieldMemOperand(r7, literal_offset)); + __ Move(r6, instr->hydrogen()->literals()); + __ ldr(r1, FieldMemOperand(r6, literal_offset)); __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); __ cmp(r1, ip); __ b(ne, &materialized); // Create regexp literal using runtime function // Result will be in r0. - __ mov(r6, Operand(Smi::FromInt(instr->hydrogen()->literal_index()))); - __ mov(r5, Operand(instr->hydrogen()->pattern())); - __ mov(r4, Operand(instr->hydrogen()->flags())); - __ Push(r7, r6, r5, r4); + __ mov(r5, Operand(Smi::FromInt(instr->hydrogen()->literal_index()))); + __ mov(r4, Operand(instr->hydrogen()->pattern())); + __ mov(r3, Operand(instr->hydrogen()->flags())); + __ Push(r6, r5, r4, r3); CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr); __ mov(r1, r0); @@ -5417,6 +5463,7 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) { void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) { + ASSERT(ToRegister(instr->context()).is(cp)); // Use the fast case closure allocation code that allocates in new // space for nested functions that don't need literals cloning. bool pretenure = instr->hydrogen()->pretenure(); @@ -5560,16 +5607,15 @@ void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) { } -void LCodeGen::EnsureSpaceForLazyDeopt() { +void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) { if (info()->IsStub()) return; // Ensure that we have enough space after the previous lazy-bailout // instruction for patching the code here. int current_pc = masm()->pc_offset(); - int patch_size = Deoptimizer::patch_size(); - if (current_pc < last_lazy_deopt_pc_ + patch_size) { + if (current_pc < last_lazy_deopt_pc_ + space_needed) { // Block literal pool emission for duration of padding. 
Assembler::BlockConstPoolScope block_const_pool(masm()); - int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc; + int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc; ASSERT_EQ(0, padding_size % Assembler::kInstrSize); while (padding_size > 0) { __ nop(); @@ -5580,7 +5626,7 @@ void LCodeGen::EnsureSpaceForLazyDeopt() { void LCodeGen::DoLazyBailout(LLazyBailout* instr) { - EnsureSpaceForLazyDeopt(); + EnsureSpaceForLazyDeopt(Deoptimizer::patch_size()); last_lazy_deopt_pc_ = masm()->pc_offset(); ASSERT(instr->HasEnvironment()); LEnvironment* env = instr->environment(); @@ -5611,6 +5657,7 @@ void LCodeGen::DoDummyUse(LDummyUse* instr) { void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) { PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); + LoadContextFromDeferred(instr->context()); __ CallRuntimeSaveDoubles(Runtime::kStackGuard); RecordSafepointWithLazyDeopt( instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); @@ -5644,10 +5691,12 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) { __ cmp(sp, Operand(ip)); __ b(hs, &done); PredictableCodeSizeScope predictable(masm_, 2 * Assembler::kInstrSize); + ASSERT(instr->context()->IsRegister()); + ASSERT(ToRegister(instr->context()).is(cp)); CallCode(isolate()->builtins()->StackCheck(), - RelocInfo::CODE_TARGET, - instr); - EnsureSpaceForLazyDeopt(); + RelocInfo::CODE_TARGET, + instr); + EnsureSpaceForLazyDeopt(Deoptimizer::patch_size()); last_lazy_deopt_pc_ = masm()->pc_offset(); __ bind(&done); RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt); @@ -5660,7 +5709,7 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) { __ LoadRoot(ip, Heap::kStackLimitRootIndex); __ cmp(sp, Operand(ip)); __ b(lo, deferred_stack_check->entry()); - EnsureSpaceForLazyDeopt(); + EnsureSpaceForLazyDeopt(Deoptimizer::patch_size()); last_lazy_deopt_pc_ = masm()->pc_offset(); __ bind(instr->done_label()); deferred_stack_check->SetExit(instr->done_label()); diff --git a/deps/v8/src/arm/lithium-codegen-arm.h b/deps/v8/src/arm/lithium-codegen-arm.h index 4b6b5ca8e3..a9b85c89cc 100644 --- a/deps/v8/src/arm/lithium-codegen-arm.h +++ b/deps/v8/src/arm/lithium-codegen-arm.h @@ -32,6 +32,7 @@ #include "arm/lithium-gap-resolver-arm.h" #include "deoptimizer.h" +#include "lithium-codegen.h" #include "safepoint-table.h" #include "scopes.h" #include "v8utils.h" @@ -43,43 +44,26 @@ namespace internal { class LDeferredCode; class SafepointGenerator; -class LCodeGen V8_FINAL BASE_EMBEDDED { +class LCodeGen: public LCodeGenBase { public: LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info) - : zone_(info->zone()), - chunk_(static_cast<LPlatformChunk*>(chunk)), - masm_(assembler), - info_(info), - current_block_(-1), - current_instruction_(-1), - instructions_(chunk->instructions()), + : LCodeGenBase(chunk, assembler, info), deoptimizations_(4, info->zone()), deopt_jump_table_(4, info->zone()), deoptimization_literals_(8, info->zone()), inlined_function_count_(0), scope_(info->scope()), - status_(UNUSED), translations_(info->zone()), deferred_(8, info->zone()), osr_pc_offset_(-1), - last_lazy_deopt_pc_(0), frame_is_built_(false), safepoints_(info->zone()), resolver_(this), - expected_safepoint_kind_(Safepoint::kSimple), - old_position_(RelocInfo::kNoPosition) { + expected_safepoint_kind_(Safepoint::kSimple) { PopulateDeoptimizationLiteralsWithInlinedFunctions(); } - // Simple accessors. 
- MacroAssembler* masm() const { return masm_; } - CompilationInfo* info() const { return info_; } - Isolate* isolate() const { return info_->isolate(); } - Factory* factory() const { return isolate()->factory(); } - Heap* heap() const { return isolate()->heap(); } - Zone* zone() const { return zone_; } - int LookupDestination(int block_id) const { return chunk()->LookupDestination(block_id); } @@ -178,30 +162,15 @@ class LCodeGen V8_FINAL BASE_EMBEDDED { #undef DECLARE_DO private: - enum Status { - UNUSED, - GENERATING, - DONE, - ABORTED - }; - - bool is_unused() const { return status_ == UNUSED; } - bool is_generating() const { return status_ == GENERATING; } - bool is_done() const { return status_ == DONE; } - bool is_aborted() const { return status_ == ABORTED; } - StrictModeFlag strict_mode_flag() const { return info()->is_classic_mode() ? kNonStrictMode : kStrictMode; } - LPlatformChunk* chunk() const { return chunk_; } Scope* scope() const { return scope_; } - HGraph* graph() const { return chunk()->graph(); } Register scratch0() { return r9; } LowDwVfpRegister double_scratch0() { return kScratchDoubleReg; } - int GetNextEmittedBlock() const; LInstruction* GetNextInstruction(); void EmitClassOfTest(Label* if_true, @@ -214,14 +183,12 @@ class LCodeGen V8_FINAL BASE_EMBEDDED { int GetStackSlotCount() const { return chunk()->spill_slot_count(); } void Abort(BailoutReason reason); - void FPRINTF_CHECKING Comment(const char* format, ...); void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); } // Code generation passes. Returns true if code generation should // continue. bool GeneratePrologue(); - bool GenerateBody(); bool GenerateDeferredCode(); bool GenerateDeoptJumpTable(); bool GenerateSafepointTable(); @@ -249,7 +216,8 @@ class LCodeGen V8_FINAL BASE_EMBEDDED { void CallRuntime(const Runtime::Function* function, int num_arguments, - LInstruction* instr); + LInstruction* instr, + SaveFPRegsMode save_doubles = kDontSaveFPRegs); void CallRuntime(Runtime::FunctionId id, int num_arguments, @@ -258,9 +226,11 @@ class LCodeGen V8_FINAL BASE_EMBEDDED { CallRuntime(function, num_arguments, instr); } + void LoadContextFromDeferred(LOperand* context); void CallRuntimeFromDeferred(Runtime::FunctionId id, int argc, - LInstruction* instr); + LInstruction* instr, + LOperand* context); enum R1State { R1_UNINITIALIZED, @@ -276,8 +246,6 @@ class LCodeGen V8_FINAL BASE_EMBEDDED { CallKind call_kind, R1State r1_state); - void LoadHeapObject(Register result, Handle<HeapObject> object); - void RecordSafepointWithLazyDeopt(LInstruction* instr, SafepointMode safepoint_mode); @@ -320,8 +288,8 @@ class LCodeGen V8_FINAL BASE_EMBEDDED { void RecordSafepointWithRegistersAndDoubles(LPointerMap* pointers, int arguments, Safepoint::DeoptMode mode); - void RecordPosition(int position); - void RecordAndUpdatePosition(int position); + + void RecordAndWritePosition(int position) V8_OVERRIDE; static Condition TokenToCondition(Token::Value op, bool is_unsigned); void EmitGoto(int block); @@ -383,7 +351,7 @@ class LCodeGen V8_FINAL BASE_EMBEDDED { Register scratch, LEnvironment* environment); - void EnsureSpaceForLazyDeopt(); + void EnsureSpaceForLazyDeopt(int space_needed) V8_OVERRIDE; void DoLoadKeyedExternalArray(LLoadKeyed* instr); void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr); void DoLoadKeyedFixedArray(LLoadKeyed* instr); @@ -391,24 +359,14 @@ class LCodeGen V8_FINAL BASE_EMBEDDED { void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr); void DoStoreKeyedFixedArray(LStoreKeyed* instr); - 
Zone* zone_; - LPlatformChunk* const chunk_; - MacroAssembler* const masm_; - CompilationInfo* const info_; - - int current_block_; - int current_instruction_; - const ZoneList<LInstruction*>* instructions_; ZoneList<LEnvironment*> deoptimizations_; ZoneList<Deoptimizer::JumpTableEntry> deopt_jump_table_; ZoneList<Handle<Object> > deoptimization_literals_; int inlined_function_count_; Scope* const scope_; - Status status_; TranslationBuffer translations_; ZoneList<LDeferredCode*> deferred_; int osr_pc_offset_; - int last_lazy_deopt_pc_; bool frame_is_built_; // Builder that keeps track of safepoints in the code. The table @@ -420,8 +378,6 @@ class LCodeGen V8_FINAL BASE_EMBEDDED { Safepoint::Kind expected_safepoint_kind_; - int old_position_; - class PushSafepointRegistersScope V8_FINAL BASE_EMBEDDED { public: PushSafepointRegistersScope(LCodeGen* codegen, diff --git a/deps/v8/src/arm/lithium-gap-resolver-arm.cc b/deps/v8/src/arm/lithium-gap-resolver-arm.cc index 88ac7a2a21..0c6b2adadf 100644 --- a/deps/v8/src/arm/lithium-gap-resolver-arm.cc +++ b/deps/v8/src/arm/lithium-gap-resolver-arm.cc @@ -252,7 +252,7 @@ void LGapResolver::EmitMove(int index) { if (cgen_->IsInteger32(constant_source)) { __ mov(dst, Operand(cgen_->ToRepresentation(constant_source, r))); } else { - __ LoadObject(dst, cgen_->ToHandle(constant_source)); + __ Move(dst, cgen_->ToHandle(constant_source)); } } else if (destination->IsDoubleRegister()) { DwVfpRegister result = cgen_->ToDoubleRegister(destination); @@ -267,7 +267,7 @@ void LGapResolver::EmitMove(int index) { __ mov(kSavedValueRegister, Operand(cgen_->ToRepresentation(constant_source, r))); } else { - __ LoadObject(kSavedValueRegister, + __ Move(kSavedValueRegister, cgen_->ToHandle(constant_source)); } __ str(kSavedValueRegister, cgen_->ToMemOperand(destination)); diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc index 7df785776d..d8771cb702 100644 --- a/deps/v8/src/arm/macro-assembler-arm.cc +++ b/deps/v8/src/arm/macro-assembler-arm.cc @@ -35,6 +35,7 @@ #include "codegen.h" #include "cpu-profiler.h" #include "debug.h" +#include "isolate-inl.h" #include "runtime.h" namespace v8 { @@ -233,7 +234,19 @@ void MacroAssembler::Push(Handle<Object> handle) { void MacroAssembler::Move(Register dst, Handle<Object> value) { - mov(dst, Operand(value)); + AllowDeferredHandleDereference smi_check; + if (value->IsSmi()) { + mov(dst, Operand(value)); + } else { + ASSERT(value->IsHeapObject()); + if (isolate()->heap()->InNewSpace(*value)) { + Handle<Cell> cell = isolate()->factory()->NewCell(value); + mov(dst, Operand(cell)); + ldr(dst, FieldMemOperand(dst, Cell::kValueOffset)); + } else { + mov(dst, Operand(value)); + } + } } @@ -394,19 +407,6 @@ void MacroAssembler::StoreRoot(Register source, } -void MacroAssembler::LoadHeapObject(Register result, - Handle<HeapObject> object) { - AllowDeferredHandleDereference using_raw_address; - if (isolate()->heap()->InNewSpace(*object)) { - Handle<Cell> cell = isolate()->factory()->NewCell(object); - mov(result, Operand(cell)); - ldr(result, FieldMemOperand(result, Cell::kValueOffset)); - } else { - mov(result, Operand(object)); - } -} - - void MacroAssembler::InNewSpace(Register object, Register scratch, Condition cond, @@ -478,11 +478,6 @@ void MacroAssembler::RecordWrite(Register object, SaveFPRegsMode fp_mode, RememberedSetAction remembered_set_action, SmiCheck smi_check) { - // The compiled code assumes that record write doesn't change the - // context register, so we check that none of the 
clobbered - // registers are cp. - ASSERT(!address.is(cp) && !value.is(cp)); - if (emit_debug_code()) { ldr(ip, MemOperand(address)); cmp(ip, value); @@ -733,9 +728,11 @@ void MacroAssembler::VFPEnsureFPSCRState(Register scratch) { bind(&fpscr_done); } -void MacroAssembler::VFPCanonicalizeNaN(const DwVfpRegister value, + +void MacroAssembler::VFPCanonicalizeNaN(const DwVfpRegister dst, + const DwVfpRegister src, const Condition cond) { - vsub(value, value, kDoubleRegZero, cond); + vsub(dst, src, kDoubleRegZero, cond); } @@ -919,6 +916,33 @@ void MacroAssembler::LoadNumberAsInt32(Register object, } +void MacroAssembler::Prologue(PrologueFrameMode frame_mode) { + if (frame_mode == BUILD_STUB_FRAME) { + stm(db_w, sp, cp.bit() | fp.bit() | lr.bit()); + Push(Smi::FromInt(StackFrame::STUB)); + // Adjust FP to point to saved FP. + add(fp, sp, Operand(2 * kPointerSize)); + } else { + PredictableCodeSizeScope predictible_code_size_scope( + this, kNoCodeAgeSequenceLength * Assembler::kInstrSize); + // The following three instructions must remain together and unmodified + // for code aging to work properly. + if (isolate()->IsCodePreAgingActive()) { + // Pre-age the code. + Code* stub = Code::GetPreAgedCodeAgeStub(isolate()); + add(r0, pc, Operand(-8)); + ldr(pc, MemOperand(pc, -4)); + dd(reinterpret_cast<uint32_t>(stub->instruction_start())); + } else { + stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit()); + nop(ip.code()); + // Adjust FP to point to saved FP. + add(fp, sp, Operand(2 * kPointerSize)); + } + } +} + + void MacroAssembler::EnterFrame(StackFrame::Type type) { // r0-r3: preserved stm(db_w, sp, cp.bit() | fp.bit() | lr.bit()); @@ -1020,7 +1044,8 @@ int MacroAssembler::ActivationFrameAlignment() { void MacroAssembler::LeaveExitFrame(bool save_doubles, - Register argument_count) { + Register argument_count, + bool restore_context) { // Optionally restore all double registers. if (save_doubles) { // Calculate the stack location of the saved doubles and restore them. @@ -1035,10 +1060,14 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate()))); str(r3, MemOperand(ip)); + // Restore current context from top and clear it in debug mode. - mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate()))); - ldr(cp, MemOperand(ip)); + if (restore_context) { + mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate()))); + ldr(cp, MemOperand(ip)); + } #ifdef DEBUG + mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate()))); str(r3, MemOperand(ip)); #endif @@ -1256,7 +1285,7 @@ void MacroAssembler::InvokeFunction(Handle<JSFunction> function, ASSERT(flag == JUMP_FUNCTION || has_frame()); // Get the function and setup the context. - LoadHeapObject(r1, function); + Move(r1, function); ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset)); // We call indirectly through the code field in the function to @@ -1330,7 +1359,7 @@ void MacroAssembler::PushTryHandler(StackHandler::Kind kind, STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize); STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize); - // For the JSEntry handler, we must preserve r0-r4, r5-r7 are available. + // For the JSEntry handler, we must preserve r0-r4, r5-r6 are available. // We will build up the handler from the bottom by pushing on the stack. // Set up the code object (r5) and the state (r6) for pushing. 
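The new MacroAssembler::Move above folds the old LoadHeapObject into a single helper: smis and old-space objects are embedded as immediates, while new-space objects are reached through a Cell, since the GC may move them. A toy model, under the assumption that only the cell's slot is updated when the object moves:

#include <cstdio>

// Toy model (not V8 code): a Cell is a non-moving slot that the GC keeps
// pointed at the object's current location.
struct Cell { const int* value; };

int main() {
  int old_space_obj = 1;          // Assumed immovable: embed its address.
  int new_space_obj = 2;          // May be moved by the GC: use a cell.
  Cell cell = { &new_space_obj };

  const int* direct = &old_space_obj;  // mov(dst, Operand(value))
  const int* via_cell = cell.value;    // mov(dst, Operand(cell)); ldr(dst, ...)
  std::printf("%d %d\n", *direct, *via_cell);

  int moved = new_space_obj;      // The GC relocates the object...
  cell.value = &moved;            // ...and updates only the cell's slot.
  std::printf("%d\n", *cell.value);
}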
unsigned state = @@ -1341,9 +1370,9 @@ void MacroAssembler::PushTryHandler(StackHandler::Kind kind, // Push the frame pointer, context, state, and code object. if (kind == StackHandler::JS_ENTRY) { - mov(r7, Operand(Smi::FromInt(0))); // Indicates no context. + mov(cp, Operand(Smi::FromInt(0))); // Indicates no context. mov(ip, Operand::Zero()); // NULL frame pointer. - stm(db_w, sp, r5.bit() | r6.bit() | r7.bit() | ip.bit()); + stm(db_w, sp, r5.bit() | r6.bit() | cp.bit() | ip.bit()); } else { stm(db_w, sp, r5.bit() | r6.bit() | cp.bit() | fp.bit()); } @@ -2280,12 +2309,14 @@ static int AddressOffset(ExternalReference ref0, ExternalReference ref1) { } -void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function, - Address function_address, - ExternalReference thunk_ref, - Register thunk_last_arg, - int stack_space, - int return_value_offset) { +void MacroAssembler::CallApiFunctionAndReturn( + ExternalReference function, + Address function_address, + ExternalReference thunk_ref, + Register thunk_last_arg, + int stack_space, + MemOperand return_value_operand, + MemOperand* context_restore_operand) { ExternalReference next_address = ExternalReference::handle_scope_next_address(isolate()); const int kNextOffset = 0; @@ -2296,13 +2327,15 @@ void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function, ExternalReference::handle_scope_level_address(isolate()), next_address); + ASSERT(!thunk_last_arg.is(r3)); + // Allocate HandleScope in callee-save registers. - mov(r7, Operand(next_address)); - ldr(r4, MemOperand(r7, kNextOffset)); - ldr(r5, MemOperand(r7, kLimitOffset)); - ldr(r6, MemOperand(r7, kLevelOffset)); + mov(r9, Operand(next_address)); + ldr(r4, MemOperand(r9, kNextOffset)); + ldr(r5, MemOperand(r9, kLimitOffset)); + ldr(r6, MemOperand(r9, kLevelOffset)); add(r6, r6, Operand(1)); - str(r6, MemOperand(r7, kLevelOffset)); + str(r6, MemOperand(r9, kLevelOffset)); if (FLAG_log_timer_events) { FrameScope frame(this, StackFrame::MANUAL); @@ -2313,7 +2346,6 @@ void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function, PopSafepointRegisters(); } - ASSERT(!thunk_last_arg.is(r3)); Label profiler_disabled; Label end_profiler_check; bool* is_profiling_flag = @@ -2349,24 +2381,25 @@ void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function, } Label promote_scheduled_exception; + Label exception_handled; Label delete_allocated_handles; Label leave_exit_frame; Label return_value_loaded; // load value from ReturnValue - ldr(r0, MemOperand(fp, return_value_offset*kPointerSize)); + ldr(r0, return_value_operand); bind(&return_value_loaded); // No more valid handles (the result handle was the last one). Restore // previous handle scope. 
- str(r4, MemOperand(r7, kNextOffset)); + str(r4, MemOperand(r9, kNextOffset)); if (emit_debug_code()) { - ldr(r1, MemOperand(r7, kLevelOffset)); + ldr(r1, MemOperand(r9, kLevelOffset)); cmp(r1, r6); Check(eq, kUnexpectedLevelAfterReturnFromApiCall); } sub(r6, r6, Operand(1)); - str(r6, MemOperand(r7, kLevelOffset)); - ldr(ip, MemOperand(r7, kLimitOffset)); + str(r6, MemOperand(r9, kLevelOffset)); + ldr(ip, MemOperand(r9, kLimitOffset)); cmp(r5, ip); b(ne, &delete_allocated_handles); @@ -2377,21 +2410,29 @@ void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function, ldr(r5, MemOperand(ip)); cmp(r4, r5); b(ne, &promote_scheduled_exception); + bind(&exception_handled); + bool restore_context = context_restore_operand != NULL; + if (restore_context) { + ldr(cp, *context_restore_operand); + } // LeaveExitFrame expects unwind space to be in a register. mov(r4, Operand(stack_space)); - LeaveExitFrame(false, r4); + LeaveExitFrame(false, r4, !restore_context); mov(pc, lr); bind(&promote_scheduled_exception); - TailCallExternalReference( - ExternalReference(Runtime::kPromoteScheduledException, isolate()), - 0, - 1); + { + FrameScope frame(this, StackFrame::INTERNAL); + CallExternalReference( + ExternalReference(Runtime::kPromoteScheduledException, isolate()), + 0); + } + jmp(&exception_handled); // HandleScope limit has changed. Delete allocated extensions. bind(&delete_allocated_handles); - str(r5, MemOperand(r7, kLimitOffset)); + str(r5, MemOperand(r9, kLimitOffset)); mov(r4, r0); PrepareCallCFunction(1, r5); mov(r0, Operand(ExternalReference::isolate_address(isolate()))); @@ -2603,7 +2644,8 @@ void MacroAssembler::GetLeastBitsFromInt32(Register dst, void MacroAssembler::CallRuntime(const Runtime::Function* f, - int num_arguments) { + int num_arguments, + SaveFPRegsMode save_doubles) { // All parameters are on the stack. r0 has the return value after call. // If the expected number of arguments of the runtime function is @@ -2620,21 +2662,7 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f, // smarter. mov(r0, Operand(num_arguments)); mov(r1, Operand(ExternalReference(f, isolate()))); - CEntryStub stub(1); - CallStub(&stub); -} - - -void MacroAssembler::CallRuntime(Runtime::FunctionId fid, int num_arguments) { - CallRuntime(Runtime::FunctionForId(fid), num_arguments); -} - - -void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) { - const Runtime::Function* function = Runtime::FunctionForId(id); - mov(r0, Operand(function->nargs)); - mov(r1, Operand(ExternalReference(function, isolate()))); - CEntryStub stub(1, kSaveFPRegs); + CEntryStub stub(1, save_doubles); CallStub(&stub); } @@ -3079,6 +3107,88 @@ void MacroAssembler::JumpIfNotHeapNumber(Register object, } +void MacroAssembler::LookupNumberStringCache(Register object, + Register result, + Register scratch1, + Register scratch2, + Register scratch3, + Label* not_found) { + // Use of registers. Register result is used as a temporary. + Register number_string_cache = result; + Register mask = scratch3; + + // Load the number string cache. + LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex); + + // Make the hash mask from the length of the number string cache. It + // contains two elements (number and string) for each cache entry. + ldr(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset)); + // Divide length by two (length is a smi). + mov(mask, Operand(mask, ASR, kSmiTagSize + 1)); + sub(mask, mask, Operand(1)); // Make mask. 
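The reworked CallApiFunctionAndReturn above keeps the HandleScope data in r9 (r7 is now reserved) and checks the level and limit after the callback returns. A hedged model of that bookkeeping, with plain integer fields standing in for the real HandleScopeData:

#include <cstdio>

// Simplified model: save next/limit, bump level before the call, then
// restore next, drop level, and delete any extensions the callback
// allocated (detected by the limit having changed).
struct HandleScopeData { int next; int limit; int level; };

void GrowScope(HandleScopeData* hs) {  // Stands in for the API callback.
  hs->next += 8;
  hs->limit += 8;                      // Allocated an extension block.
}

void CallApiFunction(HandleScopeData* hs, void (*callback)(HandleScopeData*)) {
  int saved_next = hs->next;
  int saved_limit = hs->limit;
  hs->level++;
  callback(hs);
  hs->next = saved_next;               // No more valid handles.
  hs->level--;
  if (hs->limit != saved_limit) {      // Limit changed: delete extensions.
    hs->limit = saved_limit;
    std::printf("deleting allocated handle extensions\n");
  }
}

int main() {
  HandleScopeData hs = {0, 16, 0};
  CallApiFunction(&hs, GrowScope);
  std::printf("next=%d limit=%d level=%d\n", hs.next, hs.limit, hs.level);
}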
+ + // Calculate the entry in the number string cache. The hash value in the + // number string cache for smis is just the smi value, and the hash for + // doubles is the xor of the upper and lower words. See + // Heap::GetNumberStringCache. + Label is_smi; + Label load_result_from_cache; + JumpIfSmi(object, &is_smi); + CheckMap(object, + scratch1, + Heap::kHeapNumberMapRootIndex, + not_found, + DONT_DO_SMI_CHECK); + + STATIC_ASSERT(8 == kDoubleSize); + add(scratch1, + object, + Operand(HeapNumber::kValueOffset - kHeapObjectTag)); + ldm(ia, scratch1, scratch1.bit() | scratch2.bit()); + eor(scratch1, scratch1, Operand(scratch2)); + and_(scratch1, scratch1, Operand(mask)); + + // Calculate address of entry in string cache: each entry consists + // of two pointer sized fields. + add(scratch1, + number_string_cache, + Operand(scratch1, LSL, kPointerSizeLog2 + 1)); + + Register probe = mask; + ldr(probe, FieldMemOperand(scratch1, FixedArray::kHeaderSize)); + JumpIfSmi(probe, not_found); + sub(scratch2, object, Operand(kHeapObjectTag)); + vldr(d0, scratch2, HeapNumber::kValueOffset); + sub(probe, probe, Operand(kHeapObjectTag)); + vldr(d1, probe, HeapNumber::kValueOffset); + VFPCompareAndSetFlags(d0, d1); + b(ne, not_found); // The cache did not contain this value. + b(&load_result_from_cache); + + bind(&is_smi); + Register scratch = scratch1; + and_(scratch, mask, Operand(object, ASR, 1)); + // Calculate address of entry in string cache: each entry consists + // of two pointer sized fields. + add(scratch, + number_string_cache, + Operand(scratch, LSL, kPointerSizeLog2 + 1)); + + // Check if the entry is the smi we are looking for. + ldr(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize)); + cmp(object, probe); + b(ne, not_found); + + // Get the result from the cache. + bind(&load_result_from_cache); + ldr(result, FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize)); + IncrementCounter(isolate()->counters()->number_to_string_native(), + 1, + scratch1, + scratch2); +} + + void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings( Register first, Register second, @@ -3191,20 +3301,19 @@ void MacroAssembler::CopyBytes(Register src, Register dst, Register length, Register scratch) { - Label align_loop, align_loop_1, word_loop, byte_loop, byte_loop_1, done; + Label align_loop_1, word_loop, byte_loop, byte_loop_1, done; // Align src before copying in word size chunks. - bind(&align_loop); - cmp(length, Operand::Zero()); - b(eq, &done); + cmp(length, Operand(kPointerSize)); + b(le, &byte_loop); + bind(&align_loop_1); tst(src, Operand(kPointerSize - 1)); b(eq, &word_loop); ldrb(scratch, MemOperand(src, 1, PostIndex)); strb(scratch, MemOperand(dst, 1, PostIndex)); sub(length, length, Operand(1), SetCC); - b(ne, &byte_loop_1); - + b(&align_loop_1); // Copy bytes in word size chunks. 
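The LookupNumberStringCache helper above probes a FixedArray of (number, string) pairs. A small sketch, not V8's code, of the two hash computations it performs:

#include <cstdint>
#include <cstdio>
#include <cstring>

// A smi hashes to its own value; a heap number hashes to the xor of the
// two 32-bit halves of its double. Both are masked by half the cache
// length minus one, since each entry occupies two slots.
uint32_t HashDouble(double d, uint32_t mask) {
  uint32_t halves[2];
  std::memcpy(halves, &d, sizeof(halves));
  return (halves[0] ^ halves[1]) & mask;
}

uint32_t HashSmi(int32_t value, uint32_t mask) {
  return static_cast<uint32_t>(value) & mask;
}

int main() {
  const uint32_t kCacheSlots = 128;           // FixedArray length.
  const uint32_t mask = kCacheSlots / 2 - 1;  // Two slots per entry.
  std::printf("%u %u\n", HashDouble(2.5, mask), HashSmi(42, mask));
}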
bind(&word_loop); if (emit_debug_code()) { @@ -3776,8 +3885,8 @@ void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) { void MacroAssembler::TestJSArrayForAllocationMemento( Register receiver_reg, - Register scratch_reg) { - Label no_memento_available; + Register scratch_reg, + Label* no_memento_found) { ExternalReference new_space_start = ExternalReference::new_space_start(isolate()); ExternalReference new_space_allocation_top = @@ -3785,15 +3894,14 @@ void MacroAssembler::TestJSArrayForAllocationMemento( add(scratch_reg, receiver_reg, Operand(JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag)); cmp(scratch_reg, Operand(new_space_start)); - b(lt, &no_memento_available); + b(lt, no_memento_found); mov(ip, Operand(new_space_allocation_top)); ldr(ip, MemOperand(ip)); cmp(scratch_reg, ip); - b(gt, &no_memento_available); + b(gt, no_memento_found); ldr(scratch_reg, MemOperand(scratch_reg, -AllocationMemento::kSize)); cmp(scratch_reg, - Operand(Handle<Map>(isolate()->heap()->allocation_memento_map()))); - bind(&no_memento_available); + Operand(isolate()->factory()->allocation_memento_map())); } diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h index 9abd5a0c3d..32471443bb 100644 --- a/deps/v8/src/arm/macro-assembler-arm.h +++ b/deps/v8/src/arm/macro-assembler-arm.h @@ -45,8 +45,9 @@ inline MemOperand FieldMemOperand(Register object, int offset) { // Give alias names to registers -const Register cp = { 8 }; // JavaScript context pointer -const Register kRootRegister = { 10 }; // Roots array pointer. +const Register pp = { kRegister_r7_Code }; // Constant pool pointer. +const Register cp = { kRegister_r8_Code }; // JavaScript context pointer. +const Register kRootRegister = { kRegister_r10_Code }; // Roots array pointer. // Flags used for AllocateHeapNumber enum TaggingMode { @@ -169,17 +170,6 @@ class MacroAssembler: public Assembler { Heap::RootListIndex index, Condition cond = al); - void LoadHeapObject(Register dst, Handle<HeapObject> object); - - void LoadObject(Register result, Handle<Object> object) { - AllowDeferredHandleDereference heap_object_check; - if (object->IsHeapObject()) { - LoadHeapObject(result, Handle<HeapObject>::cast(object)); - } else { - Move(result, object); - } - } - // --------------------------------------------------------------------------- // GC Support @@ -469,8 +459,13 @@ class MacroAssembler: public Assembler { void VFPEnsureFPSCRState(Register scratch); // If the value is a NaN, canonicalize the value else, do nothing. - void VFPCanonicalizeNaN(const DwVfpRegister value, + void VFPCanonicalizeNaN(const DwVfpRegister dst, + const DwVfpRegister src, const Condition cond = al); + void VFPCanonicalizeNaN(const DwVfpRegister value, + const Condition cond = al) { + VFPCanonicalizeNaN(value, value, cond); + } // Compare double values and move the result to the normal condition flags. void VFPCompareAndSetFlags(const DwVfpRegister src1, @@ -533,6 +528,8 @@ class MacroAssembler: public Assembler { LowDwVfpRegister double_scratch1, Label* not_int32); + // Generates function and stub prologue code. + void Prologue(PrologueFrameMode frame_mode); // Enter exit frame. // stack_space - extra stack space, used for alignment before call to C. @@ -541,7 +538,9 @@ class MacroAssembler: public Assembler { // Leave the current exit frame. Expects the return value in r0. // Expect the number of values, pushed prior to the exit frame, to // remove in a register (or no_reg, if there is nothing to remove). 
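TestJSArrayForAllocationMemento, changed above to branch to a caller-supplied no_memento_found label, boils down to a range check against new space plus a map compare. An illustrative model with plain integers standing in for tagged addresses:

#include <cstdint>
#include <cstdio>

// Model: the memento, if any, sits directly behind the JSArray. It can
// only be valid if that address lies inside new space and below the
// current allocation top; the final test compares the candidate map word
// against the allocation-memento map.
struct NewSpace { uintptr_t start; uintptr_t top; };

bool HasAllocationMemento(const NewSpace& space,
                          uintptr_t memento_end,   // array + sizes, as above
                          uintptr_t candidate_map,
                          uintptr_t memento_map) {
  if (memento_end < space.start) return false;  // Not in new space.
  if (memento_end > space.top) return false;    // Past the allocation top.
  return candidate_map == memento_map;          // b(eq, memento_found)
}

int main() {
  NewSpace space = {0x1000, 0x2000};
  std::printf("%d\n", HasAllocationMemento(space, 0x1800, 7, 7));  // 1
  std::printf("%d\n", HasAllocationMemento(space, 0x2400, 7, 7));  // 0
}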
- void LeaveExitFrame(bool save_doubles, Register argument_count); + void LeaveExitFrame(bool save_doubles, + Register argument_count, + bool restore_context); // Get the actual activation frame alignment for target environment. static int ActivationFrameAlignment(); @@ -1037,11 +1036,18 @@ class MacroAssembler: public Assembler { void TailCallStub(CodeStub* stub, Condition cond = al); // Call a runtime routine. - void CallRuntime(const Runtime::Function* f, int num_arguments); - void CallRuntimeSaveDoubles(Runtime::FunctionId id); + void CallRuntime(const Runtime::Function* f, + int num_arguments, + SaveFPRegsMode save_doubles = kDontSaveFPRegs); + void CallRuntimeSaveDoubles(Runtime::FunctionId id) { + const Runtime::Function* function = Runtime::FunctionForId(id); + CallRuntime(function, function->nargs, kSaveFPRegs); + } // Convenience function: Same as above, but takes the fid instead. - void CallRuntime(Runtime::FunctionId fid, int num_arguments); + void CallRuntime(Runtime::FunctionId id, int num_arguments) { + CallRuntime(Runtime::FunctionForId(id), num_arguments); + } // Convenience function: call an external reference. void CallExternalReference(const ExternalReference& ext, @@ -1111,7 +1117,8 @@ class MacroAssembler: public Assembler { ExternalReference thunk_ref, Register thunk_last_arg, int stack_space, - int return_value_offset_from_fp); + MemOperand return_value_operand, + MemOperand* context_restore_operand); // Jump to a runtime routine. void JumpToExternalReference(const ExternalReference& builtin); @@ -1286,6 +1293,18 @@ class MacroAssembler: public Assembler { // --------------------------------------------------------------------------- // String utilities + // Generate code to do a lookup in the number string cache. If the number in + // the register object is found in the cache the generated code falls through + // with the result in the result register. The object and the result register + // can be the same. If the number is not found in the cache the code jumps to + // the label not_found with only the content of register object unchanged. + void LookupNumberStringCache(Register object, + Register result, + Register scratch1, + Register scratch2, + Register scratch3, + Label* not_found); + // Checks if both objects are sequential ASCII strings and jumps to label // if either is not. Assumes that neither object is a smi. void JumpIfNonSmisNotBothSequentialAsciiStrings(Register object1, @@ -1360,9 +1379,20 @@ class MacroAssembler: public Assembler { // to another type. // On entry, receiver_reg should point to the array object. // scratch_reg gets clobbered. - // If allocation info is present, condition flags are set to eq + // If allocation info is present, condition flags are set to eq. 
void TestJSArrayForAllocationMemento(Register receiver_reg, - Register scratch_reg); + Register scratch_reg, + Label* no_memento_found); + + void JumpIfJSArrayHasAllocationMemento(Register receiver_reg, + Register scratch_reg, + Label* memento_found) { + Label no_memento_found; + TestJSArrayForAllocationMemento(receiver_reg, scratch_reg, + &no_memento_found); + b(eq, memento_found); + bind(&no_memento_found); + } private: void CallCFunctionHelper(Register function, diff --git a/deps/v8/src/arm/regexp-macro-assembler-arm.h b/deps/v8/src/arm/regexp-macro-assembler-arm.h index 9f07489e1f..8d9d515c76 100644 --- a/deps/v8/src/arm/regexp-macro-assembler-arm.h +++ b/deps/v8/src/arm/regexp-macro-assembler-arm.h @@ -223,11 +223,6 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler { // are always 0..num_saved_registers_-1) int num_saved_registers_; - // Manage a small pre-allocated pool for writing label targets - // to for pushing backtrack addresses. - int backtrack_constant_pool_offset_; - int backtrack_constant_pool_capacity_; - // Labels used internally. Label entry_label_; Label start_label_; diff --git a/deps/v8/src/arm/simulator-arm.cc b/deps/v8/src/arm/simulator-arm.cc index def1818630..461d032b99 100644 --- a/deps/v8/src/arm/simulator-arm.cc +++ b/deps/v8/src/arm/simulator-arm.cc @@ -912,6 +912,12 @@ double Simulator::get_double_from_register_pair(int reg) { } +void Simulator::set_register_pair_from_double(int reg, double* value) { + ASSERT((reg >= 0) && (reg < num_registers) && ((reg % 2) == 0)); + memcpy(registers_ + reg, value, sizeof(*value)); +} + + void Simulator::set_dw_register(int dreg, const int* dbl) { ASSERT((dreg >= 0) && (dreg < num_d_registers)); registers_[dreg] = dbl[0]; @@ -1026,27 +1032,22 @@ ReturnType Simulator::GetFromVFPRegister(int reg_index) { } -// Runtime FP routines take up to two double arguments and zero -// or one integer arguments. All are consructed here. -// from r0-r3 or d0 and d1. +// Runtime FP routines take: +// - two double arguments +// - one double argument and zero or one integer arguments. +// All are constructed here from r0-r3 or d0, d1 and r0. void Simulator::GetFpArgs(double* x, double* y, int32_t* z) { if (use_eabi_hardfloat()) { - *x = vfp_registers_[0]; - *y = vfp_registers_[1]; - *z = registers_[1]; + *x = get_double_from_d_register(0); + *y = get_double_from_d_register(1); + *z = get_register(0); } else { - // We use a char buffer to get around the strict-aliasing rules which - // otherwise allow the compiler to optimize away the copy. - char buffer[sizeof(*x)]; // Registers 0 and 1 -> x. - OS::MemCopy(buffer, registers_, sizeof(*x)); - OS::MemCopy(x, buffer, sizeof(*x)); + *x = get_double_from_register_pair(0); // Register 2 and 3 -> y. - OS::MemCopy(buffer, registers_ + 2, sizeof(*y)); - OS::MemCopy(y, buffer, sizeof(*y)); + *y = get_double_from_register_pair(2); // Register 2 -> z - memcpy(buffer, registers_ + 2, sizeof(*z)); - memcpy(z, buffer, sizeof(*z)); + *z = get_register(2); } } @@ -1718,32 +1719,6 @@ void Simulator::SoftwareInterrupt(Instruction* instr) { (redirection->type() == ExternalReference::BUILTIN_COMPARE_CALL) || (redirection->type() == ExternalReference::BUILTIN_FP_CALL) || (redirection->type() == ExternalReference::BUILTIN_FP_INT_CALL); - if (use_eabi_hardfloat()) { - // With the hard floating point calling convention, double - // arguments are passed in VFP registers. Fetch the arguments - // from there and call the builtin using soft floating point - // convention.
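The simulator changes above replace the hand-rolled byte buffers with register-pair copies. A compact, standalone model of how a soft-float double round-trips through two 32-bit core registers (the names mirror the simulator's helpers, but this is not V8 code):

#include <cstdint>
#include <cstdio>
#include <cstring>

// With the soft-float ABI a double occupies two consecutive core
// registers, so the helpers simply memcpy a register pair to and from
// a double value.
int32_t registers[4];

void set_register_pair_from_double(int reg, const double* value) {
  std::memcpy(registers + reg, value, sizeof(*value));  // r{reg}, r{reg+1}
}

double get_double_from_register_pair(int reg) {
  double result;
  std::memcpy(&result, registers + reg, sizeof(result));
  return result;
}

int main() {
  double d0 = 3.25, d1 = -1.5;
  set_register_pair_from_double(0, &d0);  // d0 -> r0:r1
  set_register_pair_from_double(2, &d1);  // d1 -> r2:r3
  std::printf("%f %f\n", get_double_from_register_pair(0),
              get_double_from_register_pair(2));
}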
- switch (redirection->type()) { - case ExternalReference::BUILTIN_FP_FP_CALL: - case ExternalReference::BUILTIN_COMPARE_CALL: - arg0 = vfp_registers_[0]; - arg1 = vfp_registers_[1]; - arg2 = vfp_registers_[2]; - arg3 = vfp_registers_[3]; - break; - case ExternalReference::BUILTIN_FP_CALL: - arg0 = vfp_registers_[0]; - arg1 = vfp_registers_[1]; - break; - case ExternalReference::BUILTIN_FP_INT_CALL: - arg0 = vfp_registers_[0]; - arg1 = vfp_registers_[1]; - arg2 = get_register(0); - break; - default: - break; - } - } // This is dodgy but it works because the C entry stubs are never moved. // See comment in codegen-arm.cc and bug 1242173. int32_t saved_lr = get_register(lr); @@ -3816,19 +3791,27 @@ int32_t Simulator::Call(byte* entry, int argument_count, ...) { } -double Simulator::CallFP(byte* entry, double d0, double d1) { +void Simulator::CallFP(byte* entry, double d0, double d1) { if (use_eabi_hardfloat()) { set_d_register_from_double(0, d0); set_d_register_from_double(1, d1); } else { - int buffer[2]; - ASSERT(sizeof(buffer[0]) * 2 == sizeof(d0)); - OS::MemCopy(buffer, &d0, sizeof(d0)); - set_dw_register(0, buffer); - OS::MemCopy(buffer, &d1, sizeof(d1)); - set_dw_register(2, buffer); + set_register_pair_from_double(0, &d0); + set_register_pair_from_double(2, &d1); } CallInternal(entry); +} + + +int32_t Simulator::CallFPReturnsInt(byte* entry, double d0, double d1) { + CallFP(entry, d0, d1); + int32_t result = get_register(r0); + return result; +} + + +double Simulator::CallFPReturnsDouble(byte* entry, double d0, double d1) { + CallFP(entry, d0, d1); if (use_eabi_hardfloat()) { return get_double_from_d_register(0); } else { diff --git a/deps/v8/src/arm/simulator-arm.h b/deps/v8/src/arm/simulator-arm.h index 7fca7432bf..e392c5cb36 100644 --- a/deps/v8/src/arm/simulator-arm.h +++ b/deps/v8/src/arm/simulator-arm.h @@ -163,6 +163,7 @@ class Simulator { void set_register(int reg, int32_t value); int32_t get_register(int reg) const; double get_double_from_register_pair(int reg); + void set_register_pair_from_double(int reg, double* value); void set_dw_register(int dreg, const int* dbl); // Support for VFP. @@ -220,7 +221,9 @@ class Simulator { // which sets up the simulator state and grabs the result on return. int32_t Call(byte* entry, int argument_count, ...); // Alternative: call a 2-argument double function. - double CallFP(byte* entry, double d0, double d1); + void CallFP(byte* entry, double d0, double d1); + int32_t CallFPReturnsInt(byte* entry, double d0, double d1); + double CallFPReturnsDouble(byte* entry, double d0, double d1); // Push an address onto the JS stack. 
uintptr_t PushAddress(uintptr_t address); @@ -444,6 +447,10 @@ class Simulator { reinterpret_cast<Object*>(Simulator::current(Isolate::Current())->Call( \ FUNCTION_ADDR(entry), 5, p0, p1, p2, p3, p4)) +#define CALL_GENERATED_FP_INT(entry, p0, p1) \ + Simulator::current(Isolate::Current())->CallFPReturnsInt( \ + FUNCTION_ADDR(entry), p0, p1) + #define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \ Simulator::current(Isolate::Current())->Call( \ entry, 10, p0, p1, p2, p3, NULL, p4, p5, p6, p7, p8) diff --git a/deps/v8/src/arm/stub-cache-arm.cc b/deps/v8/src/arm/stub-cache-arm.cc index 085af3f2b7..004e067c82 100644 --- a/deps/v8/src/arm/stub-cache-arm.cc +++ b/deps/v8/src/arm/stub-cache-arm.cc @@ -380,31 +380,27 @@ void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm, Register receiver, Register scratch1, Register scratch2, - Label* miss, - bool support_wrappers) { + Label* miss) { Label check_wrapper; // Check if the object is a string leaving the instance type in the // scratch1 register. - GenerateStringCheck(masm, receiver, scratch1, scratch2, miss, - support_wrappers ? &check_wrapper : miss); + GenerateStringCheck(masm, receiver, scratch1, scratch2, miss, &check_wrapper); // Load length directly from the string. __ ldr(r0, FieldMemOperand(receiver, String::kLengthOffset)); __ Ret(); - if (support_wrappers) { - // Check if the object is a JSValue wrapper. - __ bind(&check_wrapper); - __ cmp(scratch1, Operand(JS_VALUE_TYPE)); - __ b(ne, miss); + // Check if the object is a JSValue wrapper. + __ bind(&check_wrapper); + __ cmp(scratch1, Operand(JS_VALUE_TYPE)); + __ b(ne, miss); - // Unwrap the value and check if the wrapped value is a string. - __ ldr(scratch1, FieldMemOperand(receiver, JSValue::kValueOffset)); - GenerateStringCheck(masm, scratch1, scratch2, scratch2, miss, miss); - __ ldr(r0, FieldMemOperand(scratch1, String::kLengthOffset)); - __ Ret(); - } + // Unwrap the value and check if the wrapped value is a string. + __ ldr(scratch1, FieldMemOperand(receiver, JSValue::kValueOffset)); + GenerateStringCheck(masm, scratch1, scratch2, scratch2, miss, miss); + __ ldr(r0, FieldMemOperand(scratch1, String::kLengthOffset)); + __ Ret(); } @@ -437,7 +433,7 @@ static void GenerateCheckPropertyCell(MacroAssembler* masm, } -void BaseStoreStubCompiler::GenerateNegativeHolderLookup( +void StoreStubCompiler::GenerateNegativeHolderLookup( MacroAssembler* masm, Handle<JSObject> holder, Register holder_reg, @@ -457,19 +453,19 @@ void BaseStoreStubCompiler::GenerateNegativeHolderLookup( // When leaving generated code after success, the receiver_reg and name_reg // may be clobbered. Upon branch to miss_label, the receiver and name // registers have their original values. 
-void BaseStoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm, - Handle<JSObject> object, - LookupResult* lookup, - Handle<Map> transition, - Handle<Name> name, - Register receiver_reg, - Register storage_reg, - Register value_reg, - Register scratch1, - Register scratch2, - Register scratch3, - Label* miss_label, - Label* slow) { +void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm, + Handle<JSObject> object, + LookupResult* lookup, + Handle<Map> transition, + Handle<Name> name, + Register receiver_reg, + Register storage_reg, + Register value_reg, + Register scratch1, + Register scratch2, + Register scratch3, + Label* miss_label, + Label* slow) { // r0 : value Label exit; @@ -481,7 +477,7 @@ void BaseStoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm, if (details.type() == CONSTANT) { Handle<Object> constant(descriptors->GetValue(descriptor), masm->isolate()); - __ LoadObject(scratch1, constant); + __ Move(scratch1, constant); __ cmp(value_reg, scratch1); __ b(ne, miss_label); } else if (FLAG_track_fields && representation.IsSmi()) { @@ -621,15 +617,15 @@ void BaseStoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm, // When leaving generated code after success, the receiver_reg and name_reg // may be clobbered. Upon branch to miss_label, the receiver and name // registers have their original values. -void BaseStoreStubCompiler::GenerateStoreField(MacroAssembler* masm, - Handle<JSObject> object, - LookupResult* lookup, - Register receiver_reg, - Register name_reg, - Register value_reg, - Register scratch1, - Register scratch2, - Label* miss_label) { +void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm, + Handle<JSObject> object, + LookupResult* lookup, + Register receiver_reg, + Register name_reg, + Register value_reg, + Register scratch1, + Register scratch2, + Label* miss_label) { // r0 : value Label exit; @@ -740,9 +736,9 @@ void BaseStoreStubCompiler::GenerateStoreField(MacroAssembler* masm, } -void BaseStoreStubCompiler::GenerateRestoreName(MacroAssembler* masm, - Label* label, - Handle<Name> name) { +void StoreStubCompiler::GenerateRestoreName(MacroAssembler* masm, + Label* label, + Handle<Name> name) { if (!label->is_unused()) { __ bind(label); __ mov(this->name(), Operand(name)); @@ -843,25 +839,26 @@ static void FreeSpaceForFastApiCall(MacroAssembler* masm) { static void GenerateFastApiDirectCall(MacroAssembler* masm, const CallOptimization& optimization, - int argc) { + int argc, + bool restore_context) { // ----------- S t a t e ------------- - // -- sp[0] : holder (set by CheckPrototypes) - // -- sp[4] : callee JS function - // -- sp[8] : call data - // -- sp[12] : isolate - // -- sp[16] : ReturnValue default value - // -- sp[20] : ReturnValue - // -- sp[24] : last JS argument + // -- sp[0] - sp[24] : FunctionCallbackInfo, incl. + // : holder (set by CheckPrototypes) + // -- sp[28] : last JS argument // -- ... - // -- sp[(argc + 5) * 4] : first JS argument - // -- sp[(argc + 6) * 4] : receiver + // -- sp[(argc + 6) * 4] : first JS argument + // -- sp[(argc + 7) * 4] : receiver // ----------------------------------- + typedef FunctionCallbackArguments FCA; + // Save calling context. + __ str(cp, MemOperand(sp, FCA::kContextSaveIndex * kPointerSize)); // Get the function and setup the context. 
Handle<JSFunction> function = optimization.constant_function(); - __ LoadHeapObject(r5, function); + __ Move(r5, function); __ ldr(cp, FieldMemOperand(r5, JSFunction::kContextOffset)); + __ str(r5, MemOperand(sp, FCA::kCalleeIndex * kPointerSize)); - // Pass the additional arguments. + // Construct the FunctionCallbackInfo. Handle<CallHandlerInfo> api_call_info = optimization.api_call_info(); Handle<Object> call_data(api_call_info->data(), masm->isolate()); if (masm->isolate()->heap()->InNewSpace(*call_data)) { @@ -870,15 +867,18 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm, } else { __ Move(r6, call_data); } - __ mov(r7, Operand(ExternalReference::isolate_address(masm->isolate()))); - // Store JS function, call data, isolate ReturnValue default and ReturnValue. - __ stm(ib, sp, r5.bit() | r6.bit() | r7.bit()); + // Store call data. + __ str(r6, MemOperand(sp, FCA::kDataIndex * kPointerSize)); + // Store isolate. + __ mov(r5, Operand(ExternalReference::isolate_address(masm->isolate()))); + __ str(r5, MemOperand(sp, FCA::kIsolateIndex * kPointerSize)); + // Store ReturnValue default and ReturnValue. __ LoadRoot(r5, Heap::kUndefinedValueRootIndex); - __ str(r5, MemOperand(sp, 4 * kPointerSize)); - __ str(r5, MemOperand(sp, 5 * kPointerSize)); + __ str(r5, MemOperand(sp, FCA::kReturnValueOffset * kPointerSize)); + __ str(r5, MemOperand(sp, FCA::kReturnValueDefaultValueIndex * kPointerSize)); // Prepare arguments. - __ add(r2, sp, Operand(5 * kPointerSize)); + __ mov(r2, sp); // Allocate the v8::Arguments structure in the arguments' space since // it's not controlled by GC. @@ -887,18 +887,18 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm, FrameScope frame_scope(masm, StackFrame::MANUAL); __ EnterExitFrame(false, kApiStackSpace); - // r0 = v8::Arguments& + // r0 = FunctionCallbackInfo& // Arguments is after the return address. __ add(r0, sp, Operand(1 * kPointerSize)); - // v8::Arguments::implicit_args_ + // FunctionCallbackInfo::implicit_args_ __ str(r2, MemOperand(r0, 0 * kPointerSize)); - // v8::Arguments::values_ - __ add(ip, r2, Operand(argc * kPointerSize)); + // FunctionCallbackInfo::values_ + __ add(ip, r2, Operand((kFastApiCallArguments - 1 + argc) * kPointerSize)); __ str(ip, MemOperand(r0, 1 * kPointerSize)); - // v8::Arguments::length_ = argc + // FunctionCallbackInfo::length_ = argc __ mov(ip, Operand(argc)); __ str(ip, MemOperand(r0, 2 * kPointerSize)); - // v8::Arguments::is_construct_call = 0 + // FunctionCallbackInfo::is_construct_call = 0 __ mov(ip, Operand::Zero()); __ str(ip, MemOperand(r0, 3 * kPointerSize)); @@ -916,12 +916,19 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm, masm->isolate()); AllowExternalCallThatCantCauseGC scope(masm); + MemOperand context_restore_operand( + fp, (2 + FCA::kContextSaveIndex) * kPointerSize); + MemOperand return_value_operand(fp, + (2 + FCA::kReturnValueOffset) * kPointerSize); + __ CallApiFunctionAndReturn(ref, function_address, thunk_ref, r1, kStackUnwindSpace, - kFastApiCallArguments + 1); + return_value_operand, + restore_context ? + &context_restore_operand : NULL); } @@ -935,11 +942,12 @@ static void GenerateFastApiCall(MacroAssembler* masm, ASSERT(optimization.is_simple_api_call()); ASSERT(!receiver.is(scratch)); + typedef FunctionCallbackArguments FCA; const int stack_space = kFastApiCallArguments + argc + 1; // Assign stack space for the call arguments. __ sub(sp, sp, Operand(stack_space * kPointerSize)); // Write holder to stack frame. 
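GenerateFastApiDirectCall above now writes every implicit argument of the FunctionCallbackInfo block to a named FCA:: index instead of ad-hoc stack offsets. The sketch below uses hypothetical index values, not V8's real constants; the point is only that holder, callee, call data, isolate, both return-value slots, and the saved context each live at a fixed, named slot that the stub and the C++ side agree on:

#include <cstdio>

// Hypothetical slot indices for illustration only.
enum FcaIndex {
  kHolderIndex = 0, kIsolateIndex, kReturnValueDefaultValueIndex,
  kReturnValueOffset, kDataIndex, kCalleeIndex, kContextSaveIndex,
  kArgsLength
};

int main() {
  const char* frame[kArgsLength] = {};
  frame[kHolderIndex] = "holder";
  frame[kIsolateIndex] = "isolate";
  frame[kReturnValueDefaultValueIndex] = "undefined";
  frame[kReturnValueOffset] = "undefined";
  frame[kDataIndex] = "call data";
  frame[kCalleeIndex] = "callee JS function";
  frame[kContextSaveIndex] = "saved cp";
  for (int i = 0; i < kArgsLength; ++i)
    std::printf("sp[%d] : %s\n", i, frame[i]);
}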
- __ str(receiver, MemOperand(sp, 0)); + __ str(receiver, MemOperand(sp, FCA::kHolderIndex * kPointerSize)); // Write receiver to stack frame. int index = stack_space - 1; __ str(receiver, MemOperand(sp, index * kPointerSize)); @@ -950,7 +958,7 @@ static void GenerateFastApiCall(MacroAssembler* masm, __ str(receiver, MemOperand(sp, index-- * kPointerSize)); } - GenerateFastApiDirectCall(masm, optimization, argc); + GenerateFastApiDirectCall(masm, optimization, argc, true); } @@ -1064,7 +1072,8 @@ class CallInterceptorCompiler BASE_EMBEDDED { // Invoke function. if (can_do_fast_api_call) { - GenerateFastApiDirectCall(masm, optimization, arguments_.immediate()); + GenerateFastApiDirectCall( + masm, optimization, arguments_.immediate(), false); } else { CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_) ? CALL_AS_FUNCTION @@ -1202,8 +1211,9 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object, Register reg = object_reg; int depth = 0; + typedef FunctionCallbackArguments FCA; if (save_at_depth == depth) { - __ str(reg, MemOperand(sp)); + __ str(reg, MemOperand(sp, FCA::kHolderIndex * kPointerSize)); } // Check the maps in the prototype chain. @@ -1262,7 +1272,7 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object, } if (save_at_depth == depth) { - __ str(reg, MemOperand(sp)); + __ str(reg, MemOperand(sp, FCA::kHolderIndex * kPointerSize)); } // Go to the next object in the prototype chain. @@ -1294,9 +1304,9 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object, } -void BaseLoadStubCompiler::HandlerFrontendFooter(Handle<Name> name, - Label* success, - Label* miss) { +void LoadStubCompiler::HandlerFrontendFooter(Handle<Name> name, + Label* success, + Label* miss) { if (!miss->is_unused()) { __ b(success); __ bind(miss); @@ -1305,9 +1315,9 @@ void BaseLoadStubCompiler::HandlerFrontendFooter(Handle<Name> name, } -void BaseStoreStubCompiler::HandlerFrontendFooter(Handle<Name> name, - Label* success, - Label* miss) { +void StoreStubCompiler::HandlerFrontendFooter(Handle<Name> name, + Label* success, + Label* miss) { if (!miss->is_unused()) { __ b(success); GenerateRestoreName(masm(), miss, name); @@ -1316,7 +1326,7 @@ void BaseStoreStubCompiler::HandlerFrontendFooter(Handle<Name> name, } -Register BaseLoadStubCompiler::CallbackHandlerFrontend( +Register LoadStubCompiler::CallbackHandlerFrontend( Handle<JSObject> object, Register object_reg, Handle<JSObject> holder, @@ -1363,7 +1373,7 @@ Register BaseLoadStubCompiler::CallbackHandlerFrontend( } -void BaseLoadStubCompiler::NonexistentHandlerFrontend( +void LoadStubCompiler::NonexistentHandlerFrontend( Handle<JSObject> object, Handle<JSObject> last, Handle<Name> name, @@ -1383,10 +1393,10 @@ void BaseLoadStubCompiler::NonexistentHandlerFrontend( } -void BaseLoadStubCompiler::GenerateLoadField(Register reg, - Handle<JSObject> holder, - PropertyIndex field, - Representation representation) { +void LoadStubCompiler::GenerateLoadField(Register reg, + Handle<JSObject> holder, + PropertyIndex field, + Representation representation) { if (!reg.is(receiver())) __ mov(receiver(), reg); if (kind() == Code::LOAD_IC) { LoadFieldStub stub(field.is_inobject(holder), @@ -1402,36 +1412,36 @@ void BaseLoadStubCompiler::GenerateLoadField(Register reg, } -void BaseLoadStubCompiler::GenerateLoadConstant(Handle<Object> value) { +void LoadStubCompiler::GenerateLoadConstant(Handle<Object> value) { // Return the constant value. 
- __ LoadObject(r0, value); + __ Move(r0, value); __ Ret(); } -void BaseLoadStubCompiler::GenerateLoadCallback( +void LoadStubCompiler::GenerateLoadCallback( const CallOptimization& call_optimization) { GenerateFastApiCall( masm(), call_optimization, receiver(), scratch3(), 0, NULL); } -void BaseLoadStubCompiler::GenerateLoadCallback( +void LoadStubCompiler::GenerateLoadCallback( Register reg, Handle<ExecutableAccessorInfo> callback) { // Build AccessorInfo::args_ list on the stack and push property name below // the exit frame to make GC aware of them and store pointers to them. - STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 0); - STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == -1); - STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == -2); - STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == -3); - STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == -4); - STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == -5); + STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 0); + STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 1); + STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 2); + STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 3); + STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 4); + STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 5); + STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 6); ASSERT(!scratch2().is(reg)); ASSERT(!scratch3().is(reg)); ASSERT(!scratch4().is(reg)); __ push(receiver()); - __ mov(scratch2(), sp); // scratch2 = AccessorInfo::args_ if (heap()->InNewSpace(callback->data())) { __ Move(scratch3(), callback); __ ldr(scratch3(), FieldMemOperand(scratch3(), @@ -1445,19 +1455,21 @@ void BaseLoadStubCompiler::GenerateLoadCallback( __ Push(scratch3(), scratch4()); __ mov(scratch4(), Operand(ExternalReference::isolate_address(isolate()))); - __ Push(scratch4(), reg, name()); + __ Push(scratch4(), reg); + __ mov(scratch2(), sp); // scratch2 = PropertyAccessorInfo::args_ + __ push(name()); __ mov(r0, sp); // r0 = Handle<Name> const int kApiStackSpace = 1; FrameScope frame_scope(masm(), StackFrame::MANUAL); __ EnterExitFrame(false, kApiStackSpace); - // Create AccessorInfo instance on the stack above the exit frame with + // Create PropertyAccessorInfo instance on the stack above the exit frame with // scratch2 (internal::Object** args_) as the data. __ str(scratch2(), MemOperand(sp, 1 * kPointerSize)); __ add(r1, sp, Operand(1 * kPointerSize)); // r1 = AccessorInfo& - const int kStackUnwindSpace = kFastApiCallArguments + 1; + const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1; Address getter_address = v8::ToCData<Address>(callback->getter()); ApiFunction fun(getter_address); @@ -1475,11 +1487,12 @@ void BaseLoadStubCompiler::GenerateLoadCallback( thunk_ref, r2, kStackUnwindSpace, - 6); + MemOperand(fp, 6 * kPointerSize), + NULL); } -void BaseLoadStubCompiler::GenerateLoadInterceptor( +void LoadStubCompiler::GenerateLoadInterceptor( Register holder_reg, Handle<JSObject> object, Handle<JSObject> interceptor_holder, @@ -1839,15 +1852,15 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall( if (FLAG_smi_only_arrays && !FLAG_trace_elements_transitions) { Label fast_object, not_fast_object; - __ CheckFastObjectElements(r3, r7, ¬_fast_object); + __ CheckFastObjectElements(r3, r9, ¬_fast_object); __ jmp(&fast_object); // In case of fast smi-only, convert to fast object, otherwise bail out. 
__ bind(¬_fast_object); - __ CheckFastSmiElements(r3, r7, &call_builtin); + __ CheckFastSmiElements(r3, r9, &call_builtin); - __ ldr(r7, FieldMemOperand(r4, HeapObject::kMapOffset)); + __ ldr(r9, FieldMemOperand(r4, HeapObject::kMapOffset)); __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); - __ cmp(r7, ip); + __ cmp(r9, ip); __ b(eq, &call_builtin); // edx: receiver // r3: map @@ -1855,7 +1868,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall( __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS, r3, - r7, + r9, &try_holey_map); __ mov(r2, receiver); ElementsTransitionGenerator:: @@ -1868,7 +1881,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall( __ LoadTransitionedArrayMapConditional(FAST_HOLEY_SMI_ELEMENTS, FAST_HOLEY_ELEMENTS, r3, - r7, + r9, &call_builtin); __ mov(r2, receiver); ElementsTransitionGenerator:: @@ -1901,7 +1914,6 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall( __ bind(&attempt_to_grow_elements); // r0: array's length + 1. - // r4: elements' length. if (!FLAG_inline_new) { __ b(&call_builtin); @@ -1912,8 +1924,8 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall( // the new element is non-Smi. For now, delegate to the builtin. Label no_fast_elements_check; __ JumpIfSmi(r2, &no_fast_elements_check); - __ ldr(r7, FieldMemOperand(receiver, HeapObject::kMapOffset)); - __ CheckFastObjectElements(r7, r7, &call_builtin); + __ ldr(r9, FieldMemOperand(receiver, HeapObject::kMapOffset)); + __ CheckFastObjectElements(r9, r9, &call_builtin); __ bind(&no_fast_elements_check); ExternalReference new_space_allocation_top = @@ -1925,8 +1937,8 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall( // Load top and check if it is the end of elements. __ add(end_elements, elements, Operand::PointerOffsetFromSmiKey(r0)); __ add(end_elements, end_elements, Operand(kEndElementsOffset)); - __ mov(r7, Operand(new_space_allocation_top)); - __ ldr(r3, MemOperand(r7)); + __ mov(r4, Operand(new_space_allocation_top)); + __ ldr(r3, MemOperand(r4)); __ cmp(end_elements, r3); __ b(ne, &call_builtin); @@ -1938,7 +1950,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall( // We fit and could grow elements. // Update new_space_allocation_top. - __ str(r3, MemOperand(r7)); + __ str(r3, MemOperand(r4)); // Push the argument. __ str(r2, MemOperand(end_elements)); // Fill the rest with holes. @@ -1949,6 +1961,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall( // Update elements' and array's sizes. __ str(r0, FieldMemOperand(receiver, JSArray::kLengthOffset)); + __ ldr(r4, FieldMemOperand(elements, FixedArray::kLengthOffset)); __ add(r4, r4, Operand(Smi::FromInt(kAllocationDelta))); __ str(r4, FieldMemOperand(elements, FixedArray::kLengthOffset)); @@ -2539,7 +2552,7 @@ Handle<Code> CallStubCompiler::CompileFastApiCall( CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4, name, depth, &miss); - GenerateFastApiDirectCall(masm(), optimization, argc); + GenerateFastApiDirectCall(masm(), optimization, argc, false); __ bind(&miss); FreeSpaceForFastApiCall(masm()); @@ -2991,6 +3004,7 @@ void KeyedStoreStubCompiler::GenerateNameCheck(Handle<Name> name, void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm, + Register receiver, Handle<JSFunction> getter) { // ----------- S t a t e ------------- // -- r0 : receiver @@ -3002,7 +3016,7 @@ void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm, if (!getter.is_null()) { // Call the JavaScript getter with the receiver on the stack. 
- __ push(r0); + __ push(receiver); ParameterCount actual(0); ParameterCount expected(getter); __ InvokeFunction(getter, expected, actual, diff --git a/deps/v8/src/array-iterator.js b/deps/v8/src/array-iterator.js index defd7342ab..e734986840 100644 --- a/deps/v8/src/array-iterator.js +++ b/deps/v8/src/array-iterator.js @@ -36,9 +36,9 @@ var ARRAY_ITERATOR_KIND_VALUES = 2; var ARRAY_ITERATOR_KIND_ENTRIES = 3; // The spec draft also has "sparse" but it is never used. -var iteratorObjectSymbol = %CreateSymbol(void 0); -var arrayIteratorNextIndexSymbol = %CreateSymbol(void 0); -var arrayIterationKindSymbol = %CreateSymbol(void 0); +var iteratorObjectSymbol = %CreateSymbol(UNDEFINED); +var arrayIteratorNextIndexSymbol = %CreateSymbol(UNDEFINED); +var arrayIterationKindSymbol = %CreateSymbol(UNDEFINED); function ArrayIterator() {} @@ -74,7 +74,7 @@ function ArrayIteratorNext() { if (index >= length) { iterator[arrayIteratorNextIndexSymbol] = 1 / 0; // Infinity - return CreateIteratorResultObject(void 0, true); + return CreateIteratorResultObject(UNDEFINED, true); } iterator[arrayIteratorNextIndexSymbol] = index + 1; diff --git a/deps/v8/src/array.js b/deps/v8/src/array.js index 5f89ebb7a6..e98d7f5b53 100644 --- a/deps/v8/src/array.js +++ b/deps/v8/src/array.js @@ -399,14 +399,13 @@ function ObservedArrayPop(n) { n--; var value = this[n]; - EnqueueSpliceRecord(this, n, [value], 0); - try { BeginPerformSplice(this); delete this[n]; this.length = n; } finally { EndPerformSplice(this); + EnqueueSpliceRecord(this, n, [value], 0); } return value; @@ -431,7 +430,7 @@ function ArrayPop() { n--; var value = this[n]; - delete this[n]; + Delete(this, ToName(n), true); this.length = n; return value; } @@ -441,8 +440,6 @@ function ObservedArrayPush() { var n = TO_UINT32(this.length); var m = %_ArgumentsLength(); - EnqueueSpliceRecord(this, n, [], m); - try { BeginPerformSplice(this); for (var i = 0; i < m; i++) { @@ -451,6 +448,7 @@ function ObservedArrayPush() { this.length = n + m; } finally { EndPerformSplice(this); + EnqueueSpliceRecord(this, n, [], m); } return this.length; @@ -581,14 +579,13 @@ function ArrayReverse() { function ObservedArrayShift(len) { var first = this[0]; - EnqueueSpliceRecord(this, 0, [first], 0); - try { BeginPerformSplice(this); SimpleMove(this, 0, 1, len, 0); this.length = len - 1; } finally { EndPerformSplice(this); + EnqueueSpliceRecord(this, 0, [first], 0); } return first; @@ -627,8 +624,6 @@ function ObservedArrayUnshift() { var len = TO_UINT32(this.length); var num_arguments = %_ArgumentsLength(); - EnqueueSpliceRecord(this, 0, [], num_arguments); - try { BeginPerformSplice(this); SimpleMove(this, 0, 0, len, num_arguments); @@ -638,6 +633,7 @@ function ObservedArrayUnshift() { this.length = len + num_arguments; } finally { EndPerformSplice(this); + EnqueueSpliceRecord(this, 0, [], num_arguments); } return len + num_arguments; @@ -681,7 +677,7 @@ function ArraySlice(start, end) { var start_i = TO_INTEGER(start); var end_i = len; - if (end !== void 0) end_i = TO_INTEGER(end); + if (!IS_UNDEFINED(end)) end_i = TO_INTEGER(end); if (start_i < 0) { start_i += len; @@ -1020,7 +1016,7 @@ function ArraySort(comparefn) { var proto_length = indices; for (var i = from; i < proto_length; i++) { if (proto.hasOwnProperty(i)) { - obj[i] = void 0; + obj[i] = UNDEFINED; } } } else { @@ -1028,7 +1024,7 @@ function ArraySort(comparefn) { var index = indices[i]; if (!IS_UNDEFINED(index) && from <= index && proto.hasOwnProperty(index)) { - obj[index] = void 0; + obj[index] = UNDEFINED; } } } @@ 
-1065,7 +1061,7 @@ function ArraySort(comparefn) { if (first_undefined < last_defined) { // Fill in hole or undefined. obj[first_undefined] = obj[last_defined]; - obj[last_defined] = void 0; + obj[last_defined] = UNDEFINED; } } // If there were any undefineds in the entire array, first_undefined @@ -1077,12 +1073,12 @@ function ArraySort(comparefn) { // an undefined should be and vice versa. var i; for (i = first_undefined; i < length - num_holes; i++) { - obj[i] = void 0; + obj[i] = UNDEFINED; } for (i = length - num_holes; i < length; i++) { // For compatibility with Webkit, do not expose elements in the prototype. if (i in %GetPrototype(obj)) { - obj[i] = void 0; + obj[i] = UNDEFINED; } else { delete obj[i]; } diff --git a/deps/v8/src/arraybuffer.js b/deps/v8/src/arraybuffer.js index 4a4f570146..c5c98dbe4b 100644 --- a/deps/v8/src/arraybuffer.js +++ b/deps/v8/src/arraybuffer.js @@ -81,6 +81,10 @@ function ArrayBufferSlice(start, end) { return result; } +function ArrayBufferIsView(obj) { + return %ArrayBufferIsView(obj); +} + function SetUpArrayBuffer() { %CheckIsBootstrapping(); @@ -93,6 +97,10 @@ function SetUpArrayBuffer() { InstallGetter($ArrayBuffer.prototype, "byteLength", ArrayBufferGetByteLength); + InstallFunctions($ArrayBuffer, DONT_ENUM, $Array( + "isView", ArrayBufferIsView + )); + InstallFunctions($ArrayBuffer.prototype, DONT_ENUM, $Array( "slice", ArrayBufferSlice )); diff --git a/deps/v8/src/assembler.cc b/deps/v8/src/assembler.cc index fbff62dd65..9ed43601c5 100644 --- a/deps/v8/src/assembler.cc +++ b/deps/v8/src/assembler.cc @@ -98,6 +98,7 @@ struct DoubleConstant BASE_EMBEDDED { double negative_infinity; double canonical_non_hole_nan; double the_hole_nan; + double uint32_bias; }; static DoubleConstant double_constants; @@ -207,6 +208,24 @@ CpuFeatureScope::~CpuFeatureScope() { // ----------------------------------------------------------------------------- +// Implementation of PlatformFeatureScope + +PlatformFeatureScope::PlatformFeatureScope(CpuFeature f) + : old_cross_compile_(CpuFeatures::cross_compile_) { + // CpuFeatures is a global singleton, therefore this is only safe in + // single threaded code.
diff --git a/deps/v8/src/assembler.cc b/deps/v8/src/assembler.cc index fbff62dd65..9ed43601c5 100644 --- a/deps/v8/src/assembler.cc +++ b/deps/v8/src/assembler.cc @@ -98,6 +98,7 @@ struct DoubleConstant BASE_EMBEDDED { double negative_infinity; double canonical_non_hole_nan; double the_hole_nan; + double uint32_bias; }; static DoubleConstant double_constants; @@ -207,6 +208,24 @@ CpuFeatureScope::~CpuFeatureScope() { // ----------------------------------------------------------------------------- +// Implementation of PlatformFeatureScope + +PlatformFeatureScope::PlatformFeatureScope(CpuFeature f) + : old_cross_compile_(CpuFeatures::cross_compile_) { + // CpuFeatures is a global singleton, therefore this is only safe in + // single-threaded code. + ASSERT(Serializer::enabled()); + uint64_t mask = static_cast<uint64_t>(1) << f; + CpuFeatures::cross_compile_ |= mask; +} + + +PlatformFeatureScope::~PlatformFeatureScope() { + CpuFeatures::cross_compile_ = old_cross_compile_; +} + + +// ----------------------------------------------------------------------------- // Implementation of Label int Label::pos() const { @@ -890,6 +909,8 @@ void ExternalReference::SetUp() { double_constants.canonical_non_hole_nan = OS::nan_value(); double_constants.the_hole_nan = BitCast<double>(kHoleNanInt64); double_constants.negative_infinity = -V8_INFINITY; + double_constants.uint32_bias = + static_cast<double>(static_cast<uint32_t>(0xFFFFFFFF)) + 1; math_exp_data_mutex = new Mutex(); } @@ -1067,6 +1088,13 @@ ExternalReference ExternalReference::get_make_code_young_function( } +ExternalReference ExternalReference::get_mark_code_as_executed_function( + Isolate* isolate) { + return ExternalReference(Redirect( + isolate, FUNCTION_ADDR(Code::MarkCodeAsExecuted))); +} + + ExternalReference ExternalReference::date_cache_stamp(Isolate* isolate) { return ExternalReference(isolate->date_cache()->stamp_address()); } @@ -1315,6 +1343,20 @@ ExternalReference ExternalReference::address_of_the_hole_nan() { } +ExternalReference ExternalReference::record_object_allocation_function( + Isolate* isolate) { + return ExternalReference( + Redirect(isolate, + FUNCTION_ADDR(HeapProfiler::RecordObjectAllocationFromMasm))); +} + + +ExternalReference ExternalReference::address_of_uint32_bias() { + return ExternalReference( + reinterpret_cast<void*>(&double_constants.uint32_bias)); +} + + #ifndef V8_INTERPRETED_REGEXP ExternalReference ExternalReference::re_check_stack_guard_state( diff --git a/deps/v8/src/assembler.h b/deps/v8/src/assembler.h index 6b399f2082..f0b7fed909 100644 --- a/deps/v8/src/assembler.h +++ b/deps/v8/src/assembler.h @@ -134,6 +134,18 @@ class CpuFeatureScope BASE_EMBEDDED { }; +// Enable an unsupported feature within a scope for cross-compiling to a +// different CPU. +class PlatformFeatureScope BASE_EMBEDDED { + public: + explicit PlatformFeatureScope(CpuFeature f); + ~PlatformFeatureScope(); + + private: + uint64_t old_cross_compile_; +}; + + // ----------------------------------------------------------------------------- // Labels represent pc locations; they are typically jump or call targets. // After declaration, a label can be freely used to denote known or (yet) @@ -389,6 +401,7 @@ class RelocInfo BASE_EMBEDDED { INLINE(Handle<Cell> target_cell_handle()); INLINE(void set_target_cell(Cell* cell, WriteBarrierMode mode = UPDATE_WRITE_BARRIER)); + INLINE(Handle<Object> code_age_stub_handle(Assembler* origin)); INLINE(Code* code_age_stub()); INLINE(void set_code_age_stub(Code* stub)); @@ -715,6 +728,10 @@ class ExternalReference BASE_EMBEDDED { static ExternalReference date_cache_stamp(Isolate* isolate); static ExternalReference get_make_code_young_function(Isolate* isolate); + static ExternalReference get_mark_code_as_executed_function(Isolate* isolate); + + // New heap object tracking support. + static ExternalReference record_object_allocation_function(Isolate* isolate); // Deoptimization support.
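Two of the assembler additions above are worth unpacking. The uint32_bias constant initialized in ExternalReference::SetUp() works out to 0xFFFFFFFF + 1 = 2^32 = 4294967296: adding it to a double that came from interpreting a 32-bit pattern as a signed (and therefore negative) int32 yields the corresponding uint32 value. And PlatformFeatureScope is an RAII guard around cross-compiled code generation; a hypothetical call site, not taken from this patch (SSE2 stands in for any CpuFeature value):

    {
      // Pretend the *target* CPU supports the feature while this code is
      // assembled. Only valid while serializing a snapshot, which is what
      // the constructor's ASSERT(Serializer::enabled()) enforces.
      PlatformFeatureScope enable_sse2(SSE2);
      // ... emit instructions that require SSE2 ...
    }  // destructor restores the saved cross_compile_ mask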
static ExternalReference new_deoptimizer_function(Isolate* isolate); @@ -798,6 +815,7 @@ class ExternalReference BASE_EMBEDDED { static ExternalReference address_of_negative_infinity(); static ExternalReference address_of_canonical_non_hole_nan(); static ExternalReference address_of_the_hole_nan(); + static ExternalReference address_of_uint32_bias(); static ExternalReference math_sin_double_function(Isolate* isolate); static ExternalReference math_cos_double_function(Isolate* isolate); diff --git a/deps/v8/src/ast.cc b/deps/v8/src/ast.cc index 823dedee09..843f8c8960 100644 --- a/deps/v8/src/ast.cc +++ b/deps/v8/src/ast.cc @@ -82,14 +82,13 @@ bool Expression::IsUndefinedLiteral(Isolate* isolate) { } -VariableProxy::VariableProxy(Isolate* isolate, Variable* var) - : Expression(isolate), +VariableProxy::VariableProxy(Isolate* isolate, Variable* var, int position) + : Expression(isolate, position), name_(var->name()), var_(NULL), // Will be set by the call to BindTo. is_this_(var->is_this()), is_trivial_(false), is_lvalue_(false), - position_(RelocInfo::kNoPosition), interface_(var->interface()) { BindTo(var); } @@ -100,13 +99,12 @@ VariableProxy::VariableProxy(Isolate* isolate, bool is_this, Interface* interface, int position) - : Expression(isolate), + : Expression(isolate, position), name_(name), var_(NULL), is_this_(is_this), is_trivial_(false), is_lvalue_(false), - position_(position), interface_(interface) { // Names must be canonicalized for fast equality checks. ASSERT(name->IsInternalizedString()); @@ -133,15 +131,15 @@ Assignment::Assignment(Isolate* isolate, Expression* target, Expression* value, int pos) - : Expression(isolate), + : Expression(isolate, pos), op_(op), target_(target), value_(value), - pos_(pos), binary_operation_(NULL), assignment_id_(GetNextId(isolate)), is_monomorphic_(false), is_uninitialized_(false), + is_pre_monomorphic_(false), store_mode_(STANDARD_STORE) { } @@ -234,33 +232,6 @@ bool ObjectLiteral::Property::emit_store() { } -bool IsEqualString(void* first, void* second) { - ASSERT((*reinterpret_cast<String**>(first))->IsString()); - ASSERT((*reinterpret_cast<String**>(second))->IsString()); - Handle<String> h1(reinterpret_cast<String**>(first)); - Handle<String> h2(reinterpret_cast<String**>(second)); - return (*h1)->Equals(*h2); -} - - -bool IsEqualNumber(void* first, void* second) { - ASSERT((*reinterpret_cast<Object**>(first))->IsNumber()); - ASSERT((*reinterpret_cast<Object**>(second))->IsNumber()); - - Handle<Object> h1(reinterpret_cast<Object**>(first)); - Handle<Object> h2(reinterpret_cast<Object**>(second)); - if (h1->IsSmi()) { - return h2->IsSmi() && *h1 == *h2; - } - if (h2->IsSmi()) return false; - Handle<HeapNumber> n1 = Handle<HeapNumber>::cast(h1); - Handle<HeapNumber> n2 = Handle<HeapNumber>::cast(h2); - ASSERT(std::isfinite(n1->value())); - ASSERT(std::isfinite(n2->value())); - return n1->value() == n2->value(); -} - - void ObjectLiteral::CalculateEmitStore(Zone* zone) { ZoneAllocationPolicy allocator(zone); @@ -456,14 +427,13 @@ void Property::RecordTypeFeedback(TypeFeedbackOracle* oracle, is_uninitialized_ = oracle->LoadIsUninitialized(this); if (is_uninitialized_) return; + is_pre_monomorphic_ = oracle->LoadIsPreMonomorphic(this); is_monomorphic_ = oracle->LoadIsMonomorphicNormal(this); + ASSERT(!is_pre_monomorphic_ || !is_monomorphic_); receiver_types_.Clear(); if (key()->IsPropertyName()) { FunctionPrototypeStub proto_stub(Code::LOAD_IC); - StringLengthStub string_stub(Code::LOAD_IC, false); - if (oracle->LoadIsStub(this, &string_stub)) 
{ - is_string_length_ = true; - } else if (oracle->LoadIsStub(this, &proto_stub)) { + if (oracle->LoadIsStub(this, &proto_stub)) { is_function_prototype_ = true; } else { Literal* lit_key = key()->AsLiteral(); @@ -474,8 +444,7 @@ void Property::RecordTypeFeedback(TypeFeedbackOracle* oracle, } else if (oracle->LoadIsBuiltin(this, Builtins::kKeyedLoadIC_String)) { is_string_access_ = true; } else if (is_monomorphic_) { - receiver_types_.Add(oracle->LoadMonomorphicReceiverType(this), - zone); + receiver_types_.Add(oracle->LoadMonomorphicReceiverType(this), zone); } else if (oracle->LoadIsPolymorphic(this)) { receiver_types_.Reserve(kMaxKeyedPolymorphism, zone); oracle->CollectKeyedReceiverTypes(PropertyFeedbackId(), &receiver_types_); @@ -490,7 +459,10 @@ void Assignment::RecordTypeFeedback(TypeFeedbackOracle* oracle, TypeFeedbackId id = AssignmentFeedbackId(); is_uninitialized_ = oracle->StoreIsUninitialized(id); if (is_uninitialized_) return; + + is_pre_monomorphic_ = oracle->StoreIsPreMonomorphic(id); is_monomorphic_ = oracle->StoreIsMonomorphicNormal(id); + ASSERT(!is_pre_monomorphic_ || !is_monomorphic_); receiver_types_.Clear(); if (prop->key()->IsPropertyName()) { Literal* lit_key = prop->key()->AsLiteral(); @@ -655,7 +627,7 @@ void Call::RecordTypeFeedback(TypeFeedbackOracle* oracle, holder_ = GetPrototypeForPrimitiveCheck(check_type_, oracle->isolate()); receiver_types_.Add(handle(holder_->map()), oracle->zone()); } -#ifdef DEBUG +#ifdef ENABLE_SLOW_ASSERTS if (FLAG_enable_slow_asserts) { int length = receiver_types_.length(); for (int i = 0; i < length; i++) { @@ -1067,9 +1039,9 @@ CaseClause::CaseClause(Isolate* isolate, Expression* label, ZoneList<Statement*>* statements, int pos) - : label_(label), + : AstNode(pos), + label_(label), statements_(statements), - position_(pos), compare_type_(Type::None(), isolate), compare_id_(AstNode::GetNextId(isolate)), entry_id_(AstNode::GetNextId(isolate)) { @@ -1111,6 +1083,7 @@ REGULAR_NODE(ContinueStatement) REGULAR_NODE(BreakStatement) REGULAR_NODE(ReturnStatement) REGULAR_NODE(SwitchStatement) +REGULAR_NODE(CaseClause) REGULAR_NODE(Conditional) REGULAR_NODE(Literal) REGULAR_NODE(ArrayLiteral) @@ -1146,7 +1119,7 @@ DONT_OPTIMIZE_NODE(WithStatement) DONT_OPTIMIZE_NODE(TryCatchStatement) DONT_OPTIMIZE_NODE(TryFinallyStatement) DONT_OPTIMIZE_NODE(DebuggerStatement) -DONT_OPTIMIZE_NODE(SharedFunctionInfoLiteral) +DONT_OPTIMIZE_NODE(NativeFunctionLiteral) DONT_SELFOPTIMIZE_NODE(DoWhileStatement) DONT_SELFOPTIMIZE_NODE(WhileStatement) diff --git a/deps/v8/src/ast.h b/deps/v8/src/ast.h index c63090687b..b4f7348eee 100644 --- a/deps/v8/src/ast.h +++ b/deps/v8/src/ast.h @@ -97,7 +97,7 @@ namespace internal { #define EXPRESSION_NODE_LIST(V) \ V(FunctionLiteral) \ - V(SharedFunctionInfoLiteral) \ + V(NativeFunctionLiteral) \ V(Conditional) \ V(VariableProxy) \ V(Literal) \ @@ -117,11 +117,15 @@ namespace internal { V(CompareOperation) \ V(ThisFunction) +#define AUXILIARY_NODE_LIST(V) \ + V(CaseClause) + #define AST_NODE_LIST(V) \ DECLARATION_NODE_LIST(V) \ MODULE_NODE_LIST(V) \ STATEMENT_NODE_LIST(V) \ - EXPRESSION_NODE_LIST(V) + EXPRESSION_NODE_LIST(V) \ + AUXILIARY_NODE_LIST(V) // Forward declarations class AstConstructionVisitor; @@ -206,12 +210,12 @@ class AstNode: public ZoneObject { return zone->New(static_cast<int>(size)); } - AstNode() {} - + explicit AstNode(int position): position_(position) {} virtual ~AstNode() {} virtual void Accept(AstVisitor* v) = 0; virtual NodeType node_type() const = 0; + int position() const { return position_; } // 
Type testing & conversion functions overridden by concrete subclasses. #define DECLARE_NODE_FUNCTIONS(type) \ @@ -248,21 +252,17 @@ class AstNode: public ZoneObject { void* operator new(size_t size); friend class CaseClause; // Generates AST IDs. + + int position_; }; class Statement : public AstNode { public: - Statement() : statement_pos_(RelocInfo::kNoPosition) {} + explicit Statement(int position) : AstNode(position) {} bool IsEmpty() { return AsEmptyStatement() != NULL; } virtual bool IsJump() const { return false; } - - void set_statement_pos(int statement_pos) { statement_pos_ = statement_pos; } - int statement_pos() const { return statement_pos_; } - - private: - int statement_pos_; }; @@ -329,11 +329,6 @@ class Expression : public AstNode { kTest }; - virtual int position() const { - UNREACHABLE(); - return 0; - } - virtual bool IsValidLeftHandSide() { return false; } // Helpers for ToBoolean conversion. @@ -387,8 +382,9 @@ class Expression : public AstNode { TypeFeedbackId test_id() const { return test_id_; } protected: - explicit Expression(Isolate* isolate) - : bounds_(Bounds::Unbounded(isolate)), + Expression(Isolate* isolate, int pos) + : AstNode(pos), + bounds_(Bounds::Unbounded(isolate)), id_(GetNextId(isolate)), test_id_(GetNextId(isolate)) {} void set_to_boolean_types(byte types) { to_boolean_types_ = types; } @@ -431,8 +427,10 @@ class BreakableStatement : public Statement { protected: BreakableStatement( - Isolate* isolate, ZoneStringList* labels, BreakableType breakable_type) - : labels_(labels), + Isolate* isolate, ZoneStringList* labels, + BreakableType breakable_type, int position) + : Statement(position), + labels_(labels), breakable_type_(breakable_type), entry_id_(GetNextId(isolate)), exit_id_(GetNextId(isolate)) { @@ -473,8 +471,9 @@ class Block V8_FINAL : public BreakableStatement { ZoneStringList* labels, int capacity, bool is_initializer_block, + int pos, Zone* zone) - : BreakableStatement(isolate, labels, TARGET_FOR_NAMED_ONLY), + : BreakableStatement(isolate, labels, TARGET_FOR_NAMED_ONLY, pos), statements_(capacity, zone), is_initializer_block_(is_initializer_block), scope_(NULL) { @@ -498,8 +497,10 @@ class Declaration : public AstNode { protected: Declaration(VariableProxy* proxy, VariableMode mode, - Scope* scope) - : proxy_(proxy), + Scope* scope, + int pos) + : AstNode(pos), + proxy_(proxy), mode_(mode), scope_(scope) { ASSERT(IsDeclaredVariableMode(mode)); @@ -525,8 +526,9 @@ class VariableDeclaration V8_FINAL : public Declaration { protected: VariableDeclaration(VariableProxy* proxy, VariableMode mode, - Scope* scope) - : Declaration(proxy, mode, scope) { + Scope* scope, + int pos) + : Declaration(proxy, mode, scope, pos) { } }; @@ -545,8 +547,9 @@ class FunctionDeclaration V8_FINAL : public Declaration { FunctionDeclaration(VariableProxy* proxy, VariableMode mode, FunctionLiteral* fun, - Scope* scope) - : Declaration(proxy, mode, scope), + Scope* scope, + int pos) + : Declaration(proxy, mode, scope, pos), fun_(fun) { // At the moment there are no "const functions" in JavaScript... 
ASSERT(mode == VAR || mode == LET); @@ -570,8 +573,9 @@ class ModuleDeclaration V8_FINAL : public Declaration { protected: ModuleDeclaration(VariableProxy* proxy, Module* module, - Scope* scope) - : Declaration(proxy, MODULE, scope), + Scope* scope, + int pos) + : Declaration(proxy, MODULE, scope, pos), module_(module) { } @@ -592,8 +596,9 @@ class ImportDeclaration V8_FINAL : public Declaration { protected: ImportDeclaration(VariableProxy* proxy, Module* module, - Scope* scope) - : Declaration(proxy, LET, scope), + Scope* scope, + int pos) + : Declaration(proxy, LET, scope, pos), module_(module) { } @@ -611,8 +616,8 @@ class ExportDeclaration V8_FINAL : public Declaration { } protected: - ExportDeclaration(VariableProxy* proxy, Scope* scope) - : Declaration(proxy, LET, scope) {} + ExportDeclaration(VariableProxy* proxy, Scope* scope, int pos) + : Declaration(proxy, LET, scope, pos) {} }; @@ -622,11 +627,13 @@ class Module : public AstNode { Block* body() const { return body_; } protected: - explicit Module(Zone* zone) - : interface_(Interface::NewModule(zone)), + Module(Zone* zone, int pos) + : AstNode(pos), + interface_(Interface::NewModule(zone)), body_(NULL) {} - explicit Module(Interface* interface, Block* body = NULL) - : interface_(interface), + Module(Interface* interface, int pos, Block* body = NULL) + : AstNode(pos), + interface_(interface), body_(body) {} private: @@ -640,7 +647,8 @@ class ModuleLiteral V8_FINAL : public Module { DECLARE_NODE_TYPE(ModuleLiteral) protected: - ModuleLiteral(Block* body, Interface* interface) : Module(interface, body) {} + ModuleLiteral(Block* body, Interface* interface, int pos) + : Module(interface, pos, body) {} }; @@ -651,7 +659,7 @@ class ModuleVariable V8_FINAL : public Module { VariableProxy* proxy() const { return proxy_; } protected: - inline explicit ModuleVariable(VariableProxy* proxy); + inline ModuleVariable(VariableProxy* proxy, int pos); private: VariableProxy* proxy_; @@ -666,8 +674,8 @@ class ModulePath V8_FINAL : public Module { Handle<String> name() const { return name_; } protected: - ModulePath(Module* module, Handle<String> name, Zone* zone) - : Module(zone), + ModulePath(Module* module, Handle<String> name, Zone* zone, int pos) + : Module(zone, pos), module_(module), name_(name) { } @@ -685,8 +693,8 @@ class ModuleUrl V8_FINAL : public Module { Handle<String> url() const { return url_; } protected: - ModuleUrl(Handle<String> url, Zone* zone) - : Module(zone), url_(url) { + ModuleUrl(Handle<String> url, Zone* zone, int pos) + : Module(zone, pos), url_(url) { } private: @@ -702,8 +710,9 @@ class ModuleStatement V8_FINAL : public Statement { Block* body() const { return body_; } protected: - ModuleStatement(VariableProxy* proxy, Block* body) - : proxy_(proxy), + ModuleStatement(VariableProxy* proxy, Block* body, int pos) + : Statement(pos), + proxy_(proxy), body_(body) { } @@ -730,8 +739,8 @@ class IterationStatement : public BreakableStatement { Label* continue_target() { return &continue_target_; } protected: - IterationStatement(Isolate* isolate, ZoneStringList* labels) - : BreakableStatement(isolate, labels, TARGET_FOR_ANONYMOUS), + IterationStatement(Isolate* isolate, ZoneStringList* labels, int pos) + : BreakableStatement(isolate, labels, TARGET_FOR_ANONYMOUS, pos), body_(NULL), osr_entry_id_(GetNextId(isolate)) { } @@ -759,20 +768,14 @@ class DoWhileStatement V8_FINAL : public IterationStatement { Expression* cond() const { return cond_; } - // Position where condition expression starts. 
We need it to make - // the loop's condition a breakable location. - int condition_position() { return condition_position_; } - void set_condition_position(int pos) { condition_position_ = pos; } - virtual BailoutId ContinueId() const V8_OVERRIDE { return continue_id_; } virtual BailoutId StackCheckId() const V8_OVERRIDE { return back_edge_id_; } BailoutId BackEdgeId() const { return back_edge_id_; } protected: - DoWhileStatement(Isolate* isolate, ZoneStringList* labels) - : IterationStatement(isolate, labels), + DoWhileStatement(Isolate* isolate, ZoneStringList* labels, int pos) + : IterationStatement(isolate, labels, pos), cond_(NULL), - condition_position_(-1), continue_id_(GetNextId(isolate)), back_edge_id_(GetNextId(isolate)) { } @@ -780,8 +783,6 @@ class DoWhileStatement V8_FINAL : public IterationStatement { private: Expression* cond_; - int condition_position_; - const BailoutId continue_id_; const BailoutId back_edge_id_; }; @@ -809,8 +810,8 @@ class WhileStatement V8_FINAL : public IterationStatement { BailoutId BodyId() const { return body_id_; } protected: - WhileStatement(Isolate* isolate, ZoneStringList* labels) - : IterationStatement(isolate, labels), + WhileStatement(Isolate* isolate, ZoneStringList* labels, int pos) + : IterationStatement(isolate, labels, pos), cond_(NULL), may_have_function_literal_(true), body_id_(GetNextId(isolate)) { @@ -860,8 +861,8 @@ class ForStatement V8_FINAL : public IterationStatement { void set_loop_variable(Variable* var) { loop_variable_ = var; } protected: - ForStatement(Isolate* isolate, ZoneStringList* labels) - : IterationStatement(isolate, labels), + ForStatement(Isolate* isolate, ZoneStringList* labels, int pos) + : IterationStatement(isolate, labels, pos), init_(NULL), cond_(NULL), next_(NULL), @@ -902,8 +903,8 @@ class ForEachStatement : public IterationStatement { Expression* subject() const { return subject_; } protected: - ForEachStatement(Isolate* isolate, ZoneStringList* labels) - : IterationStatement(isolate, labels), + ForEachStatement(Isolate* isolate, ZoneStringList* labels, int pos) + : IterationStatement(isolate, labels, pos), each_(NULL), subject_(NULL) { } @@ -933,8 +934,8 @@ class ForInStatement V8_FINAL : public ForEachStatement { virtual BailoutId StackCheckId() const V8_OVERRIDE { return body_id_; } protected: - ForInStatement(Isolate* isolate, ZoneStringList* labels) - : ForEachStatement(isolate, labels), + ForInStatement(Isolate* isolate, ZoneStringList* labels, int pos) + : ForEachStatement(isolate, labels, pos), for_in_type_(SLOW_FOR_IN), body_id_(GetNextId(isolate)), prepare_id_(GetNextId(isolate)) { @@ -994,8 +995,8 @@ class ForOfStatement V8_FINAL : public ForEachStatement { BailoutId BackEdgeId() const { return back_edge_id_; } protected: - ForOfStatement(Isolate* isolate, ZoneStringList* labels) - : ForEachStatement(isolate, labels), + ForOfStatement(Isolate* isolate, ZoneStringList* labels, int pos) + : ForEachStatement(isolate, labels, pos), assign_iterator_(NULL), next_result_(NULL), result_done_(NULL), @@ -1020,8 +1021,8 @@ class ExpressionStatement V8_FINAL : public Statement { virtual bool IsJump() const V8_OVERRIDE { return expression_->IsThrow(); } protected: - explicit ExpressionStatement(Expression* expression) - : expression_(expression) { } + ExpressionStatement(Expression* expression, int pos) + : Statement(pos), expression_(expression) { } private: Expression* expression_; @@ -1033,7 +1034,7 @@ class JumpStatement : public Statement { virtual bool IsJump() const V8_FINAL V8_OVERRIDE { return 
true; } protected: - JumpStatement() {} + explicit JumpStatement(int pos) : Statement(pos) {} }; @@ -1044,8 +1045,8 @@ class ContinueStatement V8_FINAL : public JumpStatement { IterationStatement* target() const { return target_; } protected: - explicit ContinueStatement(IterationStatement* target) - : target_(target) { } + explicit ContinueStatement(IterationStatement* target, int pos) + : JumpStatement(pos), target_(target) { } private: IterationStatement* target_; @@ -1059,8 +1060,8 @@ class BreakStatement V8_FINAL : public JumpStatement { BreakableStatement* target() const { return target_; } protected: - explicit BreakStatement(BreakableStatement* target) - : target_(target) { } + explicit BreakStatement(BreakableStatement* target, int pos) + : JumpStatement(pos), target_(target) { } private: BreakableStatement* target_; @@ -1074,8 +1075,8 @@ class ReturnStatement V8_FINAL : public JumpStatement { Expression* expression() const { return expression_; } protected: - explicit ReturnStatement(Expression* expression) - : expression_(expression) { } + explicit ReturnStatement(Expression* expression, int pos) + : JumpStatement(pos), expression_(expression) { } private: Expression* expression_; @@ -1091,8 +1092,10 @@ class WithStatement V8_FINAL : public Statement { Statement* statement() const { return statement_; } protected: - WithStatement(Scope* scope, Expression* expression, Statement* statement) - : scope_(scope), + WithStatement( + Scope* scope, Expression* expression, Statement* statement, int pos) + : Statement(pos), + scope_(scope), expression_(expression), statement_(statement) { } @@ -1103,12 +1106,9 @@ class WithStatement V8_FINAL : public Statement { }; -class CaseClause V8_FINAL : public ZoneObject { +class CaseClause V8_FINAL : public AstNode { public: - CaseClause(Isolate* isolate, - Expression* label, - ZoneList<Statement*>* statements, - int pos); + DECLARE_NODE_TYPE(CaseClause) bool is_default() const { return label_ == NULL; } Expression* label() const { @@ -1118,9 +1118,6 @@ class CaseClause V8_FINAL : public ZoneObject { Label* body_target() { return &body_target_; } ZoneList<Statement*>* statements() const { return statements_; } - int position() const { return position_; } - void set_position(int pos) { position_ = pos; } - BailoutId EntryId() const { return entry_id_; } // Type feedback information. 
@@ -1129,10 +1126,14 @@ class CaseClause V8_FINAL : public ZoneObject { Handle<Type> compare_type() { return compare_type_; } private: + CaseClause(Isolate* isolate, + Expression* label, + ZoneList<Statement*>* statements, + int pos); + Expression* label_; Label body_target_; ZoneList<Statement*>* statements_; - int position_; Handle<Type> compare_type_; const TypeFeedbackId compare_id_; @@ -1158,8 +1159,8 @@ class SwitchStatement V8_FINAL : public BreakableStatement { void set_switch_type(SwitchType switch_type) { switch_type_ = switch_type; } protected: - SwitchStatement(Isolate* isolate, ZoneStringList* labels) - : BreakableStatement(isolate, labels, TARGET_FOR_ANONYMOUS), + SwitchStatement(Isolate* isolate, ZoneStringList* labels, int pos) + : BreakableStatement(isolate, labels, TARGET_FOR_ANONYMOUS, pos), tag_(NULL), cases_(NULL) { } @@ -1199,8 +1200,10 @@ class IfStatement V8_FINAL : public Statement { IfStatement(Isolate* isolate, Expression* condition, Statement* then_statement, - Statement* else_statement) - : condition_(condition), + Statement* else_statement, + int pos) + : Statement(pos), + condition_(condition), then_statement_(then_statement), else_statement_(else_statement), if_id_(GetNextId(isolate)), @@ -1222,7 +1225,8 @@ class IfStatement V8_FINAL : public Statement { // stack in the compiler; this should probably be reworked. class TargetCollector V8_FINAL : public AstNode { public: - explicit TargetCollector(Zone* zone) : targets_(0, zone) { } + explicit TargetCollector(Zone* zone) + : AstNode(RelocInfo::kNoPosition), targets_(0, zone) { } // Adds a jump target to the collector. The collector stores a pointer not // a copy of the target to make binding work, so make sure not to pass in @@ -1252,8 +1256,9 @@ class TryStatement : public Statement { ZoneList<Label*>* escaping_targets() const { return escaping_targets_; } protected: - TryStatement(int index, Block* try_block) - : index_(index), + TryStatement(int index, Block* try_block, int pos) + : Statement(pos), + index_(index), try_block_(try_block), escaping_targets_(NULL) { } @@ -1279,8 +1284,9 @@ class TryCatchStatement V8_FINAL : public TryStatement { Block* try_block, Scope* scope, Variable* variable, - Block* catch_block) - : TryStatement(index, try_block), + Block* catch_block, + int pos) + : TryStatement(index, try_block, pos), scope_(scope), variable_(variable), catch_block_(catch_block) { @@ -1300,8 +1306,9 @@ class TryFinallyStatement V8_FINAL : public TryStatement { Block* finally_block() const { return finally_block_; } protected: - TryFinallyStatement(int index, Block* try_block, Block* finally_block) - : TryStatement(index, try_block), + TryFinallyStatement( + int index, Block* try_block, Block* finally_block, int pos) + : TryStatement(index, try_block, pos), finally_block_(finally_block) { } private: @@ -1314,7 +1321,7 @@ class DebuggerStatement V8_FINAL : public Statement { DECLARE_NODE_TYPE(DebuggerStatement) protected: - DebuggerStatement() {} + explicit DebuggerStatement(int pos): Statement(pos) {} }; @@ -1323,7 +1330,7 @@ class EmptyStatement V8_FINAL : public Statement { DECLARE_NODE_TYPE(EmptyStatement) protected: - EmptyStatement() {} + explicit EmptyStatement(int pos): Statement(pos) {} }; @@ -1380,8 +1387,9 @@ class Literal V8_FINAL : public Expression { TypeFeedbackId LiteralFeedbackId() const { return reuse(id()); } protected: - Literal(Isolate* isolate, Handle<Object> value) - : Expression(isolate), + Literal( + Isolate* isolate, Handle<Object> value, int position) + : Expression(isolate, 
position), value_(value), isolate_(isolate) { } @@ -1411,8 +1419,9 @@ class MaterializedLiteral : public Expression { MaterializedLiteral(Isolate* isolate, int literal_index, bool is_simple, - int depth) - : Expression(isolate), + int depth, + int pos) + : Expression(isolate, pos), literal_index_(literal_index), is_simple_(is_simple), depth_(depth) {} @@ -1510,8 +1519,9 @@ class ObjectLiteral V8_FINAL : public MaterializedLiteral { bool fast_elements, int depth, bool may_store_doubles, - bool has_function) - : MaterializedLiteral(isolate, literal_index, is_simple, depth), + bool has_function, + int pos) + : MaterializedLiteral(isolate, literal_index, is_simple, depth, pos), constant_properties_(constant_properties), properties_(properties), fast_elements_(fast_elements), @@ -1539,8 +1549,9 @@ class RegExpLiteral V8_FINAL : public MaterializedLiteral { RegExpLiteral(Isolate* isolate, Handle<String> pattern, Handle<String> flags, - int literal_index) - : MaterializedLiteral(isolate, literal_index, false, 1), + int literal_index, + int pos) + : MaterializedLiteral(isolate, literal_index, false, 1, pos), pattern_(pattern), flags_(flags) {} @@ -1549,6 +1560,7 @@ class RegExpLiteral V8_FINAL : public MaterializedLiteral { Handle<String> flags_; }; + // An array literal has a literals object that is used // for minimizing the work when constructing it at runtime. class ArrayLiteral V8_FINAL : public MaterializedLiteral { @@ -1569,8 +1581,9 @@ class ArrayLiteral V8_FINAL : public MaterializedLiteral { ZoneList<Expression*>* values, int literal_index, bool is_simple, - int depth) - : MaterializedLiteral(isolate, literal_index, is_simple, depth), + int depth, + int pos) + : MaterializedLiteral(isolate, literal_index, is_simple, depth, pos), constant_elements_(constant_elements), values_(values), first_element_id_(ReserveIdRange(isolate, values->length())) {} @@ -1603,7 +1616,6 @@ class VariableProxy V8_FINAL : public Expression { Handle<String> name() const { return name_; } Variable* var() const { return var_; } bool is_this() const { return is_this_; } - int position() const { return position_; } Interface* interface() const { return interface_; } void BindTo(Variable* var); protected: - VariableProxy(Isolate* isolate, Variable* var); + VariableProxy(Isolate* isolate, Variable* var, int position); VariableProxy(Isolate* isolate, Handle<String> name, @@ -1629,7 +1641,6 @@ class VariableProxy V8_FINAL : public Expression { // True if this variable proxy is being used in an assignment // or with an increment/decrement operator. 
bool is_lvalue_; - int position_; Interface* interface_; }; @@ -1642,11 +1653,9 @@ class Property V8_FINAL : public Expression { Expression* obj() const { return obj_; } Expression* key() const { return key_; } - virtual int position() const V8_OVERRIDE { return pos_; } BailoutId LoadId() const { return load_id_; } - bool IsStringLength() const { return is_string_length_; } bool IsStringAccess() const { return is_string_access_; } bool IsFunctionPrototype() const { return is_function_prototype_; } @@ -1660,6 +1669,10 @@ class Property V8_FINAL : public Expression { return STANDARD_STORE; } bool IsUninitialized() { return is_uninitialized_; } + bool IsPreMonomorphic() { return is_pre_monomorphic_; } + bool HasNoTypeInformation() { + return is_uninitialized_ || is_pre_monomorphic_; + } TypeFeedbackId PropertyFeedbackId() { return reuse(id()); } protected: @@ -1667,27 +1680,25 @@ class Property V8_FINAL : public Expression { Expression* obj, Expression* key, int pos) - : Expression(isolate), + : Expression(isolate, pos), obj_(obj), key_(key), - pos_(pos), load_id_(GetNextId(isolate)), is_monomorphic_(false), + is_pre_monomorphic_(false), is_uninitialized_(false), - is_string_length_(false), is_string_access_(false), is_function_prototype_(false) { } private: Expression* obj_; Expression* key_; - int pos_; const BailoutId load_id_; SmallMapList receiver_types_; bool is_monomorphic_ : 1; + bool is_pre_monomorphic_ : 1; bool is_uninitialized_ : 1; - bool is_string_length_ : 1; bool is_string_access_ : 1; bool is_function_prototype_ : 1; }; @@ -1699,7 +1710,6 @@ class Call V8_FINAL : public Expression { Expression* expression() const { return expression_; } ZoneList<Expression*>* arguments() const { return arguments_; } - virtual int position() const V8_FINAL { return pos_; } // Type feedback information. TypeFeedbackId CallFeedbackId() const { return reuse(id()); } @@ -1754,10 +1764,9 @@ class Call V8_FINAL : public Expression { Expression* expression, ZoneList<Expression*>* arguments, int pos) - : Expression(isolate), + : Expression(isolate, pos), expression_(expression), arguments_(arguments), - pos_(pos), is_monomorphic_(false), check_type_(RECEIVER_MAP_CHECK), return_id_(GetNextId(isolate)) { } @@ -1765,7 +1774,6 @@ class Call V8_FINAL : public Expression { private: Expression* expression_; ZoneList<Expression*>* arguments_; - int pos_; bool is_monomorphic_; CheckType check_type_; @@ -1784,7 +1792,6 @@ class CallNew V8_FINAL : public Expression { Expression* expression() const { return expression_; } ZoneList<Expression*>* arguments() const { return arguments_; } - virtual int position() const V8_OVERRIDE { return pos_; } // Type feedback information. 
TypeFeedbackId CallNewFeedbackId() const { return reuse(id()); } @@ -1803,10 +1810,9 @@ class CallNew V8_FINAL : public Expression { Expression* expression, ZoneList<Expression*>* arguments, int pos) - : Expression(isolate), + : Expression(isolate, pos), expression_(expression), arguments_(arguments), - pos_(pos), is_monomorphic_(false), elements_kind_(GetInitialFastElementsKind()), return_id_(GetNextId(isolate)) { } @@ -1814,7 +1820,6 @@ class CallNew V8_FINAL : public Expression { private: Expression* expression_; ZoneList<Expression*>* arguments_; - int pos_; bool is_monomorphic_; Handle<JSFunction> target_; @@ -1844,8 +1849,9 @@ class CallRuntime V8_FINAL : public Expression { CallRuntime(Isolate* isolate, Handle<String> name, const Runtime::Function* function, - ZoneList<Expression*>* arguments) - : Expression(isolate), + ZoneList<Expression*>* arguments, + int pos) + : Expression(isolate, pos), name_(name), function_(function), arguments_(arguments) { } @@ -1863,7 +1869,6 @@ class UnaryOperation V8_FINAL : public Expression { Token::Value op() const { return op_; } Expression* expression() const { return expression_; } - virtual int position() const V8_OVERRIDE { return pos_; } BailoutId MaterializeTrueId() { return materialize_true_id_; } BailoutId MaterializeFalseId() { return materialize_false_id_; } @@ -1876,10 +1881,9 @@ class UnaryOperation V8_FINAL : public Expression { Token::Value op, Expression* expression, int pos) - : Expression(isolate), + : Expression(isolate, pos), op_(op), expression_(expression), - pos_(pos), materialize_true_id_(GetNextId(isolate)), materialize_false_id_(GetNextId(isolate)) { ASSERT(Token::IsUnaryOp(op)); @@ -1888,7 +1892,6 @@ class UnaryOperation V8_FINAL : public Expression { private: Token::Value op_; Expression* expression_; - int pos_; // For unary not (Token::NOT), the AST ids where true and false will // actually be materialized, respectively. @@ -1906,7 +1909,6 @@ class BinaryOperation V8_FINAL : public Expression { Token::Value op() const { return op_; } Expression* left() const { return left_; } Expression* right() const { return right_; } - virtual int position() const V8_OVERRIDE { return pos_; } BailoutId RightId() const { return right_id_; } @@ -1923,11 +1925,10 @@ class BinaryOperation V8_FINAL : public Expression { Expression* left, Expression* right, int pos) - : Expression(isolate), + : Expression(isolate, pos), op_(op), left_(left), right_(right), - pos_(pos), right_id_(GetNextId(isolate)) { ASSERT(Token::IsBinaryOp(op)); } @@ -1936,7 +1937,6 @@ class BinaryOperation V8_FINAL : public Expression { Token::Value op_; Expression* left_; Expression* right_; - int pos_; // TODO(rossberg): the fixed arg should probably be represented as a Constant // type for the RHS. 
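The ast.h hunks on both sides of this point repeat one mechanical refactoring: each node class drops its private pos_ field and virtual position() override, and its constructor instead forwards the source position up to the AstNode base, which now owns the single position_ member. Reduced to a skeleton (illustrative; the real classes' other members are elided):

    class AstNode : public ZoneObject {
     public:
      explicit AstNode(int position) : position_(position) {}
      int position() const { return position_; }  // one non-virtual accessor for all nodes
     private:
      int position_;
    };

    class Expression : public AstNode {
     protected:
      Expression(Isolate* isolate, int pos) : AstNode(pos) {}  // ids and bounds elided
    };

    class Throw V8_FINAL : public Expression {
     protected:
      Throw(Isolate* isolate, Expression* exception, int pos)
          : Expression(isolate, pos), exception_(exception) {}  // no per-class pos_
     private:
      Expression* exception_;
    };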
@@ -1961,7 +1961,6 @@ class CountOperation V8_FINAL : public Expression { } Expression* expression() const { return expression_; } - virtual int position() const V8_OVERRIDE { return pos_; } void RecordTypeFeedback(TypeFeedbackOracle* oracle, Zone* zone); virtual bool IsMonomorphic() V8_OVERRIDE { return is_monomorphic_; } @@ -1971,7 +1970,7 @@ class CountOperation V8_FINAL : public Expression { virtual KeyedAccessStoreMode GetStoreMode() V8_OVERRIDE { return store_mode_; } - TypeInfo type() const { return type_; } + Handle<Type> type() const { return type_; } BailoutId AssignmentId() const { return assignment_id_; } @@ -1984,13 +1983,12 @@ class CountOperation V8_FINAL : public Expression { bool is_prefix, Expression* expr, int pos) - : Expression(isolate), + : Expression(isolate, pos), op_(op), is_prefix_(is_prefix), is_monomorphic_(false), store_mode_(STANDARD_STORE), expression_(expr), - pos_(pos), assignment_id_(GetNextId(isolate)), count_id_(GetNextId(isolate)) {} @@ -2000,10 +1998,9 @@ class CountOperation V8_FINAL : public Expression { bool is_monomorphic_ : 1; KeyedAccessStoreMode store_mode_ : 5; // Windows treats as signed, // must have extra bit. - TypeInfo type_; + Handle<Type> type_; Expression* expression_; - int pos_; const BailoutId assignment_id_; const TypeFeedbackId count_id_; SmallMapList receiver_types_; @@ -2017,7 +2014,6 @@ class CompareOperation V8_FINAL : public Expression { Token::Value op() const { return op_; } Expression* left() const { return left_; } Expression* right() const { return right_; } - virtual int position() const V8_OVERRIDE { return pos_; } // Type feedback information. TypeFeedbackId CompareOperationFeedbackId() const { return reuse(id()); } @@ -2035,12 +2031,11 @@ class CompareOperation V8_FINAL : public Expression { Expression* left, Expression* right, int pos) - : Expression(isolate), + : Expression(isolate, pos), op_(op), left_(left), right_(right), - pos_(pos), - combined_type_(Type::Null(), isolate) { + combined_type_(Type::None(), isolate) { ASSERT(Token::IsCompareOp(op)); } @@ -2048,7 +2043,6 @@ class CompareOperation V8_FINAL : public Expression { Token::Value op_; Expression* left_; Expression* right_; - int pos_; Handle<Type> combined_type_; }; @@ -2062,9 +2056,6 @@ class Conditional V8_FINAL : public Expression { Expression* then_expression() const { return then_expression_; } Expression* else_expression() const { return else_expression_; } - int then_expression_position() const { return then_expression_position_; } - int else_expression_position() const { return else_expression_position_; } - BailoutId ThenId() const { return then_id_; } BailoutId ElseId() const { return else_id_; } @@ -2073,14 +2064,11 @@ class Conditional V8_FINAL : public Expression { Expression* condition, Expression* then_expression, Expression* else_expression, - int then_expression_position, - int else_expression_position) - : Expression(isolate), + int position) + : Expression(isolate, position), condition_(condition), then_expression_(then_expression), else_expression_(else_expression), - then_expression_position_(then_expression_position), - else_expression_position_(else_expression_position), then_id_(GetNextId(isolate)), else_id_(GetNextId(isolate)) { } @@ -2088,8 +2076,6 @@ class Conditional V8_FINAL : public Expression { Expression* condition_; Expression* then_expression_; Expression* else_expression_; - int then_expression_position_; - int else_expression_position_; const BailoutId then_id_; const BailoutId else_id_; }; @@ -2106,7 +2092,6 @@ class 
Assignment V8_FINAL : public Expression { Token::Value op() const { return op_; } Expression* target() const { return target_; } Expression* value() const { return value_; } - virtual int position() const V8_OVERRIDE { return pos_; } BinaryOperation* binary_operation() const { return binary_operation_; } // This check relies on the definition order of token in token.h. @@ -2119,6 +2104,10 @@ class Assignment V8_FINAL : public Expression { void RecordTypeFeedback(TypeFeedbackOracle* oracle, Zone* zone); virtual bool IsMonomorphic() V8_OVERRIDE { return is_monomorphic_; } bool IsUninitialized() { return is_uninitialized_; } + bool IsPreMonomorphic() { return is_pre_monomorphic_; } + bool HasNoTypeInformation() { + return is_uninitialized_ || is_pre_monomorphic_; + } virtual SmallMapList* GetReceiverTypes() V8_OVERRIDE { return &receiver_types_; } @@ -2137,8 +2126,8 @@ class Assignment V8_FINAL : public Expression { void Init(Isolate* isolate, AstNodeFactory<Visitor>* factory) { ASSERT(Token::IsAssignmentOp(op_)); if (is_compound()) { - binary_operation_ = - factory->NewBinaryOperation(binary_op(), target_, value_, pos_ + 1); + binary_operation_ = factory->NewBinaryOperation( + binary_op(), target_, value_, position() + 1); } } @@ -2146,12 +2135,12 @@ class Assignment V8_FINAL : public Expression { Token::Value op_; Expression* target_; Expression* value_; - int pos_; BinaryOperation* binary_operation_; const BailoutId assignment_id_; bool is_monomorphic_ : 1; bool is_uninitialized_ : 1; + bool is_pre_monomorphic_ : 1; KeyedAccessStoreMode store_mode_ : 5; // Windows treats as signed, // must have extra bit. SmallMapList receiver_types_; @@ -2172,7 +2161,6 @@ class Yield V8_FINAL : public Expression { Expression* generator_object() const { return generator_object_; } Expression* expression() const { return expression_; } Kind yield_kind() const { return yield_kind_; } - virtual int position() const V8_OVERRIDE { return pos_; } // Delegating yield surrounds the "yield" in a "try/catch". 
This index // locates the catch handler in the handler table, and is equivalent to @@ -2192,19 +2180,17 @@ class Yield V8_FINAL : public Expression { Expression* expression, Kind yield_kind, int pos) - : Expression(isolate), + : Expression(isolate, pos), generator_object_(generator_object), expression_(expression), yield_kind_(yield_kind), - index_(-1), - pos_(pos) { } + index_(-1) { } private: Expression* generator_object_; Expression* expression_; Kind yield_kind_; int index_; - int pos_; }; @@ -2213,15 +2199,13 @@ class Throw V8_FINAL : public Expression { DECLARE_NODE_TYPE(Throw) Expression* exception() const { return exception_; } - virtual int position() const V8_OVERRIDE { return pos_; } protected: Throw(Isolate* isolate, Expression* exception, int pos) - : Expression(isolate), exception_(exception), pos_(pos) {} + : Expression(isolate, pos), exception_(exception) {} private: Expression* exception_; - int pos_; }; @@ -2336,8 +2320,9 @@ class FunctionLiteral V8_FINAL : public Expression { ParameterFlag has_duplicate_parameters, IsFunctionFlag is_function, IsParenthesizedFlag is_parenthesized, - IsGeneratorFlag is_generator) - : Expression(isolate), + IsGeneratorFlag is_generator, + int position) + : Expression(isolate, position), name_(name), scope_(scope), body_(body), @@ -2383,23 +2368,21 @@ class FunctionLiteral V8_FINAL : public Expression { }; -class SharedFunctionInfoLiteral V8_FINAL : public Expression { +class NativeFunctionLiteral V8_FINAL : public Expression { public: - DECLARE_NODE_TYPE(SharedFunctionInfoLiteral) + DECLARE_NODE_TYPE(NativeFunctionLiteral) - Handle<SharedFunctionInfo> shared_function_info() const { - return shared_function_info_; - } + Handle<String> name() const { return name_; } + v8::Extension* extension() const { return extension_; } protected: - SharedFunctionInfoLiteral( - Isolate* isolate, - Handle<SharedFunctionInfo> shared_function_info) - : Expression(isolate), - shared_function_info_(shared_function_info) { } + NativeFunctionLiteral( + Isolate* isolate, Handle<String> name, v8::Extension* extension, int pos) + : Expression(isolate, pos), name_(name), extension_(extension) {} private: - Handle<SharedFunctionInfo> shared_function_info_; + Handle<String> name_; + v8::Extension* extension_; }; @@ -2408,7 +2391,7 @@ class ThisFunction V8_FINAL : public Expression { DECLARE_NODE_TYPE(ThisFunction) protected: - explicit ThisFunction(Isolate* isolate): Expression(isolate) {} + explicit ThisFunction(Isolate* isolate, int pos): Expression(isolate, pos) {} }; #undef DECLARE_NODE_TYPE @@ -2775,8 +2758,8 @@ class RegExpEmpty V8_FINAL : public RegExpTree { // ---------------------------------------------------------------------------- // Out-of-line inline constructors (to side-step cyclic dependencies). 
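The same position threading continues through AstNodeFactory below: every New* helper gains an int pos parameter and passes it straight into the node constructor before handing the node to the construction visitor. The recurring shape, paraphrased around the file's existing VISIT_AND_RETURN macro (its effect, abbreviated in the comment, is to visit the node and return it):

    ReturnStatement* NewReturnStatement(Expression* expression, int pos) {
      ReturnStatement* stmt = new(zone_) ReturnStatement(expression, pos);
      VISIT_AND_RETURN(ReturnStatement, stmt)  // visitor_.VisitReturnStatement(stmt); return stmt;
    }

NewVariableProxy is the one helper given a default (pos = RelocInfo::kNoPosition), presumably because proxies are also created internally for references that have no source location.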
-inline ModuleVariable::ModuleVariable(VariableProxy* proxy) - : Module(proxy->interface()), +inline ModuleVariable::ModuleVariable(VariableProxy* proxy, int pos) + : Module(proxy->interface(), pos), proxy_(proxy) { } @@ -2893,75 +2876,81 @@ class AstNodeFactory V8_FINAL BASE_EMBEDDED { VariableDeclaration* NewVariableDeclaration(VariableProxy* proxy, VariableMode mode, - Scope* scope) { + Scope* scope, + int pos) { VariableDeclaration* decl = - new(zone_) VariableDeclaration(proxy, mode, scope); + new(zone_) VariableDeclaration(proxy, mode, scope, pos); VISIT_AND_RETURN(VariableDeclaration, decl) } FunctionDeclaration* NewFunctionDeclaration(VariableProxy* proxy, VariableMode mode, FunctionLiteral* fun, - Scope* scope) { + Scope* scope, + int pos) { FunctionDeclaration* decl = - new(zone_) FunctionDeclaration(proxy, mode, fun, scope); + new(zone_) FunctionDeclaration(proxy, mode, fun, scope, pos); VISIT_AND_RETURN(FunctionDeclaration, decl) } ModuleDeclaration* NewModuleDeclaration(VariableProxy* proxy, Module* module, - Scope* scope) { + Scope* scope, + int pos) { ModuleDeclaration* decl = - new(zone_) ModuleDeclaration(proxy, module, scope); + new(zone_) ModuleDeclaration(proxy, module, scope, pos); VISIT_AND_RETURN(ModuleDeclaration, decl) } ImportDeclaration* NewImportDeclaration(VariableProxy* proxy, Module* module, - Scope* scope) { + Scope* scope, + int pos) { ImportDeclaration* decl = - new(zone_) ImportDeclaration(proxy, module, scope); + new(zone_) ImportDeclaration(proxy, module, scope, pos); VISIT_AND_RETURN(ImportDeclaration, decl) } ExportDeclaration* NewExportDeclaration(VariableProxy* proxy, - Scope* scope) { + Scope* scope, + int pos) { ExportDeclaration* decl = - new(zone_) ExportDeclaration(proxy, scope); + new(zone_) ExportDeclaration(proxy, scope, pos); VISIT_AND_RETURN(ExportDeclaration, decl) } - ModuleLiteral* NewModuleLiteral(Block* body, Interface* interface) { - ModuleLiteral* module = new(zone_) ModuleLiteral(body, interface); + ModuleLiteral* NewModuleLiteral(Block* body, Interface* interface, int pos) { + ModuleLiteral* module = new(zone_) ModuleLiteral(body, interface, pos); VISIT_AND_RETURN(ModuleLiteral, module) } - ModuleVariable* NewModuleVariable(VariableProxy* proxy) { - ModuleVariable* module = new(zone_) ModuleVariable(proxy); + ModuleVariable* NewModuleVariable(VariableProxy* proxy, int pos) { + ModuleVariable* module = new(zone_) ModuleVariable(proxy, pos); VISIT_AND_RETURN(ModuleVariable, module) } - ModulePath* NewModulePath(Module* origin, Handle<String> name) { - ModulePath* module = new(zone_) ModulePath(origin, name, zone_); + ModulePath* NewModulePath(Module* origin, Handle<String> name, int pos) { + ModulePath* module = new(zone_) ModulePath(origin, name, zone_, pos); VISIT_AND_RETURN(ModulePath, module) } - ModuleUrl* NewModuleUrl(Handle<String> url) { - ModuleUrl* module = new(zone_) ModuleUrl(url, zone_); + ModuleUrl* NewModuleUrl(Handle<String> url, int pos) { + ModuleUrl* module = new(zone_) ModuleUrl(url, zone_, pos); VISIT_AND_RETURN(ModuleUrl, module) } Block* NewBlock(ZoneStringList* labels, int capacity, - bool is_initializer_block) { + bool is_initializer_block, + int pos) { Block* block = new(zone_) Block( - isolate_, labels, capacity, is_initializer_block, zone_); + isolate_, labels, capacity, is_initializer_block, pos, zone_); VISIT_AND_RETURN(Block, block) } #define STATEMENT_WITH_LABELS(NodeType) \ - NodeType* New##NodeType(ZoneStringList* labels) { \ - NodeType* stmt = new(zone_) NodeType(isolate_, labels); \ + NodeType* 
New##NodeType(ZoneStringList* labels, int pos) { \ + NodeType* stmt = new(zone_) NodeType(isolate_, labels, pos); \ VISIT_AND_RETURN(NodeType, stmt); \ } STATEMENT_WITH_LABELS(DoWhileStatement) @@ -2971,14 +2960,15 @@ class AstNodeFactory V8_FINAL BASE_EMBEDDED { #undef STATEMENT_WITH_LABELS ForEachStatement* NewForEachStatement(ForEachStatement::VisitMode visit_mode, - ZoneStringList* labels) { + ZoneStringList* labels, + int pos) { switch (visit_mode) { case ForEachStatement::ENUMERATE: { - ForInStatement* stmt = new(zone_) ForInStatement(isolate_, labels); + ForInStatement* stmt = new(zone_) ForInStatement(isolate_, labels, pos); VISIT_AND_RETURN(ForInStatement, stmt); } case ForEachStatement::ITERATE: { - ForOfStatement* stmt = new(zone_) ForOfStatement(isolate_, labels); + ForOfStatement* stmt = new(zone_) ForOfStatement(isolate_, labels, pos); VISIT_AND_RETURN(ForOfStatement, stmt); } } @@ -2986,44 +2976,47 @@ class AstNodeFactory V8_FINAL BASE_EMBEDDED { return NULL; } - ModuleStatement* NewModuleStatement(VariableProxy* proxy, Block* body) { - ModuleStatement* stmt = new(zone_) ModuleStatement(proxy, body); + ModuleStatement* NewModuleStatement( + VariableProxy* proxy, Block* body, int pos) { + ModuleStatement* stmt = new(zone_) ModuleStatement(proxy, body, pos); VISIT_AND_RETURN(ModuleStatement, stmt) } - ExpressionStatement* NewExpressionStatement(Expression* expression) { - ExpressionStatement* stmt = new(zone_) ExpressionStatement(expression); + ExpressionStatement* NewExpressionStatement(Expression* expression, int pos) { + ExpressionStatement* stmt = new(zone_) ExpressionStatement(expression, pos); VISIT_AND_RETURN(ExpressionStatement, stmt) } - ContinueStatement* NewContinueStatement(IterationStatement* target) { - ContinueStatement* stmt = new(zone_) ContinueStatement(target); + ContinueStatement* NewContinueStatement(IterationStatement* target, int pos) { + ContinueStatement* stmt = new(zone_) ContinueStatement(target, pos); VISIT_AND_RETURN(ContinueStatement, stmt) } - BreakStatement* NewBreakStatement(BreakableStatement* target) { - BreakStatement* stmt = new(zone_) BreakStatement(target); + BreakStatement* NewBreakStatement(BreakableStatement* target, int pos) { + BreakStatement* stmt = new(zone_) BreakStatement(target, pos); VISIT_AND_RETURN(BreakStatement, stmt) } - ReturnStatement* NewReturnStatement(Expression* expression) { - ReturnStatement* stmt = new(zone_) ReturnStatement(expression); + ReturnStatement* NewReturnStatement(Expression* expression, int pos) { + ReturnStatement* stmt = new(zone_) ReturnStatement(expression, pos); VISIT_AND_RETURN(ReturnStatement, stmt) } WithStatement* NewWithStatement(Scope* scope, Expression* expression, - Statement* statement) { + Statement* statement, + int pos) { WithStatement* stmt = new(zone_) WithStatement( - scope, expression, statement); + scope, expression, statement, pos); VISIT_AND_RETURN(WithStatement, stmt) } IfStatement* NewIfStatement(Expression* condition, Statement* then_statement, - Statement* else_statement) { + Statement* else_statement, + int pos) { IfStatement* stmt = new(zone_) IfStatement( - isolate_, condition, then_statement, else_statement); + isolate_, condition, then_statement, else_statement, pos); VISIT_AND_RETURN(IfStatement, stmt) } @@ -3031,36 +3024,45 @@ class AstNodeFactory V8_FINAL BASE_EMBEDDED { Block* try_block, Scope* scope, Variable* variable, - Block* catch_block) { + Block* catch_block, + int pos) { TryCatchStatement* stmt = new(zone_) TryCatchStatement( - index, try_block, scope, 
variable, catch_block); + index, try_block, scope, variable, catch_block, pos); VISIT_AND_RETURN(TryCatchStatement, stmt) } TryFinallyStatement* NewTryFinallyStatement(int index, Block* try_block, - Block* finally_block) { + Block* finally_block, + int pos) { TryFinallyStatement* stmt = - new(zone_) TryFinallyStatement(index, try_block, finally_block); + new(zone_) TryFinallyStatement(index, try_block, finally_block, pos); VISIT_AND_RETURN(TryFinallyStatement, stmt) } - DebuggerStatement* NewDebuggerStatement() { - DebuggerStatement* stmt = new(zone_) DebuggerStatement(); + DebuggerStatement* NewDebuggerStatement(int pos) { + DebuggerStatement* stmt = new(zone_) DebuggerStatement(pos); VISIT_AND_RETURN(DebuggerStatement, stmt) } - EmptyStatement* NewEmptyStatement() { - return new(zone_) EmptyStatement(); + EmptyStatement* NewEmptyStatement(int pos) { + return new(zone_) EmptyStatement(pos); + } + + CaseClause* NewCaseClause( + Expression* label, ZoneList<Statement*>* statements, int pos) { + CaseClause* clause = + new(zone_) CaseClause(isolate_, label, statements, pos); + VISIT_AND_RETURN(CaseClause, clause) } - Literal* NewLiteral(Handle<Object> handle) { - Literal* lit = new(zone_) Literal(isolate_, handle); + Literal* NewLiteral(Handle<Object> handle, int pos) { + Literal* lit = new(zone_) Literal(isolate_, handle, pos); VISIT_AND_RETURN(Literal, lit) } - Literal* NewNumberLiteral(double number) { - return NewLiteral(isolate_->factory()->NewNumber(number, TENURED)); + Literal* NewNumberLiteral(double number, int pos) { + return NewLiteral(isolate_->factory()->NewNumber(number, TENURED), pos); } ObjectLiteral* NewObjectLiteral( @@ -3071,26 +3073,29 @@ class AstNodeFactory V8_FINAL BASE_EMBEDDED { bool fast_elements, int depth, bool may_store_doubles, - bool has_function) { + bool has_function, + int pos) { ObjectLiteral* lit = new(zone_) ObjectLiteral( isolate_, constant_properties, properties, literal_index, - is_simple, fast_elements, depth, may_store_doubles, has_function); + is_simple, fast_elements, depth, may_store_doubles, has_function, pos); VISIT_AND_RETURN(ObjectLiteral, lit) } ObjectLiteral::Property* NewObjectLiteralProperty(bool is_getter, - FunctionLiteral* value) { + FunctionLiteral* value, + int pos) { ObjectLiteral::Property* prop = new(zone_) ObjectLiteral::Property(is_getter, value); - prop->set_key(NewLiteral(value->name())); + prop->set_key(NewLiteral(value->name(), pos)); return prop; // Not an AST node, will not be visited. 
} RegExpLiteral* NewRegExpLiteral(Handle<String> pattern, Handle<String> flags, - int literal_index) { + int literal_index, + int pos) { RegExpLiteral* lit = - new(zone_) RegExpLiteral(isolate_, pattern, flags, literal_index); + new(zone_) RegExpLiteral(isolate_, pattern, flags, literal_index, pos); VISIT_AND_RETURN(RegExpLiteral, lit); } @@ -3098,14 +3103,17 @@ class AstNodeFactory V8_FINAL BASE_EMBEDDED { ZoneList<Expression*>* values, int literal_index, bool is_simple, - int depth) { + int depth, + int pos) { ArrayLiteral* lit = new(zone_) ArrayLiteral( - isolate_, constant_elements, values, literal_index, is_simple, depth); + isolate_, constant_elements, values, literal_index, is_simple, + depth, pos); VISIT_AND_RETURN(ArrayLiteral, lit) } - VariableProxy* NewVariableProxy(Variable* var) { - VariableProxy* proxy = new(zone_) VariableProxy(isolate_, var); + VariableProxy* NewVariableProxy(Variable* var, + int pos = RelocInfo::kNoPosition) { + VariableProxy* proxy = new(zone_) VariableProxy(isolate_, var, pos); VISIT_AND_RETURN(VariableProxy, proxy) } @@ -3139,9 +3147,10 @@ class AstNodeFactory V8_FINAL BASE_EMBEDDED { CallRuntime* NewCallRuntime(Handle<String> name, const Runtime::Function* function, - ZoneList<Expression*>* arguments) { + ZoneList<Expression*>* arguments, + int pos) { CallRuntime* call = - new(zone_) CallRuntime(isolate_, name, function, arguments); + new(zone_) CallRuntime(isolate_, name, function, arguments, pos); VISIT_AND_RETURN(CallRuntime, call) } @@ -3183,11 +3192,9 @@ class AstNodeFactory V8_FINAL BASE_EMBEDDED { Conditional* NewConditional(Expression* condition, Expression* then_expression, Expression* else_expression, - int then_expression_position, - int else_expression_position) { + int position) { Conditional* cond = new(zone_) Conditional( - isolate_, condition, then_expression, else_expression, - then_expression_position, else_expression_position); + isolate_, condition, then_expression, else_expression, position); VISIT_AND_RETURN(Conditional, cond) } @@ -3227,12 +3234,13 @@ class AstNodeFactory V8_FINAL BASE_EMBEDDED { FunctionLiteral::FunctionType function_type, FunctionLiteral::IsFunctionFlag is_function, FunctionLiteral::IsParenthesizedFlag is_parenthesized, - FunctionLiteral::IsGeneratorFlag is_generator) { + FunctionLiteral::IsGeneratorFlag is_generator, + int position) { FunctionLiteral* lit = new(zone_) FunctionLiteral( isolate_, name, scope, body, materialized_literal_count, expected_property_count, handler_count, parameter_count, function_type, has_duplicate_parameters, is_function, - is_parenthesized, is_generator); + is_parenthesized, is_generator, position); // Top-level literal doesn't count for the AST's properties. 
if (is_function == FunctionLiteral::kIsFunction) { visitor_.VisitFunctionLiteral(lit); @@ -3240,15 +3248,15 @@ class AstNodeFactory V8_FINAL BASE_EMBEDDED { return lit; } - SharedFunctionInfoLiteral* NewSharedFunctionInfoLiteral( - Handle<SharedFunctionInfo> shared_function_info) { - SharedFunctionInfoLiteral* lit = - new(zone_) SharedFunctionInfoLiteral(isolate_, shared_function_info); - VISIT_AND_RETURN(SharedFunctionInfoLiteral, lit) + NativeFunctionLiteral* NewNativeFunctionLiteral( + Handle<String> name, v8::Extension* extension, int pos) { + NativeFunctionLiteral* lit = + new(zone_) NativeFunctionLiteral(isolate_, name, extension, pos); + VISIT_AND_RETURN(NativeFunctionLiteral, lit) } - ThisFunction* NewThisFunction() { - ThisFunction* fun = new(zone_) ThisFunction(isolate_); + ThisFunction* NewThisFunction(int pos) { + ThisFunction* fun = new(zone_) ThisFunction(isolate_, pos); VISIT_AND_RETURN(ThisFunction, fun) } diff --git a/deps/v8/src/bootstrapper.cc b/deps/v8/src/bootstrapper.cc index 0756aefb0b..234a2118bd 100644 --- a/deps/v8/src/bootstrapper.cc +++ b/deps/v8/src/bootstrapper.cc @@ -824,7 +824,7 @@ void Genesis::HookUpInnerGlobal(Handle<GlobalObject> inner_global) { // work in the snapshot case is done in HookUpInnerGlobal. void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global, Handle<JSFunction> empty_function) { - // --- G l o b a l C o n t e x t --- + // --- N a t i v e C o n t e x t --- // Use the empty function as closure (no scope info). native_context()->set_closure(*empty_function); native_context()->set_previous(NULL); @@ -1043,7 +1043,7 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global, } { // -- J S O N - Handle<String> name = factory->NewStringFromAscii(CStrVector("JSON")); + Handle<String> name = factory->InternalizeUtf8String("JSON"); Handle<JSFunction> cons = factory->NewFunction(name, factory->the_hole_value()); JSFunction::SetInstancePrototype(cons, @@ -2067,6 +2067,11 @@ bool Genesis::InstallExperimentalNatives() { "native harmony-array.js") == 0) { if (!CompileExperimentalBuiltin(isolate(), i)) return false; } + if (FLAG_harmony_maths && + strcmp(ExperimentalNatives::GetScriptName(i).start(), + "native harmony-math.js") == 0) { + if (!CompileExperimentalBuiltin(isolate(), i)) return false; + } } InstallExperimentalNativeFunctions(); diff --git a/deps/v8/src/builtins.cc b/deps/v8/src/builtins.cc index 9290852dc9..b614904c9f 100644 --- a/deps/v8/src/builtins.cc +++ b/deps/v8/src/builtins.cc @@ -195,79 +195,6 @@ BUILTIN(EmptyFunction) { } -static MaybeObject* ArrayCodeGenericCommon(Arguments* args, - Isolate* isolate, - JSFunction* constructor) { - ASSERT(args->length() >= 1); - Heap* heap = isolate->heap(); - isolate->counters()->array_function_runtime()->Increment(); - - JSArray* array; - if (CalledAsConstructor(isolate)) { - array = JSArray::cast((*args)[0]); - // Initialize elements and length in case later allocations fail so that the - // array object is initialized in a valid state. 
- MaybeObject* maybe_array = array->Initialize(0); - if (maybe_array->IsFailure()) return maybe_array; - - AllocationMemento* memento = AllocationMemento::FindForJSObject(array); - if (memento != NULL && memento->IsValid()) { - AllocationSite* site = memento->GetAllocationSite(); - ElementsKind to_kind = site->GetElementsKind(); - if (IsMoreGeneralElementsKindTransition(array->GetElementsKind(), - to_kind)) { - // We have advice that we should change the elements kind - if (FLAG_trace_track_allocation_sites) { - PrintF("AllocationSite: pre-transitioning array %p(%s->%s)\n", - reinterpret_cast<void*>(array), - ElementsKindToString(array->GetElementsKind()), - ElementsKindToString(to_kind)); - } - - maybe_array = array->TransitionElementsKind(to_kind); - if (maybe_array->IsFailure()) return maybe_array; - } - } - - if (!FLAG_smi_only_arrays) { - Context* native_context = isolate->context()->native_context(); - if (array->GetElementsKind() == GetInitialFastElementsKind() && - !native_context->js_array_maps()->IsUndefined()) { - FixedArray* map_array = - FixedArray::cast(native_context->js_array_maps()); - array->set_map(Map::cast(map_array-> - get(TERMINAL_FAST_ELEMENTS_KIND))); - } - } - } else { - // Allocate the JS Array - MaybeObject* maybe_obj = heap->AllocateJSObject(constructor); - if (!maybe_obj->To(&array)) return maybe_obj; - } - - Arguments adjusted_arguments(args->length() - 1, args->arguments() - 1); - ASSERT(adjusted_arguments.length() < 1 || - adjusted_arguments[0] == (*args)[1]); - return ArrayConstructInitializeElements(array, &adjusted_arguments); -} - - -BUILTIN(InternalArrayCodeGeneric) { - return ArrayCodeGenericCommon( - &args, - isolate, - isolate->context()->native_context()->internal_array_function()); -} - - -BUILTIN(ArrayCodeGeneric) { - return ArrayCodeGenericCommon( - &args, - isolate, - isolate->context()->native_context()->array_function()); -} - - static void MoveDoubleElements(FixedDoubleArray* dst, int dst_index, FixedDoubleArray* src, @@ -346,10 +273,20 @@ static FixedArrayBase* LeftTrimFixedArray(Heap* heap, MemoryChunk::IncrementLiveBytesFromMutator(elms->address(), -size_delta); } - HEAP_PROFILE(heap, ObjectMoveEvent(elms->address(), - elms->address() + size_delta)); - return FixedArrayBase::cast(HeapObject::FromAddress( - elms->address() + to_trim * entry_size)); + FixedArrayBase* new_elms = FixedArrayBase::cast(HeapObject::FromAddress( + elms->address() + size_delta)); + HeapProfiler* profiler = heap->isolate()->heap_profiler(); + if (profiler->is_profiling()) { + profiler->ObjectMoveEvent(elms->address(), + new_elms->address(), + new_elms->Size()); + if (profiler->is_tracking_allocations()) { + // Report filler object as a new allocation. + // Otherwise it will become an untracked object. 
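+ // (A filler object now occupies the words vacated at the old start
+ // address; without an allocation event for it the profiler would have
+ // no record covering that address range.)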
+ profiler->NewObjectEvent(elms->address(), elms->Size()); + } + } + return new_elms; } @@ -1392,7 +1329,8 @@ static void Generate_LoadIC_Normal(MacroAssembler* masm) { static void Generate_LoadIC_Getter_ForDeopt(MacroAssembler* masm) { - LoadStubCompiler::GenerateLoadViaGetter(masm, Handle<JSFunction>()); + LoadStubCompiler::GenerateLoadViaGetter( + masm, LoadStubCompiler::registers()[0], Handle<JSFunction>()); } @@ -1451,6 +1389,11 @@ static void Generate_StoreIC_Slow(MacroAssembler* masm) { } +static void Generate_StoreIC_Slow_Strict(MacroAssembler* masm) { + StoreIC::GenerateSlow(masm); +} + + static void Generate_StoreIC_Initialize(MacroAssembler* masm) { StoreIC::GenerateInitialize(masm); } @@ -1546,6 +1489,11 @@ static void Generate_KeyedStoreIC_Slow(MacroAssembler* masm) { } +static void Generate_KeyedStoreIC_Slow_Strict(MacroAssembler* masm) { + KeyedStoreIC::GenerateSlow(masm); +} + + static void Generate_KeyedStoreIC_Initialize(MacroAssembler* masm) { KeyedStoreIC::GenerateInitialize(masm); } @@ -1728,8 +1676,19 @@ void Builtins::InitBuiltinFunctionTable() { functions->extra_args = NO_EXTRA_ARGUMENTS; \ ++functions; +#define DEF_FUNCTION_PTR_H(aname, kind, extra) \ + functions->generator = FUNCTION_ADDR(Generate_##aname); \ + functions->c_code = NULL; \ + functions->s_name = #aname; \ + functions->name = k##aname; \ + functions->flags = Code::ComputeFlags( \ + Code::HANDLER, MONOMORPHIC, extra, Code::NORMAL, Code::kind); \ + functions->extra_args = NO_EXTRA_ARGUMENTS; \ + ++functions; + BUILTIN_LIST_C(DEF_FUNCTION_PTR_C) BUILTIN_LIST_A(DEF_FUNCTION_PTR_A) + BUILTIN_LIST_H(DEF_FUNCTION_PTR_H) BUILTIN_LIST_DEBUG_A(DEF_FUNCTION_PTR_A) #undef DEF_FUNCTION_PTR_C @@ -1854,8 +1813,15 @@ Handle<Code> Builtins::name() { \ reinterpret_cast<Code**>(builtin_address(k##name)); \ return Handle<Code>(code_address); \ } +#define DEFINE_BUILTIN_ACCESSOR_H(name, kind, extra) \ +Handle<Code> Builtins::name() { \ + Code** code_address = \ + reinterpret_cast<Code**>(builtin_address(k##name)); \ + return Handle<Code>(code_address); \ +} BUILTIN_LIST_C(DEFINE_BUILTIN_ACCESSOR_C) BUILTIN_LIST_A(DEFINE_BUILTIN_ACCESSOR_A) +BUILTIN_LIST_H(DEFINE_BUILTIN_ACCESSOR_H) BUILTIN_LIST_DEBUG_A(DEFINE_BUILTIN_ACCESSOR_A) #undef DEFINE_BUILTIN_ACCESSOR_C #undef DEFINE_BUILTIN_ACCESSOR_A diff --git a/deps/v8/src/builtins.h b/deps/v8/src/builtins.h index c712f1ee02..9b589d843d 100644 --- a/deps/v8/src/builtins.h +++ b/deps/v8/src/builtins.h @@ -50,6 +50,10 @@ enum BuiltinExtraArguments { #define CODE_AGE_LIST(V) \ CODE_AGE_LIST_WITH_ARG(CODE_AGE_LIST_IGNORE_ARG, V) +#define CODE_AGE_LIST_WITH_NO_AGE(V) \ + V(NoAge) \ + CODE_AGE_LIST_WITH_ARG(CODE_AGE_LIST_IGNORE_ARG, V) + #define DECLARE_CODE_AGE_BUILTIN(C, V) \ V(Make##C##CodeYoungAgainOddMarking, BUILTIN, \ UNINITIALIZED, Code::kNoExtraICState) \ @@ -63,9 +67,6 @@ enum BuiltinExtraArguments { \ V(EmptyFunction, NO_EXTRA_ARGUMENTS) \ \ - V(InternalArrayCodeGeneric, NO_EXTRA_ARGUMENTS) \ - V(ArrayCodeGeneric, NO_EXTRA_ARGUMENTS) \ - \ V(ArrayPush, NO_EXTRA_ARGUMENTS) \ V(ArrayPop, NO_EXTRA_ARGUMENTS) \ V(ArrayShift, NO_EXTRA_ARGUMENTS) \ @@ -111,8 +112,6 @@ enum BuiltinExtraArguments { Code::kNoExtraICState) \ V(NotifyStubFailure, BUILTIN, UNINITIALIZED, \ Code::kNoExtraICState) \ - V(NotifyOSR, BUILTIN, UNINITIALIZED, \ - Code::kNoExtraICState) \ \ V(LoadIC_Miss, BUILTIN, UNINITIALIZED, \ Code::kNoExtraICState) \ @@ -120,30 +119,20 @@ enum BuiltinExtraArguments { Code::kNoExtraICState) \ V(KeyedLoadIC_MissForceGeneric, BUILTIN, UNINITIALIZED, \ 
Code::kNoExtraICState) \ - V(KeyedLoadIC_Slow, STUB, MONOMORPHIC, \ - Code::kNoExtraICState) \ V(StoreIC_Miss, BUILTIN, UNINITIALIZED, \ Code::kNoExtraICState) \ - V(StoreIC_Slow, BUILTIN, UNINITIALIZED, \ - Code::kNoExtraICState) \ V(KeyedStoreIC_Miss, BUILTIN, UNINITIALIZED, \ Code::kNoExtraICState) \ V(KeyedStoreIC_MissForceGeneric, BUILTIN, UNINITIALIZED, \ Code::kNoExtraICState) \ - V(KeyedStoreIC_Slow, BUILTIN, UNINITIALIZED, \ - Code::kNoExtraICState) \ V(LoadIC_Initialize, LOAD_IC, UNINITIALIZED, \ Code::kNoExtraICState) \ V(LoadIC_PreMonomorphic, LOAD_IC, PREMONOMORPHIC, \ Code::kNoExtraICState) \ - V(LoadIC_Normal, LOAD_IC, MONOMORPHIC, \ - Code::kNoExtraICState) \ V(LoadIC_Megamorphic, LOAD_IC, MEGAMORPHIC, \ Code::kNoExtraICState) \ V(LoadIC_Getter_ForDeopt, LOAD_IC, MONOMORPHIC, \ Code::kNoExtraICState) \ - V(LoadIC_Slow, STUB, MONOMORPHIC, \ - Code::kNoExtraICState) \ \ V(KeyedLoadIC_Initialize, KEYED_LOAD_IC, UNINITIALIZED, \ Code::kNoExtraICState) \ @@ -162,8 +151,6 @@ enum BuiltinExtraArguments { Code::kNoExtraICState) \ V(StoreIC_PreMonomorphic, STORE_IC, PREMONOMORPHIC, \ Code::kNoExtraICState) \ - V(StoreIC_Normal, STORE_IC, MONOMORPHIC, \ - Code::kNoExtraICState) \ V(StoreIC_Megamorphic, STORE_IC, MEGAMORPHIC, \ Code::kNoExtraICState) \ V(StoreIC_Generic, STORE_IC, GENERIC, \ @@ -176,8 +163,6 @@ enum BuiltinExtraArguments { kStrictMode) \ V(StoreIC_PreMonomorphic_Strict, STORE_IC, PREMONOMORPHIC, \ kStrictMode) \ - V(StoreIC_Normal_Strict, STORE_IC, MONOMORPHIC, \ - kStrictMode) \ V(StoreIC_Megamorphic_Strict, STORE_IC, MEGAMORPHIC, \ kStrictMode) \ V(StoreIC_GlobalProxy_Strict, STORE_IC, GENERIC, \ @@ -219,10 +204,29 @@ enum BuiltinExtraArguments { Code::kNoExtraICState) \ V(InterruptCheck, BUILTIN, UNINITIALIZED, \ Code::kNoExtraICState) \ + V(OsrAfterStackCheck, BUILTIN, UNINITIALIZED, \ + Code::kNoExtraICState) \ V(StackCheck, BUILTIN, UNINITIALIZED, \ Code::kNoExtraICState) \ + \ + V(MarkCodeAsExecutedOnce, BUILTIN, UNINITIALIZED, \ + Code::kNoExtraICState) \ + V(MarkCodeAsExecutedTwice, BUILTIN, UNINITIALIZED, \ + Code::kNoExtraICState) \ CODE_AGE_LIST_WITH_ARG(DECLARE_CODE_AGE_BUILTIN, V) +// Define list of builtin handlers implemented in assembly. +#define BUILTIN_LIST_H(V) \ + V(LoadIC_Slow, LOAD_IC, Code::kNoExtraICState) \ + V(KeyedLoadIC_Slow, KEYED_LOAD_IC, Code::kNoExtraICState) \ + V(StoreIC_Slow, STORE_IC, Code::kNoExtraICState) \ + V(StoreIC_Slow_Strict, STORE_IC, kStrictMode) \ + V(KeyedStoreIC_Slow, KEYED_STORE_IC, Code::kNoExtraICState)\ + V(KeyedStoreIC_Slow_Strict, KEYED_STORE_IC, kStrictMode) \ + V(LoadIC_Normal, LOAD_IC, Code::kNoExtraICState) \ + V(StoreIC_Normal, STORE_IC, Code::kNoExtraICState) \ + V(StoreIC_Normal_Strict, STORE_IC, kStrictMode) + #ifdef ENABLE_DEBUGGER_SUPPORT // Define list of builtins used by the debugger implemented in assembly. 
#define BUILTIN_LIST_DEBUG_A(V) \ @@ -310,8 +314,10 @@ class Builtins { enum Name { #define DEF_ENUM_C(name, ignore) k##name, #define DEF_ENUM_A(name, kind, state, extra) k##name, +#define DEF_ENUM_H(name, kind, extra) k##name, BUILTIN_LIST_C(DEF_ENUM_C) BUILTIN_LIST_A(DEF_ENUM_A) + BUILTIN_LIST_H(DEF_ENUM_H) BUILTIN_LIST_DEBUG_A(DEF_ENUM_A) #undef DEF_ENUM_C #undef DEF_ENUM_A @@ -335,8 +341,10 @@ class Builtins { #define DECLARE_BUILTIN_ACCESSOR_C(name, ignore) Handle<Code> name(); #define DECLARE_BUILTIN_ACCESSOR_A(name, kind, state, extra) \ Handle<Code> name(); +#define DECLARE_BUILTIN_ACCESSOR_H(name, kind, extra) Handle<Code> name(); BUILTIN_LIST_C(DECLARE_BUILTIN_ACCESSOR_C) BUILTIN_LIST_A(DECLARE_BUILTIN_ACCESSOR_A) + BUILTIN_LIST_H(DECLARE_BUILTIN_ACCESSOR_H) BUILTIN_LIST_DEBUG_A(DECLARE_BUILTIN_ACCESSOR_A) #undef DECLARE_BUILTIN_ACCESSOR_C #undef DECLARE_BUILTIN_ACCESSOR_A @@ -391,7 +399,6 @@ class Builtins { static void Generate_NotifyDeoptimized(MacroAssembler* masm); static void Generate_NotifySoftDeoptimized(MacroAssembler* masm); static void Generate_NotifyLazyDeoptimized(MacroAssembler* masm); - static void Generate_NotifyOSR(MacroAssembler* masm); static void Generate_NotifyStubFailure(MacroAssembler* masm); static void Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm); @@ -403,7 +410,7 @@ class Builtins { static void Generate_StringConstructCode(MacroAssembler* masm); static void Generate_OnStackReplacement(MacroAssembler* masm); - + static void Generate_OsrAfterStackCheck(MacroAssembler* masm); static void Generate_InterruptCheck(MacroAssembler* masm); static void Generate_StackCheck(MacroAssembler* masm); @@ -415,6 +422,9 @@ class Builtins { CODE_AGE_LIST(DECLARE_CODE_AGE_BUILTIN_GENERATOR) #undef DECLARE_CODE_AGE_BUILTIN_GENERATOR + static void Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm); + static void Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm); + static void InitBuiltinFunctionTable(); bool initialized_; diff --git a/deps/v8/src/checks.cc b/deps/v8/src/checks.cc index 7108d18892..e08cd7c685 100644 --- a/deps/v8/src/checks.cc +++ b/deps/v8/src/checks.cc @@ -25,11 +25,48 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -#include <stdarg.h> +#include "checks.h" -#include "v8.h" +#if V8_LIBC_GLIBC || V8_OS_BSD +# include <cxxabi.h> +# include <execinfo.h> +#endif // V8_LIBC_GLIBC || V8_OS_BSD +#include <stdio.h> #include "platform.h" +#include "v8.h" + + +// Attempts to dump a backtrace (if supported). +static V8_INLINE void DumpBacktrace() { +#if V8_LIBC_GLIBC || V8_OS_BSD + void* trace[100]; + int size = backtrace(trace, ARRAY_SIZE(trace)); + char** symbols = backtrace_symbols(trace, size); + i::OS::PrintError("\n==== C stack trace ===============================\n\n"); + if (size == 0) { + i::OS::PrintError("(empty)\n"); + } else if (symbols == NULL) { + i::OS::PrintError("(no symbols)\n"); + } else { + for (int i = 1; i < size; ++i) { + i::OS::PrintError("%2d: ", i); + char mangled[201]; + if (sscanf(symbols[i], "%*[^(]%*[(]%200[^)+]", mangled) == 1) { // NOLINT + int status; + size_t length; + char* demangled = abi::__cxa_demangle(mangled, NULL, &length, &status); + i::OS::PrintError("%s\n", demangled != NULL ? demangled : mangled); + free(demangled); + } else { + i::OS::PrintError("??\n"); + } + } + } + free(symbols); +#endif // V8_LIBC_GLIBC || V8_OS_BSD +} + // Contains protection against recursive calls (faults while handling faults). 
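// (DumpBacktrace above is file-static and only does real work where glibc or
// BSD backtrace support is available; the fflush(stderr) added below makes
// sure the trace is fully written out before OS::Abort() ends the process.)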
extern "C" void V8_Fatal(const char* file, int line, const char* format, ...) { @@ -43,7 +80,8 @@ extern "C" void V8_Fatal(const char* file, int line, const char* format, ...) { i::OS::VPrintError(format, arguments); va_end(arguments); i::OS::PrintError("\n#\n"); - i::OS::DumpBacktrace(); + DumpBacktrace(); + fflush(stderr); i::OS::Abort(); } @@ -91,8 +129,6 @@ void API_Fatal(const char* location, const char* format, ...) { namespace v8 { namespace internal { - bool EnableSlowAsserts() { return FLAG_enable_slow_asserts; } - intptr_t HeapObjectTagMask() { return kHeapObjectTagMask; } } } // namespace v8::internal diff --git a/deps/v8/src/checks.h b/deps/v8/src/checks.h index f5c5f232bd..9d2db28d8f 100644 --- a/deps/v8/src/checks.h +++ b/deps/v8/src/checks.h @@ -272,7 +272,24 @@ template <int> class StaticAssertionHelper { }; #endif +#ifdef DEBUG +#ifndef OPTIMIZED_DEBUG +#define ENABLE_SLOW_ASSERTS 1 +#endif +#endif + +namespace v8 { +namespace internal { +#ifdef ENABLE_SLOW_ASSERTS +#define SLOW_ASSERT(condition) \ + CHECK(!v8::internal::FLAG_enable_slow_asserts || (condition)) extern bool FLAG_enable_slow_asserts; +#else +#define SLOW_ASSERT(condition) ((void) 0) +const bool FLAG_enable_slow_asserts = false; +#endif +} // namespace internal +} // namespace v8 // The ASSERT macro is equivalent to CHECK except that it only @@ -285,7 +302,6 @@ extern bool FLAG_enable_slow_asserts; #define ASSERT_GE(v1, v2) CHECK_GE(v1, v2) #define ASSERT_LT(v1, v2) CHECK_LT(v1, v2) #define ASSERT_LE(v1, v2) CHECK_LE(v1, v2) -#define SLOW_ASSERT(condition) CHECK(!FLAG_enable_slow_asserts || (condition)) #else #define ASSERT_RESULT(expr) (expr) #define ASSERT(condition) ((void) 0) @@ -294,7 +310,6 @@ extern bool FLAG_enable_slow_asserts; #define ASSERT_GE(v1, v2) ((void) 0) #define ASSERT_LT(v1, v2) ((void) 0) #define ASSERT_LE(v1, v2) ((void) 0) -#define SLOW_ASSERT(condition) ((void) 0) #endif // Static asserts has no impact on runtime performance, so they can be // safely enabled in release mode. 
Moreover, the ((void) 0) expression diff --git a/deps/v8/src/code-stubs-hydrogen.cc b/deps/v8/src/code-stubs-hydrogen.cc index 23d4269c84..dfa5ecd8cf 100644 --- a/deps/v8/src/code-stubs-hydrogen.cc +++ b/deps/v8/src/code-stubs-hydrogen.cc @@ -146,14 +146,10 @@ bool CodeStubGraphBuilderBase::BuildGraph() { int param_count = descriptor_->register_param_count_; HEnvironment* start_environment = graph()->start_environment(); HBasicBlock* next_block = CreateBasicBlock(start_environment); - current_block()->Goto(next_block); + Goto(next_block); next_block->SetJoinId(BailoutId::StubEntry()); set_current_block(next_block); - HConstant* undefined_constant = - Add<HConstant>(isolate()->factory()->undefined_value()); - graph()->set_undefined_constant(undefined_constant); - for (int i = 0; i < param_count; ++i) { HParameter* param = Add<HParameter>(i, HParameter::REGISTER_PARAMETER); @@ -162,7 +158,7 @@ bool CodeStubGraphBuilderBase::BuildGraph() { } HInstruction* stack_parameter_count; - if (descriptor_->stack_parameter_count_ != NULL) { + if (descriptor_->stack_parameter_count_.is_valid()) { ASSERT(descriptor_->environment_length() == (param_count + 1)); stack_parameter_count = New<HParameter>(param_count, HParameter::REGISTER_PARAMETER, @@ -178,8 +174,7 @@ bool CodeStubGraphBuilderBase::BuildGraph() { arguments_length_ = graph()->GetConstant0(); } - context_ = New<HContext>(); - AddInstruction(context_); + context_ = Add<HContext>(); start_environment->BindContext(context_); Add<HSimulate>(BailoutId::StubEntry()); @@ -207,8 +202,7 @@ bool CodeStubGraphBuilderBase::BuildGraph() { if (current_block() != NULL) { HReturn* hreturn_instruction = New<HReturn>(return_value, stack_pop_count); - current_block()->Finish(hreturn_instruction); - set_current_block(NULL); + FinishCurrentBlock(hreturn_instruction); } return true; } @@ -298,12 +292,21 @@ static Handle<Code> DoGenerateCode(Isolate* isolate, Stub* stub) { // the runtime that is significantly faster than using the standard // stub-failure deopt mechanism. 
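// (An uninitialized stub has no type feedback to specialize on, so instead
// of building a full Hydrogen graph it can be emitted as a minimal
// trampoline into its miss handler; GenerateLightweightMissCode below is
// that fast path.)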
if (stub->IsUninitialized() && descriptor->has_miss_handler()) { - ASSERT(descriptor->stack_parameter_count_ == NULL); + ASSERT(!descriptor->stack_parameter_count_.is_valid()); return stub->GenerateLightweightMissCode(isolate); } + ElapsedTimer timer; + if (FLAG_profile_hydrogen_code_stub_compilation) { + timer.Start(); + } CodeStubGraphBuilder<Stub> builder(isolate, stub); LChunk* chunk = OptimizeGraph(builder.CreateGraph()); - return chunk->Codegen(); + Handle<Code> code = chunk->Codegen(); + if (FLAG_profile_hydrogen_code_stub_compilation) { + double ms = timer.Elapsed().InMillisecondsF(); + PrintF("[Lazy compilation of %s took %0.3f ms]\n", *stub->GetName(), ms); + } + return code; } @@ -339,6 +342,19 @@ Handle<Code> ToNumberStub::GenerateCode(Isolate* isolate) { template <> +HValue* CodeStubGraphBuilder<NumberToStringStub>::BuildCodeStub() { + info()->MarkAsSavesCallerDoubles(); + HValue* number = GetParameter(NumberToStringStub::kNumber); + return BuildNumberToString(number, handle(Type::Number(), isolate())); +} + + +Handle<Code> NumberToStringStub::GenerateCode(Isolate* isolate) { + return DoGenerateCode(isolate, this); +} + + +template <> HValue* CodeStubGraphBuilder<FastCloneShallowArrayStub>::BuildCodeStub() { Factory* factory = isolate()->factory(); HValue* undefined = graph()->GetConstantUndefined(); @@ -355,42 +371,48 @@ HValue* CodeStubGraphBuilder<FastCloneShallowArrayStub>::BuildCodeStub() { undefined); checker.Then(); - HObjectAccess access = HObjectAccess::ForAllocationSiteTransitionInfo(); + HObjectAccess access = HObjectAccess::ForAllocationSiteOffset( + AllocationSite::kTransitionInfoOffset); HInstruction* boilerplate = Add<HLoadNamedField>(allocation_site, access); + HValue* push_value; if (mode == FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS) { HValue* elements = AddLoadElements(boilerplate); IfBuilder if_fixed_cow(this); if_fixed_cow.If<HCompareMap>(elements, factory->fixed_cow_array_map()); if_fixed_cow.Then(); - environment()->Push(BuildCloneShallowArray(boilerplate, - allocation_site, - alloc_site_mode, - FAST_ELEMENTS, - 0/*copy-on-write*/)); + push_value = BuildCloneShallowArray(boilerplate, + allocation_site, + alloc_site_mode, + FAST_ELEMENTS, + 0/*copy-on-write*/); + environment()->Push(push_value); if_fixed_cow.Else(); IfBuilder if_fixed(this); if_fixed.If<HCompareMap>(elements, factory->fixed_array_map()); if_fixed.Then(); - environment()->Push(BuildCloneShallowArray(boilerplate, - allocation_site, - alloc_site_mode, - FAST_ELEMENTS, - length)); + push_value = BuildCloneShallowArray(boilerplate, + allocation_site, + alloc_site_mode, + FAST_ELEMENTS, + length); + environment()->Push(push_value); if_fixed.Else(); - environment()->Push(BuildCloneShallowArray(boilerplate, - allocation_site, - alloc_site_mode, - FAST_DOUBLE_ELEMENTS, - length)); + push_value = BuildCloneShallowArray(boilerplate, + allocation_site, + alloc_site_mode, + FAST_DOUBLE_ELEMENTS, + length); + environment()->Push(push_value); } else { ElementsKind elements_kind = casted_stub()->ComputeElementsKind(); - environment()->Push(BuildCloneShallowArray(boilerplate, - allocation_site, - alloc_site_mode, - elements_kind, - length)); + push_value = BuildCloneShallowArray(boilerplate, + allocation_site, + alloc_site_mode, + elements_kind, + length); + environment()->Push(push_value); } checker.ElseDeopt("Uninitialized boilerplate literals"); @@ -407,23 +429,33 @@ Handle<Code> FastCloneShallowArrayStub::GenerateCode(Isolate* isolate) { template <> HValue* 
CodeStubGraphBuilder<FastCloneShallowObjectStub>::BuildCodeStub() { - Zone* zone = this->zone(); HValue* undefined = graph()->GetConstantUndefined(); - HInstruction* boilerplate = Add<HLoadKeyed>(GetParameter(0), - GetParameter(1), - static_cast<HValue*>(NULL), - FAST_ELEMENTS); + HInstruction* allocation_site = Add<HLoadKeyed>(GetParameter(0), + GetParameter(1), + static_cast<HValue*>(NULL), + FAST_ELEMENTS); IfBuilder checker(this); - checker.IfNot<HCompareObjectEqAndBranch, HValue*>(boilerplate, + checker.IfNot<HCompareObjectEqAndBranch, HValue*>(allocation_site, undefined); checker.And(); + HObjectAccess access = HObjectAccess::ForAllocationSiteOffset( + AllocationSite::kTransitionInfoOffset); + HInstruction* boilerplate = Add<HLoadNamedField>(allocation_site, access); + int size = JSObject::kHeaderSize + casted_stub()->length() * kPointerSize; - HValue* boilerplate_size = - AddInstruction(new(zone) HInstanceSize(boilerplate)); - HValue* size_in_words = Add<HConstant>(size >> kPointerSizeLog2); + int object_size = size; + if (FLAG_allocation_site_pretenuring) { + size += AllocationMemento::kSize; + } + + HValue* boilerplate_map = Add<HLoadNamedField>( + boilerplate, HObjectAccess::ForMap()); + HValue* boilerplate_size = Add<HLoadNamedField>( + boilerplate_map, HObjectAccess::ForMapInstanceSize()); + HValue* size_in_words = Add<HConstant>(object_size >> kPointerSizeLog2); checker.If<HCompareNumericAndBranch>(boilerplate_size, size_in_words, Token::EQ); checker.Then(); @@ -433,12 +465,17 @@ HValue* CodeStubGraphBuilder<FastCloneShallowObjectStub>::BuildCodeStub() { HInstruction* object = Add<HAllocate>(size_in_bytes, HType::JSObject(), isolate()->heap()->GetPretenureMode(), JS_OBJECT_TYPE); - for (int i = 0; i < size; i += kPointerSize) { + for (int i = 0; i < object_size; i += kPointerSize) { HObjectAccess access = HObjectAccess::ForJSObjectOffset(i); Add<HStoreNamedField>(object, access, Add<HLoadNamedField>(boilerplate, access)); } + ASSERT(FLAG_allocation_site_pretenuring || (size == object_size)); + if (FLAG_allocation_site_pretenuring) { + BuildCreateAllocationMemento(object, object_size, allocation_site); + } + environment()->Push(object); checker.ElseDeopt("Uninitialized boilerplate in fast clone"); checker.End(); @@ -459,24 +496,39 @@ HValue* CodeStubGraphBuilder<CreateAllocationSiteStub>::BuildCodeStub() { JS_OBJECT_TYPE); // Store the map - Handle<Map> allocation_site_map(isolate()->heap()->allocation_site_map(), - isolate()); + Handle<Map> allocation_site_map = isolate()->factory()->allocation_site_map(); AddStoreMapConstant(object, allocation_site_map); // Store the payload (smi elements kind) HValue* initial_elements_kind = Add<HConstant>(GetInitialFastElementsKind()); Add<HStoreNamedField>(object, - HObjectAccess::ForAllocationSiteTransitionInfo(), + HObjectAccess::ForAllocationSiteOffset( + AllocationSite::kTransitionInfoOffset), initial_elements_kind); + // Unlike literals, constructed arrays don't have nested sites + Add<HStoreNamedField>(object, + HObjectAccess::ForAllocationSiteOffset( + AllocationSite::kNestedSiteOffset), + graph()->GetConstant0()); + + // Store an empty fixed array for the code dependency. 
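+ // (Optimized code that embeds assumptions about this site registers itself
+ // in this list so it can be deoptimized if the site later transitions; the
+ // canonical empty_fixed_array is the cheapest valid initial value.)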
+ HConstant* empty_fixed_array = + Add<HConstant>(isolate()->factory()->empty_fixed_array()); + HStoreNamedField* store = Add<HStoreNamedField>( + object, + HObjectAccess::ForAllocationSiteOffset( + AllocationSite::kDependentCodeOffset), + empty_fixed_array); + // Link the object to the allocation site list HValue* site_list = Add<HConstant>( ExternalReference::allocation_sites_list_address(isolate())); HValue* site = Add<HLoadNamedField>(site_list, HObjectAccess::ForAllocationSiteList()); - HStoreNamedField* store = - Add<HStoreNamedField>(object, HObjectAccess::ForAllocationSiteWeakNext(), - site); + store = Add<HStoreNamedField>(object, + HObjectAccess::ForAllocationSiteOffset(AllocationSite::kWeakNextOffset), + site); store->SkipWriteBarrier(); Add<HStoreNamedField>(site_list, HObjectAccess::ForAllocationSiteList(), object); @@ -519,7 +571,7 @@ HValue* CodeStubGraphBuilder<LoadFieldStub>::BuildCodeStub() { HObjectAccess access = casted_stub()->is_inobject() ? HObjectAccess::ForJSObjectOffset(casted_stub()->offset(), rep) : HObjectAccess::ForBackingStoreOffset(casted_stub()->offset(), rep); - return AddInstruction(BuildLoadNamedField(GetParameter(0), access)); + return AddLoadNamedField(GetParameter(0), access); } @@ -534,7 +586,7 @@ HValue* CodeStubGraphBuilder<KeyedLoadFieldStub>::BuildCodeStub() { HObjectAccess access = casted_stub()->is_inobject() ? HObjectAccess::ForJSObjectOffset(casted_stub()->offset(), rep) : HObjectAccess::ForBackingStoreOffset(casted_stub()->offset(), rep); - return AddInstruction(BuildLoadNamedField(GetParameter(0), access)); + return AddLoadNamedField(GetParameter(0), access); } @@ -640,14 +692,13 @@ HValue* CodeStubGraphBuilderBase::BuildArraySingleArgumentConstructor( HValue* constant_zero = graph()->GetConstant0(); HInstruction* elements = Add<HArgumentsElements>(false); - HInstruction* argument = AddInstruction( - new(zone()) HAccessArgumentsAt(elements, constant_one, constant_zero)); + HInstruction* argument = Add<HAccessArgumentsAt>( + elements, constant_one, constant_zero); HConstant* max_alloc_length = Add<HConstant>(JSObject::kInitialMaxFastElementArray); const int initial_capacity = JSArray::kPreallocatedArrayElements; - HConstant* initial_capacity_node = New<HConstant>(initial_capacity); - AddInstruction(initial_capacity_node); + HConstant* initial_capacity_node = Add<HConstant>(initial_capacity); HInstruction* checked_arg = Add<HBoundsCheck>(argument, max_alloc_length); IfBuilder if_builder(this); @@ -690,8 +741,8 @@ HValue* CodeStubGraphBuilderBase::BuildArrayNArgumentsConstructor( HValue* start = graph()->GetConstant0(); HValue* key = builder.BeginBody(start, length, Token::LT); HInstruction* argument_elements = Add<HArgumentsElements>(false); - HInstruction* argument = AddInstruction(new(zone()) HAccessArgumentsAt( - argument_elements, length, key)); + HInstruction* argument = Add<HAccessArgumentsAt>( + argument_elements, length, key); Add<HStoreKeyed>(elements, key, argument, kind); builder.EndBody(); @@ -792,7 +843,7 @@ HValue* CodeStubGraphBuilder<CompareNilICStub>::BuildCodeInitializedStub() { HIfContinuation continuation; Handle<Map> sentinel_map(isolate->heap()->meta_map()); Handle<Type> type = stub->GetType(isolate, sentinel_map); - BuildCompareNil(GetParameter(0), type, RelocInfo::kNoPosition, &continuation); + BuildCompareNil(GetParameter(0), type, &continuation); IfBuilder if_nil(this, &continuation); if_nil.Then(); if (continuation.IsFalseReachable()) { @@ -812,6 +863,115 @@ Handle<Code> CompareNilICStub::GenerateCode(Isolate* 
isolate) { template <> +HValue* CodeStubGraphBuilder<BinaryOpStub>::BuildCodeInitializedStub() { + BinaryOpStub* stub = casted_stub(); + HValue* left = GetParameter(0); + HValue* right = GetParameter(1); + + Handle<Type> left_type = stub->GetLeftType(isolate()); + Handle<Type> right_type = stub->GetRightType(isolate()); + Handle<Type> result_type = stub->GetResultType(isolate()); + + ASSERT(!left_type->Is(Type::None()) && !right_type->Is(Type::None()) && + (stub->HasSideEffects(isolate()) || !result_type->Is(Type::None()))); + + HValue* result = NULL; + if (stub->operation() == Token::ADD && + (left_type->Maybe(Type::String()) || right_type->Maybe(Type::String())) && + !left_type->Is(Type::String()) && !right_type->Is(Type::String())) { + // For the generic add stub, a fast case for string addition is performance + // critical. + if (left_type->Maybe(Type::String())) { + IfBuilder if_leftisstring(this); + if_leftisstring.If<HIsStringAndBranch>(left); + if_leftisstring.Then(); + { + Push(AddInstruction(BuildBinaryOperation( + stub->operation(), left, right, + handle(Type::String(), isolate()), right_type, + result_type, stub->fixed_right_arg(), true))); + } + if_leftisstring.Else(); + { + Push(AddInstruction(BuildBinaryOperation( + stub->operation(), left, right, + left_type, right_type, result_type, + stub->fixed_right_arg(), true))); + } + if_leftisstring.End(); + result = Pop(); + } else { + IfBuilder if_rightisstring(this); + if_rightisstring.If<HIsStringAndBranch>(right); + if_rightisstring.Then(); + { + Push(AddInstruction(BuildBinaryOperation( + stub->operation(), left, right, + left_type, handle(Type::String(), isolate()), + result_type, stub->fixed_right_arg(), true))); + } + if_rightisstring.Else(); + { + Push(AddInstruction(BuildBinaryOperation( + stub->operation(), left, right, + left_type, right_type, result_type, + stub->fixed_right_arg(), true))); + } + if_rightisstring.End(); + result = Pop(); + } + } else { + result = AddInstruction(BuildBinaryOperation( + stub->operation(), left, right, + left_type, right_type, result_type, + stub->fixed_right_arg(), true)); + } + + // If we encounter a generic argument, the number conversion is + // observable, so we cannot afford to bail out after the fact. + if (!stub->HasSideEffects(isolate())) { + if (result_type->Is(Type::Smi())) { + if (stub->operation() == Token::SHR) { + // TODO(olivf) Replace this by a SmiTagU Instruction. + // 0x40000000: this number would convert to negative when interpreting + // the register as a signed value. + IfBuilder if_of(this); + if_of.IfNot<HCompareNumericAndBranch>(result, + Add<HConstant>(static_cast<int>(SmiValuesAre32Bits() + ? 0x80000000 : 0x40000000)), Token::EQ_STRICT); + if_of.Then(); + if_of.ElseDeopt("UInt->Smi overflow"); + if_of.End(); + } + } + result = EnforceNumberType(result, result_type); + } + + // Reuse the double box of one of the operands if we are allowed to (i.e. + // chained binops). + if (stub->CanReuseDoubleBox()) { + HValue* operand = (stub->mode() == OVERWRITE_LEFT) ?
left : right; + IfBuilder if_heap_number(this); + if_heap_number.IfNot<HIsSmiAndBranch>(operand); + if_heap_number.Then(); + Add<HStoreNamedField>(operand, HObjectAccess::ForHeapNumberValue(), result); + Push(operand); + if_heap_number.Else(); + Push(result); + if_heap_number.End(); + result = Pop(); + } + + return result; +} + + +Handle<Code> BinaryOpStub::GenerateCode(Isolate* isolate) { + return DoGenerateCode(isolate, this); +} + + +template <> HValue* CodeStubGraphBuilder<ToBooleanStub>::BuildCodeInitializedStub() { ToBooleanStub* stub = casted_stub(); @@ -918,8 +1078,7 @@ void CodeStubGraphBuilderBase::BuildInstallOptimizedCode( HValue* native_context, HValue* code_object) { Counters* counters = isolate()->counters(); - AddIncrementCounter(counters->fast_new_closure_install_optimized(), - context()); + AddIncrementCounter(counters->fast_new_closure_install_optimized()); // TODO(fschneider): Idea: store proper code pointers in the optimized code // map and either unmangle them on marking or do nothing as the whole map is @@ -967,7 +1126,7 @@ void CodeStubGraphBuilderBase::BuildInstallFromOptimizedCodeMap( } is_optimized.Else(); { - AddIncrementCounter(counters->fast_new_closure_try_optimized(), context()); + AddIncrementCounter(counters->fast_new_closure_try_optimized()); // optimized_map points to fixed array of 3-element entries // (native context, optimized code, literals). // Map must never be empty, so check the first elements. @@ -1012,8 +1171,8 @@ void CodeStubGraphBuilderBase::BuildInstallFromOptimizedCodeMap( } restore_check.Else(); { - HValue* keyed_minus = AddInstruction(HSub::New(zone(), context(), key, - shared_function_entry_length)); + HValue* keyed_minus = AddUncasted<HSub>( + key, shared_function_entry_length); HInstruction* keyed_lookup = Add<HLoadKeyed>(optimized_map, keyed_minus, static_cast<HValue*>(NULL), FAST_ELEMENTS); IfBuilder done_check(this); @@ -1022,8 +1181,8 @@ void CodeStubGraphBuilderBase::BuildInstallFromOptimizedCodeMap( done_check.Then(); { // Hit: fetch the optimized code. 
- HValue* keyed_plus = AddInstruction(HAdd::New(zone(), context(), - keyed_minus, graph()->GetConstant1())); + HValue* keyed_plus = AddUncasted<HAdd>( + keyed_minus, graph()->GetConstant1()); HValue* code_object = Add<HLoadKeyed>(optimized_map, keyed_plus, static_cast<HValue*>(NULL), FAST_ELEMENTS); BuildInstallOptimizedCode(js_function, native_context, code_object); @@ -1052,11 +1211,12 @@ HValue* CodeStubGraphBuilder<FastNewClosureStub>::BuildCodeStub() { Add<HConstant>(factory->empty_fixed_array()); HValue* shared_info = GetParameter(0); + AddIncrementCounter(counters->fast_new_closure_total()); + // Create a new closure from the given function info in new space HValue* size = Add<HConstant>(JSFunction::kSize); HInstruction* js_function = Add<HAllocate>(size, HType::JSObject(), NOT_TENURED, JS_FUNCTION_TYPE); - AddIncrementCounter(counters->fast_new_closure_total(), context()); int map_index = Context::FunctionMapIndex(casted_stub()->language_mode(), casted_stub()->is_generator()); diff --git a/deps/v8/src/code-stubs.cc b/deps/v8/src/code-stubs.cc index ace4af42a9..e68a5dd0c8 100644 --- a/deps/v8/src/code-stubs.cc +++ b/deps/v8/src/code-stubs.cc @@ -41,7 +41,7 @@ namespace internal { CodeStubInterfaceDescriptor::CodeStubInterfaceDescriptor() : register_param_count_(-1), - stack_parameter_count_(NULL), + stack_parameter_count_(no_reg), hint_stack_parameter_count_(-1), function_mode_(NOT_JS_FUNCTION_STUB_MODE), register_params_(NULL), @@ -129,6 +129,11 @@ Handle<Code> PlatformCodeStub::GenerateCode(Isolate* isolate) { } +void CodeStub::VerifyPlatformFeatures(Isolate* isolate) { + ASSERT(CpuFeatures::VerifyCrossCompiling()); +} + + Handle<Code> CodeStub::GetCode(Isolate* isolate) { Factory* factory = isolate->factory(); Heap* heap = isolate->heap(); @@ -137,9 +142,14 @@ Handle<Code> CodeStub::GetCode(Isolate* isolate) { ? FindCodeInSpecialCache(&code, isolate) : FindCodeInCache(&code, isolate)) { ASSERT(IsPregenerated(isolate) == code->is_pregenerated()); + ASSERT(GetCodeKind() == code->kind()); return Handle<Code>(code); } +#ifdef DEBUG + VerifyPlatformFeatures(isolate); +#endif + { HandleScope scope(isolate); @@ -203,119 +213,471 @@ void CodeStub::PrintName(StringStream* stream) { } -void BinaryOpStub::Generate(MacroAssembler* masm) { - // Explicitly allow generation of nested stubs. It is safe here because - // generation code does not use any raw pointers. - AllowStubCallsScope allow_stub_calls(masm, true); +void BinaryOpStub::PrintBaseName(StringStream* stream) { + const char* op_name = Token::Name(op_); + const char* ovr = ""; + if (mode_ == OVERWRITE_LEFT) ovr = "_ReuseLeft"; + if (mode_ == OVERWRITE_RIGHT) ovr = "_ReuseRight"; + stream->Add("BinaryOpStub_%s%s", op_name, ovr); +} + - BinaryOpIC::TypeInfo operands_type = Max(left_type_, right_type_); - if (left_type_ == BinaryOpIC::ODDBALL && right_type_ == BinaryOpIC::ODDBALL) { - // The OddballStub handles a number and an oddball, not two oddballs. 
- operands_type = BinaryOpIC::GENERIC; +void BinaryOpStub::PrintState(StringStream* stream) { + stream->Add("("); + stream->Add(StateToName(left_state_)); + stream->Add("*"); + if (fixed_right_arg_.has_value) { + stream->Add("%d", fixed_right_arg_.value); + } else { + stream->Add(StateToName(right_state_)); } - switch (operands_type) { - case BinaryOpIC::UNINITIALIZED: - GenerateTypeTransition(masm); - break; - case BinaryOpIC::SMI: - GenerateSmiStub(masm); - break; - case BinaryOpIC::INT32: - GenerateInt32Stub(masm); - break; - case BinaryOpIC::NUMBER: - GenerateNumberStub(masm); - break; - case BinaryOpIC::ODDBALL: - GenerateOddballStub(masm); - break; - case BinaryOpIC::STRING: - GenerateStringStub(masm); - break; - case BinaryOpIC::GENERIC: - GenerateGeneric(masm); - break; - default: - UNREACHABLE(); + stream->Add("->"); + stream->Add(StateToName(result_state_)); + stream->Add(")"); +} + + +Maybe<Handle<Object> > BinaryOpStub::Result(Handle<Object> left, + Handle<Object> right, + Isolate* isolate) { + Handle<JSBuiltinsObject> builtins(isolate->js_builtins_object()); + Builtins::JavaScript func = BinaryOpIC::TokenToJSBuiltin(op_); + Object* builtin = builtins->javascript_builtin(func); + Handle<JSFunction> builtin_function = + Handle<JSFunction>(JSFunction::cast(builtin), isolate); + bool caught_exception; + Handle<Object> result = Execution::Call(isolate, builtin_function, left, + 1, &right, &caught_exception); + return Maybe<Handle<Object> >(!caught_exception, result); +} + + +void BinaryOpStub::Initialize() { + fixed_right_arg_.has_value = false; + left_state_ = right_state_ = result_state_ = NONE; +} + + +void BinaryOpStub::Generate(Token::Value op, + State left, + State right, + State result, + OverwriteMode mode, + Isolate* isolate) { + BinaryOpStub stub(INITIALIZED); + stub.op_ = op; + stub.left_state_ = left; + stub.right_state_ = right; + stub.result_state_ = result; + stub.mode_ = mode; + stub.GetCode(isolate); +} + + +void BinaryOpStub::Generate(Token::Value op, + State left, + int right, + State result, + OverwriteMode mode, + Isolate* isolate) { + BinaryOpStub stub(INITIALIZED); + stub.op_ = op; + stub.left_state_ = left; + stub.fixed_right_arg_.has_value = true; + stub.fixed_right_arg_.value = right; + stub.right_state_ = SMI; + stub.result_state_ = result; + stub.mode_ = mode; + stub.GetCode(isolate); +} + + +void BinaryOpStub::GenerateAheadOfTime(Isolate* isolate) { + Token::Value binop[] = {Token::SUB, Token::MOD, Token::DIV, Token::MUL, + Token::ADD, Token::SAR, Token::BIT_OR, Token::BIT_AND, + Token::BIT_XOR, Token::SHL, Token::SHR}; + for (unsigned i = 0; i < ARRAY_SIZE(binop); i++) { + BinaryOpStub stub(UNINITIALIZED); + stub.op_ = binop[i]; + stub.GetCode(isolate); } + + // TODO(olivf) We should investigate why adding stubs to the snapshot is so + // expensive at runtime. When solved we should be able to add most binops to + // the snapshot instead of hand-picking them. 
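+ // (Each Generate() call below pins one (op, left, right, result, mode)
+ // specialization so it is compiled now and shipped in the snapshot rather
+ // than lazily at runtime. Entries with a literal right operand, e.g.
+ // Generate(Token::MOD, SMI, 2048, SMI, NO_OVERWRITE, isolate), use the
+ // fixed-right-arg encoding defined further down: the power-of-two value
+ // is stored as WhichPowerOf2(2048) == 11 and decoded back as 1 << 11.)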
+ // Generated list of commonly used stubs + Generate(Token::ADD, INT32, INT32, INT32, NO_OVERWRITE, isolate); + Generate(Token::ADD, INT32, INT32, INT32, OVERWRITE_LEFT, isolate); + Generate(Token::ADD, INT32, INT32, NUMBER, NO_OVERWRITE, isolate); + Generate(Token::ADD, INT32, INT32, NUMBER, OVERWRITE_LEFT, isolate); + Generate(Token::ADD, INT32, NUMBER, NUMBER, NO_OVERWRITE, isolate); + Generate(Token::ADD, INT32, NUMBER, NUMBER, OVERWRITE_LEFT, isolate); + Generate(Token::ADD, INT32, NUMBER, NUMBER, OVERWRITE_RIGHT, isolate); + Generate(Token::ADD, INT32, SMI, INT32, NO_OVERWRITE, isolate); + Generate(Token::ADD, INT32, SMI, INT32, OVERWRITE_LEFT, isolate); + Generate(Token::ADD, INT32, SMI, INT32, OVERWRITE_RIGHT, isolate); + Generate(Token::ADD, NUMBER, INT32, NUMBER, NO_OVERWRITE, isolate); + Generate(Token::ADD, NUMBER, INT32, NUMBER, OVERWRITE_LEFT, isolate); + Generate(Token::ADD, NUMBER, INT32, NUMBER, OVERWRITE_RIGHT, isolate); + Generate(Token::ADD, NUMBER, NUMBER, NUMBER, NO_OVERWRITE, isolate); + Generate(Token::ADD, NUMBER, NUMBER, NUMBER, OVERWRITE_LEFT, isolate); + Generate(Token::ADD, NUMBER, NUMBER, NUMBER, OVERWRITE_RIGHT, isolate); + Generate(Token::ADD, NUMBER, SMI, NUMBER, NO_OVERWRITE, isolate); + Generate(Token::ADD, NUMBER, SMI, NUMBER, OVERWRITE_LEFT, isolate); + Generate(Token::ADD, NUMBER, SMI, NUMBER, OVERWRITE_RIGHT, isolate); + Generate(Token::ADD, SMI, INT32, INT32, NO_OVERWRITE, isolate); + Generate(Token::ADD, SMI, INT32, INT32, OVERWRITE_LEFT, isolate); + Generate(Token::ADD, SMI, INT32, NUMBER, NO_OVERWRITE, isolate); + Generate(Token::ADD, SMI, NUMBER, NUMBER, NO_OVERWRITE, isolate); + Generate(Token::ADD, SMI, NUMBER, NUMBER, OVERWRITE_LEFT, isolate); + Generate(Token::ADD, SMI, NUMBER, NUMBER, OVERWRITE_RIGHT, isolate); + Generate(Token::ADD, SMI, SMI, INT32, OVERWRITE_LEFT, isolate); + Generate(Token::ADD, SMI, SMI, SMI, OVERWRITE_RIGHT, isolate); + Generate(Token::BIT_AND, INT32, INT32, INT32, NO_OVERWRITE, isolate); + Generate(Token::BIT_AND, INT32, INT32, INT32, OVERWRITE_LEFT, isolate); + Generate(Token::BIT_AND, INT32, INT32, INT32, OVERWRITE_RIGHT, isolate); + Generate(Token::BIT_AND, INT32, INT32, SMI, NO_OVERWRITE, isolate); + Generate(Token::BIT_AND, INT32, INT32, SMI, OVERWRITE_RIGHT, isolate); + Generate(Token::BIT_AND, INT32, SMI, INT32, NO_OVERWRITE, isolate); + Generate(Token::BIT_AND, INT32, SMI, INT32, OVERWRITE_RIGHT, isolate); + Generate(Token::BIT_AND, INT32, SMI, SMI, NO_OVERWRITE, isolate); + Generate(Token::BIT_AND, INT32, SMI, SMI, OVERWRITE_LEFT, isolate); + Generate(Token::BIT_AND, INT32, SMI, SMI, OVERWRITE_RIGHT, isolate); + Generate(Token::BIT_AND, NUMBER, INT32, INT32, OVERWRITE_RIGHT, isolate); + Generate(Token::BIT_AND, NUMBER, SMI, SMI, NO_OVERWRITE, isolate); + Generate(Token::BIT_AND, NUMBER, SMI, SMI, OVERWRITE_RIGHT, isolate); + Generate(Token::BIT_AND, SMI, INT32, INT32, NO_OVERWRITE, isolate); + Generate(Token::BIT_AND, SMI, INT32, SMI, OVERWRITE_RIGHT, isolate); + Generate(Token::BIT_AND, SMI, NUMBER, SMI, OVERWRITE_RIGHT, isolate); + Generate(Token::BIT_AND, SMI, SMI, SMI, NO_OVERWRITE, isolate); + Generate(Token::BIT_AND, SMI, SMI, SMI, OVERWRITE_LEFT, isolate); + Generate(Token::BIT_AND, SMI, SMI, SMI, OVERWRITE_RIGHT, isolate); + Generate(Token::BIT_OR, INT32, INT32, INT32, OVERWRITE_LEFT, isolate); + Generate(Token::BIT_OR, INT32, INT32, INT32, OVERWRITE_RIGHT, isolate); + Generate(Token::BIT_OR, INT32, INT32, SMI, OVERWRITE_LEFT, isolate); + Generate(Token::BIT_OR, INT32, SMI, INT32, NO_OVERWRITE, 
isolate); + Generate(Token::BIT_OR, INT32, SMI, INT32, OVERWRITE_LEFT, isolate); + Generate(Token::BIT_OR, INT32, SMI, INT32, OVERWRITE_RIGHT, isolate); + Generate(Token::BIT_OR, INT32, SMI, SMI, NO_OVERWRITE, isolate); + Generate(Token::BIT_OR, INT32, SMI, SMI, OVERWRITE_RIGHT, isolate); + Generate(Token::BIT_OR, NUMBER, SMI, INT32, NO_OVERWRITE, isolate); + Generate(Token::BIT_OR, NUMBER, SMI, INT32, OVERWRITE_LEFT, isolate); + Generate(Token::BIT_OR, NUMBER, SMI, INT32, OVERWRITE_RIGHT, isolate); + Generate(Token::BIT_OR, NUMBER, SMI, SMI, NO_OVERWRITE, isolate); + Generate(Token::BIT_OR, NUMBER, SMI, SMI, OVERWRITE_LEFT, isolate); + Generate(Token::BIT_OR, SMI, INT32, INT32, OVERWRITE_LEFT, isolate); + Generate(Token::BIT_OR, SMI, INT32, INT32, OVERWRITE_RIGHT, isolate); + Generate(Token::BIT_OR, SMI, INT32, SMI, OVERWRITE_RIGHT, isolate); + Generate(Token::BIT_OR, SMI, SMI, SMI, OVERWRITE_LEFT, isolate); + Generate(Token::BIT_OR, SMI, SMI, SMI, OVERWRITE_RIGHT, isolate); + Generate(Token::BIT_XOR, INT32, INT32, INT32, NO_OVERWRITE, isolate); + Generate(Token::BIT_XOR, INT32, INT32, INT32, OVERWRITE_LEFT, isolate); + Generate(Token::BIT_XOR, INT32, INT32, INT32, OVERWRITE_RIGHT, isolate); + Generate(Token::BIT_XOR, INT32, INT32, SMI, NO_OVERWRITE, isolate); + Generate(Token::BIT_XOR, INT32, INT32, SMI, OVERWRITE_LEFT, isolate); + Generate(Token::BIT_XOR, INT32, NUMBER, SMI, NO_OVERWRITE, isolate); + Generate(Token::BIT_XOR, INT32, SMI, INT32, NO_OVERWRITE, isolate); + Generate(Token::BIT_XOR, INT32, SMI, INT32, OVERWRITE_LEFT, isolate); + Generate(Token::BIT_XOR, INT32, SMI, INT32, OVERWRITE_RIGHT, isolate); + Generate(Token::BIT_XOR, NUMBER, INT32, INT32, NO_OVERWRITE, isolate); + Generate(Token::BIT_XOR, NUMBER, SMI, INT32, NO_OVERWRITE, isolate); + Generate(Token::BIT_XOR, NUMBER, SMI, SMI, NO_OVERWRITE, isolate); + Generate(Token::BIT_XOR, SMI, INT32, INT32, NO_OVERWRITE, isolate); + Generate(Token::BIT_XOR, SMI, INT32, INT32, OVERWRITE_LEFT, isolate); + Generate(Token::BIT_XOR, SMI, INT32, SMI, OVERWRITE_LEFT, isolate); + Generate(Token::BIT_XOR, SMI, SMI, SMI, NO_OVERWRITE, isolate); + Generate(Token::BIT_XOR, SMI, SMI, SMI, OVERWRITE_LEFT, isolate); + Generate(Token::BIT_XOR, SMI, SMI, SMI, OVERWRITE_RIGHT, isolate); + Generate(Token::DIV, INT32, INT32, INT32, NO_OVERWRITE, isolate); + Generate(Token::DIV, INT32, INT32, NUMBER, NO_OVERWRITE, isolate); + Generate(Token::DIV, INT32, NUMBER, NUMBER, NO_OVERWRITE, isolate); + Generate(Token::DIV, INT32, NUMBER, NUMBER, OVERWRITE_LEFT, isolate); + Generate(Token::DIV, INT32, SMI, INT32, NO_OVERWRITE, isolate); + Generate(Token::DIV, INT32, SMI, NUMBER, NO_OVERWRITE, isolate); + Generate(Token::DIV, NUMBER, INT32, NUMBER, NO_OVERWRITE, isolate); + Generate(Token::DIV, NUMBER, INT32, NUMBER, OVERWRITE_LEFT, isolate); + Generate(Token::DIV, NUMBER, NUMBER, NUMBER, NO_OVERWRITE, isolate); + Generate(Token::DIV, NUMBER, NUMBER, NUMBER, OVERWRITE_LEFT, isolate); + Generate(Token::DIV, NUMBER, NUMBER, NUMBER, OVERWRITE_RIGHT, isolate); + Generate(Token::DIV, NUMBER, SMI, NUMBER, NO_OVERWRITE, isolate); + Generate(Token::DIV, NUMBER, SMI, NUMBER, OVERWRITE_LEFT, isolate); + Generate(Token::DIV, SMI, INT32, INT32, NO_OVERWRITE, isolate); + Generate(Token::DIV, SMI, INT32, NUMBER, NO_OVERWRITE, isolate); + Generate(Token::DIV, SMI, INT32, NUMBER, OVERWRITE_LEFT, isolate); + Generate(Token::DIV, SMI, NUMBER, NUMBER, NO_OVERWRITE, isolate); + Generate(Token::DIV, SMI, NUMBER, NUMBER, OVERWRITE_LEFT, isolate); + Generate(Token::DIV, SMI, NUMBER, 
NUMBER, OVERWRITE_RIGHT, isolate); + Generate(Token::DIV, SMI, SMI, NUMBER, NO_OVERWRITE, isolate); + Generate(Token::DIV, SMI, SMI, NUMBER, OVERWRITE_LEFT, isolate); + Generate(Token::DIV, SMI, SMI, NUMBER, OVERWRITE_RIGHT, isolate); + Generate(Token::DIV, SMI, SMI, SMI, NO_OVERWRITE, isolate); + Generate(Token::DIV, SMI, SMI, SMI, OVERWRITE_LEFT, isolate); + Generate(Token::DIV, SMI, SMI, SMI, OVERWRITE_RIGHT, isolate); + Generate(Token::MOD, NUMBER, SMI, NUMBER, OVERWRITE_LEFT, isolate); + Generate(Token::MOD, SMI, 16, SMI, OVERWRITE_LEFT, isolate); + Generate(Token::MOD, SMI, 2, SMI, NO_OVERWRITE, isolate); + Generate(Token::MOD, SMI, 2048, SMI, NO_OVERWRITE, isolate); + Generate(Token::MOD, SMI, 32, SMI, NO_OVERWRITE, isolate); + Generate(Token::MOD, SMI, 4, SMI, NO_OVERWRITE, isolate); + Generate(Token::MOD, SMI, 4, SMI, OVERWRITE_LEFT, isolate); + Generate(Token::MOD, SMI, 8, SMI, NO_OVERWRITE, isolate); + Generate(Token::MOD, SMI, SMI, SMI, NO_OVERWRITE, isolate); + Generate(Token::MOD, SMI, SMI, SMI, OVERWRITE_LEFT, isolate); + Generate(Token::MUL, INT32, INT32, INT32, NO_OVERWRITE, isolate); + Generate(Token::MUL, INT32, INT32, NUMBER, NO_OVERWRITE, isolate); + Generate(Token::MUL, INT32, NUMBER, NUMBER, NO_OVERWRITE, isolate); + Generate(Token::MUL, INT32, NUMBER, NUMBER, OVERWRITE_LEFT, isolate); + Generate(Token::MUL, INT32, SMI, INT32, NO_OVERWRITE, isolate); + Generate(Token::MUL, INT32, SMI, INT32, OVERWRITE_LEFT, isolate); + Generate(Token::MUL, INT32, SMI, NUMBER, NO_OVERWRITE, isolate); + Generate(Token::MUL, NUMBER, INT32, NUMBER, NO_OVERWRITE, isolate); + Generate(Token::MUL, NUMBER, INT32, NUMBER, OVERWRITE_LEFT, isolate); + Generate(Token::MUL, NUMBER, INT32, NUMBER, OVERWRITE_RIGHT, isolate); + Generate(Token::MUL, NUMBER, NUMBER, NUMBER, NO_OVERWRITE, isolate); + Generate(Token::MUL, NUMBER, NUMBER, NUMBER, OVERWRITE_LEFT, isolate); + Generate(Token::MUL, NUMBER, SMI, NUMBER, NO_OVERWRITE, isolate); + Generate(Token::MUL, NUMBER, SMI, NUMBER, OVERWRITE_LEFT, isolate); + Generate(Token::MUL, NUMBER, SMI, NUMBER, OVERWRITE_RIGHT, isolate); + Generate(Token::MUL, SMI, INT32, INT32, NO_OVERWRITE, isolate); + Generate(Token::MUL, SMI, INT32, INT32, OVERWRITE_LEFT, isolate); + Generate(Token::MUL, SMI, INT32, NUMBER, NO_OVERWRITE, isolate); + Generate(Token::MUL, SMI, NUMBER, NUMBER, NO_OVERWRITE, isolate); + Generate(Token::MUL, SMI, NUMBER, NUMBER, OVERWRITE_LEFT, isolate); + Generate(Token::MUL, SMI, NUMBER, NUMBER, OVERWRITE_RIGHT, isolate); + Generate(Token::MUL, SMI, SMI, INT32, NO_OVERWRITE, isolate); + Generate(Token::MUL, SMI, SMI, NUMBER, NO_OVERWRITE, isolate); + Generate(Token::MUL, SMI, SMI, NUMBER, OVERWRITE_LEFT, isolate); + Generate(Token::MUL, SMI, SMI, SMI, NO_OVERWRITE, isolate); + Generate(Token::MUL, SMI, SMI, SMI, OVERWRITE_LEFT, isolate); + Generate(Token::MUL, SMI, SMI, SMI, OVERWRITE_RIGHT, isolate); + Generate(Token::SAR, INT32, SMI, INT32, OVERWRITE_RIGHT, isolate); + Generate(Token::SAR, INT32, SMI, SMI, NO_OVERWRITE, isolate); + Generate(Token::SAR, INT32, SMI, SMI, OVERWRITE_RIGHT, isolate); + Generate(Token::SAR, NUMBER, SMI, SMI, NO_OVERWRITE, isolate); + Generate(Token::SAR, NUMBER, SMI, SMI, OVERWRITE_RIGHT, isolate); + Generate(Token::SAR, SMI, SMI, SMI, OVERWRITE_LEFT, isolate); + Generate(Token::SAR, SMI, SMI, SMI, OVERWRITE_RIGHT, isolate); + Generate(Token::SHL, INT32, SMI, INT32, NO_OVERWRITE, isolate); + Generate(Token::SHL, INT32, SMI, INT32, OVERWRITE_RIGHT, isolate); + Generate(Token::SHL, INT32, SMI, SMI, NO_OVERWRITE, 
isolate); + Generate(Token::SHL, INT32, SMI, SMI, OVERWRITE_RIGHT, isolate); + Generate(Token::SHL, NUMBER, SMI, SMI, OVERWRITE_RIGHT, isolate); + Generate(Token::SHL, SMI, SMI, INT32, NO_OVERWRITE, isolate); + Generate(Token::SHL, SMI, SMI, INT32, OVERWRITE_LEFT, isolate); + Generate(Token::SHL, SMI, SMI, INT32, OVERWRITE_RIGHT, isolate); + Generate(Token::SHL, SMI, SMI, SMI, NO_OVERWRITE, isolate); + Generate(Token::SHL, SMI, SMI, SMI, OVERWRITE_LEFT, isolate); + Generate(Token::SHL, SMI, SMI, SMI, OVERWRITE_RIGHT, isolate); + Generate(Token::SHR, INT32, SMI, SMI, NO_OVERWRITE, isolate); + Generate(Token::SHR, INT32, SMI, SMI, OVERWRITE_LEFT, isolate); + Generate(Token::SHR, INT32, SMI, SMI, OVERWRITE_RIGHT, isolate); + Generate(Token::SHR, NUMBER, SMI, SMI, NO_OVERWRITE, isolate); + Generate(Token::SHR, NUMBER, SMI, SMI, OVERWRITE_LEFT, isolate); + Generate(Token::SHR, NUMBER, SMI, INT32, OVERWRITE_RIGHT, isolate); + Generate(Token::SHR, SMI, SMI, SMI, NO_OVERWRITE, isolate); + Generate(Token::SHR, SMI, SMI, SMI, OVERWRITE_LEFT, isolate); + Generate(Token::SHR, SMI, SMI, SMI, OVERWRITE_RIGHT, isolate); + Generate(Token::SUB, INT32, INT32, INT32, NO_OVERWRITE, isolate); + Generate(Token::SUB, INT32, INT32, INT32, OVERWRITE_LEFT, isolate); + Generate(Token::SUB, INT32, NUMBER, NUMBER, NO_OVERWRITE, isolate); + Generate(Token::SUB, INT32, NUMBER, NUMBER, OVERWRITE_RIGHT, isolate); + Generate(Token::SUB, INT32, SMI, INT32, OVERWRITE_LEFT, isolate); + Generate(Token::SUB, INT32, SMI, INT32, OVERWRITE_RIGHT, isolate); + Generate(Token::SUB, NUMBER, INT32, NUMBER, NO_OVERWRITE, isolate); + Generate(Token::SUB, NUMBER, INT32, NUMBER, OVERWRITE_LEFT, isolate); + Generate(Token::SUB, NUMBER, NUMBER, NUMBER, NO_OVERWRITE, isolate); + Generate(Token::SUB, NUMBER, NUMBER, NUMBER, OVERWRITE_LEFT, isolate); + Generate(Token::SUB, NUMBER, NUMBER, NUMBER, OVERWRITE_RIGHT, isolate); + Generate(Token::SUB, NUMBER, SMI, NUMBER, NO_OVERWRITE, isolate); + Generate(Token::SUB, NUMBER, SMI, NUMBER, OVERWRITE_LEFT, isolate); + Generate(Token::SUB, NUMBER, SMI, NUMBER, OVERWRITE_RIGHT, isolate); + Generate(Token::SUB, SMI, INT32, INT32, NO_OVERWRITE, isolate); + Generate(Token::SUB, SMI, NUMBER, NUMBER, NO_OVERWRITE, isolate); + Generate(Token::SUB, SMI, NUMBER, NUMBER, OVERWRITE_LEFT, isolate); + Generate(Token::SUB, SMI, NUMBER, NUMBER, OVERWRITE_RIGHT, isolate); + Generate(Token::SUB, SMI, SMI, SMI, NO_OVERWRITE, isolate); + Generate(Token::SUB, SMI, SMI, SMI, OVERWRITE_LEFT, isolate); + Generate(Token::SUB, SMI, SMI, SMI, OVERWRITE_RIGHT, isolate); } -#define __ ACCESS_MASM(masm) +bool BinaryOpStub::can_encode_arg_value(int32_t value) const { + return op_ == Token::MOD && value > 0 && IsPowerOf2(value) && + FixedRightArgValueBits::is_valid(WhichPowerOf2(value)); +} -void BinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) { - switch (op_) { - case Token::ADD: - __ InvokeBuiltin(Builtins::ADD, CALL_FUNCTION); - break; - case Token::SUB: - __ InvokeBuiltin(Builtins::SUB, CALL_FUNCTION); - break; - case Token::MUL: - __ InvokeBuiltin(Builtins::MUL, CALL_FUNCTION); - break; - case Token::DIV: - __ InvokeBuiltin(Builtins::DIV, CALL_FUNCTION); - break; - case Token::MOD: - __ InvokeBuiltin(Builtins::MOD, CALL_FUNCTION); - break; - case Token::BIT_OR: - __ InvokeBuiltin(Builtins::BIT_OR, CALL_FUNCTION); - break; - case Token::BIT_AND: - __ InvokeBuiltin(Builtins::BIT_AND, CALL_FUNCTION); - break; - case Token::BIT_XOR: - __ InvokeBuiltin(Builtins::BIT_XOR, CALL_FUNCTION); +int 
BinaryOpStub::encode_arg_value(int32_t value) const { + ASSERT(can_encode_arg_value(value)); + return WhichPowerOf2(value); +} + + +int32_t BinaryOpStub::decode_arg_value(int value) const { + return 1 << value; +} + + +int BinaryOpStub::encode_token(Token::Value op) const { + ASSERT(op >= FIRST_TOKEN && op <= LAST_TOKEN); + return op - FIRST_TOKEN; +} + + +Token::Value BinaryOpStub::decode_token(int op) const { + int res = op + FIRST_TOKEN; + ASSERT(res >= FIRST_TOKEN && res <= LAST_TOKEN); + return static_cast<Token::Value>(res); +} + + +const char* BinaryOpStub::StateToName(State state) { + switch (state) { + case NONE: + return "None"; + case SMI: + return "Smi"; + case INT32: + return "Int32"; + case NUMBER: + return "Number"; + case STRING: + return "String"; + case GENERIC: + return "Generic"; + } + return ""; +} + + +void BinaryOpStub::UpdateStatus(Handle<Object> left, + Handle<Object> right, + Maybe<Handle<Object> > result) { + int old_state = GetExtraICState(); + + UpdateStatus(left, &left_state_); + UpdateStatus(right, &right_state_); + + int32_t value; + bool new_has_fixed_right_arg = + right->ToInt32(&value) && can_encode_arg_value(value) && + (left_state_ == SMI || left_state_ == INT32) && + (result_state_ == NONE || !fixed_right_arg_.has_value); + + fixed_right_arg_ = Maybe<int32_t>(new_has_fixed_right_arg, value); + + if (result.has_value) UpdateStatus(result.value, &result_state_); + + State max_input = Max(left_state_, right_state_); + + if (!has_int_result() && op_ != Token::SHR && + max_input <= NUMBER && max_input > result_state_) { + result_state_ = max_input; + } + + ASSERT(result_state_ <= (has_int_result() ? INT32 : NUMBER) || + op_ == Token::ADD); + + if (old_state == GetExtraICState()) { + // Tagged operations can lead to non-truncating HChanges. + if (left->IsUndefined() || left->IsBoolean()) { + left_state_ = GENERIC; + } else if (right->IsUndefined() || right->IsBoolean()) { + right_state_ = GENERIC; + } else { + // Since the FPU is too precise, we might bail out on numbers which + // would actually truncate with 64-bit precision. + ASSERT(!CpuFeatures::IsSupported(SSE2) && + result_state_ <= INT32); + result_state_ = NUMBER; + } + } +} + + +void BinaryOpStub::UpdateStatus(Handle<Object> object, + State* state) { + bool is_truncating = (op_ == Token::BIT_AND || op_ == Token::BIT_OR || + op_ == Token::BIT_XOR || op_ == Token::SAR || + op_ == Token::SHL || op_ == Token::SHR); + v8::internal::TypeInfo type = v8::internal::TypeInfo::FromValue(object); + if (object->IsBoolean() && is_truncating) { + // Booleans are converted by HChange, which truncates. + type = TypeInfo::Integer32(); + } + if (object->IsUndefined()) { + // Undefined will be automatically truncated for us by HChange. + type = is_truncating ? TypeInfo::Integer32() : TypeInfo::Double(); + } + State int_state = SmiValuesAre32Bits() ?
NUMBER : INT32; + State new_state = NONE; + if (type.IsSmi()) { + new_state = SMI; + } else if (type.IsInteger32()) { + new_state = int_state; + } else if (type.IsNumber()) { + new_state = NUMBER; + } else if (object->IsString() && operation() == Token::ADD) { + new_state = STRING; + } else { + new_state = GENERIC; + } + if ((new_state <= NUMBER && *state > NUMBER) || + (new_state > NUMBER && *state <= NUMBER && *state != NONE)) { + new_state = GENERIC; + } + *state = Max(*state, new_state); +} + + +Handle<Type> BinaryOpStub::StateToType(State state, + Isolate* isolate) { + Handle<Type> t = handle(Type::None(), isolate); + switch (state) { + case NUMBER: + t = handle(Type::Union(t, handle(Type::Double(), isolate)), isolate); + // Fall through. + case INT32: + t = handle(Type::Union(t, handle(Type::Signed32(), isolate)), isolate); + // Fall through. + case SMI: + t = handle(Type::Union(t, handle(Type::Smi(), isolate)), isolate); break; - case Token::SAR: - __ InvokeBuiltin(Builtins::SAR, CALL_FUNCTION); + + case STRING: + t = handle(Type::Union(t, handle(Type::String(), isolate)), isolate); break; - case Token::SHR: - __ InvokeBuiltin(Builtins::SHR, CALL_FUNCTION); + case GENERIC: + return handle(Type::Any(), isolate); break; - case Token::SHL: - __ InvokeBuiltin(Builtins::SHL, CALL_FUNCTION); + case NONE: break; - default: - UNREACHABLE(); } + return t; } -#undef __ +Handle<Type> BinaryOpStub::GetLeftType(Isolate* isolate) const { + return StateToType(left_state_, isolate); +} -void BinaryOpStub::PrintName(StringStream* stream) { - const char* op_name = Token::Name(op_); - const char* overwrite_name; - switch (mode_) { - case NO_OVERWRITE: overwrite_name = "Alloc"; break; - case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break; - case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break; - default: overwrite_name = "UnknownOverwrite"; break; - } - stream->Add("BinaryOpStub_%s_%s_%s+%s", - op_name, - overwrite_name, - BinaryOpIC::GetName(left_type_), - BinaryOpIC::GetName(right_type_)); +Handle<Type> BinaryOpStub::GetRightType(Isolate* isolate) const { + return StateToType(right_state_, isolate); } -void BinaryOpStub::GenerateStringStub(MacroAssembler* masm) { - ASSERT(left_type_ == BinaryOpIC::STRING || right_type_ == BinaryOpIC::STRING); - ASSERT(op_ == Token::ADD); - if (left_type_ == BinaryOpIC::STRING && right_type_ == BinaryOpIC::STRING) { - GenerateBothStringStub(masm); - return; +Handle<Type> BinaryOpStub::GetResultType(Isolate* isolate) const { + if (HasSideEffects(isolate)) return StateToType(NONE, isolate); + if (result_state_ == GENERIC && op_ == Token::ADD) { + return handle(Type::Union(handle(Type::Number(), isolate), + handle(Type::String(), isolate)), isolate); + } + ASSERT(result_state_ != GENERIC); + if (result_state_ == NUMBER && op_ == Token::SHR) { + return handle(Type::Unsigned32(), isolate); } - // Try to add arguments as strings, otherwise, transition to the generic - // BinaryOpIC type. 
- GenerateAddStrings(masm); - GenerateTypeTransition(masm); + return StateToType(result_state_, isolate); } @@ -759,6 +1121,12 @@ void ArrayConstructorStubBase::InstallDescriptors(Isolate* isolate) { } +void NumberToStringStub::InstallDescriptors(Isolate* isolate) { + NumberToStringStub stub; + InstallDescriptor(isolate, &stub); +} + + void FastNewClosureStub::InstallDescriptors(Isolate* isolate) { FastNewClosureStub stub(STRICT_MODE, false); InstallDescriptor(isolate, &stub); diff --git a/deps/v8/src/code-stubs.h b/deps/v8/src/code-stubs.h index 946eb76962..80d99d8b68 100644 --- a/deps/v8/src/code-stubs.h +++ b/deps/v8/src/code-stubs.h @@ -30,8 +30,9 @@ #include "allocation.h" #include "assembler.h" -#include "globals.h" #include "codegen.h" +#include "globals.h" +#include "macro-assembler.h" namespace v8 { namespace internal { @@ -200,19 +201,21 @@ class CodeStub BASE_EMBEDDED { virtual void PrintName(StringStream* stream); + // Returns a name for logging/debugging purposes. + SmartArrayPointer<const char> GetName(); + protected: static bool CanUseFPRegisters(); // Generates the assembler code for the stub. virtual Handle<Code> GenerateCode(Isolate* isolate) = 0; + virtual void VerifyPlatformFeatures(Isolate* isolate); // Returns whether the code generated for this stub needs to be allocated as // a fixed (non-moveable) code object. virtual bool NeedsImmovableCode() { return false; } - // Returns a name for logging/debugging purposes. - SmartArrayPointer<const char> GetName(); virtual void PrintBaseName(StringStream* stream); virtual void PrintState(StringStream* stream) { } @@ -278,7 +281,7 @@ enum StubFunctionMode { NOT_JS_FUNCTION_STUB_MODE, JS_FUNCTION_STUB_MODE }; struct CodeStubInterfaceDescriptor { CodeStubInterfaceDescriptor(); int register_param_count_; - const Register* stack_parameter_count_; + Register stack_parameter_count_; // if hint_stack_parameter_count_ > 0, the code stub can optimize the // return sequence. Default value is -1, which means it is ignored. int hint_stack_parameter_count_; @@ -287,7 +290,7 @@ struct CodeStubInterfaceDescriptor { Address deoptimization_handler_; int environment_length() const { - if (stack_parameter_count_ != NULL) { + if (stack_parameter_count_.is_valid()) { return register_param_count_ + 1; } return register_param_count_; @@ -318,7 +321,7 @@ struct CodeStubInterfaceDescriptor { // defined outside of the platform directories #define DESCRIPTOR_GET_PARAMETER_REGISTER(descriptor, index) \ ((index) == (descriptor)->register_param_count_) \ - ? *((descriptor)->stack_parameter_count_) \ + ? (descriptor)->stack_parameter_count_ \ : (descriptor)->register_params_[(index)] @@ -402,9 +405,7 @@ enum StringAddFlags { // Check right parameter. STRING_ADD_CHECK_RIGHT = 1 << 1, // Check both parameters. 
- STRING_ADD_CHECK_BOTH = STRING_ADD_CHECK_LEFT | STRING_ADD_CHECK_RIGHT, - // Stub needs a frame before calling the runtime - STRING_ADD_ERECT_FRAME = 1 << 2 + STRING_ADD_CHECK_BOTH = STRING_ADD_CHECK_LEFT | STRING_ADD_CHECK_RIGHT }; } } // namespace v8::internal @@ -464,6 +465,27 @@ class ToNumberStub: public HydrogenCodeStub { }; +class NumberToStringStub V8_FINAL : public HydrogenCodeStub { + public: + NumberToStringStub() {} + + virtual Handle<Code> GenerateCode(Isolate* isolate) V8_OVERRIDE; + + virtual void InitializeInterfaceDescriptor( + Isolate* isolate, + CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE; + + static void InstallDescriptors(Isolate* isolate); + + // Parameters accessed via CodeStubGraphBuilder::GetParameter() + static const int kNumber = 0; + + private: + virtual Major MajorKey() V8_OVERRIDE { return NumberToString; } + virtual int NotMissMinorKey() V8_OVERRIDE { return 0; } +}; + + class FastNewClosureStub : public HydrogenCodeStub { public: explicit FastNewClosureStub(LanguageMode language_mode, bool is_generator) @@ -830,19 +852,12 @@ class FunctionPrototypeStub: public ICStub { class StringLengthStub: public ICStub { public: - StringLengthStub(Code::Kind kind, bool support_wrapper) - : ICStub(kind), support_wrapper_(support_wrapper) { } + explicit StringLengthStub(Code::Kind kind) : ICStub(kind) { } virtual void Generate(MacroAssembler* masm); private: STATIC_ASSERT(KindBits::kSize == 4); - class WrapperModeBits: public BitField<bool, 4, 1> {}; - virtual CodeStub::Major MajorKey() { return StringLength; } - virtual int MinorKey() { - return KindBits::encode(kind()) | WrapperModeBits::encode(support_wrapper_); - } - - bool support_wrapper_; + virtual CodeStub::Major MajorKey() { return StringLength; } }; @@ -892,7 +907,7 @@ class HICStub: public HydrogenCodeStub { class HandlerStub: public HICStub { public: - virtual Code::Kind GetCodeKind() const { return Code::STUB; } + virtual Code::Kind GetCodeKind() const { return Code::HANDLER; } virtual int GetStubFlags() { return kind(); } protected: @@ -983,156 +998,177 @@ class KeyedLoadFieldStub: public LoadFieldStub { }; -class BinaryOpStub: public PlatformCodeStub { +class BinaryOpStub: public HydrogenCodeStub { public: BinaryOpStub(Token::Value op, OverwriteMode mode) - : op_(op), - mode_(mode), - platform_specific_bit_(false), - left_type_(BinaryOpIC::UNINITIALIZED), - right_type_(BinaryOpIC::UNINITIALIZED), - result_type_(BinaryOpIC::UNINITIALIZED), - encoded_right_arg_(false, encode_arg_value(1)) { + : HydrogenCodeStub(UNINITIALIZED), op_(op), mode_(mode) { + ASSERT(op <= LAST_TOKEN && op >= FIRST_TOKEN); Initialize(); - ASSERT(OpBits::is_valid(Token::NUM_TOKENS)); } - BinaryOpStub( - int key, - BinaryOpIC::TypeInfo left_type, - BinaryOpIC::TypeInfo right_type, - BinaryOpIC::TypeInfo result_type, - Maybe<int32_t> fixed_right_arg) - : op_(OpBits::decode(key)), - mode_(ModeBits::decode(key)), - platform_specific_bit_(PlatformSpecificBits::decode(key)), - left_type_(left_type), - right_type_(right_type), - result_type_(result_type), - encoded_right_arg_(fixed_right_arg.has_value, - encode_arg_value(fixed_right_arg.value)) { } + explicit BinaryOpStub(Code::ExtraICState state) + : op_(decode_token(OpBits::decode(state))), + mode_(OverwriteModeField::decode(state)), + fixed_right_arg_( + Maybe<int>(HasFixedRightArgBits::decode(state), + decode_arg_value(FixedRightArgValueBits::decode(state)))), + left_state_(LeftStateField::decode(state)), + right_state_(fixed_right_arg_.has_value + ? 
((fixed_right_arg_.value <= Smi::kMaxValue) ? SMI : INT32) + : RightStateField::decode(state)), + result_state_(ResultStateField::decode(state)) { + // We don't deserialize the SSE2 Field, since this is only used to be able + // to include SSE2 as well as non-SSE2 versions in the snapshot. For code + // generation we always want it to reflect the current state. + ASSERT(!fixed_right_arg_.has_value || + can_encode_arg_value(fixed_right_arg_.value)); + } + + static const int FIRST_TOKEN = Token::BIT_OR; + static const int LAST_TOKEN = Token::MOD; - static void decode_types_from_minor_key(int minor_key, - BinaryOpIC::TypeInfo* left_type, - BinaryOpIC::TypeInfo* right_type, - BinaryOpIC::TypeInfo* result_type) { - *left_type = - static_cast<BinaryOpIC::TypeInfo>(LeftTypeBits::decode(minor_key)); - *right_type = - static_cast<BinaryOpIC::TypeInfo>(RightTypeBits::decode(minor_key)); - *result_type = - static_cast<BinaryOpIC::TypeInfo>(ResultTypeBits::decode(minor_key)); + static void GenerateAheadOfTime(Isolate* isolate); + virtual void InitializeInterfaceDescriptor( + Isolate* isolate, CodeStubInterfaceDescriptor* descriptor); + static void InitializeForIsolate(Isolate* isolate) { + BinaryOpStub binopStub(UNINITIALIZED); + binopStub.InitializeInterfaceDescriptor( + isolate, isolate->code_stub_interface_descriptor(CodeStub::BinaryOp)); + } + + virtual Code::Kind GetCodeKind() const { return Code::BINARY_OP_IC; } + virtual InlineCacheState GetICState() { + if (Max(left_state_, right_state_) == NONE) { + return ::v8::internal::UNINITIALIZED; + } + if (Max(left_state_, right_state_) == GENERIC) return MEGAMORPHIC; + return MONOMORPHIC; } - static Token::Value decode_op_from_minor_key(int minor_key) { - return static_cast<Token::Value>(OpBits::decode(minor_key)); + virtual void VerifyPlatformFeatures(Isolate* isolate) V8_OVERRIDE { + ASSERT(CpuFeatures::VerifyCrossCompiling(SSE2)); } - static Maybe<int> decode_fixed_right_arg_from_minor_key(int minor_key) { - return Maybe<int>( - HasFixedRightArgBits::decode(minor_key), - decode_arg_value(FixedRightArgValueBits::decode(minor_key))); + virtual Code::ExtraICState GetExtraICState() { + bool sse_field = Max(result_state_, Max(left_state_, right_state_)) > SMI && + CpuFeatures::IsSafeForSnapshot(SSE2); + + return OpBits::encode(encode_token(op_)) + | LeftStateField::encode(left_state_) + | RightStateField::encode(fixed_right_arg_.has_value + ? NONE : right_state_) + | ResultStateField::encode(result_state_) + | HasFixedRightArgBits::encode(fixed_right_arg_.has_value) + | FixedRightArgValueBits::encode(fixed_right_arg_.has_value + ? 
encode_arg_value( + fixed_right_arg_.value) + : 0) + | SSE2Field::encode(sse_field) + | OverwriteModeField::encode(mode_); } - int fixed_right_arg_value() const { - return decode_arg_value(encoded_right_arg_.value); + bool CanReuseDoubleBox() { + return result_state_ <= NUMBER && result_state_ > SMI && + ((left_state_ > SMI && left_state_ <= NUMBER && + mode_ == OVERWRITE_LEFT) || + (right_state_ > SMI && right_state_ <= NUMBER && + mode_ == OVERWRITE_RIGHT)); } - static bool can_encode_arg_value(int32_t value) { - return value > 0 && - IsPowerOf2(value) && - FixedRightArgValueBits::is_valid(WhichPowerOf2(value)); + bool HasSideEffects(Isolate* isolate) const { + Handle<Type> left = GetLeftType(isolate); + Handle<Type> right = GetRightType(isolate); + return left->Maybe(Type::Receiver()) || right->Maybe(Type::Receiver()); } - enum SmiCodeGenerateHeapNumberResults { - ALLOW_HEAPNUMBER_RESULTS, - NO_HEAPNUMBER_RESULTS - }; + virtual Handle<Code> GenerateCode(Isolate* isolate); - private: - Token::Value op_; - OverwriteMode mode_; - bool platform_specific_bit_; // Indicates SSE3 on IA32. + Maybe<Handle<Object> > Result(Handle<Object> left, + Handle<Object> right, + Isolate* isolate); - // Operand type information determined at runtime. - BinaryOpIC::TypeInfo left_type_; - BinaryOpIC::TypeInfo right_type_; - BinaryOpIC::TypeInfo result_type_; + Token::Value operation() const { return op_; } + OverwriteMode mode() const { return mode_; } + Maybe<int> fixed_right_arg() const { return fixed_right_arg_; } - Maybe<int> encoded_right_arg_; + Handle<Type> GetLeftType(Isolate* isolate) const; + Handle<Type> GetRightType(Isolate* isolate) const; + Handle<Type> GetResultType(Isolate* isolate) const; - static int encode_arg_value(int32_t value) { - ASSERT(can_encode_arg_value(value)); - return WhichPowerOf2(value); - } + void UpdateStatus(Handle<Object> left, + Handle<Object> right, + Maybe<Handle<Object> > result); - static int32_t decode_arg_value(int value) { - return 1 << value; + void PrintState(StringStream* stream); + + private: + explicit BinaryOpStub(InitializationState state) : HydrogenCodeStub(state), + op_(Token::ADD), + mode_(NO_OVERWRITE) { + Initialize(); } + void Initialize(); - virtual void PrintName(StringStream* stream); + enum State { NONE, SMI, INT32, NUMBER, STRING, GENERIC }; + + // We truncate the last bit of the token. + STATIC_ASSERT(LAST_TOKEN - FIRST_TOKEN < (1 << 5)); + class LeftStateField: public BitField<State, 0, 3> {}; + // When fixed right arg is set, we don't need to store the right state. + // Thus the two fields can overlap. + class HasFixedRightArgBits: public BitField<bool, 4, 1> {}; + class FixedRightArgValueBits: public BitField<int, 5, 4> {}; + class RightStateField: public BitField<State, 5, 3> {}; + class ResultStateField: public BitField<State, 9, 3> {}; + class SSE2Field: public BitField<bool, 12, 1> {}; + class OverwriteModeField: public BitField<OverwriteMode, 13, 2> {}; + class OpBits: public BitField<int, 15, 5> {}; + + virtual CodeStub::Major MajorKey() { return BinaryOp; } + virtual int NotMissMinorKey() { return GetExtraICState(); } - // Minor key encoding in all 25 bits FFFFFHTTTRRRLLLPOOOOOOOMM. - // Note: We actually do not need 7 bits for the operation, just 4 bits to - // encode ADD, SUB, MUL, DIV, MOD, BIT_OR, BIT_AND, BIT_XOR, SAR, SHL, SHR. 
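// Aside (illustrative, not part of the patch): V8's BitField<T, shift, size>
// template packs a value into bits [shift, shift + size) of an integer key,
// which is how the new state fields above share a single ExtraICState word.
// A minimal sketch of the round-trip, assuming the stock BitField definition:
//
//   int key = OpBits::encode(encode_token(op_))          // bits 15..19
//           | OverwriteModeField::encode(mode_);         // bits 13..14
//   OverwriteMode mode = OverwriteModeField::decode(key);
//   Token::Value op = decode_token(OpBits::decode(key));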
- class ModeBits: public BitField<OverwriteMode, 0, 2> {}; - class OpBits: public BitField<Token::Value, 2, 7> {}; - class PlatformSpecificBits: public BitField<bool, 9, 1> {}; - class LeftTypeBits: public BitField<BinaryOpIC::TypeInfo, 10, 3> {}; - class RightTypeBits: public BitField<BinaryOpIC::TypeInfo, 13, 3> {}; - class ResultTypeBits: public BitField<BinaryOpIC::TypeInfo, 16, 3> {}; - class HasFixedRightArgBits: public BitField<bool, 19, 1> {}; - class FixedRightArgValueBits: public BitField<int, 20, 5> {}; - - Major MajorKey() { return BinaryOp; } - int MinorKey() { - return OpBits::encode(op_) - | ModeBits::encode(mode_) - | PlatformSpecificBits::encode(platform_specific_bit_) - | LeftTypeBits::encode(left_type_) - | RightTypeBits::encode(right_type_) - | ResultTypeBits::encode(result_type_) - | HasFixedRightArgBits::encode(encoded_right_arg_.has_value) - | FixedRightArgValueBits::encode(encoded_right_arg_.value); - } + static Handle<Type> StateToType(State state, + Isolate* isolate); + static void Generate(Token::Value op, + State left, + int right, + State result, + OverwriteMode mode, + Isolate* isolate); - // Platform-independent implementation. - void Generate(MacroAssembler* masm); - void GenerateCallRuntime(MacroAssembler* masm); + static void Generate(Token::Value op, + State left, + State right, + State result, + OverwriteMode mode, + Isolate* isolate); - // Platform-independent signature, platform-specific implementation. - void Initialize(); - void GenerateAddStrings(MacroAssembler* masm); - void GenerateBothStringStub(MacroAssembler* masm); - void GenerateGeneric(MacroAssembler* masm); - void GenerateGenericStub(MacroAssembler* masm); - void GenerateNumberStub(MacroAssembler* masm); - void GenerateInt32Stub(MacroAssembler* masm); - void GenerateLoadArguments(MacroAssembler* masm); - void GenerateOddballStub(MacroAssembler* masm); - void GenerateRegisterArgsPush(MacroAssembler* masm); - void GenerateReturn(MacroAssembler* masm); - void GenerateSmiStub(MacroAssembler* masm); - void GenerateStringStub(MacroAssembler* masm); - void GenerateTypeTransition(MacroAssembler* masm); - void GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm); - void GenerateUninitializedStub(MacroAssembler* masm); - - // Entirely platform-specific methods are defined as static helper - // functions in the <arch>/code-stubs-<arch>.cc files. 
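// Aside (illustrative, not part of the patch): fixed_right_arg_ only ever
// holds positive powers of two, so encode_arg_value()/decode_arg_value()
// (declared just below, defined near the top of this section) store only the
// exponent. A worked round-trip:
//
//   can_encode_arg_value(8);  // true: 8 > 0, IsPowerOf2(8), exponent fits
//   encode_arg_value(8);      // 3, i.e. WhichPowerOf2(8)
//   decode_arg_value(3);      // 8, i.e. 1 << 3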
+ void UpdateStatus(Handle<Object> object, + State* state); - virtual Code::Kind GetCodeKind() const { return Code::BINARY_OP_IC; } + bool can_encode_arg_value(int32_t value) const; + int encode_arg_value(int32_t value) const; + int32_t decode_arg_value(int value) const; + int encode_token(Token::Value op) const; + Token::Value decode_token(int op) const; - virtual InlineCacheState GetICState() { - return BinaryOpIC::ToState(Max(left_type_, right_type_)); + bool has_int_result() const { + return op_ == Token::BIT_XOR || op_ == Token::BIT_AND || + op_ == Token::BIT_OR || op_ == Token::SAR || op_ == Token::SHL; } - virtual void FinishCode(Handle<Code> code) { - code->set_stub_info(MinorKey()); - } + const char* StateToName(State state); + + void PrintBaseName(StringStream* stream); - friend class CodeGenerator; + Token::Value op_; + OverwriteMode mode_; + + Maybe<int> fixed_right_arg_; + State left_state_; + State right_state_; + State result_state_; }; @@ -1318,6 +1354,11 @@ class CEntryStub : public PlatformCodeStub { virtual bool IsPregenerated(Isolate* isolate) V8_OVERRIDE; static void GenerateAheadOfTime(Isolate* isolate); + protected: + virtual void VerifyPlatformFeatures(Isolate* isolate) V8_OVERRIDE { + ASSERT(CpuFeatures::VerifyCrossCompiling(SSE2)); + }; + private: void GenerateCore(MacroAssembler* masm, Label* throw_normal_exception, @@ -1705,7 +1746,9 @@ class DoubleToIStub : public PlatformCodeStub { DestinationRegisterBits::encode(destination.code_) | OffsetBits::encode(offset) | IsTruncatingBits::encode(is_truncating) | - SkipFastPathBits::encode(skip_fastpath); + SkipFastPathBits::encode(skip_fastpath) | + SSEBits::encode(CpuFeatures::IsSafeForSnapshot(SSE2) ? + CpuFeatures::IsSafeForSnapshot(SSE3) ? 2 : 1 : 0); } Register source() { @@ -1734,6 +1777,11 @@ class DoubleToIStub : public PlatformCodeStub { virtual bool SometimesSetsUpAFrame() { return false; } + protected: + virtual void VerifyPlatformFeatures(Isolate* isolate) V8_OVERRIDE { + ASSERT(CpuFeatures::VerifyCrossCompiling(SSE2)); + } + private: static const int kBitsPerRegisterNumber = 6; STATIC_ASSERT((1L << kBitsPerRegisterNumber) >= Register::kNumRegisters); @@ -1748,6 +1796,8 @@ class DoubleToIStub : public PlatformCodeStub { public BitField<int, 2 * kBitsPerRegisterNumber + 1, 3> {}; // NOLINT class SkipFastPathBits: public BitField<int, 2 * kBitsPerRegisterNumber + 4, 1> {}; // NOLINT + class SSEBits: + public BitField<int, 2 * kBitsPerRegisterNumber + 5, 2> {}; // NOLINT Major MajorKey() { return DoubleToI; } int MinorKey() { return bit_field_; } diff --git a/deps/v8/src/codegen.cc b/deps/v8/src/codegen.cc index d33c7f06bd..573ddc6ce7 100644 --- a/deps/v8/src/codegen.cc +++ b/deps/v8/src/codegen.cc @@ -113,10 +113,12 @@ Handle<Code> CodeGenerator::MakeCodeEpilogue(MacroAssembler* masm, masm->GetCode(&desc); Handle<Code> code = isolate->factory()->NewCode(desc, flags, masm->CodeObject(), - false, is_crankshafted); + false, is_crankshafted, + info->prologue_offset()); isolate->counters()->total_compiled_code_size()->Increment( code->instruction_size()); - code->set_prologue_offset(info->prologue_offset()); + isolate->heap()->IncrementCodeGeneratedBytes(is_crankshafted, + code->instruction_size()); return code; } @@ -132,7 +134,9 @@ void CodeGenerator::PrintCode(Handle<Code> code, CompilationInfo* info) { if (print_code) { // Print the source code if available. 
FunctionLiteral* function = info->function(); - if (code->kind() == Code::OPTIMIZED_FUNCTION) { + bool print_source = code->kind() == Code::OPTIMIZED_FUNCTION || + code->kind() == Code::FUNCTION; + if (print_source) { Handle<Script> script = info->script(); if (!script->IsUndefined() && !script->source()->IsUndefined()) { PrintF("--- Raw source ---\n"); @@ -160,12 +164,16 @@ void CodeGenerator::PrintCode(Handle<Code> code, CompilationInfo* info) { } else { PrintF("--- Code ---\n"); } + if (print_source) { + PrintF("source_position = %d\n", function->start_position()); + } if (info->IsStub()) { CodeStub::Major major_key = info->code_stub()->MajorKey(); code->Disassemble(CodeStub::MajorName(major_key, false)); } else { code->Disassemble(*function->debug_name()->ToCString()); } + PrintF("--- End code ---\n"); } #endif // ENABLE_DISASSEMBLER } diff --git a/deps/v8/src/compiler.cc b/deps/v8/src/compiler.cc index 47634ec22b..ed0a0c8e69 100644 --- a/deps/v8/src/compiler.cc +++ b/deps/v8/src/compiler.cc @@ -112,7 +112,7 @@ void CompilationInfo::Initialize(Isolate* isolate, zone_ = zone; deferred_handles_ = NULL; code_stub_ = NULL; - prologue_offset_ = kPrologueOffsetNotSet; + prologue_offset_ = Code::kPrologueOffsetNotSet; opt_count_ = shared_info().is_null() ? 0 : shared_info()->opt_count(); no_frame_ranges_ = isolate->cpu_profiler()->is_profiling() ? new List<OffsetRange>(2) : NULL; @@ -123,7 +123,7 @@ void CompilationInfo::Initialize(Isolate* isolate, mode_ = STUB; return; } - mode_ = isolate->use_crankshaft() ? mode : NONOPT; + mode_ = mode; abort_due_to_dependency_ = false; if (script_->type()->value() == Script::TYPE_NATIVE) { MarkAsNative(); @@ -260,7 +260,7 @@ static bool AlwaysFullCompiler(Isolate* isolate) { } -void OptimizingCompiler::RecordOptimizationStats() { +void RecompileJob::RecordOptimizationStats() { Handle<JSFunction> function = info()->closure(); int opt_count = function->shared()->opt_count(); function->shared()->set_opt_count(opt_count + 1); @@ -297,23 +297,60 @@ void OptimizingCompiler::RecordOptimizationStats() { // A return value of true indicates the compilation pipeline is still // going, not necessarily that we optimized the code. 
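// Aside (illustrative, not part of the patch): the boolean returned below
// collapses RecompileJob's three-way status; only a hard FAILED stops the
// pipeline, while BAILED_OUT (fall back to full codegen) keeps it alive:
//
//   if (status != RecompileJob::SUCCEEDED) {
//     return status != RecompileJob::FAILED;  // true when BAILED_OUT
//   }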
static bool MakeCrankshaftCode(CompilationInfo* info) { - OptimizingCompiler compiler(info); - OptimizingCompiler::Status status = compiler.CreateGraph(); + RecompileJob job(info); + RecompileJob::Status status = job.CreateGraph(); - if (status != OptimizingCompiler::SUCCEEDED) { - return status != OptimizingCompiler::FAILED; + if (status != RecompileJob::SUCCEEDED) { + return status != RecompileJob::FAILED; } - status = compiler.OptimizeGraph(); - if (status != OptimizingCompiler::SUCCEEDED) { - status = compiler.AbortOptimization(); - return status != OptimizingCompiler::FAILED; + status = job.OptimizeGraph(); + if (status != RecompileJob::SUCCEEDED) { + status = job.AbortOptimization(); + return status != RecompileJob::FAILED; } - status = compiler.GenerateAndInstallCode(); - return status != OptimizingCompiler::FAILED; + status = job.GenerateAndInstallCode(); + return status != RecompileJob::FAILED; } -OptimizingCompiler::Status OptimizingCompiler::CreateGraph() { +class HOptimizedGraphBuilderWithPositions: public HOptimizedGraphBuilder { + public: + explicit HOptimizedGraphBuilderWithPositions(CompilationInfo* info) + : HOptimizedGraphBuilder(info) { + } + +#define DEF_VISIT(type) \ + virtual void Visit##type(type* node) V8_OVERRIDE { \ + if (node->position() != RelocInfo::kNoPosition) { \ + SetSourcePosition(node->position()); \ + } \ + HOptimizedGraphBuilder::Visit##type(node); \ + } + EXPRESSION_NODE_LIST(DEF_VISIT) +#undef DEF_VISIT + +#define DEF_VISIT(type) \ + virtual void Visit##type(type* node) V8_OVERRIDE { \ + if (node->position() != RelocInfo::kNoPosition) { \ + SetSourcePosition(node->position()); \ + } \ + HOptimizedGraphBuilder::Visit##type(node); \ + } + STATEMENT_NODE_LIST(DEF_VISIT) +#undef DEF_VISIT + +#define DEF_VISIT(type) \ + virtual void Visit##type(type* node) V8_OVERRIDE { \ + HOptimizedGraphBuilder::Visit##type(node); \ + } + MODULE_NODE_LIST(DEF_VISIT) + DECLARATION_NODE_LIST(DEF_VISIT) + AUXILIARY_NODE_LIST(DEF_VISIT) +#undef DEF_VISIT +}; + + +RecompileJob::Status RecompileJob::CreateGraph() { ASSERT(isolate()->use_crankshaft()); ASSERT(info()->IsOptimizing()); ASSERT(!info()->IsCompilingForDebugging()); @@ -419,7 +456,9 @@ OptimizingCompiler::Status OptimizingCompiler::CreateGraph() { // Type-check the function. AstTyper::Run(info()); - graph_builder_ = new(info()->zone()) HOptimizedGraphBuilder(info()); + graph_builder_ = FLAG_emit_opt_code_positions + ? new(info()->zone()) HOptimizedGraphBuilderWithPositions(info()) + : new(info()->zone()) HOptimizedGraphBuilder(info()); Timer t(this, &time_taken_to_create_graph_); graph_ = graph_builder_->CreateGraph(); @@ -452,7 +491,7 @@ OptimizingCompiler::Status OptimizingCompiler::CreateGraph() { } -OptimizingCompiler::Status OptimizingCompiler::OptimizeGraph() { +RecompileJob::Status RecompileJob::OptimizeGraph() { DisallowHeapAllocation no_allocation; DisallowHandleAllocation no_handles; DisallowHandleDereference no_deref; @@ -475,7 +514,7 @@ OptimizingCompiler::Status OptimizingCompiler::OptimizeGraph() { } -OptimizingCompiler::Status OptimizingCompiler::GenerateAndInstallCode() { +RecompileJob::Status RecompileJob::GenerateAndInstallCode() { ASSERT(last_status() == SUCCEEDED); ASSERT(!info()->HasAbortedDueToDependencyChange()); DisallowCodeDependencyChange no_dependency_change; @@ -555,6 +594,33 @@ static bool DebuggerWantsEagerCompilation(CompilationInfo* info, } +// Sets the expected number of properties based on estimate from compiler.
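// Aside (illustrative, not part of the patch): worked numbers for the
// heuristic below, assuming live_objects_may_exist() is false:
//
//   estimate 0, no snapshot, --clever-optimizations:   0 -> 2, 2 + 8 = 10
//   estimate 4, no snapshot, no clever optimizations:  4 + 3 = 7
//   estimate 4, Serializer::enabled():                 4 + 2 = 6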
+void SetExpectedNofPropertiesFromEstimate(Handle<SharedFunctionInfo> shared, + int estimate) { + // See the comment in SetExpectedNofProperties. + if (shared->live_objects_may_exist()) return; + + // If no properties are added in the constructor, they are more likely + // to be added later. + if (estimate == 0) estimate = 2; + + // TODO(yangguo): check whether those heuristics are still up-to-date. + // We do not shrink objects that go into a snapshot (yet), so we adjust + // the estimate conservatively. + if (Serializer::enabled()) { + estimate += 2; + } else if (FLAG_clever_optimizations) { + // Inobject slack tracking will reclaim redundant inobject space later, + // so we can afford to adjust the estimate generously. + estimate += 8; + } else { + estimate += 3; + } + + shared->set_expected_nof_properties(estimate); +} + + static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) { Isolate* isolate = info->isolate(); PostponeInterruptsScope postpone(isolate); @@ -599,66 +665,70 @@ static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) { } } - // Measure how long it takes to do the compilation; only take the - // rest of the function into account to avoid overlap with the - // parsing statistics. - HistogramTimer* rate = info->is_eval() - ? info->isolate()->counters()->compile_eval() - : info->isolate()->counters()->compile(); - HistogramTimerScope timer(rate); - - // Compile the code. FunctionLiteral* lit = info->function(); LiveEditFunctionTracker live_edit_tracker(isolate, lit); - if (!MakeCode(info)) { - if (!isolate->has_pending_exception()) isolate->StackOverflow(); - return Handle<SharedFunctionInfo>::null(); - } + Handle<SharedFunctionInfo> result; + { + // Measure how long it takes to do the compilation; only take the + // rest of the function into account to avoid overlap with the + // parsing statistics. + HistogramTimer* rate = info->is_eval() + ? info->isolate()->counters()->compile_eval() + : info->isolate()->counters()->compile(); + HistogramTimerScope timer(rate); - // Allocate function. - ASSERT(!info->code().is_null()); - Handle<SharedFunctionInfo> result = - isolate->factory()->NewSharedFunctionInfo( - lit->name(), - lit->materialized_literal_count(), - lit->is_generator(), - info->code(), - ScopeInfo::Create(info->scope(), info->zone())); - - ASSERT_EQ(RelocInfo::kNoPosition, lit->function_token_position()); - Compiler::SetFunctionInfo(result, lit, true, script); - - if (script->name()->IsString()) { - PROFILE(isolate, CodeCreateEvent( - info->is_eval() - ? Logger::EVAL_TAG - : Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script), - *info->code(), - *result, - info, - String::cast(script->name()))); - GDBJIT(AddCode(Handle<String>(String::cast(script->name())), - script, - info->code(), - info)); - } else { - PROFILE(isolate, CodeCreateEvent( - info->is_eval() - ? Logger::EVAL_TAG - : Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script), - *info->code(), - *result, - info, - isolate->heap()->empty_string())); - GDBJIT(AddCode(Handle<String>(), script, info->code(), info)); - } + // Compile the code. + if (!MakeCode(info)) { + if (!isolate->has_pending_exception()) isolate->StackOverflow(); + return Handle<SharedFunctionInfo>::null(); + } + + // Allocate function. 
+ ASSERT(!info->code().is_null()); + result = + isolate->factory()->NewSharedFunctionInfo( + lit->name(), + lit->materialized_literal_count(), + lit->is_generator(), + info->code(), + ScopeInfo::Create(info->scope(), info->zone())); + + ASSERT_EQ(RelocInfo::kNoPosition, lit->function_token_position()); + Compiler::SetFunctionInfo(result, lit, true, script); + + if (script->name()->IsString()) { + PROFILE(isolate, CodeCreateEvent( + info->is_eval() + ? Logger::EVAL_TAG + : Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script), + *info->code(), + *result, + info, + String::cast(script->name()))); + GDBJIT(AddCode(Handle<String>(String::cast(script->name())), + script, + info->code(), + info)); + } else { + PROFILE(isolate, CodeCreateEvent( + info->is_eval() + ? Logger::EVAL_TAG + : Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script), + *info->code(), + *result, + info, + isolate->heap()->empty_string())); + GDBJIT(AddCode(Handle<String>(), script, info->code(), info)); + } - // Hint to the runtime system used when allocating space for initial - // property space by setting the expected number of properties for - // the instances of the function. - SetExpectedNofPropertiesFromEstimate(result, lit->expected_property_count()); + // Hint to the runtime system used when allocating space for initial + // property space by setting the expected number of properties for + // the instances of the function. + SetExpectedNofPropertiesFromEstimate(result, + lit->expected_property_count()); - script->set_compilation_state(Script::COMPILATION_STATE_COMPILED); + script->set_compilation_state(Script::COMPILATION_STATE_COMPILED); + } #ifdef ENABLE_DEBUGGER_SUPPORT // Notify debugger @@ -1032,16 +1102,15 @@ bool Compiler::RecompileConcurrent(Handle<JSFunction> closure, info->SaveHandles(); if (Rewriter::Rewrite(*info) && Scope::Analyze(*info)) { - OptimizingCompiler* compiler = - new(info->zone()) OptimizingCompiler(*info); - OptimizingCompiler::Status status = compiler->CreateGraph(); - if (status == OptimizingCompiler::SUCCEEDED) { + RecompileJob* job = new(info->zone()) RecompileJob(*info); + RecompileJob::Status status = job->CreateGraph(); + if (status == RecompileJob::SUCCEEDED) { info.Detach(); shared->code()->set_profiler_ticks(0); - isolate->optimizing_compiler_thread()->QueueForOptimization(compiler); + isolate->optimizing_compiler_thread()->QueueForOptimization(job); ASSERT(!isolate->has_pending_exception()); return true; - } else if (status == OptimizingCompiler::BAILED_OUT) { + } else if (status == RecompileJob::BAILED_OUT) { isolate->clear_pending_exception(); InstallFullCode(*info); } @@ -1054,9 +1123,8 @@ bool Compiler::RecompileConcurrent(Handle<JSFunction> closure, } -Handle<Code> Compiler::InstallOptimizedCode( - OptimizingCompiler* optimizing_compiler) { - SmartPointer<CompilationInfo> info(optimizing_compiler->info()); +Handle<Code> Compiler::InstallOptimizedCode(RecompileJob* job) { + SmartPointer<CompilationInfo> info(job->info()); // The function may have already been optimized by OSR. Simply continue. // Except when OSR already disabled optimization for some reason. if (info->shared_info()->optimization_disabled()) { @@ -1077,24 +1145,24 @@ Handle<Code> Compiler::InstallOptimizedCode( isolate, Logger::TimerEventScope::v8_recompile_synchronous); // If crankshaft succeeded, install the optimized code else install // the unoptimized code. 
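// Aside (illustrative, not part of the patch): the tail of this function
// (end of this hunk) returns Handle<Code>::null() unless the job reports
// SUCCEEDED, so a caller can treat null as "aborted or bailed out":
//
//   Handle<Code> code = Compiler::InstallOptimizedCode(job);
//   if (code.is_null()) {
//     // dependency change, failed/bailed-out job, or active debugger
//   }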
- OptimizingCompiler::Status status = optimizing_compiler->last_status(); + RecompileJob::Status status = job->last_status(); if (info->HasAbortedDueToDependencyChange()) { info->set_bailout_reason(kBailedOutDueToDependencyChange); - status = optimizing_compiler->AbortOptimization(); - } else if (status != OptimizingCompiler::SUCCEEDED) { + status = job->AbortOptimization(); + } else if (status != RecompileJob::SUCCEEDED) { info->set_bailout_reason(kFailedBailedOutLastTime); - status = optimizing_compiler->AbortOptimization(); + status = job->AbortOptimization(); } else if (isolate->DebuggerHasBreakPoints()) { info->set_bailout_reason(kDebuggerIsActive); - status = optimizing_compiler->AbortOptimization(); + status = job->AbortOptimization(); } else { - status = optimizing_compiler->GenerateAndInstallCode(); - ASSERT(status == OptimizingCompiler::SUCCEEDED || - status == OptimizingCompiler::BAILED_OUT); + status = job->GenerateAndInstallCode(); + ASSERT(status == RecompileJob::SUCCEEDED || + status == RecompileJob::BAILED_OUT); } InstallCodeCommon(*info); - if (status == OptimizingCompiler::SUCCEEDED) { + if (status == RecompileJob::SUCCEEDED) { Handle<Code> code = info->code(); ASSERT(info->shared_info()->scope_info() != ScopeInfo::Empty(isolate)); info->closure()->ReplaceCode(*code); @@ -1115,8 +1183,8 @@ Handle<Code> Compiler::InstallOptimizedCode( // profiler ticks to prevent too soon re-opt after a deopt. info->shared_info()->code()->set_profiler_ticks(0); ASSERT(!info->closure()->IsInRecompileQueue()); - return (status == OptimizingCompiler::SUCCEEDED) ? info->code() - : Handle<Code>::null(); + return (status == RecompileJob::SUCCEEDED) ? info->code() + : Handle<Code>::null(); } diff --git a/deps/v8/src/compiler.h b/deps/v8/src/compiler.h index 8ceb61db9c..2d9e52a8e3 100644 --- a/deps/v8/src/compiler.h +++ b/deps/v8/src/compiler.h @@ -35,8 +35,6 @@ namespace v8 { namespace internal { -static const int kPrologueOffsetNotSet = -1; - class ScriptDataImpl; class HydrogenCodeStub; @@ -86,6 +84,7 @@ class CompilationInfo { ScriptDataImpl* pre_parse_data() const { return pre_parse_data_; } Handle<Context> context() const { return context_; } BailoutId osr_ast_id() const { return osr_ast_id_; } + uint32_t osr_pc_offset() const { return osr_pc_offset_; } int opt_count() const { return opt_count_; } int num_parameters() const; int num_heap_slots() const; @@ -268,12 +267,12 @@ class CompilationInfo { void set_bailout_reason(BailoutReason reason) { bailout_reason_ = reason; } int prologue_offset() const { - ASSERT_NE(kPrologueOffsetNotSet, prologue_offset_); + ASSERT_NE(Code::kPrologueOffsetNotSet, prologue_offset_); return prologue_offset_; } void set_prologue_offset(int prologue_offset) { - ASSERT_EQ(kPrologueOffsetNotSet, prologue_offset_); + ASSERT_EQ(Code::kPrologueOffsetNotSet, prologue_offset_); prologue_offset_ = prologue_offset; } @@ -505,14 +504,15 @@ class LChunk; // fail, bail-out to the full code generator or succeed. Apart from // their return value, the status of the phase last run can be checked // using last_status(). 
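// Aside (illustrative, not part of the patch): the three phases are driven
// essentially as in MakeCrankshaftCode() earlier in this diff, modulo the
// abort handling:
//
//   RecompileJob job(info);
//   if (job.CreateGraph() == RecompileJob::SUCCEEDED &&
//       job.OptimizeGraph() == RecompileJob::SUCCEEDED) {
//     job.GenerateAndInstallCode();
//   }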
-class OptimizingCompiler: public ZoneObject { +class RecompileJob: public ZoneObject { public: - explicit OptimizingCompiler(CompilationInfo* info) + explicit RecompileJob(CompilationInfo* info) : info_(info), graph_builder_(NULL), graph_(NULL), chunk_(NULL), - last_status_(FAILED) { } + last_status_(FAILED), + awaiting_install_(false) { } enum Status { FAILED, BAILED_OUT, SUCCEEDED @@ -532,6 +532,13 @@ class OptimizingCompiler: public ZoneObject { return SetLastStatus(BAILED_OUT); } + void WaitForInstall() { + ASSERT(info_->is_osr()); + awaiting_install_ = true; + } + + bool IsWaitingForInstall() { return awaiting_install_; } + private: CompilationInfo* info_; HOptimizedGraphBuilder* graph_builder_; @@ -541,6 +548,7 @@ class OptimizingCompiler: public ZoneObject { TimeDelta time_taken_to_optimize_; TimeDelta time_taken_to_codegen_; Status last_status_; + bool awaiting_install_; MUST_USE_RESULT Status SetLastStatus(Status status) { last_status_ = status; @@ -549,9 +557,8 @@ class OptimizingCompiler: public ZoneObject { void RecordOptimizationStats(); struct Timer { - Timer(OptimizingCompiler* compiler, TimeDelta* location) - : compiler_(compiler), - location_(location) { + Timer(RecompileJob* job, TimeDelta* location) + : job_(job), location_(location) { ASSERT(location_ != NULL); timer_.Start(); } @@ -560,7 +567,7 @@ class OptimizingCompiler: public ZoneObject { *location_ += timer_.Elapsed(); } - OptimizingCompiler* compiler_; + RecompileJob* job_; ElapsedTimer timer_; TimeDelta* location_; }; @@ -625,7 +632,7 @@ class Compiler : public AllStatic { bool is_toplevel, Handle<Script> script); - static Handle<Code> InstallOptimizedCode(OptimizingCompiler* info); + static Handle<Code> InstallOptimizedCode(RecompileJob* job); #ifdef ENABLE_DEBUGGER_SUPPORT static bool MakeCodeForLiveEdit(CompilationInfo* info); diff --git a/deps/v8/src/contexts.cc b/deps/v8/src/contexts.cc index 441ef9d9c3..710d30aa8e 100644 --- a/deps/v8/src/contexts.cc +++ b/deps/v8/src/contexts.cc @@ -259,7 +259,7 @@ Handle<Object> Context::Lookup(Handle<String> name, void Context::AddOptimizedFunction(JSFunction* function) { ASSERT(IsNativeContext()); -#ifdef DEBUG +#ifdef ENABLE_SLOW_ASSERTS if (FLAG_enable_slow_asserts) { Object* element = get(OPTIMIZED_FUNCTIONS_LIST); while (!element->IsUndefined()) { diff --git a/deps/v8/src/conversions-inl.h b/deps/v8/src/conversions-inl.h index 2f0a399d1a..7ba19ba0f1 100644 --- a/deps/v8/src/conversions-inl.h +++ b/deps/v8/src/conversions-inl.h @@ -355,7 +355,7 @@ double InternalStringToInt(UnicodeCache* unicode_cache, return JunkStringValue(); } - ASSERT(buffer_pos < kBufferSize); + SLOW_ASSERT(buffer_pos < kBufferSize); buffer[buffer_pos] = '\0'; Vector<const char> buffer_vector(buffer, buffer_pos); return negative ? 
-Strtod(buffer_vector, 0) : Strtod(buffer_vector, 0); @@ -692,7 +692,7 @@ double InternalStringToDouble(UnicodeCache* unicode_cache, exponent--; } - ASSERT(buffer_pos < kBufferSize); + SLOW_ASSERT(buffer_pos < kBufferSize); buffer[buffer_pos] = '\0'; double converted = Strtod(Vector<const char>(buffer, buffer_pos), exponent); diff --git a/deps/v8/src/conversions.cc b/deps/v8/src/conversions.cc index cdc42e34d9..5f1219eea9 100644 --- a/deps/v8/src/conversions.cc +++ b/deps/v8/src/conversions.cc @@ -31,6 +31,7 @@ #include "conversions-inl.h" #include "dtoa.h" +#include "list-inl.h" #include "strtod.h" #include "utils.h" @@ -45,8 +46,11 @@ namespace internal { double StringToDouble(UnicodeCache* unicode_cache, const char* str, int flags, double empty_string_val) { - const char* end = str + StrLength(str); - return InternalStringToDouble(unicode_cache, str, end, flags, + // We cast to const uint8_t* here to avoid instantiating the + // InternalStringToDouble() template for const char* as well. + const uint8_t* start = reinterpret_cast<const uint8_t*>(str); + const uint8_t* end = start + StrLength(str); + return InternalStringToDouble(unicode_cache, start, end, flags, empty_string_val); } @@ -55,11 +59,15 @@ double StringToDouble(UnicodeCache* unicode_cache, Vector<const char> str, int flags, double empty_string_val) { - const char* end = str.start() + str.length(); - return InternalStringToDouble(unicode_cache, str.start(), end, flags, + // We cast to const uint8_t* here to avoid instantiating the + // InternalStringToDouble() template for const char* as well. + const uint8_t* start = reinterpret_cast<const uint8_t*>(str.start()); + const uint8_t* end = start + str.length(); + return InternalStringToDouble(unicode_cache, start, end, flags, empty_string_val); } + double StringToDouble(UnicodeCache* unicode_cache, Vector<const uc16> str, int flags, diff --git a/deps/v8/src/counters.h b/deps/v8/src/counters.h index 93911d7216..821c25f8ce 100644 --- a/deps/v8/src/counters.h +++ b/deps/v8/src/counters.h @@ -259,22 +259,51 @@ class HistogramTimer : public Histogram { return Enabled() && timer_.IsStarted(); } + // TODO(bmeurer): Remove this when HistogramTimerScope is fixed. +#ifdef DEBUG + ElapsedTimer* timer() { return &timer_; } +#endif + private: ElapsedTimer timer_; }; // Helper class for scoping a HistogramTimer. +// TODO(bmeurer): The ifdeffery is an ugly hack around the fact that the +// Parser is currently reentrant (when it throws an error, we call back +// into JavaScript and all bets are off), but ElapsedTimer is not +// reentry-safe. Fix this properly and remove |allow_nesting|. 
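// Aside (illustrative, not part of the patch): in debug builds the
// |allow_nesting| flag below turns an inner scope into a no-op instead of
// restarting an already-running timer:
//
//   HistogramTimerScope outer(timer);          // starts the timer
//   {
//     HistogramTimerScope inner(timer, true);  // Start() skipped: running
//   }                                          // Stop() skipped as well
//   // outer's destructor performs the single matching Stop()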
class HistogramTimerScope BASE_EMBEDDED { public: - explicit HistogramTimerScope(HistogramTimer* timer) : - timer_(timer) { + explicit HistogramTimerScope(HistogramTimer* timer, + bool allow_nesting = false) +#ifdef DEBUG + : timer_(timer), + skipped_timer_start_(false) { + if (timer_->timer()->IsStarted() && allow_nesting) { + skipped_timer_start_ = true; + } else { + timer_->Start(); + } +#else + : timer_(timer) { timer_->Start(); +#endif } ~HistogramTimerScope() { +#ifdef DEBUG + if (!skipped_timer_start_) { + timer_->Stop(); + } +#else timer_->Stop(); +#endif } private: HistogramTimer* timer_; +#ifdef DEBUG + bool skipped_timer_start_; +#endif }; diff --git a/deps/v8/src/cpu-profiler.cc b/deps/v8/src/cpu-profiler.cc index e0f7aea18a..b1af621ccc 100644 --- a/deps/v8/src/cpu-profiler.cc +++ b/deps/v8/src/cpu-profiler.cc @@ -64,14 +64,15 @@ void ProfilerEventsProcessor::Enqueue(const CodeEventsContainer& event) { void ProfilerEventsProcessor::AddCurrentStack(Isolate* isolate) { TickSampleEventRecord record(last_code_event_id_); - TickSample* sample = &record.sample; - sample->state = isolate->current_vm_state(); - sample->pc = reinterpret_cast<Address>(sample); // Not NULL. - for (StackTraceFrameIterator it(isolate); - !it.done() && sample->frames_count < TickSample::kMaxFramesCount; - it.Advance()) { - sample->stack[sample->frames_count++] = it.frame()->pc(); + RegisterState regs; + StackFrameIterator it(isolate); + if (!it.done()) { + StackFrame* frame = it.frame(); + regs.sp = frame->sp(); + regs.fp = frame->fp(); + regs.pc = frame->pc(); } + record.sample.Init(isolate, regs); ticks_from_vm_buffer_.Enqueue(record); } @@ -260,7 +261,7 @@ void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag, Code* code, SharedFunctionInfo* shared, CompilationInfo* info, - Name* source, int line) { + Name* source, int line, int column) { if (FilterOutCodeCreateEvent(tag)) return; CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION); CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_; @@ -270,7 +271,8 @@ void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag, profiles_->GetFunctionName(shared->DebugName()), CodeEntry::kEmptyNamePrefix, profiles_->GetName(source), - line); + line, + column); if (info) { rec->entry->set_no_frame_ranges(info->ReleaseNoFrameRanges()); } @@ -435,8 +437,18 @@ void CpuProfiler::StartProcessorIfNotStarted() { logger->is_logging_ = false; generator_ = new ProfileGenerator(profiles_); Sampler* sampler = logger->sampler(); +#if V8_CC_MSVC && (_MSC_VER >= 1800) + // VS2013 reports "warning C4316: 'v8::internal::ProfilerEventsProcessor' + // : object allocated on the heap may not be aligned 64". We need to + // figure out if this is a legitimate warning or a compiler bug. + #pragma warning(push) + #pragma warning(disable:4316) +#endif processor_ = new ProfilerEventsProcessor( generator_, sampler, sampling_interval_); +#if V8_CC_MSVC && (_MSC_VER >= 1800) + #pragma warning(pop) +#endif is_profiling_ = true; // Enumerate stuff we already have in the heap. 
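// Aside (illustrative, not part of the patch): the push/disable/pop pattern
// above keeps the C4316 suppression local to the one allocation:
//
//   #pragma warning(push)            // save the current warning state
//   #pragma warning(disable:4316)    // heap object may not be aligned 64
//   processor_ = new ProfilerEventsProcessor(generator_, sampler,
//                                            sampling_interval_);
//   #pragma warning(pop)             // restore the saved state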
ASSERT(isolate_->heap()->HasBeenSetUp()); diff --git a/deps/v8/src/cpu-profiler.h b/deps/v8/src/cpu-profiler.h index 8aba5426d5..fcb9a67ddf 100644 --- a/deps/v8/src/cpu-profiler.h +++ b/deps/v8/src/cpu-profiler.h @@ -238,7 +238,7 @@ class CpuProfiler : public CodeEventListener { Code* code, SharedFunctionInfo* shared, CompilationInfo* info, - Name* source, int line); + Name* source, int line, int column); virtual void CodeCreateEvent(Logger::LogEventsAndTags tag, Code* code, int args_count); virtual void CodeMovingGCEvent() {} diff --git a/deps/v8/src/d8-debug.cc b/deps/v8/src/d8-debug.cc index 602ae166be..379631cb7c 100644 --- a/deps/v8/src/d8-debug.cc +++ b/deps/v8/src/d8-debug.cc @@ -30,8 +30,6 @@ #include "d8.h" #include "d8-debug.h" #include "debug-agent.h" -#include "platform.h" -#include "platform/socket.h" namespace v8 { diff --git a/deps/v8/src/d8-posix.cc b/deps/v8/src/d8-posix.cc index 424dbbb393..81c15ae742 100644 --- a/deps/v8/src/d8-posix.cc +++ b/deps/v8/src/d8-posix.cc @@ -245,7 +245,8 @@ static bool GetTimeouts(const v8::FunctionCallbackInfo<v8::Value>& args, if (args[3]->IsNumber()) { *total_timeout = args[3]->Int32Value(); } else { - ThrowException(String::New("system: Argument 4 must be a number")); + args.GetIsolate()->ThrowException( + String::New("system: Argument 4 must be a number")); return false; } } @@ -253,7 +254,8 @@ static bool GetTimeouts(const v8::FunctionCallbackInfo<v8::Value>& args, if (args[2]->IsNumber()) { *read_timeout = args[2]->Int32Value(); } else { - ThrowException(String::New("system: Argument 3 must be a number")); + args.GetIsolate()->ThrowException( + String::New("system: Argument 3 must be a number")); return false; } } @@ -456,7 +458,8 @@ void Shell::System(const v8::FunctionCallbackInfo<v8::Value>& args) { Handle<Array> command_args; if (args.Length() > 1) { if (!args[1]->IsArray()) { - ThrowException(String::New("system: Argument 2 must be an array")); + args.GetIsolate()->ThrowException( + String::New("system: Argument 2 must be an array")); return; } command_args = Handle<Array>::Cast(args[1]); @@ -464,11 +467,13 @@ void Shell::System(const v8::FunctionCallbackInfo<v8::Value>& args) { command_args = Array::New(0); } if (command_args->Length() > ExecArgs::kMaxArgs) { - ThrowException(String::New("Too many arguments to system()")); + args.GetIsolate()->ThrowException( + String::New("Too many arguments to system()")); return; } if (args.Length() < 1) { - ThrowException(String::New("Too few arguments to system()")); + args.GetIsolate()->ThrowException( + String::New("Too few arguments to system()")); return; } @@ -483,11 +488,13 @@ void Shell::System(const v8::FunctionCallbackInfo<v8::Value>& args) { int stdout_fds[2]; if (pipe(exec_error_fds) != 0) { - ThrowException(String::New("pipe syscall failed.")); + args.GetIsolate()->ThrowException( + String::New("pipe syscall failed.")); return; } if (pipe(stdout_fds) != 0) { - ThrowException(String::New("pipe syscall failed.")); + args.GetIsolate()->ThrowException( + String::New("pipe syscall failed.")); return; } @@ -531,17 +538,17 @@ void Shell::System(const v8::FunctionCallbackInfo<v8::Value>& args) { void Shell::ChangeDirectory(const v8::FunctionCallbackInfo<v8::Value>& args) { if (args.Length() != 1) { const char* message = "chdir() takes one argument"; - ThrowException(String::New(message)); + args.GetIsolate()->ThrowException(String::New(message)); return; } String::Utf8Value directory(args[0]); if (*directory == NULL) { const char* message = "os.chdir(): String conversion of argument 
failed."; - ThrowException(String::New(message)); + args.GetIsolate()->ThrowException(String::New(message)); return; } if (chdir(*directory) != 0) { - ThrowException(String::New(strerror(errno))); + args.GetIsolate()->ThrowException(String::New(strerror(errno))); return; } } @@ -550,7 +557,7 @@ void Shell::ChangeDirectory(const v8::FunctionCallbackInfo<v8::Value>& args) { void Shell::SetUMask(const v8::FunctionCallbackInfo<v8::Value>& args) { if (args.Length() != 1) { const char* message = "umask() takes one argument"; - ThrowException(String::New(message)); + args.GetIsolate()->ThrowException(String::New(message)); return; } if (args[0]->IsNumber()) { @@ -560,7 +567,7 @@ void Shell::SetUMask(const v8::FunctionCallbackInfo<v8::Value>& args) { return; } else { const char* message = "umask() argument must be numeric"; - ThrowException(String::New(message)); + args.GetIsolate()->ThrowException(String::New(message)); return; } } @@ -616,18 +623,18 @@ void Shell::MakeDirectory(const v8::FunctionCallbackInfo<v8::Value>& args) { mask = args[1]->Int32Value(); } else { const char* message = "mkdirp() second argument must be numeric"; - ThrowException(String::New(message)); + args.GetIsolate()->ThrowException(String::New(message)); return; } } else if (args.Length() != 1) { const char* message = "mkdirp() takes one or two arguments"; - ThrowException(String::New(message)); + args.GetIsolate()->ThrowException(String::New(message)); return; } String::Utf8Value directory(args[0]); if (*directory == NULL) { const char* message = "os.mkdirp(): String conversion of argument failed."; - ThrowException(String::New(message)); + args.GetIsolate()->ThrowException(String::New(message)); return; } mkdirp(*directory, mask); @@ -637,13 +644,13 @@ void Shell::MakeDirectory(const v8::FunctionCallbackInfo<v8::Value>& args) { void Shell::RemoveDirectory(const v8::FunctionCallbackInfo<v8::Value>& args) { if (args.Length() != 1) { const char* message = "rmdir() takes one or two arguments"; - ThrowException(String::New(message)); + args.GetIsolate()->ThrowException(String::New(message)); return; } String::Utf8Value directory(args[0]); if (*directory == NULL) { const char* message = "os.rmdir(): String conversion of argument failed."; - ThrowException(String::New(message)); + args.GetIsolate()->ThrowException(String::New(message)); return; } rmdir(*directory); @@ -653,7 +660,7 @@ void Shell::RemoveDirectory(const v8::FunctionCallbackInfo<v8::Value>& args) { void Shell::SetEnvironment(const v8::FunctionCallbackInfo<v8::Value>& args) { if (args.Length() != 2) { const char* message = "setenv() takes two arguments"; - ThrowException(String::New(message)); + args.GetIsolate()->ThrowException(String::New(message)); return; } String::Utf8Value var(args[0]); @@ -661,13 +668,13 @@ void Shell::SetEnvironment(const v8::FunctionCallbackInfo<v8::Value>& args) { if (*var == NULL) { const char* message = "os.setenv(): String conversion of variable name failed."; - ThrowException(String::New(message)); + args.GetIsolate()->ThrowException(String::New(message)); return; } if (*value == NULL) { const char* message = "os.setenv(): String conversion of variable contents failed."; - ThrowException(String::New(message)); + args.GetIsolate()->ThrowException(String::New(message)); return; } setenv(*var, *value, 1); @@ -677,14 +684,14 @@ void Shell::SetEnvironment(const v8::FunctionCallbackInfo<v8::Value>& args) { void Shell::UnsetEnvironment(const v8::FunctionCallbackInfo<v8::Value>& args) { if (args.Length() != 1) { const char* message = 
"unsetenv() takes one argument"; - ThrowException(String::New(message)); + args.GetIsolate()->ThrowException(String::New(message)); return; } String::Utf8Value var(args[0]); if (*var == NULL) { const char* message = "os.setenv(): String conversion of variable name failed."; - ThrowException(String::New(message)); + args.GetIsolate()->ThrowException(String::New(message)); return; } unsetenv(*var); diff --git a/deps/v8/src/d8-readline.cc b/deps/v8/src/d8-readline.cc index 298518d72a..0226f31c0b 100644 --- a/deps/v8/src/d8-readline.cc +++ b/deps/v8/src/d8-readline.cc @@ -150,7 +150,7 @@ char* ReadLineEditor::CompletionGenerator(const char* text, int state) { static Persistent<Array> current_completions; Isolate* isolate = read_line_editor.isolate_; Locker lock(isolate); - HandleScope scope; + HandleScope scope(isolate); Handle<Array> completions; if (state == 0) { Local<String> full_text = String::New(rl_line_buffer, rl_point); @@ -167,8 +167,7 @@ char* ReadLineEditor::CompletionGenerator(const char* text, int state) { String::Utf8Value str(str_obj); return strdup(*str); } else { - current_completions.Dispose(isolate); - current_completions.Clear(); + current_completions.Reset(); return NULL; } } diff --git a/deps/v8/src/d8.cc b/deps/v8/src/d8.cc index fb75d81c24..357c8a4899 100644 --- a/deps/v8/src/d8.cc +++ b/deps/v8/src/d8.cc @@ -49,6 +49,7 @@ #endif // !V8_SHARED #ifdef V8_SHARED +#include "../include/v8-defaults.h" #include "../include/v8-testing.h" #endif // V8_SHARED @@ -66,6 +67,7 @@ #include "natives.h" #include "platform.h" #include "v8.h" +#include "v8-defaults.h" #endif // V8_SHARED #if !defined(_WIN32) && !defined(_WIN64) @@ -158,6 +160,7 @@ i::OS::MemoryMappedFile* Shell::counters_file_ = NULL; CounterCollection Shell::local_counters_; CounterCollection* Shell::counters_ = &local_counters_; i::Mutex Shell::context_mutex_; +const i::TimeTicks Shell::kInitialTicks = i::TimeTicks::HighResolutionNow(); Persistent<Context> Shell::utility_context_; #endif // V8_SHARED @@ -263,7 +266,8 @@ PerIsolateData::RealmScope::RealmScope(PerIsolateData* data) : data_(data) { data_->realm_current_ = 0; data_->realm_switch_ = 0; data_->realms_ = new Persistent<Context>[1]; - data_->realms_[0].Reset(data_->isolate_, Context::GetEntered()); + data_->realms_[0].Reset(data_->isolate_, + data_->isolate_->GetEnteredContext()); data_->realm_shared_.Clear(); } @@ -286,11 +290,20 @@ int PerIsolateData::RealmFind(Handle<Context> context) { } +#ifndef V8_SHARED +// performance.now() returns a time stamp as double, measured in milliseconds. +void Shell::PerformanceNow(const v8::FunctionCallbackInfo<v8::Value>& args) { + i::TimeDelta delta = i::TimeTicks::HighResolutionNow() - kInitialTicks; + args.GetReturnValue().Set(delta.InMillisecondsF()); +} +#endif // V8_SHARED + + // Realm.current() returns the index of the currently active realm. 
void Shell::RealmCurrent(const v8::FunctionCallbackInfo<v8::Value>& args) { Isolate* isolate = args.GetIsolate(); PerIsolateData* data = PerIsolateData::Get(isolate); - int index = data->RealmFind(Context::GetEntered()); + int index = data->RealmFind(isolate->GetEnteredContext()); if (index == -1) return; args.GetReturnValue().Set(index); } @@ -869,6 +882,13 @@ Handle<ObjectTemplate> Shell::CreateGlobalTemplate(Isolate* isolate) { RealmSharedGet, RealmSharedSet); global_template->Set(String::New("Realm"), realm_template); +#ifndef V8_SHARED + Handle<ObjectTemplate> performance_template = ObjectTemplate::New(); + performance_template->Set(String::New("now"), + FunctionTemplate::New(PerformanceNow)); + global_template->Set(String::New("performance"), performance_template); +#endif // V8_SHARED + #if !defined(V8_SHARED) && !defined(_WIN32) && !defined(_WIN64) Handle<ObjectTemplate> os_templ = ObjectTemplate::New(); AddOSMethods(os_templ); @@ -939,8 +959,8 @@ Local<Context> Shell::CreateEvaluationContext(Isolate* isolate) { i::Factory* factory = reinterpret_cast<i::Isolate*>(isolate)->factory(); i::JSArguments js_args = i::FLAG_js_arguments; i::Handle<i::FixedArray> arguments_array = - factory->NewFixedArray(js_args.argc()); - for (int j = 0; j < js_args.argc(); j++) { + factory->NewFixedArray(js_args.argc); + for (int j = 0; j < js_args.argc; j++) { i::Handle<i::String> arg = factory->NewStringFromUtf8(i::CStrVector(js_args[j])); arguments_array->set(j, *arg); @@ -1228,6 +1248,7 @@ SourceGroup::~SourceGroup() { void SourceGroup::Execute(Isolate* isolate) { + bool exception_was_thrown = false; for (int i = begin_offset_; i < end_offset_; ++i) { const char* arg = argv_[i]; if (strcmp(arg, "-e") == 0 && i + 1 < end_offset_) { @@ -1236,7 +1257,8 @@ void SourceGroup::Execute(Isolate* isolate) { Handle<String> file_name = String::New("unnamed"); Handle<String> source = String::New(argv_[i + 1]); if (!Shell::ExecuteString(isolate, source, file_name, false, true)) { - Shell::Exit(1); + exception_was_thrown = true; + break; } ++i; } else if (arg[0] == '-') { @@ -1251,10 +1273,14 @@ void SourceGroup::Execute(Isolate* isolate) { Shell::Exit(1); } if (!Shell::ExecuteString(isolate, source, file_name, false, true)) { - Shell::Exit(1); + exception_was_thrown = true; + break; } } } + if (exception_was_thrown != Shell::options.expected_to_throw) { + Shell::Exit(1); + } } @@ -1410,6 +1436,9 @@ bool Shell::SetOptions(int argc, char* argv[]) { options.dump_heap_constants = true; argv[i] = NULL; #endif + } else if (strcmp(argv[i], "--throws") == 0) { + options.expected_to_throw = true; + argv[i] = NULL; } #ifdef V8_SHARED else if (strcmp(argv[i], "--dump-counters") == 0) { @@ -1525,7 +1554,7 @@ int Shell::RunMain(Isolate* isolate, int argc, char* argv[]) { // Start preemption if threads have been created and preemption is enabled. 
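// Aside (illustrative, not part of the patch): the new --throws flag and the
// exception_was_thrown bookkeeping in SourceGroup::Execute() above invert
// the exit condition, roughly:
//
//   d8 throwing-script.js            // Exit(1): unexpected exception
//   d8 --throws throwing-script.js   // continues: the throw was expected
//   d8 --throws clean-script.js      // Exit(1): expected throw is missing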
if (threads.length() > 0 && options.use_preemption) { - Locker::StartPreemption(options.preemption_interval); + Locker::StartPreemption(isolate, options.preemption_interval); } #endif // V8_SHARED } @@ -1543,7 +1572,7 @@ int Shell::RunMain(Isolate* isolate, int argc, char* argv[]) { if (threads.length() > 0 && options.use_preemption) { Locker lock(isolate); - Locker::StopPreemption(); + Locker::StopPreemption(isolate); } #endif // V8_SHARED return 0; @@ -1648,6 +1677,7 @@ int Shell::Main(int argc, char* argv[]) { #else SetStandaloneFlagsViaCommandLine(); #endif + v8::SetDefaultResourceConstraintsForCurrentPlatform(); ShellArrayBufferAllocator array_buffer_allocator; v8::V8::SetArrayBufferAllocator(&array_buffer_allocator); int result = 0; diff --git a/deps/v8/src/d8.gyp b/deps/v8/src/d8.gyp index 15d342dece..097abc0465 100644 --- a/deps/v8/src/d8.gyp +++ b/deps/v8/src/d8.gyp @@ -31,7 +31,7 @@ 'console%': '', # Enable support for Intel VTune. Supported on ia32/x64 only 'v8_enable_vtunejit%': 0, - 'v8_enable_i18n_support%': 0, + 'v8_enable_i18n_support%': 1, }, 'includes': ['../build/toolchain.gypi', '../build/features.gypi'], 'targets': [ @@ -81,13 +81,13 @@ }], ['v8_enable_i18n_support==1', { 'dependencies': [ - '<(DEPTH)/third_party/icu/icu.gyp:icui18n', - '<(DEPTH)/third_party/icu/icu.gyp:icuuc', + '<(icu_gyp_path):icui18n', + '<(icu_gyp_path):icuuc', ], }], ['OS=="win" and v8_enable_i18n_support==1', { 'dependencies': [ - '<(DEPTH)/third_party/icu/icu.gyp:icudata', + '<(icu_gyp_path):icudata', ], }], ], diff --git a/deps/v8/src/d8.h b/deps/v8/src/d8.h index 1ae1bcfe6e..411dfdda3e 100644 --- a/deps/v8/src/d8.h +++ b/deps/v8/src/d8.h @@ -232,6 +232,7 @@ class ShellOptions { interactive_shell(false), test_shell(false), dump_heap_constants(false), + expected_to_throw(false), num_isolates(1), isolate_sources(NULL) { } @@ -256,6 +257,7 @@ class ShellOptions { bool interactive_shell; bool test_shell; bool dump_heap_constants; + bool expected_to_throw; int num_isolates; SourceGroup* isolate_sources; }; @@ -300,6 +302,8 @@ class Shell : public i::AllStatic { Handle<String> command); static void DispatchDebugMessages(); #endif // ENABLE_DEBUGGER_SUPPORT + + static void PerformanceNow(const v8::FunctionCallbackInfo<v8::Value>& args); #endif // V8_SHARED static void RealmCurrent(const v8::FunctionCallbackInfo<v8::Value>& args); @@ -391,6 +395,7 @@ class Shell : public i::AllStatic { static CounterCollection* counters_; static i::OS::MemoryMappedFile* counters_file_; static i::Mutex context_mutex_; + static const i::TimeTicks kInitialTicks; static Counter* GetCounter(const char* name, bool is_histogram); static void InstallUtilityScript(Isolate* isolate); diff --git a/deps/v8/src/d8.js b/deps/v8/src/d8.js index 3efea06378..35b61d54ee 100644 --- a/deps/v8/src/d8.js +++ b/deps/v8/src/d8.js @@ -40,7 +40,7 @@ function log10(num) { function ToInspectableObject(obj) { if (!obj && typeof obj === 'object') { - return void 0; + return UNDEFINED; } else { return Object(obj); } @@ -333,7 +333,7 @@ function DebugRequest(cmd_line) { } if ((cmd === undefined) || !cmd) { - this.request_ = void 0; + this.request_ = UNDEFINED; return; } @@ -492,7 +492,7 @@ function DebugRequest(cmd_line) { case 'trace': case 'tr': // Return undefined to indicate command handled internally (no JSON). 
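// Aside (illustrative, not part of the patch): UNDEFINED, NAN, IS_NULL() and
// friends in these .js files are build-time macros expanded into the native
// scripts, presumably defined in src/macros.py along the lines of:
//
//   const UNDEFINED = (void 0);
//   macro IS_NULL(arg) = (arg === null);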
- this.request_ = void 0; + this.request_ = UNDEFINED; this.traceCommand_(args); break; @@ -500,7 +500,7 @@ function DebugRequest(cmd_line) { case '?': this.helpCommand_(args); // Return undefined to indicate command handled internally (no JSON). - this.request_ = void 0; + this.request_ = UNDEFINED; break; default: @@ -2124,7 +2124,7 @@ function SimpleObjectToJSON_(object) { var property_value_json; switch (typeof property_value) { case 'object': - if (property_value === null) { + if (IS_NULL(property_value)) { property_value_json = 'null'; } else if (typeof property_value.toJSONProtocol == 'function') { property_value_json = property_value.toJSONProtocol(true); @@ -2217,7 +2217,7 @@ function Stringify(x, depth) { case "symbol": return "Symbol(" + (x.name ? Stringify(x.name, depth) : "") + ")" case "object": - if (x === null) return "null"; + if (IS_NULL(x)) return "null"; if (x.constructor && x.constructor.name === "Array") { var elems = []; for (var i = 0; i < x.length; ++i) { @@ -2233,7 +2233,7 @@ function Stringify(x, depth) { var props = []; for (var name in x) { var desc = Object.getOwnPropertyDescriptor(x, name); - if (desc === void 0) continue; + if (IS_UNDEFINED(desc)) continue; if ("value" in desc) { props.push(name + ": " + Stringify(desc.value, depth - 1)); } diff --git a/deps/v8/src/date.js b/deps/v8/src/date.js index 62999e9de6..1b128c3a0a 100644 --- a/deps/v8/src/date.js +++ b/deps/v8/src/date.js @@ -41,7 +41,7 @@ function ThrowDateTypeError() { } -var timezone_cache_time = $NaN; +var timezone_cache_time = NAN; var timezone_cache_timezone; function LocalTimezone(t) { @@ -66,10 +66,10 @@ function UTC(time) { // ECMA 262 - 15.9.1.11 function MakeTime(hour, min, sec, ms) { - if (!$isFinite(hour)) return $NaN; - if (!$isFinite(min)) return $NaN; - if (!$isFinite(sec)) return $NaN; - if (!$isFinite(ms)) return $NaN; + if (!$isFinite(hour)) return NAN; + if (!$isFinite(min)) return NAN; + if (!$isFinite(sec)) return NAN; + if (!$isFinite(ms)) return NAN; return TO_INTEGER(hour) * msPerHour + TO_INTEGER(min) * msPerMinute + TO_INTEGER(sec) * msPerSecond @@ -90,7 +90,7 @@ function TimeInYear(year) { // MakeDay(2007, -33, 1) --> MakeDay(2004, 3, 1) // MakeDay(2007, 14, -50) --> MakeDay(2007, 8, 11) function MakeDay(year, month, date) { - if (!$isFinite(year) || !$isFinite(month) || !$isFinite(date)) return $NaN; + if (!$isFinite(year) || !$isFinite(month) || !$isFinite(date)) return NAN; // Convert to integer and map -0 to 0. year = TO_INTEGER_MAP_MINUS_ZERO(year); @@ -99,7 +99,7 @@ function MakeDay(year, month, date) { if (year < kMinYear || year > kMaxYear || month < kMinMonth || month > kMaxMonth) { - return $NaN; + return NAN; } // Now we rely on year and month being SMIs. @@ -115,15 +115,15 @@ function MakeDate(day, time) { // is no way that the time can be within range even after UTC // conversion we return NaN immediately instead of relying on // TimeClip to do it. - if ($abs(time) > MAX_TIME_BEFORE_UTC) return $NaN; + if ($abs(time) > MAX_TIME_BEFORE_UTC) return NAN; return time; } // ECMA 262 - 15.9.1.14 function TimeClip(time) { - if (!$isFinite(time)) return $NaN; - if ($abs(time) > MAX_TIME_MS) return $NaN; + if (!$isFinite(time)) return NAN; + if ($abs(time) > MAX_TIME_MS) return NAN; return TO_INTEGER(time); } @@ -132,7 +132,7 @@ function TimeClip(time) { // strings over and over again. var Date_cache = { // Cached time value. - time: $NaN, + time: NAN, // String input for which the cached time is valid. 
string: null }; @@ -269,7 +269,7 @@ var parse_buffer = $Array(8); // ECMA 262 - 15.9.4.2 function DateParse(string) { var arr = %DateParseString(ToString(string), parse_buffer); - if (IS_NULL(arr)) return $NaN; + if (IS_NULL(arr)) return NAN; var day = MakeDay(arr[0], arr[1], arr[2]); var time = MakeTime(arr[3], arr[4], arr[5], arr[6]); @@ -671,7 +671,7 @@ function DateGetYear() { function DateSetYear(year) { CHECK_DATE(this); year = ToNumber(year); - if (NUMBER_IS_NAN(year)) return SET_UTC_DATE_VALUE(this, $NaN); + if (NUMBER_IS_NAN(year)) return SET_UTC_DATE_VALUE(this, NAN); year = (0 <= TO_INTEGER(year) && TO_INTEGER(year) <= 99) ? 1900 + TO_INTEGER(year) : year; var t = LOCAL_DATE_VALUE(this); @@ -746,12 +746,12 @@ function DateToJSON(key) { function ResetDateCache() { // Reset the timezone cache: - timezone_cache_time = $NaN; + timezone_cache_time = NAN; timezone_cache_timezone = undefined; // Reset the date cache: cache = Date_cache; - cache.time = $NaN; + cache.time = NAN; cache.string = null; } @@ -762,7 +762,7 @@ function SetUpDate() { %CheckIsBootstrapping(); %SetCode($Date, DateConstructor); - %FunctionSetPrototype($Date, new $Date($NaN)); + %FunctionSetPrototype($Date, new $Date(NAN)); // Set up non-enumerable properties of the Date object itself. InstallFunctions($Date, DONT_ENUM, $Array( diff --git a/deps/v8/src/debug-debugger.js b/deps/v8/src/debug-debugger.js index 19209d4b95..b159ae3b29 100644 --- a/deps/v8/src/debug-debugger.js +++ b/deps/v8/src/debug-debugger.js @@ -448,7 +448,7 @@ ScriptBreakPoint.prototype.set = function (script) { // If the position is not found in the script (the script might be shorter // than it used to be) just ignore it. - if (position === null) return; + if (IS_NULL(position)) return; // Create a break point object and set the break point. break_point = MakeBreakPoint(position, this); @@ -2064,7 +2064,7 @@ DebugCommandProcessor.resolveValue_ = function(value_description) { } else if ("value" in value_description) { return value_description.value; } else if (value_description.type == UNDEFINED_TYPE) { - return void 0; + return UNDEFINED; } else if (value_description.type == NULL_TYPE) { return null; } else { diff --git a/deps/v8/src/debug.cc b/deps/v8/src/debug.cc index 0496b8cb00..35970e5ee9 100644 --- a/deps/v8/src/debug.cc +++ b/deps/v8/src/debug.cc @@ -1793,10 +1793,14 @@ void Debug::HandleStepIn(Handle<JSFunction> function, // function to be called and not the code for Builtins::FunctionApply or // Builtins::FunctionCall. The receiver of call/apply is the target // function. 
- if (!holder.is_null() && holder->IsJSFunction() && - !JSFunction::cast(*holder)->IsBuiltin()) { + if (!holder.is_null() && holder->IsJSFunction()) { Handle<JSFunction> js_function = Handle<JSFunction>::cast(holder); - Debug::FloodWithOneShot(js_function); + if (!js_function->IsBuiltin()) { + Debug::FloodWithOneShot(js_function); + } else if (js_function->shared()->bound()) { + // Handle Function.prototype.bind + Debug::FloodBoundFunctionWithOneShot(js_function); + } } } else { Debug::FloodWithOneShot(function); @@ -2102,6 +2106,7 @@ void Debug::PrepareForBreakPoints() { if (!shared->allows_lazy_compilation()) continue; if (!shared->script()->IsScript()) continue; + if (function->IsBuiltin()) continue; if (shared->code()->gc_metadata() == active_code_marker) continue; Code::Kind kind = function->code()->kind(); @@ -3131,8 +3136,7 @@ void Debugger::NotifyMessageHandler(v8::DebugEvent event, v8::Local<v8::Function> fun = v8::Local<v8::Function>::Cast(api_exec_state->Get(fun_name)); - v8::Handle<v8::Boolean> running = - auto_continue ? v8::True() : v8::False(); + v8::Handle<v8::Boolean> running = v8::Boolean::New(auto_continue); static const int kArgc = 1; v8::Handle<Value> argv[kArgc] = { running }; cmd_processor = v8::Local<v8::Object>::Cast( diff --git a/deps/v8/src/debug.h b/deps/v8/src/debug.h index 2b5f43ab49..8e71ea6705 100644 --- a/deps/v8/src/debug.h +++ b/deps/v8/src/debug.h @@ -38,6 +38,7 @@ #include "frames-inl.h" #include "hashmap.h" #include "platform.h" +#include "platform/socket.h" #include "string-stream.h" #include "v8threads.h" diff --git a/deps/v8/src/marking-thread.cc b/deps/v8/src/defaults.cc index 58bca3662d..a03cf69b08 100644 --- a/deps/v8/src/marking-thread.cc +++ b/deps/v8/src/defaults.cc @@ -25,65 +25,46 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -#include "marking-thread.h" +// The GYP based build ends up defining USING_V8_SHARED when compiling this +// file. +#undef USING_V8_SHARED +#include "../include/v8-defaults.h" +#include "platform.h" +#include "globals.h" #include "v8.h" -#include "isolate.h" -#include "v8threads.h" - namespace v8 { -namespace internal { - -MarkingThread::MarkingThread(Isolate* isolate) - : Thread("MarkingThread"), - isolate_(isolate), - heap_(isolate->heap()), - start_marking_semaphore_(0), - end_marking_semaphore_(0), - stop_semaphore_(0) { - NoBarrier_Store(&stop_thread_, static_cast<AtomicWord>(false)); - id_ = NoBarrier_AtomicIncrement(&id_counter_, 1); -} - - -Atomic32 MarkingThread::id_counter_ = -1; - - -void MarkingThread::Run() { - Isolate::SetIsolateThreadLocals(isolate_, NULL); - DisallowHeapAllocation no_allocation; - DisallowHandleAllocation no_handles; - DisallowHandleDereference no_deref; - while (true) { - start_marking_semaphore_.Wait(); - if (Acquire_Load(&stop_thread_)) { - stop_semaphore_.Signal(); - return; - } - - end_marking_semaphore_.Signal(); +bool ConfigureResourceConstraintsForCurrentPlatform( + ResourceConstraints* constraints) { + if (constraints == NULL) { + return false; } -} - - -void MarkingThread::Stop() { - Release_Store(&stop_thread_, static_cast<AtomicWord>(true)); - start_marking_semaphore_.Signal(); - stop_semaphore_.Wait(); - Join(); -} - -void MarkingThread::StartMarking() { - start_marking_semaphore_.Signal(); + int lump_of_memory = (i::kPointerSize / 4) * i::MB; + + // The young_space_size should be a power of 2 and old_generation_size should + // be a multiple of Page::kPageSize. 
+#if V8_OS_ANDROID + constraints->set_max_young_space_size(8 * lump_of_memory); + constraints->set_max_old_space_size(256 * lump_of_memory); + constraints->set_max_executable_size(192 * lump_of_memory); +#else + constraints->set_max_young_space_size(16 * lump_of_memory); + constraints->set_max_old_space_size(700 * lump_of_memory); + constraints->set_max_executable_size(256 * lump_of_memory); +#endif + return true; } -void MarkingThread::WaitForMarkingThread() { - end_marking_semaphore_.Wait(); +bool SetDefaultResourceConstraintsForCurrentPlatform() { + ResourceConstraints constraints; + if (!ConfigureResourceConstraintsForCurrentPlatform(&constraints)) + return false; + return SetResourceConstraints(&constraints); } -} } // namespace v8::internal +} // namespace v8 diff --git a/deps/v8/src/deoptimizer.cc b/deps/v8/src/deoptimizer.cc index c979a534d8..84e80b9d9a 100644 --- a/deps/v8/src/deoptimizer.cc +++ b/deps/v8/src/deoptimizer.cc @@ -1494,7 +1494,7 @@ void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator, } intptr_t caller_arg_count = 0; - bool arg_count_known = descriptor->stack_parameter_count_ == NULL; + bool arg_count_known = !descriptor->stack_parameter_count_.is_valid(); // Build the Arguments object for the caller's parameters and a pointer to it. output_frame_offset -= kPointerSize; @@ -1614,12 +1614,16 @@ Handle<Object> Deoptimizer::MaterializeNextHeapObject() { } } else { // Dispatch on the instance type of the object to be materialized. - Handle<Map> map = Handle<Map>::cast(MaterializeNextValue()); + // We also need to make sure that the representation of all fields + // in the given object are general enough to hold a tagged value. + Handle<Map> map = Map::GeneralizeAllFieldRepresentations( + Handle<Map>::cast(MaterializeNextValue()), Representation::Tagged()); switch (map->instance_type()) { case HEAP_NUMBER_TYPE: { - Handle<HeapNumber> number = - Handle<HeapNumber>::cast(MaterializeNextValue()); - materialized_objects_->Add(number); + Handle<HeapNumber> object = isolate_->factory()->NewHeapNumber(0.0); + materialized_objects_->Add(object); + Handle<Object> number = MaterializeNextValue(); + object->set_value(number->Number()); materialization_value_index_ += kDoubleSize / kPointerSize - 1; break; } @@ -1693,29 +1697,35 @@ void Deoptimizer::MaterializeHeapObjects(JavaScriptFrameIterator* it) { // output frames are used to materialize arguments objects later on they need // to already contain valid heap numbers. for (int i = 0; i < deferred_heap_numbers_.length(); i++) { - HeapNumberMaterializationDescriptor d = deferred_heap_numbers_[i]; + HeapNumberMaterializationDescriptor<Address> d = deferred_heap_numbers_[i]; Handle<Object> num = isolate_->factory()->NewNumber(d.value()); if (trace_) { PrintF("Materialized a new heap number %p [%e] in slot %p\n", reinterpret_cast<void*>(*num), d.value(), - d.slot_address()); + d.destination()); } - Memory::Object_at(d.slot_address()) = *num; + Memory::Object_at(d.destination()) = *num; } // Materialize all heap numbers required for arguments/captured objects. 
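For concreteness, the lump_of_memory arithmetic in defaults.cc above scales with pointer width: kPointerSize is 4 or 8, so a lump is 1 MB on 32-bit targets and 2 MB on 64-bit ones. A quick worked example of the resulting non-Android limits (plain C++, not V8 code):

    #include <cstdio>

    int main() {
      const long long MB = 1024 * 1024;
      for (int pointer_size : {4, 8}) {
        // Same formula as defaults.cc: (i::kPointerSize / 4) * i::MB.
        long long lump = (pointer_size / 4) * MB;
        std::printf("%d-bit: young %lld MB, old %lld MB, executable %lld MB\n",
                    pointer_size * 8, 16 * lump / MB, 700 * lump / MB,
                    256 * lump / MB);
      }
      return 0;
    }

This prints 16/700/256 MB for 32-bit builds and 32/1400/512 MB for 64-bit builds; the Android branch uses the smaller 8/256/192 multipliers.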
- for (int i = 0; i < values.length(); i++) { - if (!values.at(i)->IsTheHole()) continue; - double double_value = deferred_objects_double_values_[i]; - Handle<Object> num = isolate_->factory()->NewNumber(double_value); + for (int i = 0; i < deferred_objects_double_values_.length(); i++) { + HeapNumberMaterializationDescriptor<int> d = + deferred_objects_double_values_[i]; + Handle<Object> num = isolate_->factory()->NewNumber(d.value()); if (trace_) { - PrintF("Materialized a new heap number %p [%e] for object\n", - reinterpret_cast<void*>(*num), double_value); + PrintF("Materialized a new heap number %p [%e] for object at %d\n", + reinterpret_cast<void*>(*num), + d.value(), + d.destination()); } - values.Set(i, num); + ASSERT(values.at(d.destination())->IsTheHole()); + values.Set(d.destination(), num); } + // Play it safe and clear all object double values before we continue. + deferred_objects_double_values_.Clear(); + // Materialize arguments/captured objects. if (!deferred_objects_.is_empty()) { List<Handle<Object> > materialized_objects(deferred_objects_.length()); @@ -1765,11 +1775,11 @@ void Deoptimizer::MaterializeHeapNumbersForDebuggerInspectableFrame( Address parameters_bottom = parameters_top + parameters_size; Address expressions_bottom = expressions_top + expressions_size; for (int i = 0; i < deferred_heap_numbers_.length(); i++) { - HeapNumberMaterializationDescriptor d = deferred_heap_numbers_[i]; + HeapNumberMaterializationDescriptor<Address> d = deferred_heap_numbers_[i]; // Check of the heap number to materialize actually belong to the frame // being extracted. - Address slot = d.slot_address(); + Address slot = d.destination(); if (parameters_top <= slot && slot < parameters_bottom) { Handle<Object> num = isolate_->factory()->NewNumber(d.value()); @@ -1781,7 +1791,7 @@ void Deoptimizer::MaterializeHeapNumbersForDebuggerInspectableFrame( "for parameter slot #%d\n", reinterpret_cast<void*>(*num), d.value(), - d.slot_address(), + d.destination(), index); } @@ -1797,7 +1807,7 @@ void Deoptimizer::MaterializeHeapNumbersForDebuggerInspectableFrame( "for expression slot #%d\n", reinterpret_cast<void*>(*num), d.value(), - d.slot_address(), + d.destination(), index); } @@ -2337,85 +2347,6 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator, } -void Deoptimizer::PatchInterruptCode(Isolate* isolate, - Code* unoptimized) { - DisallowHeapAllocation no_gc; - Code* replacement_code = - isolate->builtins()->builtin(Builtins::kOnStackReplacement); - - // Iterate over the back edge table and patch every interrupt - // call to an unconditional call to the replacement code. - int loop_nesting_level = unoptimized->allow_osr_at_loop_nesting_level(); - - for (FullCodeGenerator::BackEdgeTableIterator back_edges(unoptimized, &no_gc); - !back_edges.Done(); - back_edges.Next()) { - if (static_cast<int>(back_edges.loop_depth()) == loop_nesting_level) { - ASSERT_EQ(NOT_PATCHED, GetInterruptPatchState(isolate, - unoptimized, - back_edges.pc())); - PatchInterruptCodeAt(unoptimized, - back_edges.pc(), - replacement_code); - } - } - - unoptimized->set_back_edges_patched_for_osr(true); - ASSERT(Deoptimizer::VerifyInterruptCode( - isolate, unoptimized, loop_nesting_level)); -} - - -void Deoptimizer::RevertInterruptCode(Isolate* isolate, - Code* unoptimized) { - DisallowHeapAllocation no_gc; - Code* interrupt_code = - isolate->builtins()->builtin(Builtins::kInterruptCheck); - - // Iterate over the back edge table and revert the patched interrupt calls. 
- ASSERT(unoptimized->back_edges_patched_for_osr()); - int loop_nesting_level = unoptimized->allow_osr_at_loop_nesting_level(); - - for (FullCodeGenerator::BackEdgeTableIterator back_edges(unoptimized, &no_gc); - !back_edges.Done(); - back_edges.Next()) { - if (static_cast<int>(back_edges.loop_depth()) <= loop_nesting_level) { - ASSERT_EQ(PATCHED_FOR_OSR, GetInterruptPatchState(isolate, - unoptimized, - back_edges.pc())); - RevertInterruptCodeAt(unoptimized, back_edges.pc(), interrupt_code); - } - } - - unoptimized->set_back_edges_patched_for_osr(false); - unoptimized->set_allow_osr_at_loop_nesting_level(0); - // Assert that none of the back edges are patched anymore. - ASSERT(Deoptimizer::VerifyInterruptCode(isolate, unoptimized, -1)); -} - - -#ifdef DEBUG -bool Deoptimizer::VerifyInterruptCode(Isolate* isolate, - Code* unoptimized, - int loop_nesting_level) { - DisallowHeapAllocation no_gc; - for (FullCodeGenerator::BackEdgeTableIterator back_edges(unoptimized, &no_gc); - !back_edges.Done(); - back_edges.Next()) { - uint32_t loop_depth = back_edges.loop_depth(); - CHECK_LE(static_cast<int>(loop_depth), Code::kMaxLoopNestingMarker); - // Assert that all back edges for shallower loops (and only those) - // have already been patched. - CHECK_EQ((static_cast<int>(loop_depth) <= loop_nesting_level), - GetInterruptPatchState(isolate, - unoptimized, - back_edges.pc()) != NOT_PATCHED); - } - return true; -} -#endif // DEBUG - - unsigned Deoptimizer::ComputeInputFrameSize() const { unsigned fixed_size = ComputeFixedSize(function_); // The fp-to-sp delta already takes the context and the function @@ -2484,18 +2415,19 @@ void Deoptimizer::AddObjectDuplication(intptr_t slot, int object_index) { void Deoptimizer::AddObjectTaggedValue(intptr_t value) { deferred_objects_tagged_values_.Add(reinterpret_cast<Object*>(value)); - deferred_objects_double_values_.Add(isolate()->heap()->nan_value()->value()); } void Deoptimizer::AddObjectDoubleValue(double value) { deferred_objects_tagged_values_.Add(isolate()->heap()->the_hole_value()); - deferred_objects_double_values_.Add(value); + HeapNumberMaterializationDescriptor<int> value_desc( + deferred_objects_tagged_values_.length() - 1, value); + deferred_objects_double_values_.Add(value_desc); } void Deoptimizer::AddDoubleValue(intptr_t slot_address, double value) { - HeapNumberMaterializationDescriptor value_desc( + HeapNumberMaterializationDescriptor<Address> value_desc( reinterpret_cast<Address>(slot_address), value); deferred_heap_numbers_.Add(value_desc); } @@ -2814,46 +2746,11 @@ int Translation::NumberOfOperandsFor(Opcode opcode) { #if defined(OBJECT_PRINT) || defined(ENABLE_DISASSEMBLER) const char* Translation::StringFor(Opcode opcode) { +#define TRANSLATION_OPCODE_CASE(item) case item: return #item; switch (opcode) { - case BEGIN: - return "BEGIN"; - case JS_FRAME: - return "JS_FRAME"; - case ARGUMENTS_ADAPTOR_FRAME: - return "ARGUMENTS_ADAPTOR_FRAME"; - case CONSTRUCT_STUB_FRAME: - return "CONSTRUCT_STUB_FRAME"; - case GETTER_STUB_FRAME: - return "GETTER_STUB_FRAME"; - case SETTER_STUB_FRAME: - return "SETTER_STUB_FRAME"; - case COMPILED_STUB_FRAME: - return "COMPILED_STUB_FRAME"; - case REGISTER: - return "REGISTER"; - case INT32_REGISTER: - return "INT32_REGISTER"; - case UINT32_REGISTER: - return "UINT32_REGISTER"; - case DOUBLE_REGISTER: - return "DOUBLE_REGISTER"; - case STACK_SLOT: - return "STACK_SLOT"; - case INT32_STACK_SLOT: - return "INT32_STACK_SLOT"; - case UINT32_STACK_SLOT: - return "UINT32_STACK_SLOT"; - case DOUBLE_STACK_SLOT: - 
return "DOUBLE_STACK_SLOT"; - case LITERAL: - return "LITERAL"; - case DUPLICATED_OBJECT: - return "DUPLICATED_OBJECT"; - case ARGUMENTS_OBJECT: - return "ARGUMENTS_OBJECT"; - case CAPTURED_OBJECT: - return "CAPTURED_OBJECT"; + TRANSLATION_OPCODE_LIST(TRANSLATION_OPCODE_CASE) } +#undef TRANSLATION_OPCODE_CASE UNREACHABLE(); return ""; } diff --git a/deps/v8/src/deoptimizer.h b/deps/v8/src/deoptimizer.h index 7ee5908f76..4e9d281ea5 100644 --- a/deps/v8/src/deoptimizer.h +++ b/deps/v8/src/deoptimizer.h @@ -60,17 +60,18 @@ class FrameDescription; class TranslationIterator; class DeoptimizedFrameInfo; +template<typename T> class HeapNumberMaterializationDescriptor BASE_EMBEDDED { public: - HeapNumberMaterializationDescriptor(Address slot_address, double val) - : slot_address_(slot_address), val_(val) { } + HeapNumberMaterializationDescriptor(T destination, double value) + : destination_(destination), value_(value) { } - Address slot_address() const { return slot_address_; } - double value() const { return val_; } + T destination() const { return destination_; } + double value() const { return value_; } private: - Address slot_address_; - double val_; + T destination_; + double value_; }; @@ -131,11 +132,6 @@ class Deoptimizer : public Malloced { DEBUGGER }; - enum InterruptPatchState { - NOT_PATCHED, - PATCHED_FOR_OSR - }; - static const int kBailoutTypesWithCodeEntry = SOFT + 1; struct JumpTableEntry { @@ -213,39 +209,6 @@ class Deoptimizer : public Malloced { // The size in bytes of the code required at a lazy deopt patch site. static int patch_size(); - // Patch all interrupts with allowed loop depth in the unoptimized code to - // unconditionally call replacement_code. - static void PatchInterruptCode(Isolate* isolate, - Code* unoptimized_code); - - // Patch the interrupt at the instruction before pc_after in - // the unoptimized code to unconditionally call replacement_code. - static void PatchInterruptCodeAt(Code* unoptimized_code, - Address pc_after, - Code* replacement_code); - - // Change all patched interrupts patched in the unoptimized code - // back to normal interrupts. - static void RevertInterruptCode(Isolate* isolate, - Code* unoptimized_code); - - // Change patched interrupt in the unoptimized code - // back to a normal interrupt. - static void RevertInterruptCodeAt(Code* unoptimized_code, - Address pc_after, - Code* interrupt_code); - -#ifdef DEBUG - static InterruptPatchState GetInterruptPatchState(Isolate* isolate, - Code* unoptimized_code, - Address pc_after); - - // Verify that all back edges of a certain loop depth are patched. - static bool VerifyInterruptCode(Isolate* isolate, - Code* unoptimized_code, - int loop_nesting_level); -#endif // DEBUG - ~Deoptimizer(); void MaterializeHeapObjects(JavaScriptFrameIterator* it); @@ -469,9 +432,10 @@ class Deoptimizer : public Malloced { // Deferred values to be materialized. List<Object*> deferred_objects_tagged_values_; - List<double> deferred_objects_double_values_; + List<HeapNumberMaterializationDescriptor<int> > + deferred_objects_double_values_; List<ObjectMaterializationDescriptor> deferred_objects_; - List<HeapNumberMaterializationDescriptor> deferred_heap_numbers_; + List<HeapNumberMaterializationDescriptor<Address> > deferred_heap_numbers_; // Output frame information. Only used during heap object materialization. 
List<Handle<JSFunction> > jsframe_functions_; @@ -542,7 +506,15 @@ class FrameDescription { void SetCallerFp(unsigned offset, intptr_t value); intptr_t GetRegister(unsigned n) const { - ASSERT(n < ARRAY_SIZE(registers_)); +#if DEBUG + // This convoluted ASSERT is needed to work around a gcc problem that + // improperly detects an array bounds overflow in optimized debug builds + // when using a plain ASSERT. + if (n >= ARRAY_SIZE(registers_)) { + ASSERT(false); + return 0; + } +#endif return registers_[n]; } @@ -717,29 +689,36 @@ class TranslationIterator BASE_EMBEDDED { }; +#define TRANSLATION_OPCODE_LIST(V) \ + V(BEGIN) \ + V(JS_FRAME) \ + V(CONSTRUCT_STUB_FRAME) \ + V(GETTER_STUB_FRAME) \ + V(SETTER_STUB_FRAME) \ + V(ARGUMENTS_ADAPTOR_FRAME) \ + V(COMPILED_STUB_FRAME) \ + V(DUPLICATED_OBJECT) \ + V(ARGUMENTS_OBJECT) \ + V(CAPTURED_OBJECT) \ + V(REGISTER) \ + V(INT32_REGISTER) \ + V(UINT32_REGISTER) \ + V(DOUBLE_REGISTER) \ + V(STACK_SLOT) \ + V(INT32_STACK_SLOT) \ + V(UINT32_STACK_SLOT) \ + V(DOUBLE_STACK_SLOT) \ + V(LITERAL) + + class Translation BASE_EMBEDDED { public: +#define DECLARE_TRANSLATION_OPCODE_ENUM(item) item, enum Opcode { - BEGIN, - JS_FRAME, - CONSTRUCT_STUB_FRAME, - GETTER_STUB_FRAME, - SETTER_STUB_FRAME, - ARGUMENTS_ADAPTOR_FRAME, - COMPILED_STUB_FRAME, - DUPLICATED_OBJECT, - ARGUMENTS_OBJECT, - CAPTURED_OBJECT, - REGISTER, - INT32_REGISTER, - UINT32_REGISTER, - DOUBLE_REGISTER, - STACK_SLOT, - INT32_STACK_SLOT, - UINT32_STACK_SLOT, - DOUBLE_STACK_SLOT, - LITERAL + TRANSLATION_OPCODE_LIST(DECLARE_TRANSLATION_OPCODE_ENUM) + LAST = LITERAL }; +#undef DECLARE_TRANSLATION_OPCODE_ENUM Translation(TranslationBuffer* buffer, int frame_count, int jsframe_count, Zone* zone) diff --git a/deps/v8/src/disassembler.cc b/deps/v8/src/disassembler.cc index dd620fb345..d7898ddcd9 100644 --- a/deps/v8/src/disassembler.cc +++ b/deps/v8/src/disassembler.cc @@ -250,7 +250,7 @@ static int DecodeIt(Isolate* isolate, if (kind == Code::CALL_IC || kind == Code::KEYED_CALL_IC) { out.AddFormatted(", argc = %d", code->arguments_count()); } - } else if (kind == Code::STUB) { + } else if (kind == Code::STUB || kind == Code::HANDLER) { // Reverse lookup required as the minor key cannot be retrieved // from the code object. 
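The TRANSLATION_OPCODE_LIST rewrite above is the classic X-macro pattern: the opcode list is written once and expanded twice, so the Opcode enum and the names returned by Translation::StringFor can never drift apart. A self-contained sketch of the technique:

    #include <cstdio>

    // Single source of truth for the opcode list.
    #define OPCODE_LIST(V) \
      V(BEGIN)             \
      V(JS_FRAME)          \
      V(LITERAL)

    // Expansion 1: the enum.
    #define DECLARE_ENUM(item) item,
    enum Opcode { OPCODE_LIST(DECLARE_ENUM) LAST = LITERAL };
    #undef DECLARE_ENUM

    // Expansion 2: the name table, via the stringizing operator.
    #define OPCODE_CASE(item) case item: return #item;
    static const char* StringFor(Opcode opcode) {
      switch (opcode) {
        OPCODE_LIST(OPCODE_CASE)
      }
      return "";
    }
    #undef OPCODE_CASE

    int main() {
      std::printf("%s\n", StringFor(JS_FRAME));  // prints "JS_FRAME"
      return 0;
    }

Adding an opcode now means touching one list; both expansions pick it up automatically.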
Object* obj = heap->code_stubs()->SlowReverseLookup(code); diff --git a/deps/v8/src/elements.cc b/deps/v8/src/elements.cc index 89621cb369..0b745c4505 100644 --- a/deps/v8/src/elements.cc +++ b/deps/v8/src/elements.cc @@ -792,7 +792,7 @@ class ElementsAccessorBase : public ElementsAccessor { FixedArray* to, FixedArrayBase* from) { int len0 = to->length(); -#ifdef DEBUG +#ifdef ENABLE_SLOW_ASSERTS if (FLAG_enable_slow_asserts) { for (int i = 0; i < len0; i++) { ASSERT(!to->get(i)->IsTheHole()); diff --git a/deps/v8/src/extensions/externalize-string-extension.cc b/deps/v8/src/extensions/externalize-string-extension.cc index 5fd821b9c0..9fdb194e42 100644 --- a/deps/v8/src/extensions/externalize-string-extension.cc +++ b/deps/v8/src/extensions/externalize-string-extension.cc @@ -75,7 +75,7 @@ v8::Handle<v8::FunctionTemplate> ExternalizeStringExtension::GetNativeFunction( void ExternalizeStringExtension::Externalize( const v8::FunctionCallbackInfo<v8::Value>& args) { if (args.Length() < 1 || !args[0]->IsString()) { - v8::ThrowException(v8::String::New( + args.GetIsolate()->ThrowException(v8::String::New( "First parameter to externalizeString() must be a string.")); return; } @@ -84,7 +84,7 @@ void ExternalizeStringExtension::Externalize( if (args[1]->IsBoolean()) { force_two_byte = args[1]->BooleanValue(); } else { - v8::ThrowException(v8::String::New( + args.GetIsolate()->ThrowException(v8::String::New( "Second parameter to externalizeString() must be a boolean.")); return; } @@ -92,7 +92,7 @@ void ExternalizeStringExtension::Externalize( bool result = false; Handle<String> string = Utils::OpenHandle(*args[0].As<v8::String>()); if (string->IsExternalString()) { - v8::ThrowException(v8::String::New( + args.GetIsolate()->ThrowException(v8::String::New( "externalizeString() can't externalize twice.")); return; } @@ -120,7 +120,8 @@ void ExternalizeStringExtension::Externalize( if (!result) delete resource; } if (!result) { - v8::ThrowException(v8::String::New("externalizeString() failed.")); + args.GetIsolate()->ThrowException( + v8::String::New("externalizeString() failed.")); return; } } @@ -129,7 +130,7 @@ void ExternalizeStringExtension::Externalize( void ExternalizeStringExtension::IsAscii( const v8::FunctionCallbackInfo<v8::Value>& args) { if (args.Length() != 1 || !args[0]->IsString()) { - v8::ThrowException(v8::String::New( + args.GetIsolate()->ThrowException(v8::String::New( "isAsciiString() requires a single string argument.")); return; } diff --git a/deps/v8/src/factory.cc b/deps/v8/src/factory.cc index acbaf3c862..1dd246fc48 100644 --- a/deps/v8/src/factory.cc +++ b/deps/v8/src/factory.cc @@ -79,6 +79,21 @@ Handle<FixedDoubleArray> Factory::NewFixedDoubleArray(int size, } +Handle<ConstantPoolArray> Factory::NewConstantPoolArray( + int number_of_int64_entries, + int number_of_ptr_entries, + int number_of_int32_entries) { + ASSERT(number_of_int64_entries > 0 || number_of_ptr_entries > 0 || + number_of_int32_entries > 0); + CALL_HEAP_FUNCTION( + isolate(), + isolate()->heap()->AllocateConstantPoolArray(number_of_int64_entries, + number_of_ptr_entries, + number_of_int32_entries), + ConstantPoolArray); +} + + Handle<NameDictionary> Factory::NewNameDictionary(int at_least_space_for) { ASSERT(0 <= at_least_space_for); CALL_HEAP_FUNCTION(isolate(), @@ -126,6 +141,18 @@ Handle<ObjectHashTable> Factory::NewObjectHashTable(int at_least_space_for) { } +Handle<WeakHashTable> Factory::NewWeakHashTable(int at_least_space_for) { + ASSERT(0 <= at_least_space_for); + CALL_HEAP_FUNCTION( + isolate(), + 
WeakHashTable::Allocate(isolate()->heap(), + at_least_space_for, + WeakHashTable::USE_DEFAULT_MINIMUM_CAPACITY, + TENURED), + WeakHashTable); +} + + Handle<DescriptorArray> Factory::NewDescriptorArray(int number_of_descriptors, int slack) { ASSERT(0 <= number_of_descriptors); @@ -511,15 +538,22 @@ Handle<Cell> Factory::NewCell(Handle<Object> value) { } -Handle<PropertyCell> Factory::NewPropertyCell(Handle<Object> value) { - AllowDeferredHandleDereference convert_to_cell; +Handle<PropertyCell> Factory::NewPropertyCellWithHole() { CALL_HEAP_FUNCTION( isolate(), - isolate()->heap()->AllocatePropertyCell(*value), + isolate()->heap()->AllocatePropertyCell(), PropertyCell); } +Handle<PropertyCell> Factory::NewPropertyCell(Handle<Object> value) { + AllowDeferredHandleDereference convert_to_cell; + Handle<PropertyCell> cell = NewPropertyCellWithHole(); + PropertyCell::SetValueInferType(cell, value); + return cell; +} + + Handle<AllocationSite> Factory::NewAllocationSite() { CALL_HEAP_FUNCTION( isolate(), @@ -598,8 +632,11 @@ Handle<FixedArray> Factory::CopyFixedArray(Handle<FixedArray> array) { Handle<FixedArray> Factory::CopySizeFixedArray(Handle<FixedArray> array, - int new_length) { - CALL_HEAP_FUNCTION(isolate(), array->CopySize(new_length), FixedArray); + int new_length, + PretenureFlag pretenure) { + CALL_HEAP_FUNCTION(isolate(), + array->CopySize(new_length, pretenure), + FixedArray); } @@ -609,6 +646,12 @@ Handle<FixedDoubleArray> Factory::CopyFixedDoubleArray( } +Handle<ConstantPoolArray> Factory::CopyConstantPoolArray( + Handle<ConstantPoolArray> array) { + CALL_HEAP_FUNCTION(isolate(), array->Copy(), ConstantPoolArray); +} + + Handle<JSFunction> Factory::BaseNewFunctionFromSharedFunctionInfo( Handle<SharedFunctionInfo> function_info, Handle<Map> function_map, @@ -972,10 +1015,12 @@ Handle<Code> Factory::NewCode(const CodeDesc& desc, Code::Flags flags, Handle<Object> self_ref, bool immovable, - bool crankshafted) { + bool crankshafted, + int prologue_offset) { CALL_HEAP_FUNCTION(isolate(), isolate()->heap()->CreateCode( - desc, flags, self_ref, immovable, crankshafted), + desc, flags, self_ref, immovable, crankshafted, + prologue_offset), Code); } @@ -1016,14 +1061,79 @@ Handle<JSModule> Factory::NewJSModule(Handle<Context> context, } -Handle<GlobalObject> Factory::NewGlobalObject( - Handle<JSFunction> constructor) { - CALL_HEAP_FUNCTION(isolate(), - isolate()->heap()->AllocateGlobalObject(*constructor), +// TODO(mstarzinger): Temporary wrapper until handlified. +static Handle<NameDictionary> NameDictionaryAdd(Handle<NameDictionary> dict, + Handle<Name> name, + Handle<Object> value, + PropertyDetails details) { + CALL_HEAP_FUNCTION(dict->GetIsolate(), + dict->Add(*name, *value, details), + NameDictionary); +} + + +static Handle<GlobalObject> NewGlobalObjectFromMap(Isolate* isolate, + Handle<Map> map) { + CALL_HEAP_FUNCTION(isolate, + isolate->heap()->Allocate(*map, OLD_POINTER_SPACE), GlobalObject); } +Handle<GlobalObject> Factory::NewGlobalObject(Handle<JSFunction> constructor) { + ASSERT(constructor->has_initial_map()); + Handle<Map> map(constructor->initial_map()); + ASSERT(map->is_dictionary_map()); + + // Make sure no field properties are described in the initial map. + // This guarantees us that normalizing the properties does not + // require us to change property values to PropertyCells. + ASSERT(map->NextFreePropertyIndex() == 0); + + // Make sure we don't have a ton of pre-allocated slots in the + // global objects. They will be unused once we normalize the object. 
+ ASSERT(map->unused_property_fields() == 0); + ASSERT(map->inobject_properties() == 0); + + // Initial size of the backing store to avoid resize of the storage during + // bootstrapping. The size differs between the JS global object and the + // builtins object. + int initial_size = map->instance_type() == JS_GLOBAL_OBJECT_TYPE ? 64 : 512; + + // Allocate a dictionary object for backing storage. + int at_least_space_for = map->NumberOfOwnDescriptors() * 2 + initial_size; + Handle<NameDictionary> dictionary = NewNameDictionary(at_least_space_for); + + // The global object might be created from an object template with accessors. + // Fill these accessors into the dictionary. + Handle<DescriptorArray> descs(map->instance_descriptors()); + for (int i = 0; i < map->NumberOfOwnDescriptors(); i++) { + PropertyDetails details = descs->GetDetails(i); + ASSERT(details.type() == CALLBACKS); // Only accessors are expected. + PropertyDetails d = PropertyDetails(details.attributes(), CALLBACKS, i + 1); + Handle<Name> name(descs->GetKey(i)); + Handle<Object> value(descs->GetCallbacksObject(i), isolate()); + Handle<PropertyCell> cell = NewPropertyCell(value); + NameDictionaryAdd(dictionary, name, cell, d); + } + + // Allocate the global object and initialize it with the backing store. + Handle<GlobalObject> global = NewGlobalObjectFromMap(isolate(), map); + isolate()->heap()->InitializeJSObjectFromMap(*global, *dictionary, *map); + + // Create a new map for the global object. + Handle<Map> new_map = Map::CopyDropDescriptors(map); + new_map->set_dictionary_map(true); + + // Set up the global object as a normalized object. + global->set_map(*new_map); + global->set_properties(*dictionary); + + // Make sure result is a global object with properties in dictionary. + ASSERT(global->IsGlobalObject() && !global->HasFastProperties()); + return global; +} + Handle<JSObject> Factory::NewJSObjectFromMap(Handle<Map> map, PretenureFlag pretenure, @@ -1083,16 +1193,6 @@ void Factory::SetContent(Handle<JSArray> array, } -void Factory::EnsureCanContainElements(Handle<JSArray> array, - Handle<FixedArrayBase> elements, - uint32_t length, - EnsureElementsMode mode) { - CALL_HEAP_FUNCTION_VOID( - isolate(), - array->EnsureCanContainElements(*elements, length, mode)); -} - - Handle<JSArrayBuffer> Factory::NewJSArrayBuffer() { Handle<JSFunction> array_buffer_fun( isolate()->context()->native_context()->array_buffer_fun()); diff --git a/deps/v8/src/factory.h b/deps/v8/src/factory.h index 1bdf474337..ee25bf23d8 100644 --- a/deps/v8/src/factory.h +++ b/deps/v8/src/factory.h @@ -59,6 +59,11 @@ class Factory { int size, PretenureFlag pretenure = NOT_TENURED); + Handle<ConstantPoolArray> NewConstantPoolArray( + int number_of_int64_entries, + int number_of_ptr_entries, + int number_of_int32_entries); + Handle<SeededNumberDictionary> NewSeededNumberDictionary( int at_least_space_for); @@ -71,6 +76,8 @@ class Factory { Handle<ObjectHashTable> NewObjectHashTable(int at_least_space_for); + Handle<WeakHashTable> NewWeakHashTable(int at_least_space_for); + Handle<DescriptorArray> NewDescriptorArray(int number_of_descriptors, int slack = 0); Handle<DeoptimizationInputData> NewDeoptimizationInputData( @@ -241,6 +248,8 @@ class Factory { Handle<Cell> NewCell(Handle<Object> value); + Handle<PropertyCell> NewPropertyCellWithHole(); + Handle<PropertyCell> NewPropertyCell(Handle<Object> value); Handle<AllocationSite> NewAllocationSite(); @@ -265,11 +274,15 @@ class Factory { Handle<FixedArray> CopyFixedArray(Handle<FixedArray> array);
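Factory::NewGlobalObject above builds the global object directly in dictionary mode, boxing every initial accessor in a PropertyCell before installing it. The point of the cell indirection is that code which caches the cell observes later updates to the property without re-resolving it. A toy model of that idea (illustrative types only, nothing here is V8's real layout):

    #include <cstdio>
    #include <memory>
    #include <string>
    #include <unordered_map>

    // Each property value lives in its own heap cell.
    struct Cell { double value; };

    struct Global {
      std::unordered_map<std::string, std::shared_ptr<Cell>> dictionary;
    };

    int main() {
      Global global;
      global.dictionary["x"] = std::make_shared<Cell>(Cell{1.0});
      // A consumer (think: optimized code) caches the cell, not the value.
      std::shared_ptr<Cell> cached = global.dictionary["x"];
      global.dictionary["x"]->value = 2.0;  // property is updated
      std::printf("%f\n", cached->value);   // prints 2.000000
      return 0;
    }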
Handle<FixedArray> CopySizeFixedArray(Handle<FixedArray> array, - int new_length); + int new_length, + PretenureFlag pretenure = NOT_TENURED); Handle<FixedDoubleArray> CopyFixedDoubleArray( Handle<FixedDoubleArray> array); + Handle<ConstantPoolArray> CopyConstantPoolArray( + Handle<ConstantPoolArray> array); + // Numbers (e.g. literals) are pretenured by the parser. Handle<Object> NewNumber(double value, PretenureFlag pretenure = NOT_TENURED); @@ -295,7 +308,7 @@ class Factory { Handle<JSObject> NewJSObject(Handle<JSFunction> constructor, PretenureFlag pretenure = NOT_TENURED); - // Global objects are pretenured. + // Global objects are pretenured and initialized based on a constructor. Handle<GlobalObject> NewGlobalObject(Handle<JSFunction> constructor); // JS objects are pretenured when allocated by the bootstrapper and @@ -328,11 +341,6 @@ class Factory { void SetContent(Handle<JSArray> array, Handle<FixedArrayBase> elements); - void EnsureCanContainElements(Handle<JSArray> array, - Handle<FixedArrayBase> elements, - uint32_t length, - EnsureElementsMode mode); - Handle<JSArrayBuffer> NewJSArrayBuffer(); Handle<JSTypedArray> NewJSTypedArray(ExternalArrayType type); @@ -372,7 +380,8 @@ class Factory { Code::Flags flags, Handle<Object> self_reference, bool immovable = false, - bool crankshafted = false); + bool crankshafted = false, + int prologue_offset = Code::kPrologueOffsetNotSet); Handle<Code> CopyCode(Handle<Code> code); @@ -462,7 +471,15 @@ class Factory { &isolate()->heap()->roots_[Heap::k##camel_name##RootIndex])); \ } ROOT_LIST(ROOT_ACCESSOR) -#undef ROOT_ACCESSOR_ACCESSOR +#undef ROOT_ACCESSOR + +#define STRUCT_MAP_ACCESSOR(NAME, Name, name) \ + inline Handle<Map> name##_map() { \ + return Handle<Map>(BitCast<Map**>( \ + &isolate()->heap()->roots_[Heap::k##Name##MapRootIndex])); \ + } + STRUCT_LIST(STRUCT_MAP_ACCESSOR) +#undef STRUCT_MAP_ACCESSOR #define STRING_ACCESSOR(name, str) \ inline Handle<String> name() { \ diff --git a/deps/v8/src/flag-definitions.h b/deps/v8/src/flag-definitions.h index 08cd8304e4..865413e70d 100644 --- a/deps/v8/src/flag-definitions.h +++ b/deps/v8/src/flag-definitions.h @@ -90,44 +90,34 @@ #define DEFINE_implication(whenflag, thenflag) #endif +#define COMMA , #ifdef FLAG_MODE_DECLARE // Structure used to hold a collection of arguments to the JavaScript code. -#define JSARGUMENTS_INIT {{}} struct JSArguments { public: - inline int argc() const { - return static_cast<int>(storage_[0]); - } - inline const char** argv() const { - return reinterpret_cast<const char**>(storage_[1]); - } inline const char*& operator[] (int idx) const { - return argv()[idx]; - } - inline JSArguments& operator=(JSArguments args) { - set_argc(args.argc()); - set_argv(args.argv()); - return *this; + return argv[idx]; } static JSArguments Create(int argc, const char** argv) { JSArguments args; - args.set_argc(argc); - args.set_argv(argv); + args.argc = argc; + args.argv = argv; return args; } -private: - void set_argc(int argc) { - storage_[0] = argc; - } - void set_argv(const char** argv) { - storage_[1] = reinterpret_cast<AtomicWord>(argv); + int argc; + const char** argv; +}; + +struct MaybeBoolFlag { + static MaybeBoolFlag Create(bool has_value, bool value) { + MaybeBoolFlag flag; + flag.has_value = has_value; + flag.value = value; + return flag; } -public: - // Contains argc and argv. Unfortunately we have to store these two fields - // into a single one to avoid making the initialization macro (which would be - // "{ 0, NULL }") contain a coma. 
- AtomicWord storage_[2]; + bool has_value; + bool value; }; #endif @@ -148,10 +138,13 @@ public: #endif #define DEFINE_bool(nam, def, cmt) FLAG(BOOL, bool, nam, def, cmt) +#define DEFINE_maybe_bool(nam, cmt) FLAG(MAYBE_BOOL, MaybeBoolFlag, nam, \ + { false COMMA false }, cmt) #define DEFINE_int(nam, def, cmt) FLAG(INT, int, nam, def, cmt) #define DEFINE_float(nam, def, cmt) FLAG(FLOAT, double, nam, def, cmt) #define DEFINE_string(nam, def, cmt) FLAG(STRING, const char*, nam, def, cmt) -#define DEFINE_args(nam, def, cmt) FLAG(ARGS, JSArguments, nam, def, cmt) +#define DEFINE_args(nam, cmt) FLAG(ARGS, JSArguments, nam, \ + { 0 COMMA NULL }, cmt) #define DEFINE_ALIAS_bool(alias, nam) FLAG_ALIAS(BOOL, bool, alias, nam) #define DEFINE_ALIAS_int(alias, nam) FLAG_ALIAS(INT, int, alias, nam) @@ -183,17 +176,13 @@ DEFINE_bool(harmony_collections, false, "enable harmony collections (sets, maps, and weak maps)") DEFINE_bool(harmony_observation, false, "enable harmony object observation (implies harmony collections") -DEFINE_bool(harmony_typed_arrays, true, - "enable harmony typed arrays") -DEFINE_bool(harmony_array_buffer, true, - "enable harmony array buffer") -DEFINE_implication(harmony_typed_arrays, harmony_array_buffer) DEFINE_bool(harmony_generators, false, "enable harmony generators") DEFINE_bool(harmony_iteration, false, "enable harmony iteration (for-of)") DEFINE_bool(harmony_numeric_literals, false, "enable harmony numeric literals (0o77, 0b11)") DEFINE_bool(harmony_strings, false, "enable harmony string") DEFINE_bool(harmony_arrays, false, "enable harmony arrays") +DEFINE_bool(harmony_maths, false, "enable harmony math functions") DEFINE_bool(harmony, false, "enable all harmony features (except typeof)") DEFINE_implication(harmony, harmony_scoping) DEFINE_implication(harmony, harmony_modules) @@ -206,20 +195,21 @@ DEFINE_implication(harmony, harmony_iteration) DEFINE_implication(harmony, harmony_numeric_literals) DEFINE_implication(harmony, harmony_strings) DEFINE_implication(harmony, harmony_arrays) +DEFINE_implication(harmony, harmony_maths) DEFINE_implication(harmony_modules, harmony_scoping) DEFINE_implication(harmony_observation, harmony_collections) -// TODO[dslomov] add harmony => harmony_typed_arrays // Flags for experimental implementation features. DEFINE_bool(packed_arrays, true, "optimizes arrays that have no holes") DEFINE_bool(smi_only_arrays, true, "tracks arrays with only smi values") -DEFINE_bool(clever_optimizations, - true, +DEFINE_bool(clever_optimizations, true, "Optimize object size, Array shift, DOM strings and string +") DEFINE_bool(pretenuring, true, "allocate objects in old space") // TODO(hpayer): We will remove this flag as soon as we have pretenuring // support for specific allocation sites. DEFINE_bool(pretenuring_call_new, false, "pretenure call new") +DEFINE_bool(allocation_site_pretenuring, false, + "pretenure with allocation sites") DEFINE_bool(track_fields, true, "track fields with only smi values") DEFINE_bool(track_double_fields, true, "track fields with double values") DEFINE_bool(track_heap_object_fields, true, "track fields with heap values") @@ -229,6 +219,11 @@ DEFINE_implication(track_heap_object_fields, track_fields) DEFINE_implication(track_computed_fields, track_fields) DEFINE_bool(smi_binop, true, "support smi representation in binary operations") +// Flags for optimization types. 
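The COMMA define above exists because a bare ',' inside a macro argument is read as an argument separator, and braces (unlike parentheses) do not protect it. Smuggling the comma in through a one-token macro is what lets initializers such as { false COMMA false } and { 0 COMMA NULL } pass through the FLAG(...) machinery as a single argument. A minimal demonstration:

    #include <cstdio>

    #define COMMA ,

    struct Pair { int a; int b; };

    // PRINT takes exactly two arguments; the argument "{ 1, 2 }" would be
    // split at the comma and fail to compile.
    #define PRINT(type, init)             \
      do {                                \
        type v = init;                    \
        std::printf("%d %d\n", v.a, v.b); \
      } while (0)

    int main() {
      PRINT(Pair, { 1 COMMA 2 });  // expands to: Pair v = { 1 , 2 };
      return 0;
    }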
+DEFINE_bool(optimize_for_size, false, + "Enables optimizations which favor memory size over execution " + "speed.") + // Flags for data representation optimizations DEFINE_bool(unbox_double_arrays, true, "automatically unbox arrays of doubles") DEFINE_bool(string_slices, true, "use string slices") @@ -240,7 +235,7 @@ DEFINE_bool(use_range, true, "use hydrogen range analysis") DEFINE_bool(use_gvn, true, "use hydrogen global value numbering") DEFINE_bool(use_canonicalizing, true, "use hydrogen instruction canonicalizing") DEFINE_bool(use_inlining, true, "use function inlining") -DEFINE_bool(use_escape_analysis, false, "use hydrogen escape analysis") +DEFINE_bool(use_escape_analysis, true, "use hydrogen escape analysis") DEFINE_bool(use_allocation_folding, true, "use allocation folding") DEFINE_int(max_inlining_levels, 5, "maximum number of inlining levels") DEFINE_int(max_inlined_source_size, 600, @@ -251,16 +246,17 @@ DEFINE_int(max_inlined_nodes_cumulative, 400, "maximum cumulative number of AST nodes considered for inlining") DEFINE_bool(loop_invariant_code_motion, true, "loop invariant code motion") DEFINE_bool(fast_math, true, "faster (but maybe less accurate) math functions") -DEFINE_bool(collect_megamorphic_maps_from_stub_cache, - true, +DEFINE_bool(collect_megamorphic_maps_from_stub_cache, true, "crankshaft harvests type feedback from stub cache") DEFINE_bool(hydrogen_stats, false, "print statistics for hydrogen") +DEFINE_bool(trace_check_elimination, false, "trace check elimination phase") DEFINE_bool(trace_hydrogen, false, "trace generated hydrogen to file") DEFINE_string(trace_hydrogen_filter, "*", "hydrogen tracing filter") DEFINE_bool(trace_hydrogen_stubs, false, "trace generated hydrogen for stubs") DEFINE_string(trace_hydrogen_file, NULL, "trace hydrogen to given file name") DEFINE_string(trace_phase, "HLZ", "trace generated IR for specified phases") DEFINE_bool(trace_inlining, false, "trace inlining decisions") +DEFINE_bool(trace_load_elimination, false, "trace load elimination") DEFINE_bool(trace_alloc, false, "trace register allocator") DEFINE_bool(trace_all_uses, false, "trace all use positions") DEFINE_bool(trace_range, false, "trace range analysis") @@ -274,11 +270,9 @@ DEFINE_bool(trace_migration, false, "trace object migration") DEFINE_bool(trace_generalization, false, "trace map generalization") DEFINE_bool(stress_pointer_maps, false, "pointer map for every instruction") DEFINE_bool(stress_environments, false, "environment for every instruction") -DEFINE_int(deopt_every_n_times, - 0, +DEFINE_int(deopt_every_n_times, 0, "deoptimize every n times a deopt point is passed") -DEFINE_int(deopt_every_n_garbage_collections, - 0, +DEFINE_int(deopt_every_n_garbage_collections, 0, "deoptimize every n garbage collections") DEFINE_bool(print_deopt_stress, false, "print number of possible deopt points") DEFINE_bool(trap_on_deopt, false, "put a break point before deoptimizing") @@ -295,11 +289,12 @@ DEFINE_bool(array_index_dehoisting, true, "perform array index dehoisting") DEFINE_bool(analyze_environment_liveness, true, "analyze liveness of environment slots and zap dead values") +DEFINE_bool(load_elimination, false, "use load elimination") +DEFINE_bool(check_elimination, false, "use check elimination") DEFINE_bool(dead_code_elimination, true, "use dead code elimination") DEFINE_bool(fold_constants, true, "use constant folding") DEFINE_bool(trace_dead_code_elimination, false, "trace dead code elimination") -DEFINE_bool(unreachable_code_elimination, false, - "eliminate unreachable 
code (hidden behind soft deopts)") +DEFINE_bool(unreachable_code_elimination, true, "eliminate unreachable code") DEFINE_bool(track_allocation_sites, true, "Use allocation site info to reduce transitions") DEFINE_bool(trace_osr, false, "trace on-stack replacement") @@ -316,6 +311,8 @@ DEFINE_bool(inline_construct, true, "inline constructor calls") DEFINE_bool(inline_arguments, true, "inline functions with arguments object") DEFINE_bool(inline_accessors, true, "inline JavaScript accessors") DEFINE_int(loop_weight, 1, "loop weight for representation inference") +DEFINE_int(escape_analysis_iterations, 1, + "maximum number of escape analysis fix-point iterations") DEFINE_bool(optimize_for_in, true, "optimize functions containing for-in loops") @@ -331,8 +328,11 @@ DEFINE_int(concurrent_recompilation_queue_length, 8, "the length of the concurrent compilation queue") DEFINE_int(concurrent_recompilation_delay, 0, "artificial compilation delay in ms") +DEFINE_bool(block_concurrent_recompilation, false, + "block queued jobs until released") DEFINE_bool(concurrent_osr, false, "concurrent on-stack replacement") +DEFINE_implication(concurrent_osr, concurrent_recompilation) DEFINE_bool(omit_map_checks_for_leaf_maps, true, "do not emit check maps for constant values that have a leaf map, " @@ -404,8 +404,7 @@ DEFINE_bool(enable_vldr_imm, false, DEFINE_string(expose_natives_as, NULL, "expose natives in global object") DEFINE_string(expose_debug_as, NULL, "expose debug in global object") DEFINE_bool(expose_gc, false, "expose gc extension") -DEFINE_string(expose_gc_as, - NULL, +DEFINE_string(expose_gc_as, NULL, "expose gc extension under the specified name") DEFINE_implication(expose_gc_as, expose_gc) DEFINE_bool(expose_externalize_string, false, @@ -426,8 +425,7 @@ DEFINE_bool(stack_trace_on_abort, true, DEFINE_bool(trace_codegen, false, "print name of functions for which code is generated") DEFINE_bool(trace, false, "trace function calls") -DEFINE_bool(mask_constants_with_cookie, - true, +DEFINE_bool(mask_constants_with_cookie, true, "use random jit cookie to mask large constants") // codegen.cc @@ -515,6 +513,8 @@ DEFINE_bool(collect_maps, true, "garbage collect maps from which no objects can be reached") DEFINE_bool(weak_embedded_maps_in_optimized_code, true, "make maps embedded in optimized code weak") +DEFINE_bool(weak_embedded_objects_in_optimized_code, true, + "make objects embedded in optimized code weak") DEFINE_bool(flush_code, true, "flush code that we expect not to use again (during full gc)") DEFINE_bool(flush_code_incrementally, true, @@ -533,18 +533,21 @@ DEFINE_bool(parallel_sweeping, true, "enable parallel sweeping") DEFINE_bool(concurrent_sweeping, false, "enable concurrent sweeping") DEFINE_int(sweeper_threads, 0, "number of parallel and concurrent sweeping threads") -DEFINE_bool(parallel_marking, false, "enable parallel marking") -DEFINE_int(marking_threads, 0, "number of parallel marking threads") #ifdef VERIFY_HEAP DEFINE_bool(verify_heap, false, "verify heap pointers before and after GC") #endif + +// heap-snapshot-generator.cc +DEFINE_bool(heap_profiler_trace_objects, false, + "Dump heap object allocations/movements/size_updates") + + // v8.cc DEFINE_bool(use_idle_notification, true, "Use idle notification to reduce memory footprint.") // ic.cc DEFINE_bool(use_ic, true, "use inline caching") -DEFINE_bool(js_accessor_ics, false, "create ics for js accessors") // macro-assembler-ia32.cc DEFINE_bool(native_code_counters, false, @@ -592,15 +595,17 @@ DEFINE_bool(trace_exception, false, 
"print stack trace when throwing exceptions") DEFINE_bool(preallocate_message_memory, false, "preallocate some memory to build stack traces.") -DEFINE_bool(randomize_hashes, - true, +DEFINE_bool(randomize_hashes, true, "randomize hashes to avoid predictable hash collisions " "(with snapshots this option cannot override the baked-in seed)") -DEFINE_int(hash_seed, - 0, +DEFINE_int(hash_seed, 0, "Fixed seed to use to hash property keys (0 means random)" "(with snapshots this option cannot override the baked-in seed)") +// snapshot-common.cc +DEFINE_bool(profile_deserialization, false, + "Print the time it takes to deserialize the snapshot.") + // v8.cc DEFINE_bool(preemption, false, "activate a 100ms timer that switches between V8 threads") @@ -610,6 +615,7 @@ DEFINE_bool(regexp_optimization, true, "generate optimized regexp code") // Testing flags test/cctest/test-{flags,api,serialization}.cc DEFINE_bool(testing_bool_flag, true, "testing_bool_flag") +DEFINE_maybe_bool(testing_maybe_bool_flag, "testing_maybe_bool_flag") DEFINE_int(testing_int_flag, 13, "testing_int_flag") DEFINE_float(testing_float_flag, 2.5, "float-flag") DEFINE_string(testing_string_flag, "Hello, world!", "string-flag") @@ -626,6 +632,10 @@ DEFINE_string(testing_serialization_file, "/tmp/serdes", DEFINE_string(extra_code, NULL, "A filename with extra code to be included in" " the snapshot (mksnapshot only)") +// code-stubs-hydrogen.cc +DEFINE_bool(profile_hydrogen_code_stub_compilation, false, + "Print the time it takes to lazily compile hydrogen code stubs.") + // // Dev shell flags // @@ -642,7 +652,7 @@ DEFINE_int(debugger_port, 5858, "Port to use for remote debugging") #endif // ENABLE_DEBUGGER_SUPPORT DEFINE_string(map_counters, "", "Map counters to a file") -DEFINE_args(js_arguments, JSARGUMENTS_INIT, +DEFINE_args(js_arguments, "Pass all remaining arguments to the script. 
Alias for \"--\".") #if defined(WEBOS__) @@ -686,8 +696,10 @@ DEFINE_bool(stress_compaction, false, #endif // checks.cc +#ifdef ENABLE_SLOW_ASSERTS DEFINE_bool(enable_slow_asserts, false, "enable asserts that are slow to execute") +#endif // codegen-ia32.cc / codegen-arm.cc / macro-assembler-*.cc DEFINE_bool(print_source, false, "pretty print source code") @@ -724,8 +736,7 @@ DEFINE_bool(print_interface_details, false, "print interface inference details") DEFINE_int(print_interface_depth, 5, "depth for printing interfaces") // objects.cc -DEFINE_bool(trace_normalization, - false, +DEFINE_bool(trace_normalization, false, "prints when objects are turned into dictionaries.") // runtime.cc @@ -739,12 +750,10 @@ DEFINE_bool(collect_heap_spill_statistics, false, DEFINE_bool(trace_isolates, false, "trace isolate state changes") // Regexp -DEFINE_bool(regexp_possessive_quantifier, - false, +DEFINE_bool(regexp_possessive_quantifier, false, "enable possessive quantifier syntax for testing") DEFINE_bool(trace_regexp_bytecodes, false, "trace regexp bytecode execution") -DEFINE_bool(trace_regexp_assembler, - false, +DEFINE_bool(trace_regexp_assembler, false, "trace regexp macro assembler calls.") // @@ -773,6 +782,7 @@ DEFINE_bool(prof_browser_mode, true, "Used with --prof, turns on browser-compatible mode for profiling.") DEFINE_bool(log_regexp, false, "Log regular expression execution.") DEFINE_string(logfile, "v8.log", "Specify the name of the log file.") +DEFINE_bool(logfile_per_isolate, true, "Separate log files for each isolate.") DEFINE_bool(ll_prof, false, "Enable low-level linux profiler.") DEFINE_string(gc_fake_mmap, "/tmp/__v8_gc__", "Specify the name of the file for fake gc mmap used in ll_prof") @@ -795,16 +805,18 @@ DEFINE_implication(log_internal_timer_events, prof) // elements.cc DEFINE_bool(trace_elements_transitions, false, "trace elements transitions") +DEFINE_bool(trace_creation_allocation_sites, false, + "trace the creation of allocation sites") + // code-stubs.cc DEFINE_bool(print_code_stubs, false, "print code stubs") -DEFINE_bool(test_secondary_stub_cache, - false, +DEFINE_bool(test_secondary_stub_cache, false, "test secondary stub cache by disabling the primary one") -DEFINE_bool(test_primary_stub_cache, - false, +DEFINE_bool(test_primary_stub_cache, false, "test primary stub cache by disabling the secondary one") + // codegen-ia32.cc / codegen-arm.cc DEFINE_bool(print_code, false, "print generated code") DEFINE_bool(print_opt_code, false, "print optimized code") @@ -812,8 +824,19 @@ DEFINE_bool(print_unopt_code, false, "print unoptimized code before " "printing optimized code based on it") DEFINE_bool(print_code_verbose, false, "print more information for code") DEFINE_bool(print_builtin_code, false, "print generated code for builtins") +DEFINE_bool(emit_opt_code_positions, false, + "annotate optimize code with source code positions") #ifdef ENABLE_DISASSEMBLER +DEFINE_bool(sodium, false, "print generated code output suitable for use with " + "the Sodium code viewer") + +DEFINE_implication(sodium, print_code_stubs) +DEFINE_implication(sodium, print_code) +DEFINE_implication(sodium, print_opt_code) +DEFINE_implication(sodium, emit_opt_code_positions) +DEFINE_implication(sodium, code_comments) + DEFINE_bool(print_all_code, false, "enable all flags related to printing code") DEFINE_implication(print_all_code, print_code) DEFINE_implication(print_all_code, print_opt_code) @@ -827,6 +850,16 @@ DEFINE_implication(print_all_code, trace_codegen) #endif #endif +// +// Read-only flags 
+// +#undef FLAG +#define FLAG FLAG_READONLY + +// assembler-arm.h +DEFINE_bool(enable_ool_constant_pool, false, + "enable use of out-of-line constant pools (ARM only)") + // Cleanup... #undef FLAG_FULL #undef FLAG_READONLY @@ -834,6 +867,7 @@ DEFINE_implication(print_all_code, trace_codegen) #undef FLAG_ALIAS #undef DEFINE_bool +#undef DEFINE_maybe_bool #undef DEFINE_int #undef DEFINE_string #undef DEFINE_float @@ -850,3 +884,5 @@ DEFINE_implication(print_all_code, trace_codegen) #undef FLAG_MODE_DEFINE_DEFAULTS #undef FLAG_MODE_META #undef FLAG_MODE_DEFINE_IMPLICATIONS + +#undef COMMA diff --git a/deps/v8/src/flags.cc b/deps/v8/src/flags.cc index 4e18cc8c80..0c36aed332 100644 --- a/deps/v8/src/flags.cc +++ b/deps/v8/src/flags.cc @@ -55,7 +55,8 @@ namespace { // to the actual flag, default value, comment, etc. This is designed to be POD // initialized as to avoid requiring static constructors. struct Flag { - enum FlagType { TYPE_BOOL, TYPE_INT, TYPE_FLOAT, TYPE_STRING, TYPE_ARGS }; + enum FlagType { TYPE_BOOL, TYPE_MAYBE_BOOL, TYPE_INT, TYPE_FLOAT, + TYPE_STRING, TYPE_ARGS }; FlagType type_; // What type of flag, bool, int, or string. const char* name_; // Name of the flag, ex "my_flag". @@ -75,6 +76,11 @@ struct Flag { return reinterpret_cast<bool*>(valptr_); } + MaybeBoolFlag* maybe_bool_variable() const { + ASSERT(type_ == TYPE_MAYBE_BOOL); + return reinterpret_cast<MaybeBoolFlag*>(valptr_); + } + int* int_variable() const { ASSERT(type_ == TYPE_INT); return reinterpret_cast<int*>(valptr_); @@ -133,6 +139,8 @@ struct Flag { switch (type_) { case TYPE_BOOL: return *bool_variable() == bool_default(); + case TYPE_MAYBE_BOOL: + return maybe_bool_variable()->has_value == false; case TYPE_INT: return *int_variable() == int_default(); case TYPE_FLOAT: @@ -145,7 +153,7 @@ struct Flag { return strcmp(str1, str2) == 0; } case TYPE_ARGS: - return args_variable()->argc() == 0; + return args_variable()->argc == 0; } UNREACHABLE(); return true; @@ -157,6 +165,9 @@ struct Flag { case TYPE_BOOL: *bool_variable() = bool_default(); break; + case TYPE_MAYBE_BOOL: + *maybe_bool_variable() = MaybeBoolFlag::Create(false, false); + break; case TYPE_INT: *int_variable() = int_default(); break; @@ -186,6 +197,7 @@ const size_t num_flags = sizeof(flags) / sizeof(*flags); static const char* Type2String(Flag::FlagType type) { switch (type) { case Flag::TYPE_BOOL: return "bool"; + case Flag::TYPE_MAYBE_BOOL: return "maybe_bool"; case Flag::TYPE_INT: return "int"; case Flag::TYPE_FLOAT: return "float"; case Flag::TYPE_STRING: return "string"; @@ -203,6 +215,11 @@ static SmartArrayPointer<const char> ToString(Flag* flag) { case Flag::TYPE_BOOL: buffer.Add("%s", (*flag->bool_variable() ? "true" : "false")); break; + case Flag::TYPE_MAYBE_BOOL: + buffer.Add("%s", flag->maybe_bool_variable()->has_value + ? (flag->maybe_bool_variable()->value ? 
"true" : "false") + : "unset"); + break; case Flag::TYPE_INT: buffer.Add("%d", *flag->int_variable()); break; @@ -216,9 +233,9 @@ static SmartArrayPointer<const char> ToString(Flag* flag) { } case Flag::TYPE_ARGS: { JSArguments args = *flag->args_variable(); - if (args.argc() > 0) { + if (args.argc > 0) { buffer.Add("%s", args[0]); - for (int i = 1; i < args.argc(); i++) { + for (int i = 1; i < args.argc; i++) { buffer.Add(" %s", args[i]); } } @@ -260,7 +277,7 @@ List<const char*>* FlagList::argv() { buffer.Add("--%s", args_flag->name()); args->Add(buffer.ToCString().Detach()); JSArguments jsargs = *args_flag->args_variable(); - for (int j = 0; j < jsargs.argc(); j++) { + for (int j = 0; j < jsargs.argc; j++) { args->Add(StrDup(jsargs[j])); } } @@ -380,6 +397,7 @@ int FlagList::SetFlagsFromCommandLine(int* argc, // if we still need a flag value, use the next argument if available if (flag->type() != Flag::TYPE_BOOL && + flag->type() != Flag::TYPE_MAYBE_BOOL && flag->type() != Flag::TYPE_ARGS && value == NULL) { if (i < *argc) { @@ -399,6 +417,9 @@ int FlagList::SetFlagsFromCommandLine(int* argc, case Flag::TYPE_BOOL: *flag->bool_variable() = !is_bool; break; + case Flag::TYPE_MAYBE_BOOL: + *flag->maybe_bool_variable() = MaybeBoolFlag::Create(true, !is_bool); + break; case Flag::TYPE_INT: *flag->int_variable() = strtol(value, &endp, 10); // NOLINT break; @@ -425,8 +446,9 @@ int FlagList::SetFlagsFromCommandLine(int* argc, } // handle errors - if ((flag->type() == Flag::TYPE_BOOL && value != NULL) || - (flag->type() != Flag::TYPE_BOOL && is_bool) || + bool is_bool_type = flag->type() == Flag::TYPE_BOOL || + flag->type() == Flag::TYPE_MAYBE_BOOL; + if ((is_bool_type && value != NULL) || (!is_bool_type && is_bool) || *endp != '\0') { PrintF(stderr, "Error: illegal value for flag %s of type %s\n" "Try --help for options\n", @@ -549,6 +571,7 @@ void FlagList::PrintHelp() { } +// static void FlagList::EnforceFlagImplications() { #define FLAG_MODE_DEFINE_IMPLICATIONS #include "flag-definitions.h" diff --git a/deps/v8/src/frames.h b/deps/v8/src/frames.h index 2bbbd98ac0..d2dbfe2815 100644 --- a/deps/v8/src/frames.h +++ b/deps/v8/src/frames.h @@ -922,6 +922,13 @@ class StackFrameLocator BASE_EMBEDDED { }; +// Used specify the type of prologue to generate. +enum PrologueFrameMode { + BUILD_FUNCTION_FRAME, + BUILD_STUB_FRAME +}; + + // Reads all frames on the current stack and copies them into the current // zone memory. 
Vector<StackFrame*> CreateStackMap(Isolate* isolate, Zone* zone); diff --git a/deps/v8/src/full-codegen.cc b/deps/v8/src/full-codegen.cc index 91a51731a5..fec9ee565d 100644 --- a/deps/v8/src/full-codegen.cc +++ b/deps/v8/src/full-codegen.cc @@ -193,12 +193,16 @@ void BreakableStatementChecker::VisitDebuggerStatement( } +void BreakableStatementChecker::VisitCaseClause(CaseClause* clause) { +} + + void BreakableStatementChecker::VisitFunctionLiteral(FunctionLiteral* expr) { } -void BreakableStatementChecker::VisitSharedFunctionInfoLiteral( - SharedFunctionInfoLiteral* expr) { +void BreakableStatementChecker::VisitNativeFunctionLiteral( + NativeFunctionLiteral* expr) { } @@ -341,8 +345,6 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info) { code->set_has_deoptimization_support(info->HasDeoptimizationSupport()); code->set_handler_table(*cgen.handler_table()); #ifdef ENABLE_DEBUGGER_SUPPORT - code->set_has_debug_break_slots( - info->isolate()->debugger()->IsDebuggerActive()); code->set_compiled_optimizable(info->IsOptimizable()); #endif // ENABLE_DEBUGGER_SUPPORT code->set_allow_osr_at_loop_nesting_level(0); @@ -826,7 +828,7 @@ void FullCodeGenerator::SetReturnPosition(FunctionLiteral* fun) { void FullCodeGenerator::SetStatementPosition(Statement* stmt) { #ifdef ENABLE_DEBUGGER_SUPPORT if (!isolate()->debugger()->IsDebuggerActive()) { - CodeGenerator::RecordPositions(masm_, stmt->statement_pos()); + CodeGenerator::RecordPositions(masm_, stmt->position()); } else { // Check if the statement will be breakable without adding a debug break // slot. @@ -836,7 +838,7 @@ void FullCodeGenerator::SetStatementPosition(Statement* stmt) { // breakable. For breakable statements the actual recording of the // position will be postponed to the breakable code (typically an IC). bool position_recorded = CodeGenerator::RecordPositions( - masm_, stmt->statement_pos(), !checker.is_breakable()); + masm_, stmt->position(), !checker.is_breakable()); // If the position recording did record a new position generate a debug // break slot to make the statement breakable. if (position_recorded) { @@ -844,15 +846,15 @@ void FullCodeGenerator::SetStatementPosition(Statement* stmt) { } } #else - CodeGenerator::RecordPositions(masm_, stmt->statement_pos()); + CodeGenerator::RecordPositions(masm_, stmt->position()); #endif } -void FullCodeGenerator::SetExpressionPosition(Expression* expr, int pos) { +void FullCodeGenerator::SetExpressionPosition(Expression* expr) { #ifdef ENABLE_DEBUGGER_SUPPORT if (!isolate()->debugger()->IsDebuggerActive()) { - CodeGenerator::RecordPositions(masm_, pos); + CodeGenerator::RecordPositions(masm_, expr->position()); } else { // Check if the expression will be breakable without adding a debug break // slot. @@ -866,7 +868,7 @@ void FullCodeGenerator::SetExpressionPosition(Expression* expr, int pos) { // statement positions this is used for e.g. the condition expression of // a do while loop. bool position_recorded = CodeGenerator::RecordPositions( - masm_, pos, !checker.is_breakable()); + masm_, expr->position(), !checker.is_breakable()); // If the position recording did record a new position generate a debug // break slot to make the statement breakable. if (position_recorded) { @@ -1293,7 +1295,7 @@ void FullCodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) { // possible to break on the condition. 
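The position changes in full-codegen.cc above work because statements and expressions now expose one uniform position() accessor instead of per-node names such as statement_pos(), so SetExpressionPosition no longer needs a caller-supplied int. Schematically (this is not the actual AST hierarchy, just the shape of the refactoring):

// Schematic: one source position stored per AST node, read where needed.
class AstNode {
 public:
  explicit AstNode(int position) : position_(position) {}
  int position() const { return position_; }  // character offset into the script
 private:
  const int position_;
};

// Old style: SetExpressionPosition(expr, expr->then_expression_position());
// New style: SetExpressionPosition(expr);  // the node already knows its position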
__ bind(loop_statement.continue_label()); PrepareForBailoutForId(stmt->ContinueId(), NO_REGISTERS); - SetExpressionPosition(stmt->cond(), stmt->condition_position()); + SetExpressionPosition(stmt->cond()); VisitForControl(stmt->cond(), &book_keeping, loop_statement.break_label(), @@ -1515,6 +1517,11 @@ void FullCodeGenerator::VisitDebuggerStatement(DebuggerStatement* stmt) { } +void FullCodeGenerator::VisitCaseClause(CaseClause* clause) { + UNREACHABLE(); +} + + void FullCodeGenerator::VisitConditional(Conditional* expr) { Comment cmnt(masm_, "[ Conditional"); Label true_case, false_case, done; @@ -1522,8 +1529,7 @@ void FullCodeGenerator::VisitConditional(Conditional* expr) { PrepareForBailoutForId(expr->ThenId(), NO_REGISTERS); __ bind(&true_case); - SetExpressionPosition(expr->then_expression(), - expr->then_expression_position()); + SetExpressionPosition(expr->then_expression()); if (context()->IsTest()) { const TestContext* for_test = TestContext::cast(context()); VisitForControl(expr->then_expression(), @@ -1537,8 +1543,7 @@ void FullCodeGenerator::VisitConditional(Conditional* expr) { PrepareForBailoutForId(expr->ElseId(), NO_REGISTERS); __ bind(&false_case); - SetExpressionPosition(expr->else_expression(), - expr->else_expression_position()); + SetExpressionPosition(expr->else_expression()); VisitInDuplicateContext(expr->else_expression()); // If control flow falls through Visit, merge it with true case here. if (!context()->IsTest()) { @@ -1567,10 +1572,33 @@ void FullCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) { } -void FullCodeGenerator::VisitSharedFunctionInfoLiteral( - SharedFunctionInfoLiteral* expr) { - Comment cmnt(masm_, "[ SharedFunctionInfoLiteral"); - EmitNewClosure(expr->shared_function_info(), false); +void FullCodeGenerator::VisitNativeFunctionLiteral( + NativeFunctionLiteral* expr) { + Comment cmnt(masm_, "[ NativeFunctionLiteral"); + + // Compute the function template for the native function. + Handle<String> name = expr->name(); + v8::Handle<v8::FunctionTemplate> fun_template = + expr->extension()->GetNativeFunction(v8::Utils::ToLocal(name)); + ASSERT(!fun_template.IsEmpty()); + + // Instantiate the function and create a shared function info from it. + Handle<JSFunction> fun = Utils::OpenHandle(*fun_template->GetFunction()); + const int literals = fun->NumberOfLiterals(); + Handle<Code> code = Handle<Code>(fun->shared()->code()); + Handle<Code> construct_stub = Handle<Code>(fun->shared()->construct_stub()); + bool is_generator = false; + Handle<SharedFunctionInfo> shared = + isolate()->factory()->NewSharedFunctionInfo(name, literals, is_generator, + code, Handle<ScopeInfo>(fun->shared()->scope_info())); + shared->set_construct_stub(*construct_stub); + + // Copy the function data to the shared function info. + shared->set_function_data(fun->shared()->function_data()); + int parameters = fun->shared()->formal_parameter_count(); + shared->set_formal_parameter_count(parameters); + + EmitNewClosure(shared, false); } @@ -1615,6 +1643,100 @@ bool FullCodeGenerator::TryLiteralCompare(CompareOperation* expr) { } +void BackEdgeTable::Patch(Isolate* isolate, + Code* unoptimized) { + DisallowHeapAllocation no_gc; + Code* patch = isolate->builtins()->builtin(Builtins::kOnStackReplacement); + + // Iterate over the back edge table and patch every interrupt + // call to an unconditional call to the replacement code. 
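VisitNativeFunctionLiteral above obtains its FunctionTemplate from an extension's GetNativeFunction. On the embedder side that callback lives on v8::Extension; a rough sketch against the public API of this vintage (the exact signatures may differ slightly between V8 versions, and the extension name and function are hypothetical):

#include "v8.h"

// Hypothetical extension exposing one native function to scripts that
// declare: native function Twice();
class TwiceExtension : public v8::Extension {
 public:
  TwiceExtension() : v8::Extension("v8/twice", "native function Twice();") {}

  virtual v8::Handle<v8::FunctionTemplate> GetNativeFunction(
      v8::Handle<v8::String> name) {
    return v8::FunctionTemplate::New(Twice);
  }

  static void Twice(const v8::FunctionCallbackInfo<v8::Value>& args) {
    args.GetReturnValue().Set(args[0]->NumberValue() * 2);
  }
};

// Registered once at startup, e.g.:
//   static TwiceExtension twice_extension;
//   v8::RegisterExtension(&twice_extension);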
+ int loop_nesting_level = unoptimized->allow_osr_at_loop_nesting_level(); + + BackEdgeTable back_edges(unoptimized, &no_gc); + for (uint32_t i = 0; i < back_edges.length(); i++) { + if (static_cast<int>(back_edges.loop_depth(i)) == loop_nesting_level) { + ASSERT_EQ(INTERRUPT, GetBackEdgeState(isolate, + unoptimized, + back_edges.pc(i))); + PatchAt(unoptimized, back_edges.pc(i), ON_STACK_REPLACEMENT, patch); + } + } + + unoptimized->set_back_edges_patched_for_osr(true); + ASSERT(Verify(isolate, unoptimized, loop_nesting_level)); +} + + +void BackEdgeTable::Revert(Isolate* isolate, + Code* unoptimized) { + DisallowHeapAllocation no_gc; + Code* patch = isolate->builtins()->builtin(Builtins::kInterruptCheck); + + // Iterate over the back edge table and revert the patched interrupt calls. + ASSERT(unoptimized->back_edges_patched_for_osr()); + int loop_nesting_level = unoptimized->allow_osr_at_loop_nesting_level(); + + BackEdgeTable back_edges(unoptimized, &no_gc); + for (uint32_t i = 0; i < back_edges.length(); i++) { + if (static_cast<int>(back_edges.loop_depth(i)) <= loop_nesting_level) { + ASSERT_NE(INTERRUPT, GetBackEdgeState(isolate, + unoptimized, + back_edges.pc(i))); + PatchAt(unoptimized, back_edges.pc(i), INTERRUPT, patch); + } + } + + unoptimized->set_back_edges_patched_for_osr(false); + unoptimized->set_allow_osr_at_loop_nesting_level(0); + // Assert that none of the back edges are patched anymore. + ASSERT(Verify(isolate, unoptimized, -1)); +} + + +void BackEdgeTable::AddStackCheck(CompilationInfo* info) { + DisallowHeapAllocation no_gc; + Isolate* isolate = info->isolate(); + Code* code = info->shared_info()->code(); + Address pc = code->instruction_start() + info->osr_pc_offset(); + ASSERT_EQ(ON_STACK_REPLACEMENT, GetBackEdgeState(isolate, code, pc)); + Code* patch = isolate->builtins()->builtin(Builtins::kOsrAfterStackCheck); + PatchAt(code, pc, OSR_AFTER_STACK_CHECK, patch); +} + + +void BackEdgeTable::RemoveStackCheck(CompilationInfo* info) { + DisallowHeapAllocation no_gc; + Isolate* isolate = info->isolate(); + Code* code = info->shared_info()->code(); + Address pc = code->instruction_start() + info->osr_pc_offset(); + if (GetBackEdgeState(isolate, code, pc) == OSR_AFTER_STACK_CHECK) { + Code* patch = isolate->builtins()->builtin(Builtins::kOnStackReplacement); + PatchAt(code, pc, ON_STACK_REPLACEMENT, patch); + } +} + + +#ifdef DEBUG +bool BackEdgeTable::Verify(Isolate* isolate, + Code* unoptimized, + int loop_nesting_level) { + DisallowHeapAllocation no_gc; + BackEdgeTable back_edges(unoptimized, &no_gc); + for (uint32_t i = 0; i < back_edges.length(); i++) { + uint32_t loop_depth = back_edges.loop_depth(i); + CHECK_LE(static_cast<int>(loop_depth), Code::kMaxLoopNestingMarker); + // Assert that all back edges for shallower loops (and only those) + // have already been patched. + CHECK_EQ((static_cast<int>(loop_depth) <= loop_nesting_level), + GetBackEdgeState(isolate, + unoptimized, + back_edges.pc(i)) != INTERRUPT); + } + return true; +} +#endif // DEBUG + + #undef __ diff --git a/deps/v8/src/full-codegen.h b/deps/v8/src/full-codegen.h index 5580cb3e86..e27662e0e3 100644 --- a/deps/v8/src/full-codegen.h +++ b/deps/v8/src/full-codegen.h @@ -139,65 +139,6 @@ class FullCodeGenerator: public AstVisitor { #error Unsupported target architecture. 
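Patch and Revert above walk the same table in opposite directions: Patch flips back edges at exactly the allowed loop depth from INTERRUPT to ON_STACK_REPLACEMENT, while Revert returns every patched edge (depth <= the allowed level) to INTERRUPT, and AddStackCheck/RemoveStackCheck toggle a single edge between ON_STACK_REPLACEMENT and OSR_AFTER_STACK_CHECK. A self-contained sketch of that bookkeeping over a plain in-memory table (the real code rewrites call sites in machine code):

#include <cassert>
#include <cstdint>
#include <vector>

enum BackEdgeState { INTERRUPT, ON_STACK_REPLACEMENT, OSR_AFTER_STACK_CHECK };

struct Edge { uint32_t loop_depth; BackEdgeState state; };

// Sketch of BackEdgeTable::Patch: only edges at the allowed depth flip.
void PatchSketch(std::vector<Edge>& edges, int loop_nesting_level) {
  for (Edge& e : edges) {
    if (static_cast<int>(e.loop_depth) == loop_nesting_level) {
      assert(e.state == INTERRUPT);
      e.state = ON_STACK_REPLACEMENT;  // now calls the OSR builtin
    }
  }
}

// Sketch of BackEdgeTable::Revert: everything patched so far flips back.
void RevertSketch(std::vector<Edge>& edges, int loop_nesting_level) {
  for (Edge& e : edges) {
    if (static_cast<int>(e.loop_depth) <= loop_nesting_level) {
      assert(e.state != INTERRUPT);
      e.state = INTERRUPT;  // back to the interrupt-check builtin
    }
  }
}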
#endif - class BackEdgeTableIterator { - public: - explicit BackEdgeTableIterator(Code* unoptimized, - DisallowHeapAllocation* required) { - ASSERT(unoptimized->kind() == Code::FUNCTION); - instruction_start_ = unoptimized->instruction_start(); - cursor_ = instruction_start_ + unoptimized->back_edge_table_offset(); - ASSERT(cursor_ < instruction_start_ + unoptimized->instruction_size()); - table_length_ = Memory::uint32_at(cursor_); - cursor_ += kTableLengthSize; - end_ = cursor_ + table_length_ * kEntrySize; - } - - bool Done() { return cursor_ >= end_; } - - void Next() { - ASSERT(!Done()); - cursor_ += kEntrySize; - } - - BailoutId ast_id() { - ASSERT(!Done()); - return BailoutId(static_cast<int>( - Memory::uint32_at(cursor_ + kAstIdOffset))); - } - - uint32_t loop_depth() { - ASSERT(!Done()); - return Memory::uint32_at(cursor_ + kLoopDepthOffset); - } - - uint32_t pc_offset() { - ASSERT(!Done()); - return Memory::uint32_at(cursor_ + kPcOffsetOffset); - } - - Address pc() { - ASSERT(!Done()); - return instruction_start_ + pc_offset(); - } - - uint32_t table_length() { return table_length_; } - - private: - static const int kTableLengthSize = kIntSize; - static const int kAstIdOffset = 0 * kIntSize; - static const int kPcOffsetOffset = 1 * kIntSize; - static const int kLoopDepthOffset = 2 * kIntSize; - static const int kEntrySize = 3 * kIntSize; - - Address cursor_; - Address end_; - Address instruction_start_; - uint32_t table_length_; - - DISALLOW_COPY_AND_ASSIGN(BackEdgeTableIterator); - }; - - private: class Breakable; class Iteration; @@ -635,7 +576,7 @@ class FullCodeGenerator: public AstVisitor { void SetFunctionPosition(FunctionLiteral* fun); void SetReturnPosition(FunctionLiteral* fun); void SetStatementPosition(Statement* stmt); - void SetExpressionPosition(Expression* expr, int pos); + void SetExpressionPosition(Expression* expr); void SetStatementPosition(int pos); void SetSourcePosition(int pos); @@ -940,6 +881,93 @@ class AccessorTable: public TemplateHashMap<Literal, }; +class BackEdgeTable { + public: + BackEdgeTable(Code* code, DisallowHeapAllocation* required) { + ASSERT(code->kind() == Code::FUNCTION); + instruction_start_ = code->instruction_start(); + Address table_address = instruction_start_ + code->back_edge_table_offset(); + length_ = Memory::uint32_at(table_address); + start_ = table_address + kTableLengthSize; + } + + uint32_t length() { return length_; } + + BailoutId ast_id(uint32_t index) { + return BailoutId(static_cast<int>( + Memory::uint32_at(entry_at(index) + kAstIdOffset))); + } + + uint32_t loop_depth(uint32_t index) { + return Memory::uint32_at(entry_at(index) + kLoopDepthOffset); + } + + uint32_t pc_offset(uint32_t index) { + return Memory::uint32_at(entry_at(index) + kPcOffsetOffset); + } + + Address pc(uint32_t index) { + return instruction_start_ + pc_offset(index); + } + + enum BackEdgeState { + INTERRUPT, + ON_STACK_REPLACEMENT, + OSR_AFTER_STACK_CHECK + }; + + // Patch all interrupts with allowed loop depth in the unoptimized code to + // unconditionally call replacement_code. + static void Patch(Isolate* isolate, + Code* unoptimized_code); + + // Patch the back edge to the target state, provided the correct callee. + static void PatchAt(Code* unoptimized_code, + Address pc, + BackEdgeState target_state, + Code* replacement_code); + + // Change all patched back edges back to normal interrupts. 
+ static void Revert(Isolate* isolate, + Code* unoptimized_code); + + // Change a back edge patched for on-stack replacement to perform a + // stack check first. + static void AddStackCheck(CompilationInfo* info); + + // Remove the stack check, if available, and replace by on-stack replacement. + static void RemoveStackCheck(CompilationInfo* info); + + // Return the current patch state of the back edge. + static BackEdgeState GetBackEdgeState(Isolate* isolate, + Code* unoptimized_code, + Address pc_after); + +#ifdef DEBUG + // Verify that all back edges of a certain loop depth are patched. + static bool Verify(Isolate* isolate, + Code* unoptimized_code, + int loop_nesting_level); +#endif // DEBUG + + private: + Address entry_at(uint32_t index) { + ASSERT(index < length_); + return start_ + index * kEntrySize; + } + + static const int kTableLengthSize = kIntSize; + static const int kAstIdOffset = 0 * kIntSize; + static const int kPcOffsetOffset = 1 * kIntSize; + static const int kLoopDepthOffset = 2 * kIntSize; + static const int kEntrySize = 3 * kIntSize; + + Address start_; + Address instruction_start_; + uint32_t length_; +}; + + } } // namespace v8::internal #endif // V8_FULL_CODEGEN_H_ diff --git a/deps/v8/src/global-handles.cc b/deps/v8/src/global-handles.cc index 1a98e49ff3..2ebe1c0088 100644 --- a/deps/v8/src/global-handles.cc +++ b/deps/v8/src/global-handles.cc @@ -79,7 +79,7 @@ class GlobalHandles::Node { Internals::kNodeIsPartiallyDependentShift); } -#ifdef ENABLE_EXTRA_CHECKS +#ifdef ENABLE_HANDLE_ZAPPING ~Node() { // TODO(1428): if it's a weak handle we should have invoked its callback. // Zap the values for eager trapping. diff --git a/deps/v8/src/globals.h b/deps/v8/src/globals.h index 1977e68c82..3456030b7e 100644 --- a/deps/v8/src/globals.h +++ b/deps/v8/src/globals.h @@ -232,6 +232,8 @@ const uint32_t kMaxUInt32 = 0xFFFFFFFFu; const int kCharSize = sizeof(char); // NOLINT const int kShortSize = sizeof(short); // NOLINT const int kIntSize = sizeof(int); // NOLINT +const int kInt32Size = sizeof(int32_t); // NOLINT +const int kInt64Size = sizeof(int64_t); // NOLINT const int kDoubleSize = sizeof(double); // NOLINT const int kIntptrSize = sizeof(intptr_t); // NOLINT const int kPointerSize = sizeof(void*); // NOLINT @@ -248,10 +250,12 @@ const int kRandomStateSize = 2 * kIntSize; const int kPointerSizeLog2 = 3; const intptr_t kIntptrSignBit = V8_INT64_C(0x8000000000000000); const uintptr_t kUintptrAllBitsSet = V8_UINT64_C(0xFFFFFFFFFFFFFFFF); +const bool kIs64BitArch = true; #else const int kPointerSizeLog2 = 2; const intptr_t kIntptrSignBit = 0x80000000; const uintptr_t kUintptrAllBitsSet = 0xFFFFFFFFu; +const bool kIs64BitArch = false; #endif const int kBitsPerByte = 8; @@ -354,7 +358,7 @@ F FUNCTION_CAST(Address addr) { // Define DISABLE_ASAN macros. 
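The BackEdgeTable class above replaces the removed iterator with random access over the same packed layout: a uint32 entry count followed by three-uint32 records holding AST id, pc offset and loop depth (the kAstIdOffset/kPcOffsetOffset/kLoopDepthOffset constants). A freestanding decoder for that layout, as a sketch:

#include <cstdint>
#include <cstring>

struct BackEdgeRecord { uint32_t ast_id, pc_offset, loop_depth; };

// 'table' points at the back edge table: a uint32 length, then packed records.
uint32_t ReadLengthSketch(const uint8_t* table) {
  uint32_t length;
  std::memcpy(&length, table, sizeof(length));
  return length;
}

BackEdgeRecord ReadRecordSketch(const uint8_t* table, uint32_t index) {
  const uint8_t* entry =
      table + sizeof(uint32_t) + index * 3 * sizeof(uint32_t);  // skip length
  BackEdgeRecord r;
  std::memcpy(&r.ast_id,     entry + 0 * sizeof(uint32_t), sizeof(uint32_t));
  std::memcpy(&r.pc_offset,  entry + 1 * sizeof(uint32_t), sizeof(uint32_t));
  std::memcpy(&r.loop_depth, entry + 2 * sizeof(uint32_t), sizeof(uint32_t));
  return r;
}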
#if defined(__has_feature) #if __has_feature(address_sanitizer) -#define DISABLE_ASAN __attribute__((no_address_safety_analysis)) +#define DISABLE_ASAN __attribute__((no_sanitize_address)) #endif #endif diff --git a/deps/v8/src/handles-inl.h b/deps/v8/src/handles-inl.h index 5b879d8f08..ec69c3fdbe 100644 --- a/deps/v8/src/handles-inl.h +++ b/deps/v8/src/handles-inl.h @@ -130,16 +130,17 @@ void HandleScope::CloseScope(Isolate* isolate, v8::ImplementationUtilities::HandleScopeData* current = isolate->handle_scope_data(); - current->next = prev_next; + std::swap(current->next, prev_next); current->level--; if (current->limit != prev_limit) { current->limit = prev_limit; DeleteExtensions(isolate); - } - -#ifdef ENABLE_EXTRA_CHECKS - ZapRange(prev_next, prev_limit); +#ifdef ENABLE_HANDLE_ZAPPING + ZapRange(current->next, prev_limit); + } else { + ZapRange(current->next, prev_next); #endif + } } diff --git a/deps/v8/src/handles.cc b/deps/v8/src/handles.cc index b3704df698..4cb1827d8e 100644 --- a/deps/v8/src/handles.cc +++ b/deps/v8/src/handles.cc @@ -101,7 +101,7 @@ void HandleScope::DeleteExtensions(Isolate* isolate) { } -#ifdef ENABLE_EXTRA_CHECKS +#ifdef ENABLE_HANDLE_ZAPPING void HandleScope::ZapRange(Object** start, Object** end) { ASSERT(end - start <= kHandleBlockSize); for (Object** p = start; p != end; p++) { @@ -150,54 +150,6 @@ Handle<JSGlobalProxy> ReinitializeJSGlobalProxy( } -void SetExpectedNofProperties(Handle<JSFunction> func, int nof) { - // If objects constructed from this function exist then changing - // 'estimated_nof_properties' is dangerous since the previous value might - // have been compiled into the fast construct stub. More over, the inobject - // slack tracking logic might have adjusted the previous value, so even - // passing the same value is risky. - if (func->shared()->live_objects_may_exist()) return; - - func->shared()->set_expected_nof_properties(nof); - if (func->has_initial_map()) { - Handle<Map> new_initial_map = - func->GetIsolate()->factory()->CopyMap( - Handle<Map>(func->initial_map())); - new_initial_map->set_unused_property_fields(nof); - func->set_initial_map(*new_initial_map); - } -} - - -static int ExpectedNofPropertiesFromEstimate(int estimate) { - // If no properties are added in the constructor, they are more likely - // to be added later. - if (estimate == 0) estimate = 2; - - // We do not shrink objects that go into a snapshot (yet), so we adjust - // the estimate conservatively. - if (Serializer::enabled()) return estimate + 2; - - // Inobject slack tracking will reclaim redundant inobject space later, - // so we can afford to adjust the estimate generously. - if (FLAG_clever_optimizations) { - return estimate + 8; - } else { - return estimate + 3; - } -} - - -void SetExpectedNofPropertiesFromEstimate(Handle<SharedFunctionInfo> shared, - int estimate) { - // See the comment in SetExpectedNofProperties. 
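In the CloseScope change further up, std::swap leaves current->next at the restored scope start while the local prev_next holds the old top of the closing scope, so each branch can zap exactly the handle slots that just died: up to prev_limit when extension blocks were freed, otherwise up to the old top. A condensed, self-contained sketch, assuming ZapRange poisons slots with a trap pattern (the value below is a stand-in, not V8's real constant):

#include <utility>

struct Object;  // opaque stand-in for v8::internal::Object

struct HandleScopeDataSketch { Object** next; Object** limit; int level; };

void ZapRangeSketch(Object** start, Object** end) {
  for (Object** p = start; p != end; ++p)
    *p = reinterpret_cast<Object*>(0xbaddead0u);  // stale handles now trap
}

void CloseScopeSketch(HandleScopeDataSketch* current,
                      Object** prev_next, Object** prev_limit) {
  std::swap(current->next, prev_next);  // restore next; prev_next = old top
  current->level--;
  if (current->limit != prev_limit) {   // the scope had grown extension blocks
    current->limit = prev_limit;
    // (the real code frees the extension blocks here via DeleteExtensions)
    ZapRangeSketch(current->next, prev_limit);  // poison the kept block's tail
  } else {
    ZapRangeSketch(current->next, prev_next);   // poison this scope's handles
  }
}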
- if (shared->live_objects_may_exist()) return; - - shared->set_expected_nof_properties( - ExpectedNofPropertiesFromEstimate(estimate)); -} - - void FlattenString(Handle<String> string) { CALL_HEAP_FUNCTION_VOID(string->GetIsolate(), string->TryFlatten()); } @@ -285,30 +237,6 @@ Handle<Object> LookupSingleCharacterStringFromCode(Isolate* isolate, } -Handle<String> SubString(Handle<String> str, - int start, - int end, - PretenureFlag pretenure) { - CALL_HEAP_FUNCTION(str->GetIsolate(), - str->SubString(start, end, pretenure), String); -} - - -Handle<JSObject> Copy(Handle<JSObject> obj) { - Isolate* isolate = obj->GetIsolate(); - CALL_HEAP_FUNCTION(isolate, - isolate->heap()->CopyJSObject(*obj), JSObject); -} - - -Handle<JSObject> DeepCopy(Handle<JSObject> obj) { - Isolate* isolate = obj->GetIsolate(); - CALL_HEAP_FUNCTION(isolate, - obj->DeepCopy(isolate), - JSObject); -} - - // Wrappers for scripts are kept alive and cached in weak global // handles referred from foreign objects held by the scripts as long as // they are used. When they are not used anymore, the garbage @@ -905,4 +833,15 @@ DeferredHandles* DeferredHandleScope::Detach() { } +void AddWeakObjectToCodeDependency(Heap* heap, + Handle<Object> object, + Handle<Code> code) { + heap->EnsureWeakObjectToCodeTable(); + Handle<DependentCode> dep(heap->LookupWeakObjectToCodeDependency(*object)); + dep = DependentCode::Insert(dep, DependentCode::kWeaklyEmbeddedGroup, code); + CALL_HEAP_FUNCTION_VOID(heap->isolate(), + heap->AddWeakObjectToCodeDependency(*object, *dep)); +} + + } } // namespace v8::internal diff --git a/deps/v8/src/handles.h b/deps/v8/src/handles.h index c3e4dca1a6..cfdecac190 100644 --- a/deps/v8/src/handles.h +++ b/deps/v8/src/handles.h @@ -177,7 +177,7 @@ class HandleScope { // Extend the handle scope making room for more handles. static internal::Object** Extend(Isolate* isolate); -#ifdef ENABLE_EXTRA_CHECKS +#ifdef ENABLE_HANDLE_ZAPPING // Zaps the handles in the half-open interval [start, end). static void ZapRange(Object** start, Object** end); #endif @@ -255,10 +255,6 @@ Handle<Object> GetProperty(Isolate* isolate, Handle<Object> LookupSingleCharacterStringFromCode(Isolate* isolate, uint32_t index); -Handle<JSObject> Copy(Handle<JSObject> obj); - -Handle<JSObject> DeepCopy(Handle<JSObject> obj); - Handle<FixedArray> AddKeysFromJSArray(Handle<FixedArray>, Handle<JSArray> array); @@ -303,19 +299,6 @@ Handle<FixedArray> GetEnumPropertyKeys(Handle<JSObject> object, Handle<FixedArray> UnionOfKeys(Handle<FixedArray> first, Handle<FixedArray> second); -Handle<String> SubString(Handle<String> str, - int start, - int end, - PretenureFlag pretenure = NOT_TENURED); - -// Sets the expected number of properties for the function's instances. -void SetExpectedNofProperties(Handle<JSFunction> func, int nof); - -// Sets the expected number of properties based on estimate from compiler. -void SetExpectedNofPropertiesFromEstimate(Handle<SharedFunctionInfo> shared, - int estimate); - - Handle<JSGlobalProxy> ReinitializeJSGlobalProxy( Handle<JSFunction> constructor, Handle<JSGlobalProxy> global); @@ -330,6 +313,9 @@ Handle<ObjectHashTable> PutIntoObjectHashTable(Handle<ObjectHashTable> table, Handle<Object> key, Handle<Object> value); +void AddWeakObjectToCodeDependency(Heap* heap, + Handle<Object> object, + Handle<Code> code); // Seal off the current HandleScope so that new handles can only be created // if a new HandleScope is entered. 
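AddWeakObjectToCodeDependency above maintains a heap-side table from a weakly embedded object to the DependentCode list of optimized code that must be deoptimized if that object dies. The real table is a weak GC data structure, not a standard container; as a plain-container schematic only:

#include <map>
#include <vector>

struct Code;  // opaque stand-in for v8::internal::Code

// Schematic: object address -> code objects that weakly embed it.
typedef std::map<const void*, std::vector<Code*> > WeakObjectToCode;

void AddDependencySketch(WeakObjectToCode& table,
                         const void* object, Code* code) {
  table[object].push_back(code);
}

// When the GC finds 'object' dead, every dependent code object is
// deoptimized and the mapping dropped (the real grouping above is
// DependentCode::kWeaklyEmbeddedGroup).
std::vector<Code*> TakeDependentsSketch(WeakObjectToCode& table,
                                        const void* object) {
  std::vector<Code*> dependents;
  dependents.swap(table[object]);
  table.erase(object);
  return dependents;
}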
diff --git a/deps/v8/src/harmony-math.js b/deps/v8/src/harmony-math.js new file mode 100644 index 0000000000..a4d3f2e8a5 --- /dev/null +++ b/deps/v8/src/harmony-math.js @@ -0,0 +1,60 @@ +// Copyright 2013 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +'use strict'; + +// ES6 draft 09-27-13, section 20.2.2.28. +function MathSign(x) { + x = TO_NUMBER_INLINE(x); + if (x > 0) return 1; + if (x < 0) return -1; + if (x === 0) return x; + return NAN; +} + + +// ES6 draft 09-27-13, section 20.2.2.34. +function MathTrunc(x) { + x = TO_NUMBER_INLINE(x); + if (x > 0) return MathFloor(x); + if (x < 0) return MathCeil(x); + if (x === 0) return x; + return NAN; +} + + +function ExtendMath() { + %CheckIsBootstrapping(); + + // Set up the non-enumerable functions on the Math object. + InstallFunctions($Math, DONT_ENUM, $Array( + "sign", MathSign, + "trunc", MathTrunc + )); +} + +ExtendMath(); diff --git a/deps/v8/src/heap-inl.h b/deps/v8/src/heap-inl.h index 4f1960386a..ad6f44f935 100644 --- a/deps/v8/src/heap-inl.h +++ b/deps/v8/src/heap-inl.h @@ -140,12 +140,11 @@ MaybeObject* Heap::AllocateOneByteInternalizedString(Vector<const uint8_t> str, // Compute map and object size. Map* map = ascii_internalized_string_map(); int size = SeqOneByteString::SizeFor(str.length()); + AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, TENURED); // Allocate string. Object* result; - { MaybeObject* maybe_result = (size > Page::kMaxNonCodeHeapObjectSize) - ? lo_space_->AllocateRaw(size, NOT_EXECUTABLE) - : old_data_space_->AllocateRaw(size); + { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE); if (!maybe_result->ToObject(&result)) return maybe_result; } @@ -174,12 +173,11 @@ MaybeObject* Heap::AllocateTwoByteInternalizedString(Vector<const uc16> str, // Compute map and object size. Map* map = internalized_string_map(); int size = SeqTwoByteString::SizeFor(str.length()); + AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, TENURED); // Allocate string. 
Object* result; - { MaybeObject* maybe_result = (size > Page::kMaxNonCodeHeapObjectSize) - ? lo_space_->AllocateRaw(size, NOT_EXECUTABLE) - : old_data_space_->AllocateRaw(size); + { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE); if (!maybe_result->ToObject(&result)) return maybe_result; } @@ -208,10 +206,17 @@ MaybeObject* Heap::CopyFixedDoubleArray(FixedDoubleArray* src) { } +MaybeObject* Heap::CopyConstantPoolArray(ConstantPoolArray* src) { + return CopyConstantPoolArrayWithMap(src, src->map()); +} + + MaybeObject* Heap::AllocateRaw(int size_in_bytes, AllocationSpace space, AllocationSpace retry_space) { - ASSERT(AllowHandleAllocation::IsAllowed() && gc_state_ == NOT_IN_GC); + ASSERT(AllowHandleAllocation::IsAllowed()); + ASSERT(AllowHeapAllocation::IsAllowed()); + ASSERT(gc_state_ == NOT_IN_GC); ASSERT(space != NEW_SPACE || retry_space == OLD_POINTER_SPACE || retry_space == OLD_DATA_SPACE || @@ -291,40 +296,6 @@ void Heap::FinalizeExternalString(String* string) { } -MaybeObject* Heap::AllocateRawMap() { -#ifdef DEBUG - isolate_->counters()->objs_since_last_full()->Increment(); - isolate_->counters()->objs_since_last_young()->Increment(); -#endif - MaybeObject* result = map_space_->AllocateRaw(Map::kSize); - if (result->IsFailure()) old_gen_exhausted_ = true; - return result; -} - - -MaybeObject* Heap::AllocateRawCell() { -#ifdef DEBUG - isolate_->counters()->objs_since_last_full()->Increment(); - isolate_->counters()->objs_since_last_young()->Increment(); -#endif - MaybeObject* result = cell_space_->AllocateRaw(Cell::kSize); - if (result->IsFailure()) old_gen_exhausted_ = true; - return result; -} - - -MaybeObject* Heap::AllocateRawPropertyCell() { -#ifdef DEBUG - isolate_->counters()->objs_since_last_full()->Increment(); - isolate_->counters()->objs_since_last_young()->Increment(); -#endif - MaybeObject* result = - property_cell_space_->AllocateRaw(PropertyCell::kSize); - if (result->IsFailure()) old_gen_exhausted_ = true; - return result; -} - - bool Heap::InNewSpace(Object* object) { bool result = new_space_.Contains(object); ASSERT(!result || // Either not in new space @@ -525,6 +496,13 @@ void Heap::ScavengeObject(HeapObject** p, HeapObject* object) { return; } + if (FLAG_trace_track_allocation_sites && object->IsJSObject()) { + if (AllocationMemento::FindForJSObject(JSObject::cast(object), true) != + NULL) { + object->GetIsolate()->heap()->allocation_mementos_found_++; + } + } + // AllocationMementos are unrooted and shouldn't survive a scavenge ASSERT(object->map() != object->GetHeap()->allocation_memento_map()); // Call the slow part of scavenge object. 
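Both internalized-string allocators above now delegate the large-object check to SelectSpace plus AllocateRaw with a retry space, instead of comparing against Page::kMaxNonCodeHeapObjectSize inline at each call site. The decision they replace, as a small sketch (names simplified; both call sites here pass TENURED, so they land in old data space unless the string is page-sized):

enum SpaceKindSketch { NEW_SPACE_K, OLD_DATA_SPACE_K, LO_SPACE_K };

// Sketch of the choice SelectSpace makes for these call sites: objects too
// big for a regular page go to large-object space, tenured objects to the
// preferred old space, everything else to new space.
SpaceKindSketch SelectSpaceSketch(int size_in_bytes,
                                  int max_non_code_object_size,
                                  bool tenured) {
  if (size_in_bytes > max_non_code_object_size) return LO_SPACE_K;
  return tenured ? OLD_DATA_SPACE_K : NEW_SPACE_K;
}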
@@ -532,14 +510,6 @@ void Heap::ScavengeObject(HeapObject** p, HeapObject* object) { } -MaybeObject* Heap::AllocateEmptyJSArrayWithAllocationSite( - ElementsKind elements_kind, - Handle<AllocationSite> allocation_site) { - return AllocateJSArrayAndStorageWithAllocationSite(elements_kind, 0, 0, - allocation_site, DONT_INITIALIZE_ARRAY_ELEMENTS); -} - - bool Heap::CollectGarbage(AllocationSpace space, const char* gc_reason) { const char* collector_reason = NULL; GarbageCollector collector = SelectGarbageCollector(space, &collector_reason); @@ -847,15 +817,15 @@ AlwaysAllocateScope::~AlwaysAllocateScope() { #ifdef VERIFY_HEAP -NoWeakEmbeddedMapsVerificationScope::NoWeakEmbeddedMapsVerificationScope() { +NoWeakObjectVerificationScope::NoWeakObjectVerificationScope() { Isolate* isolate = Isolate::Current(); - isolate->heap()->no_weak_embedded_maps_verification_scope_depth_++; + isolate->heap()->no_weak_object_verification_scope_depth_++; } -NoWeakEmbeddedMapsVerificationScope::~NoWeakEmbeddedMapsVerificationScope() { +NoWeakObjectVerificationScope::~NoWeakObjectVerificationScope() { Isolate* isolate = Isolate::Current(); - isolate->heap()->no_weak_embedded_maps_verification_scope_depth_--; + isolate->heap()->no_weak_object_verification_scope_depth_--; } #endif diff --git a/deps/v8/src/heap-profiler.cc b/deps/v8/src/heap-profiler.cc index e66af3364d..6b159a98a3 100644 --- a/deps/v8/src/heap-profiler.cc +++ b/deps/v8/src/heap-profiler.cc @@ -27,6 +27,7 @@ #include "v8.h" +#include "deoptimizer.h" #include "heap-profiler.h" #include "heap-snapshot-generator-inl.h" @@ -35,7 +36,8 @@ namespace internal { HeapProfiler::HeapProfiler(Heap* heap) : snapshots_(new HeapSnapshotsCollection(heap)), - next_snapshot_uid_(1) { + next_snapshot_uid_(1), + is_tracking_allocations_(false) { } @@ -132,14 +134,86 @@ SnapshotObjectId HeapProfiler::GetSnapshotObjectId(Handle<Object> obj) { } -void HeapProfiler::ObjectMoveEvent(Address from, Address to) { - snapshots_->ObjectMoveEvent(from, to); +void HeapProfiler::ObjectMoveEvent(Address from, Address to, int size) { + snapshots_->ObjectMoveEvent(from, to, size); } + +void HeapProfiler::NewObjectEvent(Address addr, int size) { + snapshots_->NewObjectEvent(addr, size); +} + + +void HeapProfiler::UpdateObjectSizeEvent(Address addr, int size) { + snapshots_->UpdateObjectSizeEvent(addr, size); +} + + void HeapProfiler::SetRetainedObjectInfo(UniqueId id, RetainedObjectInfo* info) { // TODO(yurus, marja): Don't route this information through GlobalHandles. 
heap()->isolate()->global_handles()->SetRetainedObjectInfo(id, info); } + +void HeapProfiler::StartHeapAllocationsRecording() { + StartHeapObjectsTracking(); + is_tracking_allocations_ = true; + DropCompiledCode(); + snapshots_->UpdateHeapObjectsMap(); +} + + +void HeapProfiler::StopHeapAllocationsRecording() { + StopHeapObjectsTracking(); + is_tracking_allocations_ = false; + DropCompiledCode(); +} + + +void HeapProfiler::RecordObjectAllocationFromMasm(Isolate* isolate, + Address obj, + int size) { + isolate->heap_profiler()->NewObjectEvent(obj, size); +} + + +void HeapProfiler::DropCompiledCode() { + Isolate* isolate = heap()->isolate(); + HandleScope scope(isolate); + + if (FLAG_concurrent_recompilation) { + isolate->optimizing_compiler_thread()->Flush(); + } + + Deoptimizer::DeoptimizeAll(isolate); + + Handle<Code> lazy_compile = + Handle<Code>(isolate->builtins()->builtin(Builtins::kLazyCompile)); + + heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask, + "switch allocations tracking"); + + DisallowHeapAllocation no_allocation; + + HeapIterator iterator(heap()); + HeapObject* obj = NULL; + while (((obj = iterator.next()) != NULL)) { + if (obj->IsJSFunction()) { + JSFunction* function = JSFunction::cast(obj); + SharedFunctionInfo* shared = function->shared(); + + if (!shared->allows_lazy_compilation()) continue; + if (!shared->script()->IsScript()) continue; + + Code::Kind kind = function->code()->kind(); + if (kind == Code::FUNCTION || kind == Code::BUILTIN) { + function->set_code(*lazy_compile); + shared->set_code(*lazy_compile); + } + } + } +} + + } } // namespace v8::internal diff --git a/deps/v8/src/heap-profiler.h b/deps/v8/src/heap-profiler.h index 5ae60fa923..74002278d4 100644 --- a/deps/v8/src/heap-profiler.h +++ b/deps/v8/src/heap-profiler.h @@ -37,14 +37,6 @@ namespace internal { class HeapSnapshot; class HeapSnapshotsCollection; -#define HEAP_PROFILE(heap, call) \ - do { \ - v8::internal::HeapProfiler* profiler = heap->isolate()->heap_profiler(); \ - if (profiler != NULL && profiler->is_profiling()) { \ - profiler->call; \ - } \ - } while (false) - class HeapProfiler { public: explicit HeapProfiler(Heap* heap); @@ -63,13 +55,22 @@ class HeapProfiler { void StartHeapObjectsTracking(); void StopHeapObjectsTracking(); + + static void RecordObjectAllocationFromMasm(Isolate* isolate, + Address obj, + int size); + SnapshotObjectId PushHeapObjectsStats(OutputStream* stream); int GetSnapshotsCount(); HeapSnapshot* GetSnapshot(int index); SnapshotObjectId GetSnapshotObjectId(Handle<Object> obj); void DeleteAllSnapshots(); - void ObjectMoveEvent(Address from, Address to); + void ObjectMoveEvent(Address from, Address to, int size); + + void NewObjectEvent(Address addr, int size); + + void UpdateObjectSizeEvent(Address addr, int size); void DefineWrapperClass( uint16_t class_id, v8::HeapProfiler::WrapperInfoCallback callback); @@ -82,12 +83,26 @@ class HeapProfiler { void SetRetainedObjectInfo(UniqueId id, RetainedObjectInfo* info); + bool is_tracking_allocations() { + return is_tracking_allocations_; + } + + void StartHeapAllocationsRecording(); + void StopHeapAllocationsRecording(); + + int FindUntrackedObjects() { + return snapshots_->FindUntrackedObjects(); + } + + void DropCompiledCode(); + private: Heap* heap() const { return snapshots_->heap(); } HeapSnapshotsCollection* snapshots_; unsigned next_snapshot_uid_; List<v8::HeapProfiler::WrapperInfoCallback> wrapper_callbacks_; + bool is_tracking_allocations_; }; } } // namespace v8::internal diff --git 
a/deps/v8/src/heap-snapshot-generator.cc b/deps/v8/src/heap-snapshot-generator.cc index bd47eec63b..10d113c3d1 100644 --- a/deps/v8/src/heap-snapshot-generator.cc +++ b/deps/v8/src/heap-snapshot-generator.cc @@ -29,6 +29,7 @@ #include "heap-snapshot-generator-inl.h" +#include "allocation-tracker.h" #include "heap-profiler.h" #include "debug.h" #include "types.h" @@ -397,7 +398,7 @@ void HeapObjectsMap::SnapshotGenerationFinished() { } -void HeapObjectsMap::MoveObject(Address from, Address to) { +void HeapObjectsMap::MoveObject(Address from, Address to, int object_size) { ASSERT(to != NULL); ASSERT(from != NULL); if (from == to) return; @@ -428,11 +429,39 @@ void HeapObjectsMap::MoveObject(Address from, Address to) { int from_entry_info_index = static_cast<int>(reinterpret_cast<intptr_t>(from_value)); entries_.at(from_entry_info_index).addr = to; + // Size of an object can change during its life, so to keep information + // about the object in entries_ consistent, we have to adjust size when the + // object is migrated. + if (FLAG_heap_profiler_trace_objects) { + PrintF("Move object from %p to %p old size %6d new size %6d\n", + from, + to, + entries_.at(from_entry_info_index).size, + object_size); + } + entries_.at(from_entry_info_index).size = object_size; to_entry->value = from_value; } } +void HeapObjectsMap::NewObject(Address addr, int size) { + if (FLAG_heap_profiler_trace_objects) { + PrintF("New object : %p %6d. Next address is %p\n", + addr, + size, + addr + size); + } + ASSERT(addr != NULL); + FindOrAddEntry(addr, size, false); +} + + +void HeapObjectsMap::UpdateObjectSize(Address addr, int size) { + FindOrAddEntry(addr, size, false); +} + + SnapshotObjectId HeapObjectsMap::FindEntry(Address addr) { HashMap::Entry* entry = entries_map_.Lookup(addr, ComputePointerHash(addr), false); @@ -445,7 +474,8 @@ SnapshotObjectId HeapObjectsMap::FindEntry(Address addr) { SnapshotObjectId HeapObjectsMap::FindOrAddEntry(Address addr, - unsigned int size) { + unsigned int size, + bool accessed) { ASSERT(static_cast<uint32_t>(entries_.length()) > entries_map_.occupancy()); HashMap::Entry* entry = entries_map_.Lookup(addr, ComputePointerHash(addr), true); @@ -453,14 +483,20 @@ SnapshotObjectId HeapObjectsMap::FindOrAddEntry(Address addr, int entry_index = static_cast<int>(reinterpret_cast<intptr_t>(entry->value)); EntryInfo& entry_info = entries_.at(entry_index); - entry_info.accessed = true; + entry_info.accessed = accessed; + if (FLAG_heap_profiler_trace_objects) { + PrintF("Update object size : %p with old size %d and new size %d\n", + addr, + entry_info.size, + size); + } entry_info.size = size; return entry_info.id; } entry->value = reinterpret_cast<void*>(entries_.length()); SnapshotObjectId id = next_id_; next_id_ += kObjectIdStep; - entries_.Add(EntryInfo(id, addr, size)); + entries_.Add(EntryInfo(id, addr, size, accessed)); ASSERT(static_cast<uint32_t>(entries_.length()) > entries_map_.occupancy()); return id; } @@ -472,6 +508,10 @@ void HeapObjectsMap::StopHeapObjectsTracking() { void HeapObjectsMap::UpdateHeapObjectsMap() { + if (FLAG_heap_profiler_trace_objects) { + PrintF("Begin HeapObjectsMap::UpdateHeapObjectsMap. 
map has %d entries.\n", + entries_map_.occupancy()); + } heap_->CollectAllGarbage(Heap::kMakeHeapIterableMask, "HeapSnapshotsCollection::UpdateHeapObjectsMap"); HeapIterator iterator(heap_); @@ -479,8 +519,129 @@ void HeapObjectsMap::UpdateHeapObjectsMap() { obj != NULL; obj = iterator.next()) { FindOrAddEntry(obj->address(), obj->Size()); + if (FLAG_heap_profiler_trace_objects) { + PrintF("Update object : %p %6d. Next address is %p\n", + obj->address(), + obj->Size(), + obj->address() + obj->Size()); + } } RemoveDeadEntries(); + if (FLAG_heap_profiler_trace_objects) { + PrintF("End HeapObjectsMap::UpdateHeapObjectsMap. map has %d entries.\n", + entries_map_.occupancy()); + } +} + + +namespace { + + +struct HeapObjectInfo { + HeapObjectInfo(HeapObject* obj, int expected_size) + : obj(obj), + expected_size(expected_size) { + } + + HeapObject* obj; + int expected_size; + + bool IsValid() const { return expected_size == obj->Size(); } + + void Print() const { + if (expected_size == 0) { + PrintF("Untracked object : %p %6d. Next address is %p\n", + obj->address(), + obj->Size(), + obj->address() + obj->Size()); + } else if (obj->Size() != expected_size) { + PrintF("Wrong size %6d: %p %6d. Next address is %p\n", + expected_size, + obj->address(), + obj->Size(), + obj->address() + obj->Size()); + } else { + PrintF("Good object : %p %6d. Next address is %p\n", + obj->address(), + expected_size, + obj->address() + obj->Size()); + } + } +}; + + +static int comparator(const HeapObjectInfo* a, const HeapObjectInfo* b) { + if (a->obj < b->obj) return -1; + if (a->obj > b->obj) return 1; + return 0; +} + + +} // namespace + + +int HeapObjectsMap::FindUntrackedObjects() { + List<HeapObjectInfo> heap_objects(1000); + + HeapIterator iterator(heap_); + int untracked = 0; + for (HeapObject* obj = iterator.next(); + obj != NULL; + obj = iterator.next()) { + HashMap::Entry* entry = entries_map_.Lookup( + obj->address(), ComputePointerHash(obj->address()), false); + if (entry == NULL) { + ++untracked; + if (FLAG_heap_profiler_trace_objects) { + heap_objects.Add(HeapObjectInfo(obj, 0)); + } + } else { + int entry_index = static_cast<int>( + reinterpret_cast<intptr_t>(entry->value)); + EntryInfo& entry_info = entries_.at(entry_index); + if (FLAG_heap_profiler_trace_objects) { + heap_objects.Add(HeapObjectInfo(obj, + static_cast<int>(entry_info.size))); + if (obj->Size() != static_cast<int>(entry_info.size)) + ++untracked; + } else { + CHECK_EQ(obj->Size(), static_cast<int>(entry_info.size)); + } + } + } + if (FLAG_heap_profiler_trace_objects) { + PrintF("\nBegin HeapObjectsMap::FindUntrackedObjects. %d entries in map.\n", + entries_map_.occupancy()); + heap_objects.Sort(comparator); + int last_printed_object = -1; + bool print_next_object = false; + for (int i = 0; i < heap_objects.length(); ++i) { + const HeapObjectInfo& object_info = heap_objects[i]; + if (!object_info.IsValid()) { + ++untracked; + if (last_printed_object != i - 1) { + if (i > 0) { + PrintF("%d objects were skipped\n", i - 1 - last_printed_object); + heap_objects[i - 1].Print(); + } + } + object_info.Print(); + last_printed_object = i; + print_next_object = true; + } else if (print_next_object) { + object_info.Print(); + print_next_object = false; + last_printed_object = i; + } + } + if (last_printed_object < heap_objects.length() - 1) { + PrintF("Last %d objects were skipped\n", + heap_objects.length() - 1 - last_printed_object); + } + PrintF("End HeapObjectsMap::FindUntrackedObjects. 
%d entries in map.\n\n", + entries_map_.occupancy()); + } + return untracked; } @@ -587,7 +748,8 @@ size_t HeapObjectsMap::GetUsedMemorySize() const { HeapSnapshotsCollection::HeapSnapshotsCollection(Heap* heap) : is_tracking_objects_(false), names_(heap), - ids_(heap) { + ids_(heap), + allocation_tracker_(NULL) { } @@ -597,10 +759,29 @@ static void DeleteHeapSnapshot(HeapSnapshot** snapshot_ptr) { HeapSnapshotsCollection::~HeapSnapshotsCollection() { + delete allocation_tracker_; snapshots_.Iterate(DeleteHeapSnapshot); } +void HeapSnapshotsCollection::StartHeapObjectsTracking() { + ids_.UpdateHeapObjectsMap(); + if (allocation_tracker_ == NULL) { + allocation_tracker_ = new AllocationTracker(&ids_, names()); + } + is_tracking_objects_ = true; +} + + +void HeapSnapshotsCollection::StopHeapObjectsTracking() { + ids_.StopHeapObjectsTracking(); + if (allocation_tracker_ != NULL) { + delete allocation_tracker_; + allocation_tracker_ = NULL; + } +} + + HeapSnapshot* HeapSnapshotsCollection::NewSnapshot(const char* name, unsigned uid) { is_tracking_objects_ = true; // Start watching for heap objects moves. @@ -644,6 +825,15 @@ Handle<HeapObject> HeapSnapshotsCollection::FindHeapObjectById( } +void HeapSnapshotsCollection::NewObjectEvent(Address addr, int size) { + DisallowHeapAllocation no_allocation; + ids_.NewObject(addr, size); + if (allocation_tracker_ != NULL) { + allocation_tracker_->NewObjectEvent(addr, size); + } +} + + size_t HeapSnapshotsCollection::GetUsedMemorySize() const { size_t size = sizeof(*this); size += names_.GetUsedMemorySize(); @@ -1301,6 +1491,10 @@ void V8HeapExplorer::ExtractAllocationSiteReferences(int entry, AllocationSite* site) { SetInternalReference(site, entry, "transition_info", site->transition_info(), AllocationSite::kTransitionInfoOffset); + SetInternalReference(site, entry, "nested_site", site->nested_site(), + AllocationSite::kNestedSiteOffset); + SetInternalReference(site, entry, "dependent_code", site->dependent_code(), + AllocationSite::kDependentCodeOffset); } @@ -2438,6 +2632,10 @@ const int HeapSnapshotJSONSerializer::kEdgeFieldsCount = 3; const int HeapSnapshotJSONSerializer::kNodeFieldsCount = 5; void HeapSnapshotJSONSerializer::Serialize(v8::OutputStream* stream) { + if (AllocationTracker* allocation_tracker = + snapshot_->collection()->allocation_tracker()) { + allocation_tracker->PrepareForSerialization(); + } ASSERT(writer_ == NULL); writer_ = new OutputStreamWriter(stream); SerializeImpl(); @@ -2461,6 +2659,16 @@ void HeapSnapshotJSONSerializer::SerializeImpl() { SerializeEdges(); if (writer_->aborted()) return; writer_->AddString("],\n"); + + writer_->AddString("\"trace_function_infos\":["); + SerializeTraceNodeInfos(); + if (writer_->aborted()) return; + writer_->AddString("],\n"); + writer_->AddString("\"trace_tree\":["); + SerializeTraceTree(); + if (writer_->aborted()) return; + writer_->AddString("],\n"); + writer_->AddString("\"strings\":["); SerializeStrings(); if (writer_->aborted()) return; @@ -2472,7 +2680,7 @@ void HeapSnapshotJSONSerializer::SerializeImpl() { int HeapSnapshotJSONSerializer::GetStringId(const char* s) { HashMap::Entry* cache_entry = strings_.Lookup( - const_cast<char*>(s), ObjectHash(s), true); + const_cast<char*>(s), StringHash(s), true); if (cache_entry->value == NULL) { cache_entry->value = reinterpret_cast<void*>(next_string_id_++); } @@ -2621,7 +2829,20 @@ void HeapSnapshotJSONSerializer::SerializeSnapshot() { JSON_S("shortcut") "," JSON_S("weak")) "," JSON_S("string_or_number") "," - JSON_S("node")))); + 
JSON_S("node")) "," + JSON_S("trace_function_info_fields") ":" JSON_A( + JSON_S("function_id") "," + JSON_S("name") "," + JSON_S("script_name") "," + JSON_S("script_id") "," + JSON_S("line") "," + JSON_S("column")) "," + JSON_S("trace_node_fields") ":" JSON_A( + JSON_S("id") "," + JSON_S("function_id") "," + JSON_S("count") "," + JSON_S("size") "," + JSON_S("children")))); #undef JSON_S #undef JSON_O #undef JSON_A @@ -2629,6 +2850,13 @@ void HeapSnapshotJSONSerializer::SerializeSnapshot() { writer_->AddNumber(snapshot_->entries().length()); writer_->AddString(",\"edge_count\":"); writer_->AddNumber(snapshot_->edges().length()); + writer_->AddString(",\"trace_function_count\":"); + uint32_t count = 0; + AllocationTracker* tracker = snapshot_->collection()->allocation_tracker(); + if (tracker) { + count = tracker->id_to_function_info()->occupancy(); + } + writer_->AddNumber(count); } @@ -2642,6 +2870,100 @@ static void WriteUChar(OutputStreamWriter* w, unibrow::uchar u) { } +void HeapSnapshotJSONSerializer::SerializeTraceTree() { + AllocationTracker* tracker = snapshot_->collection()->allocation_tracker(); + if (!tracker) return; + AllocationTraceTree* traces = tracker->trace_tree(); + SerializeTraceNode(traces->root()); +} + + +void HeapSnapshotJSONSerializer::SerializeTraceNode(AllocationTraceNode* node) { + // The buffer needs space for 4 unsigned ints, 4 commas, [ and \0 + const int kBufferSize = + 4 * MaxDecimalDigitsIn<sizeof(unsigned)>::kUnsigned // NOLINT + + 4 + 1 + 1; + EmbeddedVector<char, kBufferSize> buffer; + int buffer_pos = 0; + buffer_pos = utoa(node->id(), buffer, buffer_pos); + buffer[buffer_pos++] = ','; + buffer_pos = utoa(node->function_id(), buffer, buffer_pos); + buffer[buffer_pos++] = ','; + buffer_pos = utoa(node->allocation_count(), buffer, buffer_pos); + buffer[buffer_pos++] = ','; + buffer_pos = utoa(node->allocation_size(), buffer, buffer_pos); + buffer[buffer_pos++] = ','; + buffer[buffer_pos++] = '['; + buffer[buffer_pos++] = '\0'; + writer_->AddString(buffer.start()); + + Vector<AllocationTraceNode*> children = node->children(); + for (int i = 0; i < children.length(); i++) { + if (i > 0) { + writer_->AddCharacter(','); + } + SerializeTraceNode(children[i]); + } + writer_->AddCharacter(']'); +} + + +// 0-based position is converted to 1-based during the serialization. 
+static int SerializePosition(int position, const Vector<char>& buffer, + int buffer_pos) { + if (position == -1) { + buffer[buffer_pos++] = '0'; + } else { + ASSERT(position >= 0); + buffer_pos = utoa(static_cast<unsigned>(position + 1), buffer, buffer_pos); + } + return buffer_pos; +} + + +void HeapSnapshotJSONSerializer::SerializeTraceNodeInfos() { + AllocationTracker* tracker = snapshot_->collection()->allocation_tracker(); + if (!tracker) return; + // The buffer needs space for 6 unsigned ints, 6 commas, \n and \0 + const int kBufferSize = + 6 * MaxDecimalDigitsIn<sizeof(unsigned)>::kUnsigned // NOLINT + + 6 + 1 + 1; + EmbeddedVector<char, kBufferSize> buffer; + HashMap* id_to_function_info = tracker->id_to_function_info(); + bool first_entry = true; + for (HashMap::Entry* p = id_to_function_info->Start(); + p != NULL; + p = id_to_function_info->Next(p)) { + SnapshotObjectId id = + static_cast<SnapshotObjectId>(reinterpret_cast<intptr_t>(p->key)); + AllocationTracker::FunctionInfo* info = + reinterpret_cast<AllocationTracker::FunctionInfo* >(p->value); + int buffer_pos = 0; + if (first_entry) { + first_entry = false; + } else { + buffer[buffer_pos++] = ','; + } + buffer_pos = utoa(id, buffer, buffer_pos); + buffer[buffer_pos++] = ','; + buffer_pos = utoa(GetStringId(info->name), buffer, buffer_pos); + buffer[buffer_pos++] = ','; + buffer_pos = utoa(GetStringId(info->script_name), buffer, buffer_pos); + buffer[buffer_pos++] = ','; + // The cast is safe because script id is a non-negative Smi. + buffer_pos = utoa(static_cast<unsigned>(info->script_id), buffer, + buffer_pos); + buffer[buffer_pos++] = ','; + buffer_pos = SerializePosition(info->line, buffer, buffer_pos); + buffer[buffer_pos++] = ','; + buffer_pos = SerializePosition(info->column, buffer, buffer_pos); + buffer[buffer_pos++] = '\n'; + buffer[buffer_pos++] = '\0'; + writer_->AddString(buffer.start()); + } +} + + void HeapSnapshotJSONSerializer::SerializeString(const unsigned char* s) { writer_->AddCharacter('\n'); writer_->AddCharacter('\"'); @@ -2693,37 +3015,21 @@ void HeapSnapshotJSONSerializer::SerializeString(const unsigned char* s) { void HeapSnapshotJSONSerializer::SerializeStrings() { - List<HashMap::Entry*> sorted_strings; - SortHashMap(&strings_, &sorted_strings); + ScopedVector<const unsigned char*> sorted_strings( + strings_.occupancy() + 1); + for (HashMap::Entry* entry = strings_.Start(); + entry != NULL; + entry = strings_.Next(entry)) { + int index = static_cast<int>(reinterpret_cast<uintptr_t>(entry->value)); + sorted_strings[index] = reinterpret_cast<const unsigned char*>(entry->key); + } writer_->AddString("\"<dummy>\""); - for (int i = 0; i < sorted_strings.length(); ++i) { + for (int i = 1; i < sorted_strings.length(); ++i) { writer_->AddCharacter(','); - SerializeString( - reinterpret_cast<const unsigned char*>(sorted_strings[i]->key)); + SerializeString(sorted_strings[i]); if (writer_->aborted()) return; } } -template<typename T> -inline static int SortUsingEntryValue(const T* x, const T* y) { - uintptr_t x_uint = reinterpret_cast<uintptr_t>((*x)->value); - uintptr_t y_uint = reinterpret_cast<uintptr_t>((*y)->value); - if (x_uint > y_uint) { - return 1; - } else if (x_uint == y_uint) { - return 0; - } else { - return -1; - } -} - - -void HeapSnapshotJSONSerializer::SortHashMap( - HashMap* map, List<HashMap::Entry*>* sorted_entries) { - for (HashMap::Entry* p = map->Start(); p != NULL; p = map->Next(p)) - sorted_entries->Add(p); - sorted_entries->Sort(SortUsingEntryValue); -} - } } // namespace 
v8::internal diff --git a/deps/v8/src/heap-snapshot-generator.h b/deps/v8/src/heap-snapshot-generator.h index 7b0cf8f021..e4038b10f4 100644 --- a/deps/v8/src/heap-snapshot-generator.h +++ b/deps/v8/src/heap-snapshot-generator.h @@ -33,6 +33,8 @@ namespace v8 { namespace internal { +class AllocationTracker; +class AllocationTraceNode; class HeapEntry; class HeapSnapshot; @@ -227,8 +229,12 @@ class HeapObjectsMap { void SnapshotGenerationFinished(); SnapshotObjectId FindEntry(Address addr); - SnapshotObjectId FindOrAddEntry(Address addr, unsigned int size); - void MoveObject(Address from, Address to); + SnapshotObjectId FindOrAddEntry(Address addr, + unsigned int size, + bool accessed = true); + void MoveObject(Address from, Address to, int size); + void NewObject(Address addr, int size); + void UpdateObjectSize(Address addr, int size); SnapshotObjectId last_assigned_id() const { return next_id_ - kObjectIdStep; } @@ -247,6 +253,10 @@ class HeapObjectsMap { static const SnapshotObjectId kGcRootsFirstSubrootId; static const SnapshotObjectId kFirstAvailableObjectId; + int FindUntrackedObjects(); + + void UpdateHeapObjectsMap(); + private: struct EntryInfo { EntryInfo(SnapshotObjectId id, Address addr, unsigned int size) @@ -265,7 +275,6 @@ class HeapObjectsMap { uint32_t count; }; - void UpdateHeapObjectsMap(); void RemoveDeadEntries(); SnapshotObjectId next_id_; @@ -289,8 +298,8 @@ class HeapSnapshotsCollection { SnapshotObjectId PushHeapObjectsStats(OutputStream* stream) { return ids_.PushHeapObjectsStats(stream); } - void StartHeapObjectsTracking() { is_tracking_objects_ = true; } - void StopHeapObjectsTracking() { ids_.StopHeapObjectsTracking(); } + void StartHeapObjectsTracking(); + void StopHeapObjectsTracking(); HeapSnapshot* NewSnapshot(const char* name, unsigned uid); void SnapshotGenerationFinished(HeapSnapshot* snapshot); @@ -298,6 +307,7 @@ class HeapSnapshotsCollection { void RemoveSnapshot(HeapSnapshot* snapshot); StringsStorage* names() { return &names_; } + AllocationTracker* allocation_tracker() { return allocation_tracker_; } SnapshotObjectId FindObjectId(Address object_addr) { return ids_.FindEntry(object_addr); @@ -306,18 +316,29 @@ class HeapSnapshotsCollection { return ids_.FindOrAddEntry(object_addr, object_size); } Handle<HeapObject> FindHeapObjectById(SnapshotObjectId id); - void ObjectMoveEvent(Address from, Address to) { ids_.MoveObject(from, to); } + void ObjectMoveEvent(Address from, Address to, int size) { + ids_.MoveObject(from, to, size); + } + void NewObjectEvent(Address addr, int size); + void UpdateObjectSizeEvent(Address addr, int size) { + ids_.UpdateObjectSize(addr, size); + } SnapshotObjectId last_assigned_id() const { return ids_.last_assigned_id(); } size_t GetUsedMemorySize() const; + int FindUntrackedObjects() { return ids_.FindUntrackedObjects(); } + + void UpdateHeapObjectsMap() { ids_.UpdateHeapObjectsMap(); } + private: bool is_tracking_objects_; // Whether tracking object moves is needed. List<HeapSnapshot*> snapshots_; StringsStorage names_; // Mapping from HeapObject addresses to objects' uids. 
HeapObjectsMap ids_; + AllocationTracker* allocation_tracker_; DISALLOW_COPY_AND_ASSIGN(HeapSnapshotsCollection); }; @@ -628,7 +649,7 @@ class HeapSnapshotJSONSerializer { public: explicit HeapSnapshotJSONSerializer(HeapSnapshot* snapshot) : snapshot_(snapshot), - strings_(ObjectsMatch), + strings_(StringsMatch), next_node_id_(1), next_string_id_(1), writer_(NULL) { @@ -636,14 +657,16 @@ class HeapSnapshotJSONSerializer { void Serialize(v8::OutputStream* stream); private: - INLINE(static bool ObjectsMatch(void* key1, void* key2)) { - return key1 == key2; + INLINE(static bool StringsMatch(void* key1, void* key2)) { + return strcmp(reinterpret_cast<char*>(key1), + reinterpret_cast<char*>(key2)) == 0; } - INLINE(static uint32_t ObjectHash(const void* key)) { - return ComputeIntegerHash( - static_cast<uint32_t>(reinterpret_cast<uintptr_t>(key)), - v8::internal::kZeroHashSeed); + INLINE(static uint32_t StringHash(const void* string)) { + const char* s = reinterpret_cast<const char*>(string); + int len = static_cast<int>(strlen(s)); + return StringHasher::HashSequentialString( + s, len, v8::internal::kZeroHashSeed); } int GetStringId(const char* s); @@ -654,9 +677,11 @@ class HeapSnapshotJSONSerializer { void SerializeNode(HeapEntry* entry); void SerializeNodes(); void SerializeSnapshot(); + void SerializeTraceTree(); + void SerializeTraceNode(AllocationTraceNode* node); + void SerializeTraceNodeInfos(); void SerializeString(const unsigned char* s); void SerializeStrings(); - void SortHashMap(HashMap* map, List<HashMap::Entry*>* sorted_entries); static const int kEdgeFieldsCount; static const int kNodeFieldsCount; diff --git a/deps/v8/src/heap.cc b/deps/v8/src/heap.cc index 24e4039422..fa358c5392 100644 --- a/deps/v8/src/heap.cc +++ b/deps/v8/src/heap.cc @@ -67,29 +67,14 @@ namespace internal { Heap::Heap() : isolate_(NULL), + code_range_size_(kIs64BitArch ? 512 * MB : 0), // semispace_size_ should be a power of 2 and old_generation_size_ should be // a multiple of Page::kPageSize. 
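Switching the serializer's strings_ map from pointer identity (the old ObjectsMatch/ObjectHash) to content keys (StringsMatch/StringHash above) means equal C strings share one id, and because the stored values are consecutive 1-based ids, SerializeStrings can now place each entry directly by index instead of sorting the whole hash map. The same idea with standard containers, as a sketch:

#include <string>
#include <unordered_map>
#include <vector>

// Sketch of content-keyed string ids, mirroring GetStringId above.
class StringIdTableSketch {
 public:
  StringIdTableSketch() : next_id_(1) {}  // id 0 is the "<dummy>" slot

  int GetId(const std::string& s) {
    auto it = ids_.find(s);
    if (it != ids_.end()) return it->second;  // equal content, same id
    int id = next_id_++;
    ids_.emplace(s, id);
    return id;
  }

  // Emission order: slot i holds the string with id i, so no sort is needed.
  std::vector<std::string> InIdOrder() const {
    std::vector<std::string> out(next_id_);
    out[0] = "<dummy>";
    for (const auto& kv : ids_) out[kv.second] = kv.first;
    return out;
  }

 private:
  std::unordered_map<std::string, int> ids_;
  int next_id_;
};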
-#if V8_TARGET_ARCH_X64 -#define LUMP_OF_MEMORY (2 * MB) - code_range_size_(512*MB), -#else -#define LUMP_OF_MEMORY MB - code_range_size_(0), -#endif -#if defined(ANDROID) || V8_TARGET_ARCH_MIPS - reserved_semispace_size_(4 * Max(LUMP_OF_MEMORY, Page::kPageSize)), - max_semispace_size_(4 * Max(LUMP_OF_MEMORY, Page::kPageSize)), - initial_semispace_size_(Page::kPageSize), - max_old_generation_size_(192*MB), - max_executable_size_(max_old_generation_size_), -#else - reserved_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)), - max_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)), + reserved_semispace_size_(8 * (kPointerSize / 4) * MB), + max_semispace_size_(8 * (kPointerSize / 4) * MB), initial_semispace_size_(Page::kPageSize), - max_old_generation_size_(700ul * LUMP_OF_MEMORY), - max_executable_size_(256l * LUMP_OF_MEMORY), -#endif - + max_old_generation_size_(700ul * (kPointerSize / 4) * MB), + max_executable_size_(256ul * (kPointerSize / 4) * MB), // Variables set based on semispace_size_ and old_generation_size_ in // ConfigureHeap (survived_since_last_expansion_, external_allocation_limit_) // Will be 4 * reserved_semispace_size_ to ensure that young @@ -101,6 +86,7 @@ Heap::Heap() contexts_disposed_(0), global_ic_age_(0), flush_monomorphic_ics_(false), + allocation_mementos_found_(0), scan_on_scavenge_pages_(0), new_space_(this), old_pointer_space_(NULL), @@ -129,8 +115,6 @@ Heap::Heap() old_gen_exhausted_(false), store_buffer_rebuilder_(store_buffer()), hidden_string_(NULL), - global_gc_prologue_callback_(NULL), - global_gc_epilogue_callback_(NULL), gc_safe_size_of_old_object_(NULL), total_regexp_code_generated_(0), tracer_(NULL), @@ -157,9 +141,11 @@ Heap::Heap() mark_sweeps_since_idle_round_started_(0), gc_count_at_last_idle_gc_(0), scavenges_since_last_idle_round_(kIdleScavengeThreshold), + full_codegen_bytes_generated_(0), + crankshaft_codegen_bytes_generated_(0), gcs_since_last_deopt_(0), #ifdef VERIFY_HEAP - no_weak_embedded_maps_verification_scope_depth_(0), + no_weak_object_verification_scope_depth_(0), #endif promotion_queue_(this), configured_(false), @@ -172,6 +158,9 @@ Heap::Heap() max_semispace_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE; #endif + // Ensure old_generation_size_ is a multiple of kPageSize. 
+ ASSERT(MB >= Page::kPageSize); + intptr_t max_virtual = OS::MaxVirtualMemory(); if (max_virtual > 0) { @@ -461,6 +450,10 @@ void Heap::GarbageCollectionPrologue() { #endif // DEBUG store_buffer()->GCPrologue(); + + if (FLAG_concurrent_osr) { + isolate()->optimizing_compiler_thread()->AgeBufferedOsrJobs(); + } } @@ -521,10 +514,31 @@ void Heap::GarbageCollectionEpilogue() { isolate_->counters()->number_of_symbols()->Set( string_table()->NumberOfElements()); + if (full_codegen_bytes_generated_ + crankshaft_codegen_bytes_generated_ > 0) { + isolate_->counters()->codegen_fraction_crankshaft()->AddSample( + static_cast<int>((crankshaft_codegen_bytes_generated_ * 100.0) / + (crankshaft_codegen_bytes_generated_ + + full_codegen_bytes_generated_))); + } + if (CommittedMemory() > 0) { isolate_->counters()->external_fragmentation_total()->AddSample( static_cast<int>(100 - (SizeOfObjects() * 100.0) / CommittedMemory())); + isolate_->counters()->heap_fraction_new_space()-> + AddSample(static_cast<int>( + (new_space()->CommittedMemory() * 100.0) / CommittedMemory())); + isolate_->counters()->heap_fraction_old_pointer_space()->AddSample( + static_cast<int>( + (old_pointer_space()->CommittedMemory() * 100.0) / + CommittedMemory())); + isolate_->counters()->heap_fraction_old_data_space()->AddSample( + static_cast<int>( + (old_data_space()->CommittedMemory() * 100.0) / + CommittedMemory())); + isolate_->counters()->heap_fraction_code_space()-> + AddSample(static_cast<int>( + (code_space()->CommittedMemory() * 100.0) / CommittedMemory())); isolate_->counters()->heap_fraction_map_space()->AddSample( static_cast<int>( (map_space()->CommittedMemory() * 100.0) / CommittedMemory())); @@ -535,6 +549,9 @@ void Heap::GarbageCollectionEpilogue() { AddSample(static_cast<int>( (property_cell_space()->CommittedMemory() * 100.0) / CommittedMemory())); + isolate_->counters()->heap_fraction_lo_space()-> + AddSample(static_cast<int>( + (lo_space()->CommittedMemory() * 100.0) / CommittedMemory())); isolate_->counters()->heap_sample_total_committed()->AddSample( static_cast<int>(CommittedMemory() / KB)); @@ -548,6 +565,8 @@ void Heap::GarbageCollectionEpilogue() { heap_sample_property_cell_space_committed()-> AddSample(static_cast<int>( property_cell_space()->CommittedMemory() / KB)); + isolate_->counters()->heap_sample_code_space_committed()->AddSample( + static_cast<int>(code_space()->CommittedMemory() / KB)); } #define UPDATE_COUNTERS_FOR_SPACE(space) \ @@ -610,6 +629,11 @@ void Heap::CollectAllAvailableGarbage(const char* gc_reason) { // Note: as weak callbacks can execute arbitrary code, we cannot // hope that eventually there will be no weak callbacks invocations. // Therefore stop recollecting after several attempts. + if (FLAG_concurrent_recompilation) { + // The optimizing compiler may be unnecessarily holding on to memory. 
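// Flushing the optimizing compiler thread discards its queued
// recompilation jobs, and with them the memory those jobs hold, before
// this last-resort collection runs; the DisallowHeapAllocation scope that
// follows (aptly named no_recursive_gc) ensures the flush itself cannot
// allocate and re-enter the collector.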
+ DisallowHeapAllocation no_recursive_gc; + isolate()->optimizing_compiler_thread()->Flush(); + } mark_compact_collector()->SetFlags(kMakeHeapIterableMask | kReduceMemoryFootprintMask); isolate_->compilation_cache()->Clear(); @@ -1055,12 +1079,17 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector, void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags) { - if (gc_type == kGCTypeMarkSweepCompact && global_gc_prologue_callback_) { - global_gc_prologue_callback_(); - } for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) { if (gc_type & gc_prologue_callbacks_[i].gc_type) { - gc_prologue_callbacks_[i].callback(gc_type, flags); + if (!gc_prologue_callbacks_[i].pass_isolate_) { + v8::GCPrologueCallback callback = + reinterpret_cast<v8::GCPrologueCallback>( + gc_prologue_callbacks_[i].callback); + callback(gc_type, flags); + } else { + v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate()); + gc_prologue_callbacks_[i].callback(isolate, gc_type, flags); + } } } } @@ -1069,12 +1098,18 @@ void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags) { void Heap::CallGCEpilogueCallbacks(GCType gc_type) { for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) { if (gc_type & gc_epilogue_callbacks_[i].gc_type) { - gc_epilogue_callbacks_[i].callback(gc_type, kNoGCCallbackFlags); + if (!gc_epilogue_callbacks_[i].pass_isolate_) { + v8::GCPrologueCallback callback = + reinterpret_cast<v8::GCPrologueCallback>( + gc_epilogue_callbacks_[i].callback); + callback(gc_type, kNoGCCallbackFlags); + } else { + v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate()); + gc_epilogue_callbacks_[i].callback( + isolate, gc_type, kNoGCCallbackFlags); + } } } - if (gc_type == kGCTypeMarkSweepCompact && global_gc_epilogue_callback_) { - global_gc_epilogue_callback_(); - } } @@ -1326,6 +1361,8 @@ class ScavengeWeakObjectRetainer : public WeakObjectRetainer { void Heap::Scavenge() { RelocationLock relocation_lock(this); + allocation_mementos_found_ = 0; + #ifdef VERIFY_HEAP if (FLAG_verify_heap) VerifyNonPointerSpacePointers(this); #endif @@ -1473,6 +1510,11 @@ void Heap::Scavenge() { gc_state_ = NOT_IN_GC; scavenges_since_last_idle_round_++; + + if (FLAG_trace_track_allocation_sites && allocation_mementos_found_ > 0) { + PrintF("AllocationMementos found during scavenge = %d\n", + allocation_mementos_found_); + } } @@ -1947,6 +1989,7 @@ Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor, STATIC_ASSERT((FixedDoubleArray::kHeaderSize & kDoubleAlignmentMask) == 0); +STATIC_ASSERT((ConstantPoolArray::kHeaderSize & kDoubleAlignmentMask) == 0); INLINE(static HeapObject* EnsureDoubleAligned(Heap* heap, @@ -2091,8 +2134,12 @@ class ScavengingVisitor : public StaticVisitorBase { if (logging_and_profiling_mode == LOGGING_AND_PROFILING_ENABLED) { // Update NewSpace stats if necessary. RecordCopiedObject(heap, target); - HEAP_PROFILE(heap, ObjectMoveEvent(source->address(), target->address())); Isolate* isolate = heap->isolate(); + HeapProfiler* heap_profiler = isolate->heap_profiler(); + if (heap_profiler->is_profiling()) { + heap_profiler->ObjectMoveEvent(source->address(), target->address(), + size); + } if (isolate->logger()->is_logging_code_events() || isolate->cpu_profiler()->is_profiling()) { if (target->IsSharedFunctionInfo()) { @@ -2129,12 +2176,10 @@ class ScavengingVisitor : public StaticVisitorBase { MaybeObject* maybe_result; if (object_contents == DATA_OBJECT) { - // TODO(mstarzinger): Turn this check into a regular assert soon! 
- CHECK(heap->AllowedToBeMigrated(object, OLD_DATA_SPACE)); + ASSERT(heap->AllowedToBeMigrated(object, OLD_DATA_SPACE)); maybe_result = heap->old_data_space()->AllocateRaw(allocation_size); } else { - // TODO(mstarzinger): Turn this check into a regular assert soon! - CHECK(heap->AllowedToBeMigrated(object, OLD_POINTER_SPACE)); + ASSERT(heap->AllowedToBeMigrated(object, OLD_POINTER_SPACE)); maybe_result = heap->old_pointer_space()->AllocateRaw(allocation_size); } @@ -2165,8 +2210,7 @@ class ScavengingVisitor : public StaticVisitorBase { return; } } - // TODO(mstarzinger): Turn this check into a regular assert soon! - CHECK(heap->AllowedToBeMigrated(object, NEW_SPACE)); + ASSERT(heap->AllowedToBeMigrated(object, NEW_SPACE)); MaybeObject* allocation = heap->new_space()->AllocateRaw(allocation_size); heap->promotion_queue()->SetNewLimit(heap->new_space()->top()); Object* result = allocation->ToObjectUnchecked(); @@ -2392,7 +2436,7 @@ void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) { MaybeObject* Heap::AllocatePartialMap(InstanceType instance_type, int instance_size) { Object* result; - MaybeObject* maybe_result = AllocateRawMap(); + MaybeObject* maybe_result = AllocateRaw(Map::kSize, MAP_SPACE, MAP_SPACE); if (!maybe_result->ToObject(&result)) return maybe_result; // Map::cast cannot be used due to uninitialized map field. @@ -2417,7 +2461,7 @@ MaybeObject* Heap::AllocateMap(InstanceType instance_type, int instance_size, ElementsKind elements_kind) { Object* result; - MaybeObject* maybe_result = AllocateRawMap(); + MaybeObject* maybe_result = AllocateRaw(Map::kSize, MAP_SPACE, MAP_SPACE); if (!maybe_result->To(&result)) return maybe_result; Map* map = reinterpret_cast<Map*>(result); @@ -2650,6 +2694,12 @@ bool Heap::CreateInitialMaps() { set_fixed_double_array_map(Map::cast(obj)); { MaybeObject* maybe_obj = + AllocateMap(CONSTANT_POOL_ARRAY_TYPE, kVariableSizeSentinel); + if (!maybe_obj->ToObject(&obj)) return false; + } + set_constant_pool_array_map(Map::cast(obj)); + + { MaybeObject* maybe_obj = AllocateMap(BYTE_ARRAY_TYPE, kVariableSizeSentinel); if (!maybe_obj->ToObject(&obj)) return false; } @@ -2887,12 +2937,12 @@ bool Heap::CreateInitialMaps() { MaybeObject* Heap::AllocateHeapNumber(double value, PretenureFlag pretenure) { // Statically ensure that it is safe to allocate heap numbers in paged // spaces. + int size = HeapNumber::kSize; STATIC_ASSERT(HeapNumber::kSize <= Page::kNonCodeObjectAreaSize); - AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE; + AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure); Object* result; - { MaybeObject* maybe_result = - AllocateRaw(HeapNumber::kSize, space, OLD_DATA_SPACE); + { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE); if (!maybe_result->ToObject(&result)) return maybe_result; } @@ -2902,26 +2952,12 @@ MaybeObject* Heap::AllocateHeapNumber(double value, PretenureFlag pretenure) { } -MaybeObject* Heap::AllocateHeapNumber(double value) { - // Use general version, if we're forced to always allocate. - if (always_allocate()) return AllocateHeapNumber(value, TENURED); - - // This version of AllocateHeapNumber is optimized for - // allocation in new space. 
- STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxNonCodeHeapObjectSize); - Object* result; - { MaybeObject* maybe_result = new_space_.AllocateRaw(HeapNumber::kSize); - if (!maybe_result->ToObject(&result)) return maybe_result; - } - HeapObject::cast(result)->set_map_no_write_barrier(heap_number_map()); - HeapNumber::cast(result)->set_value(value); - return result; -} - - MaybeObject* Heap::AllocateCell(Object* value) { + int size = Cell::kSize; + STATIC_ASSERT(Cell::kSize <= Page::kNonCodeObjectAreaSize); + Object* result; - { MaybeObject* maybe_result = AllocateRawCell(); + { MaybeObject* maybe_result = AllocateRaw(size, CELL_SPACE, CELL_SPACE); if (!maybe_result->ToObject(&result)) return maybe_result; } HeapObject::cast(result)->set_map_no_write_barrier(cell_map()); @@ -2930,9 +2966,13 @@ MaybeObject* Heap::AllocateCell(Object* value) { } -MaybeObject* Heap::AllocatePropertyCell(Object* value) { +MaybeObject* Heap::AllocatePropertyCell() { + int size = PropertyCell::kSize; + STATIC_ASSERT(PropertyCell::kSize <= Page::kNonCodeObjectAreaSize); + Object* result; - MaybeObject* maybe_result = AllocateRawPropertyCell(); + MaybeObject* maybe_result = + AllocateRaw(size, PROPERTY_CELL_SPACE, PROPERTY_CELL_SPACE); if (!maybe_result->ToObject(&result)) return maybe_result; HeapObject::cast(result)->set_map_no_write_barrier( @@ -2940,10 +2980,8 @@ MaybeObject* Heap::AllocatePropertyCell(Object* value) { PropertyCell* cell = PropertyCell::cast(result); cell->set_dependent_code(DependentCode::cast(empty_fixed_array()), SKIP_WRITE_BARRIER); - cell->set_value(value); + cell->set_value(the_hole_value()); cell->set_type(Type::None()); - maybe_result = cell->SetValueInferType(value); - if (maybe_result->IsFailure()) return maybe_result; return result; } @@ -2958,17 +2996,16 @@ MaybeObject* Heap::AllocateBox(Object* value, PretenureFlag pretenure) { MaybeObject* Heap::AllocateAllocationSite() { - Object* result; + AllocationSite* site; MaybeObject* maybe_result = Allocate(allocation_site_map(), OLD_POINTER_SPACE); - if (!maybe_result->ToObject(&result)) return maybe_result; - AllocationSite* site = AllocationSite::cast(result); + if (!maybe_result->To(&site)) return maybe_result; site->Initialize(); // Link the site site->set_weak_next(allocation_sites_list()); set_allocation_sites_list(site); - return result; + return site; } @@ -4057,31 +4094,8 @@ MaybeObject* Heap::AllocateByteArray(int length, PretenureFlag pretenure) { if (length < 0 || length > ByteArray::kMaxLength) { return Failure::OutOfMemoryException(0x7); } - if (pretenure == NOT_TENURED) { - return AllocateByteArray(length); - } int size = ByteArray::SizeFor(length); - AllocationSpace space = - (size > Page::kMaxNonCodeHeapObjectSize) ? LO_SPACE : OLD_DATA_SPACE; - Object* result; - { MaybeObject* maybe_result = AllocateRaw(size, space, space); - if (!maybe_result->ToObject(&result)) return maybe_result; - } - - reinterpret_cast<ByteArray*>(result)->set_map_no_write_barrier( - byte_array_map()); - reinterpret_cast<ByteArray*>(result)->set_length(length); - return result; -} - - -MaybeObject* Heap::AllocateByteArray(int length) { - if (length < 0 || length > ByteArray::kMaxLength) { - return Failure::OutOfMemoryException(0x8); - } - int size = ByteArray::SizeFor(length); - AllocationSpace space = - (size > Page::kMaxNonCodeHeapObjectSize) ? 
LO_SPACE : NEW_SPACE; + AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure); Object* result; { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE); if (!maybe_result->ToObject(&result)) return maybe_result; @@ -4112,11 +4126,10 @@ MaybeObject* Heap::AllocateExternalArray(int length, ExternalArrayType array_type, void* external_pointer, PretenureFlag pretenure) { - AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE; + int size = ExternalArray::kAlignedSize; + AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure); Object* result; - { MaybeObject* maybe_result = AllocateRaw(ExternalArray::kAlignedSize, - space, - OLD_DATA_SPACE); + { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE); if (!maybe_result->ToObject(&result)) return maybe_result; } @@ -4134,7 +4147,8 @@ MaybeObject* Heap::CreateCode(const CodeDesc& desc, Code::Flags flags, Handle<Object> self_reference, bool immovable, - bool crankshafted) { + bool crankshafted, + int prologue_offset) { // Allocate ByteArray before the Code object, so that we do not risk // leaving uninitialized Code object (and breaking the heap). ByteArray* reloc_info; @@ -4184,10 +4198,18 @@ MaybeObject* Heap::CreateCode(const CodeDesc& desc, code->set_handler_table(empty_fixed_array(), SKIP_WRITE_BARRIER); code->set_gc_metadata(Smi::FromInt(0)); code->set_ic_age(global_ic_age_); - code->set_prologue_offset(kPrologueOffsetNotSet); + code->set_prologue_offset(prologue_offset); if (code->kind() == Code::OPTIMIZED_FUNCTION) { code->set_marked_for_deoptimization(false); } + +#ifdef ENABLE_DEBUGGER_SUPPORT + if (code->kind() == Code::FUNCTION) { + code->set_has_debug_break_slots( + isolate_->debugger()->IsDebuggerActive()); + } +#endif + // Allow self references to created code object by patching the handle to // point to the newly allocated Code object. if (!self_reference.is_null()) { @@ -4310,6 +4332,7 @@ MaybeObject* Heap::AllocateWithAllocationSite(Map* map, AllocationSpace space, AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>( reinterpret_cast<Address>(result) + map->instance_size()); alloc_memento->set_map_no_write_barrier(allocation_memento_map()); + ASSERT(allocation_site->map() == allocation_site_map()); alloc_memento->set_allocation_site(*allocation_site, SKIP_WRITE_BARRIER); return result; } @@ -4414,10 +4437,6 @@ MaybeObject* Heap::AllocateArgumentsObject(Object* callee, int length) { arguments_object_size = kArgumentsObjectSize; } - // This calls Copy directly rather than using Heap::AllocateRaw so we - // duplicate the check here. - ASSERT(AllowHeapAllocation::IsAllowed() && gc_state_ == NOT_IN_GC); - // Check that the size of the boilerplate matches our // expectations. The ArgumentsAccessStub::GenerateNewObject relies // on the size being a known constant. @@ -4553,9 +4572,8 @@ MaybeObject* Heap::AllocateJSObjectFromMap( } // Allocate the JSObject. - AllocationSpace space = - (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE; - if (map->instance_size() > Page::kMaxNonCodeHeapObjectSize) space = LO_SPACE; + int size = map->instance_size(); + AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, pretenure); Object* obj; MaybeObject* maybe_obj = Allocate(map, space); if (!maybe_obj->To(&obj)) return maybe_obj; @@ -4588,8 +4606,8 @@ MaybeObject* Heap::AllocateJSObjectFromMapWithAllocationSite( } // Allocate the JSObject. 
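// Both JSObject allocation paths now defer to the SelectSpace helper,
// whose body appears later in heap.h. Its decision, reproduced here for
// reference:
//
//   if (object_size > Page::kMaxNonCodeHeapObjectSize) return LO_SPACE;
//   return (pretenure == TENURED) ? preferred_old_space : NEW_SPACE;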
- AllocationSpace space = NEW_SPACE; - if (map->instance_size() > Page::kMaxNonCodeHeapObjectSize) space = LO_SPACE; + int size = map->instance_size(); + AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, NOT_TENURED); Object* obj; MaybeObject* maybe_obj = AllocateWithAllocationSite(map, space, allocation_site); @@ -4745,20 +4763,6 @@ } -MaybeObject* Heap::AllocateJSArrayAndStorageWithAllocationSite( - ElementsKind elements_kind, - int length, - int capacity, - Handle<AllocationSite> allocation_site, - ArrayStorageAllocationMode mode) { - MaybeObject* maybe_array = AllocateJSArrayWithAllocationSite(elements_kind, - allocation_site); - JSArray* array; - if (!maybe_array->To(&array)) return maybe_array; - return AllocateJSArrayStorage(array, length, capacity, mode); -} - - MaybeObject* Heap::AllocateJSArrayStorage( JSArray* array, int length, @@ -4861,74 +4865,7 @@ } -MaybeObject* Heap::AllocateGlobalObject(JSFunction* constructor) { - ASSERT(constructor->has_initial_map()); - Map* map = constructor->initial_map(); - ASSERT(map->is_dictionary_map()); - - // Make sure no field properties are described in the initial map. - // This guarantees us that normalizing the properties does not - // require us to change property values to PropertyCells. - ASSERT(map->NextFreePropertyIndex() == 0); - - // Make sure we don't have a ton of pre-allocated slots in the - // global objects. They will be unused once we normalize the object. - ASSERT(map->unused_property_fields() == 0); - ASSERT(map->inobject_properties() == 0); - - // Initial size of the backing store to avoid resize of the storage during - // bootstrapping. The size differs between the JS global object and the - // builtins object. - int initial_size = map->instance_type() == JS_GLOBAL_OBJECT_TYPE ? 64 : 512; - - // Allocate a dictionary object for backing storage. - NameDictionary* dictionary; - MaybeObject* maybe_dictionary = - NameDictionary::Allocate( - this, - map->NumberOfOwnDescriptors() * 2 + initial_size); - if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary; - - // The global object might be created from an object template with accessors. - // Fill these accessors into the dictionary. - DescriptorArray* descs = map->instance_descriptors(); - for (int i = 0; i < map->NumberOfOwnDescriptors(); i++) { - PropertyDetails details = descs->GetDetails(i); - ASSERT(details.type() == CALLBACKS); // Only accessors are expected. - PropertyDetails d = PropertyDetails(details.attributes(), CALLBACKS, i + 1); - Object* value = descs->GetCallbacksObject(i); - MaybeObject* maybe_value = AllocatePropertyCell(value); - if (!maybe_value->ToObject(&value)) return maybe_value; - - MaybeObject* maybe_added = dictionary->Add(descs->GetKey(i), value, d); - if (!maybe_added->To(&dictionary)) return maybe_added; - } - - // Allocate the global object and initialize it with the backing store. - JSObject* global; - MaybeObject* maybe_global = Allocate(map, OLD_POINTER_SPACE); - if (!maybe_global->To(&global)) return maybe_global; - - InitializeJSObjectFromMap(global, dictionary, map); - - // Create a new map for the global object. - Map* new_map; - MaybeObject* maybe_map = map->CopyDropDescriptors(); - if (!maybe_map->To(&new_map)) return maybe_map; - new_map->set_dictionary_map(true); - - // Set up the global object as a normalized object.
- global->set_map(new_map); - global->set_properties(dictionary); - - // Make sure result is a global object with properties in dictionary. - ASSERT(global->IsGlobalObject()); - ASSERT(!global->HasFastProperties()); - return global; -} - - -MaybeObject* Heap::CopyJSObject(JSObject* source) { +MaybeObject* Heap::CopyJSObject(JSObject* source, AllocationSite* site) { // Never used to copy functions. If functions need to be copied we // have to be careful to clear the literals array. SLOW_ASSERT(!source->IsJSFunction()); @@ -4938,6 +4875,9 @@ MaybeObject* Heap::CopyJSObject(JSObject* source) { int object_size = map->instance_size(); Object* clone; + ASSERT(site == NULL || (AllocationSite::CanTrack(map->instance_type()) && + map->instance_type() == JS_ARRAY_TYPE)); + WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER; // If we're forced to always allocate, we use the general allocation @@ -4958,7 +4898,10 @@ MaybeObject* Heap::CopyJSObject(JSObject* source) { } else { wb_mode = SKIP_WRITE_BARRIER; - { MaybeObject* maybe_clone = new_space_.AllocateRaw(object_size); + { int adjusted_object_size = site != NULL + ? object_size + AllocationMemento::kSize + : object_size; + MaybeObject* maybe_clone = new_space_.AllocateRaw(adjusted_object_size); if (!maybe_clone->ToObject(&clone)) return maybe_clone; } SLOW_ASSERT(InNewSpace(clone)); @@ -4967,115 +4910,21 @@ MaybeObject* Heap::CopyJSObject(JSObject* source) { CopyBlock(HeapObject::cast(clone)->address(), source->address(), object_size); - } - - SLOW_ASSERT( - JSObject::cast(clone)->GetElementsKind() == source->GetElementsKind()); - FixedArrayBase* elements = FixedArrayBase::cast(source->elements()); - FixedArray* properties = FixedArray::cast(source->properties()); - // Update elements if necessary. - if (elements->length() > 0) { - Object* elem; - { MaybeObject* maybe_elem; - if (elements->map() == fixed_cow_array_map()) { - maybe_elem = FixedArray::cast(elements); - } else if (source->HasFastDoubleElements()) { - maybe_elem = CopyFixedDoubleArray(FixedDoubleArray::cast(elements)); - } else { - maybe_elem = CopyFixedArray(FixedArray::cast(elements)); - } - if (!maybe_elem->ToObject(&elem)) return maybe_elem; - } - JSObject::cast(clone)->set_elements(FixedArrayBase::cast(elem), wb_mode); - } - // Update properties if necessary. - if (properties->length() > 0) { - Object* prop; - { MaybeObject* maybe_prop = CopyFixedArray(properties); - if (!maybe_prop->ToObject(&prop)) return maybe_prop; - } - JSObject::cast(clone)->set_properties(FixedArray::cast(prop), wb_mode); - } - // Return the new clone. - return clone; -} - - -MaybeObject* Heap::CopyJSObjectWithAllocationSite( - JSObject* source, - AllocationSite* site) { - // Never used to copy functions. If functions need to be copied we - // have to be careful to clear the literals array. - SLOW_ASSERT(!source->IsJSFunction()); - - // Make the clone. - Map* map = source->map(); - int object_size = map->instance_size(); - Object* clone; - - ASSERT(AllocationSite::CanTrack(map->instance_type())); - ASSERT(map->instance_type() == JS_ARRAY_TYPE); - WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER; - - // If we're forced to always allocate, we use the general allocation - // functions which may leave us with an object in old space. 
- int adjusted_object_size = object_size; - if (always_allocate()) { - // We'll only track origin if we are certain to allocate in new space - const int kMinFreeNewSpaceAfterGC = InitialSemiSpaceSize() * 3/4; - if ((object_size + AllocationMemento::kSize) < kMinFreeNewSpaceAfterGC) { - adjusted_object_size += AllocationMemento::kSize; - } - - { MaybeObject* maybe_clone = - AllocateRaw(adjusted_object_size, NEW_SPACE, OLD_POINTER_SPACE); - if (!maybe_clone->ToObject(&clone)) return maybe_clone; - } - Address clone_address = HeapObject::cast(clone)->address(); - CopyBlock(clone_address, - source->address(), - object_size); - // Update write barrier for all fields that lie beyond the header. - int write_barrier_offset = adjusted_object_size > object_size - ? JSArray::kSize + AllocationMemento::kSize - : JSObject::kHeaderSize; - if (((object_size - write_barrier_offset) / kPointerSize) > 0) { - RecordWrites(clone_address, - write_barrier_offset, - (object_size - write_barrier_offset) / kPointerSize); - } - // Track allocation site information, if we failed to allocate it inline. - if (InNewSpace(clone) && - adjusted_object_size == object_size) { - MaybeObject* maybe_alloc_memento = - AllocateStruct(ALLOCATION_MEMENTO_TYPE); - AllocationMemento* alloc_memento; - if (maybe_alloc_memento->To(&alloc_memento)) { - alloc_memento->set_map_no_write_barrier(allocation_memento_map()); - alloc_memento->set_allocation_site(site, SKIP_WRITE_BARRIER); + if (site != NULL) { + AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>( + reinterpret_cast<Address>(clone) + object_size); + alloc_memento->set_map_no_write_barrier(allocation_memento_map()); + ASSERT(site->map() == allocation_site_map()); + alloc_memento->set_allocation_site(site, SKIP_WRITE_BARRIER); + HeapProfiler* profiler = isolate()->heap_profiler(); + if (profiler->is_tracking_allocations()) { + profiler->UpdateObjectSizeEvent(HeapObject::cast(clone)->address(), + object_size); + profiler->NewObjectEvent(alloc_memento->address(), + AllocationMemento::kSize); } } - } else { - wb_mode = SKIP_WRITE_BARRIER; - adjusted_object_size += AllocationMemento::kSize; - - { MaybeObject* maybe_clone = new_space_.AllocateRaw(adjusted_object_size); - if (!maybe_clone->ToObject(&clone)) return maybe_clone; - } - SLOW_ASSERT(InNewSpace(clone)); - // Since we know the clone is allocated in new space, we can copy - // the contents without worrying about updating the write barrier. - CopyBlock(HeapObject::cast(clone)->address(), - source->address(), - object_size); - } - - if (adjusted_object_size > object_size) { - AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>( - reinterpret_cast<Address>(clone) + object_size); - alloc_memento->set_map_no_write_barrier(allocation_memento_map()); - alloc_memento->set_allocation_site(site, SKIP_WRITE_BARRIER); } SLOW_ASSERT( @@ -5366,12 +5215,11 @@ MaybeObject* Heap::AllocateInternalizedStringImpl( map = internalized_string_map(); size = SeqTwoByteString::SizeFor(chars); } + AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, TENURED); // Allocate string. Object* result; - { MaybeObject* maybe_result = (size > Page::kMaxNonCodeHeapObjectSize) - ? 
lo_space_->AllocateRaw(size, NOT_EXECUTABLE) - : old_data_space_->AllocateRaw(size); + { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE); if (!maybe_result->ToObject(&result)) return maybe_result; } @@ -5410,16 +5258,10 @@ MaybeObject* Heap::AllocateRawOneByteString(int length, } int size = SeqOneByteString::SizeFor(length); ASSERT(size <= SeqOneByteString::kMaxSize); - AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE; - AllocationSpace retry_space = OLD_DATA_SPACE; - - if (size > Page::kMaxNonCodeHeapObjectSize) { - // Allocate in large object space, retry space will be ignored. - space = LO_SPACE; - } + AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure); Object* result; - { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space); + { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE); if (!maybe_result->ToObject(&result)) return maybe_result; } @@ -5440,16 +5282,10 @@ MaybeObject* Heap::AllocateRawTwoByteString(int length, } int size = SeqTwoByteString::SizeFor(length); ASSERT(size <= SeqTwoByteString::kMaxSize); - AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE; - AllocationSpace retry_space = OLD_DATA_SPACE; - - if (size > Page::kMaxNonCodeHeapObjectSize) { - // Allocate in large object space, retry space will be ignored. - space = LO_SPACE; - } + AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure); Object* result; - { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space); + { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE); if (!maybe_result->ToObject(&result)) return maybe_result; } @@ -5474,24 +5310,6 @@ MaybeObject* Heap::AllocateJSArray( } -MaybeObject* Heap::AllocateJSArrayWithAllocationSite( - ElementsKind elements_kind, - Handle<AllocationSite> allocation_site) { - Context* native_context = isolate()->context()->native_context(); - JSFunction* array_function = native_context->array_function(); - Map* map = array_function->initial_map(); - Object* maybe_map_array = native_context->js_array_maps(); - if (!maybe_map_array->IsUndefined()) { - Object* maybe_transitioned_map = - FixedArray::cast(maybe_map_array)->get(elements_kind); - if (!maybe_transitioned_map->IsUndefined()) { - map = Map::cast(maybe_transitioned_map); - } - } - return AllocateJSObjectFromMapWithAllocationSite(map, allocation_site); -} - - MaybeObject* Heap::AllocateEmptyFixedArray() { int size = FixedArray::SizeFor(0); Object* result; @@ -5512,25 +5330,10 @@ MaybeObject* Heap::AllocateEmptyExternalArray(ExternalArrayType array_type) { } -MaybeObject* Heap::AllocateRawFixedArray(int length) { - if (length < 0 || length > FixedArray::kMaxLength) { - return Failure::OutOfMemoryException(0xd); - } - ASSERT(length > 0); - // Use the general function if we're forced to always allocate. - if (always_allocate()) return AllocateFixedArray(length, TENURED); - // Allocate the raw data for a fixed array. - int size = FixedArray::SizeFor(length); - return size <= Page::kMaxNonCodeHeapObjectSize - ? 
new_space_.AllocateRaw(size) - : lo_space_->AllocateRaw(size, NOT_EXECUTABLE); -} - - MaybeObject* Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) { int len = src->length(); Object* obj; - { MaybeObject* maybe_obj = AllocateRawFixedArray(len); + { MaybeObject* maybe_obj = AllocateRawFixedArray(len, NOT_TENURED); if (!maybe_obj->ToObject(&obj)) return maybe_obj; } if (InNewSpace(obj)) { @@ -5570,21 +5373,24 @@ MaybeObject* Heap::CopyFixedDoubleArrayWithMap(FixedDoubleArray* src, } -MaybeObject* Heap::AllocateFixedArray(int length) { - ASSERT(length >= 0); - if (length == 0) return empty_fixed_array(); - Object* result; - { MaybeObject* maybe_result = AllocateRawFixedArray(length); - if (!maybe_result->ToObject(&result)) return maybe_result; +MaybeObject* Heap::CopyConstantPoolArrayWithMap(ConstantPoolArray* src, + Map* map) { + int int64_entries = src->count_of_int64_entries(); + int ptr_entries = src->count_of_ptr_entries(); + int int32_entries = src->count_of_int32_entries(); + Object* obj; + { MaybeObject* maybe_obj = + AllocateConstantPoolArray(int64_entries, ptr_entries, int32_entries); + if (!maybe_obj->ToObject(&obj)) return maybe_obj; } - // Initialize header. - FixedArray* array = reinterpret_cast<FixedArray*>(result); - array->set_map_no_write_barrier(fixed_array_map()); - array->set_length(length); - // Initialize body. - ASSERT(!InNewSpace(undefined_value())); - MemsetPointer(array->data_start(), undefined_value(), length); - return result; + HeapObject* dst = HeapObject::cast(obj); + dst->set_map_no_write_barrier(map); + CopyBlock( + dst->address() + ConstantPoolArray::kLengthOffset, + src->address() + ConstantPoolArray::kLengthOffset, + ConstantPoolArray::SizeFor(int64_entries, ptr_entries, int32_entries) + - ConstantPoolArray::kLengthOffset); + return obj; } @@ -5593,35 +5399,26 @@ MaybeObject* Heap::AllocateRawFixedArray(int length, PretenureFlag pretenure) { return Failure::OutOfMemoryException(0xe); } int size = FixedArray::SizeFor(length); - AllocationSpace space = - (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE; - AllocationSpace retry_space = OLD_POINTER_SPACE; + AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, pretenure); - if (size > Page::kMaxNonCodeHeapObjectSize) { - // Allocate in large object space, retry space will be ignored. 
- space = LO_SPACE; - } - - return AllocateRaw(size, space, retry_space); + return AllocateRaw(size, space, OLD_POINTER_SPACE); } -MUST_USE_RESULT static MaybeObject* AllocateFixedArrayWithFiller( - Heap* heap, - int length, - PretenureFlag pretenure, - Object* filler) { +MaybeObject* Heap::AllocateFixedArrayWithFiller(int length, + PretenureFlag pretenure, + Object* filler) { ASSERT(length >= 0); - ASSERT(heap->empty_fixed_array()->IsFixedArray()); - if (length == 0) return heap->empty_fixed_array(); + ASSERT(empty_fixed_array()->IsFixedArray()); + if (length == 0) return empty_fixed_array(); - ASSERT(!heap->InNewSpace(filler)); + ASSERT(!InNewSpace(filler)); Object* result; - { MaybeObject* maybe_result = heap->AllocateRawFixedArray(length, pretenure); + { MaybeObject* maybe_result = AllocateRawFixedArray(length, pretenure); if (!maybe_result->ToObject(&result)) return maybe_result; } - HeapObject::cast(result)->set_map_no_write_barrier(heap->fixed_array_map()); + HeapObject::cast(result)->set_map_no_write_barrier(fixed_array_map()); FixedArray* array = FixedArray::cast(result); array->set_length(length); MemsetPointer(array->data_start(), filler, length); @@ -5630,19 +5427,13 @@ MUST_USE_RESULT static MaybeObject* AllocateFixedArrayWithFiller( MaybeObject* Heap::AllocateFixedArray(int length, PretenureFlag pretenure) { - return AllocateFixedArrayWithFiller(this, - length, - pretenure, - undefined_value()); + return AllocateFixedArrayWithFiller(length, pretenure, undefined_value()); } MaybeObject* Heap::AllocateFixedArrayWithHoles(int length, PretenureFlag pretenure) { - return AllocateFixedArrayWithFiller(this, - length, - pretenure, - the_hole_value()); + return AllocateFixedArrayWithFiller(length, pretenure, the_hole_value()); } @@ -5650,7 +5441,7 @@ MaybeObject* Heap::AllocateUninitializedFixedArray(int length) { if (length == 0) return empty_fixed_array(); Object* obj; - { MaybeObject* maybe_obj = AllocateRawFixedArray(length); + { MaybeObject* maybe_obj = AllocateRawFixedArray(length, NOT_TENURED); if (!maybe_obj->ToObject(&obj)) return maybe_obj; } @@ -5720,24 +5511,52 @@ MaybeObject* Heap::AllocateRawFixedDoubleArray(int length, return Failure::OutOfMemoryException(0xf); } int size = FixedDoubleArray::SizeFor(length); - AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE; - AllocationSpace retry_space = OLD_DATA_SPACE; - #ifndef V8_HOST_ARCH_64_BIT size += kPointerSize; #endif + AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure); - if (size > Page::kMaxNonCodeHeapObjectSize) { - // Allocate in large object space, retry space will be ignored. 
- space = LO_SPACE; + HeapObject* object; + { MaybeObject* maybe_object = AllocateRaw(size, space, OLD_DATA_SPACE); + if (!maybe_object->To<HeapObject>(&object)) return maybe_object; } + return EnsureDoubleAligned(this, object, size); +} + + +MaybeObject* Heap::AllocateConstantPoolArray(int number_of_int64_entries, + int number_of_ptr_entries, + int number_of_int32_entries) { + ASSERT(number_of_int64_entries > 0 || number_of_ptr_entries > 0 || + number_of_int32_entries > 0); + int size = ConstantPoolArray::SizeFor(number_of_int64_entries, + number_of_ptr_entries, + number_of_int32_entries); +#ifndef V8_HOST_ARCH_64_BIT + size += kPointerSize; +#endif + AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, TENURED); + HeapObject* object; - { MaybeObject* maybe_object = AllocateRaw(size, space, retry_space); + { MaybeObject* maybe_object = AllocateRaw(size, space, OLD_POINTER_SPACE); if (!maybe_object->To<HeapObject>(&object)) return maybe_object; } + object = EnsureDoubleAligned(this, object, size); + HeapObject::cast(object)->set_map_no_write_barrier(constant_pool_array_map()); - return EnsureDoubleAligned(this, object, size); + ConstantPoolArray* constant_pool = + reinterpret_cast<ConstantPoolArray*>(object); + constant_pool->SetEntryCounts(number_of_int64_entries, + number_of_ptr_entries, + number_of_int32_entries); + MemsetPointer( + HeapObject::RawField( + constant_pool, + constant_pool->OffsetOfElementAt(constant_pool->first_ptr_index())), + undefined_value(), + number_of_ptr_entries); + return constant_pool; } @@ -5937,8 +5756,7 @@ STRUCT_LIST(MAKE_CASE) return Failure::InternalError(); } int size = map->instance_size(); - AllocationSpace space = - (size > Page::kMaxNonCodeHeapObjectSize) ? LO_SPACE : OLD_POINTER_SPACE; + AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, TENURED); Object* result; { MaybeObject* maybe_result = Allocate(map, space); if (!maybe_result->ToObject(&result)) return maybe_result; @@ -6965,6 +6783,7 @@ bool Heap::CreateHeapObjects() { native_contexts_list_ = undefined_value(); array_buffers_list_ = undefined_value(); allocation_sites_list_ = undefined_value(); + weak_object_to_code_table_ = undefined_value(); return true; } @@ -7068,15 +6887,17 @@ void Heap::TearDown() { } -void Heap::AddGCPrologueCallback(GCPrologueCallback callback, GCType gc_type) { +void Heap::AddGCPrologueCallback(v8::Isolate::GCPrologueCallback callback, + GCType gc_type, + bool pass_isolate) { ASSERT(callback != NULL); - GCPrologueCallbackPair pair(callback, gc_type); + GCPrologueCallbackPair pair(callback, gc_type, pass_isolate); ASSERT(!gc_prologue_callbacks_.Contains(pair)); return gc_prologue_callbacks_.Add(pair); } -void Heap::RemoveGCPrologueCallback(GCPrologueCallback callback) { +void Heap::RemoveGCPrologueCallback(v8::Isolate::GCPrologueCallback callback) { ASSERT(callback != NULL); for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) { if (gc_prologue_callbacks_[i].callback == callback) { @@ -7088,15 +6909,17 @@ void Heap::RemoveGCPrologueCallback(GCPrologueCallback callback) { } -void Heap::AddGCEpilogueCallback(GCEpilogueCallback callback, GCType gc_type) { +void Heap::AddGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback, + GCType gc_type, + bool pass_isolate) { ASSERT(callback != NULL); - GCEpilogueCallbackPair pair(callback, gc_type); + GCEpilogueCallbackPair pair(callback, gc_type, pass_isolate); ASSERT(!gc_epilogue_callbacks_.Contains(pair)); return gc_epilogue_callbacks_.Add(pair); } -void 
Heap::RemoveGCEpilogueCallback(GCEpilogueCallback callback) { +void Heap::RemoveGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback) { ASSERT(callback != NULL); for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) { if (gc_epilogue_callbacks_[i].callback == callback) { @@ -7108,6 +6931,37 @@ void Heap::RemoveGCEpilogueCallback(GCEpilogueCallback callback) { } +MaybeObject* Heap::AddWeakObjectToCodeDependency(Object* obj, + DependentCode* dep) { + ASSERT(!InNewSpace(obj)); + ASSERT(!InNewSpace(dep)); + MaybeObject* maybe_obj = + WeakHashTable::cast(weak_object_to_code_table_)->Put(obj, dep); + WeakHashTable* table; + if (!maybe_obj->To(&table)) return maybe_obj; + if (ShouldZapGarbage() && weak_object_to_code_table_ != table) { + WeakHashTable::cast(weak_object_to_code_table_)->Zap(the_hole_value()); + } + set_weak_object_to_code_table(table); + ASSERT_EQ(dep, WeakHashTable::cast(weak_object_to_code_table_)->Lookup(obj)); + return weak_object_to_code_table_; +} + + +DependentCode* Heap::LookupWeakObjectToCodeDependency(Object* obj) { + Object* dep = WeakHashTable::cast(weak_object_to_code_table_)->Lookup(obj); + if (dep->IsDependentCode()) return DependentCode::cast(dep); + return DependentCode::cast(empty_fixed_array()); +} + + +void Heap::EnsureWeakObjectToCodeTable() { + if (!weak_object_to_code_table()->IsHashTable()) { + set_weak_object_to_code_table(*isolate()->factory()->NewWeakHashTable(16)); + } +} + + #ifdef DEBUG class PrintHandleVisitor: public ObjectVisitor { @@ -8090,6 +7944,18 @@ void Heap::CheckpointObjectStats() { static_cast<int>(object_sizes_last_time_[index])); FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT) #undef ADJUST_LAST_TIME_OBJECT_COUNT +#define ADJUST_LAST_TIME_OBJECT_COUNT(name) \ + index = FIRST_CODE_AGE_SUB_TYPE + Code::k##name##CodeAge; \ + counters->count_of_CODE_AGE_##name()->Increment( \ + static_cast<int>(object_counts_[index])); \ + counters->count_of_CODE_AGE_##name()->Decrement( \ + static_cast<int>(object_counts_last_time_[index])); \ + counters->size_of_CODE_AGE_##name()->Increment( \ + static_cast<int>(object_sizes_[index])); \ + counters->size_of_CODE_AGE_##name()->Decrement( \ + static_cast<int>(object_sizes_last_time_[index])); + CODE_AGE_LIST_WITH_NO_AGE(ADJUST_LAST_TIME_OBJECT_COUNT) +#undef ADJUST_LAST_TIME_OBJECT_COUNT OS::MemCopy(object_counts_last_time_, object_counts_, sizeof(object_counts_)); OS::MemCopy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_)); diff --git a/deps/v8/src/heap.h b/deps/v8/src/heap.h index 4dfa076ebd..96cda586b7 100644 --- a/deps/v8/src/heap.h +++ b/deps/v8/src/heap.h @@ -71,6 +71,7 @@ namespace internal { V(Map, scope_info_map, ScopeInfoMap) \ V(Map, fixed_cow_array_map, FixedCOWArrayMap) \ V(Map, fixed_double_array_map, FixedDoubleArrayMap) \ + V(Map, constant_pool_array_map, ConstantPoolArrayMap) \ V(Object, no_interceptor_result_sentinel, NoInterceptorResultSentinel) \ V(Map, hash_table_map, HashTableMap) \ V(FixedArray, empty_fixed_array, EmptyFixedArray) \ @@ -292,7 +293,10 @@ namespace internal { V(throw_string, "throw") \ V(done_string, "done") \ V(value_string, "value") \ - V(next_string, "next") + V(next_string, "next") \ + V(byte_length_string, "byteLength") \ + V(byte_offset_string, "byteOffset") \ + V(buffer_string, "buffer") // Forward declarations. 
class GCTracer; @@ -635,10 +639,6 @@ class Heap { pretenure); } - inline MUST_USE_RESULT MaybeObject* AllocateEmptyJSArrayWithAllocationSite( - ElementsKind elements_kind, - Handle<AllocationSite> allocation_site); - // Allocate a JSArray with a specified length but elements that are left // uninitialized. MUST_USE_RESULT MaybeObject* AllocateJSArrayAndStorage( @@ -648,13 +648,6 @@ class Heap { ArrayStorageAllocationMode mode = DONT_INITIALIZE_ARRAY_ELEMENTS, PretenureFlag pretenure = NOT_TENURED); - MUST_USE_RESULT MaybeObject* AllocateJSArrayAndStorageWithAllocationSite( - ElementsKind elements_kind, - int length, - int capacity, - Handle<AllocationSite> allocation_site, - ArrayStorageAllocationMode mode = DONT_INITIALIZE_ARRAY_ELEMENTS); - MUST_USE_RESULT MaybeObject* AllocateJSArrayStorage( JSArray* array, int length, @@ -668,19 +661,12 @@ class Heap { int length, PretenureFlag pretenure = NOT_TENURED); - // Allocates and initializes a new global object based on a constructor. - // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation - // failed. - // Please note this does not perform a garbage collection. - MUST_USE_RESULT MaybeObject* AllocateGlobalObject(JSFunction* constructor); - // Returns a deep copy of the JavaScript object. // Properties and elements are copied too. // Returns failure if allocation failed. - MUST_USE_RESULT MaybeObject* CopyJSObject(JSObject* source); - - MUST_USE_RESULT MaybeObject* CopyJSObjectWithAllocationSite( - JSObject* source, AllocationSite* site); + // Optionally takes an AllocationSite to be appended in an AllocationMemento. + MUST_USE_RESULT MaybeObject* CopyJSObject(JSObject* source, + AllocationSite* site = NULL); // Allocates the function prototype. // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation @@ -876,14 +862,9 @@ class Heap { // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation // failed. // Please note this does not perform a garbage collection. - MUST_USE_RESULT MaybeObject* AllocateByteArray(int length, - PretenureFlag pretenure); - - // Allocate a non-tenured byte array of the specified length - // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation - // failed. - // Please note this does not perform a garbage collection. - MUST_USE_RESULT MaybeObject* AllocateByteArray(int length); + MUST_USE_RESULT MaybeObject* AllocateByteArray( + int length, + PretenureFlag pretenure = NOT_TENURED); // Allocates an external array of the specified length and type. // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation @@ -901,22 +882,6 @@ class Heap { // Please note this does not perform a garbage collection. MUST_USE_RESULT MaybeObject* AllocateSymbol(); - // Allocate a tenured simple cell. - // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation - // failed. - // Please note this does not perform a garbage collection. - MUST_USE_RESULT MaybeObject* AllocateCell(Object* value); - - // Allocate a tenured JS global property cell. - // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation - // failed. - // Please note this does not perform a garbage collection. - MUST_USE_RESULT MaybeObject* AllocatePropertyCell(Object* value); - - // Allocate Box. - MUST_USE_RESULT MaybeObject* AllocateBox(Object* value, - PretenureFlag pretenure); - // Allocate a tenured AllocationSite. 
Its payload is null. MUST_USE_RESULT MaybeObject* AllocateAllocationSite(); @@ -924,10 +889,9 @@ // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation // failed. // Please note this does not perform a garbage collection. - MUST_USE_RESULT MaybeObject* AllocateFixedArray(int length, - PretenureFlag pretenure); - // Allocates a fixed array initialized with undefined values - MUST_USE_RESULT MaybeObject* AllocateFixedArray(int length); + MUST_USE_RESULT MaybeObject* AllocateFixedArray( + int length, + PretenureFlag pretenure = NOT_TENURED); // Allocates an uninitialized fixed array. It must be filled by the caller. // @@ -958,6 +922,16 @@ MUST_USE_RESULT MaybeObject* CopyFixedDoubleArrayWithMap( FixedDoubleArray* src, Map* map); + // Make a copy of src and return it. Returns + // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed. + MUST_USE_RESULT inline MaybeObject* CopyConstantPoolArray( + ConstantPoolArray* src); + + // Make a copy of src, set the map, and return the copy. Returns + // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed. + MUST_USE_RESULT MaybeObject* CopyConstantPoolArrayWithMap( + ConstantPoolArray* src, Map* map); + // Allocates a fixed array initialized with the hole values. // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation // failed. @@ -966,9 +940,10 @@ int length, PretenureFlag pretenure = NOT_TENURED); - MUST_USE_RESULT MaybeObject* AllocateRawFixedDoubleArray( - int length, - PretenureFlag pretenure); + MUST_USE_RESULT MaybeObject* AllocateConstantPoolArray( + int first_int64_index, + int first_ptr_index, + int first_int32_index); // Allocates a fixed double array with uninitialized values. Returns // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed. @@ -1056,10 +1031,7 @@ // Allocates a HeapNumber from value. MUST_USE_RESULT MaybeObject* AllocateHeapNumber( - double value, - PretenureFlag pretenure); - // pretenure = NOT_TENURED - MUST_USE_RESULT MaybeObject* AllocateHeapNumber(double value); + double value, PretenureFlag pretenure = NOT_TENURED); // Converts an int into either a Smi or a HeapNumber object. // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation // failed. @@ -1153,11 +1125,13 @@ // self_reference. This allows generated code to reference its own Code // object by containing this pointer. // Please note this function does not perform a garbage collection.
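// The new prologue_offset parameter records where the function prologue
// starts at Code creation time; heap.cc above now stores it via
// code->set_prologue_offset(prologue_offset), and callers that do not know
// the offset rely on the Code::kPrologueOffsetNotSet default.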
- MUST_USE_RESULT MaybeObject* CreateCode(const CodeDesc& desc, - Code::Flags flags, - Handle<Object> self_reference, - bool immovable = false, - bool crankshafted = false); + MUST_USE_RESULT MaybeObject* CreateCode( + const CodeDesc& desc, + Code::Flags flags, + Handle<Object> self_reference, + bool immovable = false, + bool crankshafted = false, + int prologue_offset = Code::kPrologueOffsetNotSet); MUST_USE_RESULT MaybeObject* CopyCode(Code* code); @@ -1272,22 +1246,15 @@ class Heap { void GarbageCollectionGreedyCheck(); #endif - void AddGCPrologueCallback( - GCPrologueCallback callback, GCType gc_type_filter); - void RemoveGCPrologueCallback(GCPrologueCallback callback); - - void AddGCEpilogueCallback( - GCEpilogueCallback callback, GCType gc_type_filter); - void RemoveGCEpilogueCallback(GCEpilogueCallback callback); + void AddGCPrologueCallback(v8::Isolate::GCPrologueCallback callback, + GCType gc_type_filter, + bool pass_isolate = true); + void RemoveGCPrologueCallback(v8::Isolate::GCPrologueCallback callback); - void SetGlobalGCPrologueCallback(GCCallback callback) { - ASSERT((callback == NULL) ^ (global_gc_prologue_callback_ == NULL)); - global_gc_prologue_callback_ = callback; - } - void SetGlobalGCEpilogueCallback(GCCallback callback) { - ASSERT((callback == NULL) ^ (global_gc_epilogue_callback_ == NULL)); - global_gc_epilogue_callback_ = callback; - } + void AddGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback, + GCType gc_type_filter, + bool pass_isolate = true); + void RemoveGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback); // Heap root getters. We have versions with and without type::cast() here. // You can't use type::cast during GC because the assert fails. @@ -1337,6 +1304,8 @@ class Heap { Object* allocation_sites_list() { return allocation_sites_list_; } Object** allocation_sites_list_address() { return &allocation_sites_list_; } + Object* weak_object_to_code_table() { return weak_object_to_code_table_; } + // Number of mark-sweeps. unsigned int ms_count() { return ms_count_; } @@ -1428,8 +1397,8 @@ class Heap { void Verify(); - bool weak_embedded_maps_verification_enabled() { - return no_weak_embedded_maps_verification_scope_depth_ == 0; + bool weak_embedded_objects_verification_enabled() { + return no_weak_object_verification_scope_depth_ == 0; } #endif @@ -1530,11 +1499,6 @@ class Heap { inline intptr_t AdjustAmountOfExternalAllocatedMemory( intptr_t change_in_bytes); - // Allocate uninitialized fixed array. - MUST_USE_RESULT MaybeObject* AllocateRawFixedArray(int length); - MUST_USE_RESULT MaybeObject* AllocateRawFixedArray(int length, - PretenureFlag pretenure); - // This is only needed for testing high promotion mode. void SetNewSpaceHighPromotionModeActive(bool mode) { new_space_high_promotion_mode_active_ = mode; @@ -1692,6 +1656,14 @@ class Heap { total_regexp_code_generated_ += size; } + void IncrementCodeGeneratedBytes(bool is_crankshafted, int size) { + if (is_crankshafted) { + crankshaft_codegen_bytes_generated_ += size; + } else { + full_codegen_bytes_generated_ += size; + } + } + // Returns maximum GC pause. 
double get_max_gc_pause() { return max_gc_pause_; } @@ -1838,26 +1810,30 @@ FIRST_CODE_KIND_SUB_TYPE = LAST_TYPE + 1, FIRST_FIXED_ARRAY_SUB_TYPE = FIRST_CODE_KIND_SUB_TYPE + Code::NUMBER_OF_KINDS, - OBJECT_STATS_COUNT = - FIRST_FIXED_ARRAY_SUB_TYPE + LAST_FIXED_ARRAY_SUB_TYPE + 1 + FIRST_CODE_AGE_SUB_TYPE = + FIRST_FIXED_ARRAY_SUB_TYPE + LAST_FIXED_ARRAY_SUB_TYPE + 1, + OBJECT_STATS_COUNT = FIRST_CODE_AGE_SUB_TYPE + Code::kLastCodeAge + 1 }; - void RecordObjectStats(InstanceType type, int sub_type, size_t size) { + void RecordObjectStats(InstanceType type, size_t size) { ASSERT(type <= LAST_TYPE); - if (sub_type < 0) { - object_counts_[type]++; - object_sizes_[type] += size; - } else { - if (type == CODE_TYPE) { - ASSERT(sub_type < Code::NUMBER_OF_KINDS); - object_counts_[FIRST_CODE_KIND_SUB_TYPE + sub_type]++; - object_sizes_[FIRST_CODE_KIND_SUB_TYPE + sub_type] += size; - } else if (type == FIXED_ARRAY_TYPE) { - ASSERT(sub_type <= LAST_FIXED_ARRAY_SUB_TYPE); - object_counts_[FIRST_FIXED_ARRAY_SUB_TYPE + sub_type]++; - object_sizes_[FIRST_FIXED_ARRAY_SUB_TYPE + sub_type] += size; - } - } + object_counts_[type]++; + object_sizes_[type] += size; + } + + void RecordCodeSubTypeStats(int code_sub_type, int code_age, size_t size) { + ASSERT(code_sub_type < Code::NUMBER_OF_KINDS); + ASSERT(code_age < Code::kLastCodeAge); + object_counts_[FIRST_CODE_KIND_SUB_TYPE + code_sub_type]++; + object_sizes_[FIRST_CODE_KIND_SUB_TYPE + code_sub_type] += size; + object_counts_[FIRST_CODE_AGE_SUB_TYPE + code_age]++; + object_sizes_[FIRST_CODE_AGE_SUB_TYPE + code_age] += size; + } + + void RecordFixedArraySubTypeStats(int array_sub_type, size_t size) { + ASSERT(array_sub_type <= LAST_FIXED_ARRAY_SUB_TYPE); + object_counts_[FIRST_FIXED_ARRAY_SUB_TYPE + array_sub_type]++; + object_sizes_[FIRST_FIXED_ARRAY_SUB_TYPE + array_sub_type] += size; } void CheckpointObjectStats(); @@ -1887,6 +1863,16 @@ Heap* heap_; }; + MaybeObject* AddWeakObjectToCodeDependency(Object* obj, DependentCode* dep); + + DependentCode* LookupWeakObjectToCodeDependency(Object* obj); + + void InitializeWeakObjectToCodeTable() { + set_weak_object_to_code_table(undefined_value()); + } + + void EnsureWeakObjectToCodeTable(); + private: Heap(); @@ -1920,6 +1906,9 @@ bool flush_monomorphic_ics_; + // AllocationMementos found in new space. + int allocation_mementos_found_; + int scan_on_scavenge_pages_; NewSpace new_space_; @@ -1998,10 +1987,16 @@ bool old_gen_exhausted_; // Weak list heads, threaded through the objects. + // List heads are initialized lazily and contain the undefined_value at start. Object* native_contexts_list_; Object* array_buffers_list_; Object* allocation_sites_list_; + // WeakHashTable that maps objects embedded in optimized code to dependent + // code list. It is initialized lazily and contains the undefined_value at + // start. + Object* weak_object_to_code_table_; + StoreBufferRebuilder store_buffer_rebuilder_; struct StringTypeTable { @@ -2032,32 +2027,37 @@ // GC callback function, called before and after mark-compact GC. // Allocations in the callback function are disallowed.
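// Callbacks registered through the new v8::Isolate-based API receive the
// isolate as their first argument; legacy callbacks are stored via a
// reinterpret_cast and dispatched with pass_isolate_ == false (see
// Heap::CallGCPrologueCallbacks in heap.cc above). A minimal sketch of the
// new-style shape, with an illustrative embedder function name:
//
//   void OnGCPrologue(v8::Isolate* isolate, v8::GCType type,
//                     v8::GCCallbackFlags flags) {
//     // Observe the collection; allocating on the heap here is disallowed.
//   }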
struct GCPrologueCallbackPair { - GCPrologueCallbackPair(GCPrologueCallback callback, GCType gc_type) - : callback(callback), gc_type(gc_type) { + GCPrologueCallbackPair(v8::Isolate::GCPrologueCallback callback, + GCType gc_type, + bool pass_isolate) + : callback(callback), gc_type(gc_type), pass_isolate_(pass_isolate) { } bool operator==(const GCPrologueCallbackPair& pair) const { return pair.callback == callback; } - GCPrologueCallback callback; + v8::Isolate::GCPrologueCallback callback; GCType gc_type; + // TODO(dcarney): remove variable + bool pass_isolate_; }; List<GCPrologueCallbackPair> gc_prologue_callbacks_; struct GCEpilogueCallbackPair { - GCEpilogueCallbackPair(GCEpilogueCallback callback, GCType gc_type) - : callback(callback), gc_type(gc_type) { + GCEpilogueCallbackPair(v8::Isolate::GCPrologueCallback callback, + GCType gc_type, + bool pass_isolate) + : callback(callback), gc_type(gc_type), pass_isolate_(pass_isolate) { } bool operator==(const GCEpilogueCallbackPair& pair) const { return pair.callback == callback; } - GCEpilogueCallback callback; + v8::Isolate::GCPrologueCallback callback; GCType gc_type; + // TODO(dcarney): remove variable + bool pass_isolate_; }; List<GCEpilogueCallbackPair> gc_epilogue_callbacks_; - GCCallback global_gc_prologue_callback_; - GCCallback global_gc_epilogue_callback_; - // Support for computing object sizes during GC. HeapObjectCallback gc_safe_size_of_old_object_; static int GcSafeSizeOfOldObject(HeapObject* object); @@ -2080,17 +2080,28 @@ class Heap { inline void UpdateOldSpaceLimits(); - // Allocate an uninitialized object in map space. The behavior is identical - // to Heap::AllocateRaw(size_in_bytes, MAP_SPACE), except that (a) it doesn't - // have to test the allocation space argument and (b) can reduce code size - // (since both AllocateRaw and AllocateRawMap are inlined). - MUST_USE_RESULT inline MaybeObject* AllocateRawMap(); + // Selects the proper allocation space depending on the given object + // size, pretenuring decision, and preferred old-space. + static AllocationSpace SelectSpace(int object_size, + AllocationSpace preferred_old_space, + PretenureFlag pretenure) { + ASSERT(preferred_old_space == OLD_POINTER_SPACE || + preferred_old_space == OLD_DATA_SPACE); + if (object_size > Page::kMaxNonCodeHeapObjectSize) return LO_SPACE; + return (pretenure == TENURED) ? preferred_old_space : NEW_SPACE; + } + + // Allocate an uninitialized fixed array. + MUST_USE_RESULT MaybeObject* AllocateRawFixedArray( + int length, PretenureFlag pretenure); - // Allocate an uninitialized object in the simple cell space. - MUST_USE_RESULT inline MaybeObject* AllocateRawCell(); + // Allocate an uninitialized fixed double array. + MUST_USE_RESULT MaybeObject* AllocateRawFixedDoubleArray( + int length, PretenureFlag pretenure); - // Allocate an uninitialized object in the global property cell space. - MUST_USE_RESULT inline MaybeObject* AllocateRawPropertyCell(); + // Allocate an initialized fixed array with the given filler value. + MUST_USE_RESULT MaybeObject* AllocateFixedArrayWithFiller( + int length, PretenureFlag pretenure, Object* filler); // Initializes a JSObject based on its map. void InitializeJSObjectFromMap(JSObject* obj, @@ -2116,10 +2127,6 @@ class Heap { ElementsKind elements_kind, PretenureFlag pretenure = NOT_TENURED); - MUST_USE_RESULT MaybeObject* AllocateJSArrayWithAllocationSite( - ElementsKind elements_kind, - Handle<AllocationSite> allocation_site); - // Allocate empty fixed array. 
MUST_USE_RESULT MaybeObject* AllocateEmptyFixedArray(); @@ -2130,6 +2137,16 @@ // Allocate empty fixed double array. MUST_USE_RESULT MaybeObject* AllocateEmptyFixedDoubleArray(); + // Allocate a tenured simple cell. + MUST_USE_RESULT MaybeObject* AllocateCell(Object* value); + + // Allocate a tenured JS global property cell initialized with the hole. + MUST_USE_RESULT MaybeObject* AllocatePropertyCell(); + + // Allocate a Box. + MUST_USE_RESULT MaybeObject* AllocateBox(Object* value, + PretenureFlag pretenure); + // Performs a minor collection in new generation. void Scavenge(); @@ -2286,6 +2303,15 @@ void ClearObjectStats(bool clear_last_time_stats = false); + void set_weak_object_to_code_table(Object* value) { + ASSERT(!InNewSpace(value)); + weak_object_to_code_table_ = value; + } + + Object** weak_object_to_code_table_address() { + return &weak_object_to_code_table_; + } + static const int kInitialStringTableSize = 2048; static const int kInitialEvalCacheSize = 64; static const int kInitialNumberStringCacheSize = 256; @@ -2335,13 +2361,17 @@ unsigned int gc_count_at_last_idle_gc_; int scavenges_since_last_idle_round_; + // These two counters are monotonically increasing and never reset. + size_t full_codegen_bytes_generated_; + size_t crankshaft_codegen_bytes_generated_; + // If the --deopt_every_n_garbage_collections flag is set to a positive value, // this variable holds the number of garbage collections since the last // deoptimization triggered by garbage collection. int gcs_since_last_deopt_; #ifdef VERIFY_HEAP - int no_weak_embedded_maps_verification_scope_depth_; + int no_weak_object_verification_scope_depth_; #endif static const int kMaxMarkSweepsInIdleRound = 7; @@ -2375,7 +2405,7 @@ class Heap { friend class MarkCompactMarkingVisitor; friend class MapCompact; #ifdef VERIFY_HEAP - friend class NoWeakEmbeddedMapsVerificationScope; + friend class NoWeakObjectVerificationScope; #endif DISALLOW_COPY_AND_ASSIGN(Heap); @@ -2440,10 +2470,10 @@ class AlwaysAllocateScope { }; #ifdef VERIFY_HEAP -class NoWeakEmbeddedMapsVerificationScope { +class NoWeakObjectVerificationScope { public: - inline NoWeakEmbeddedMapsVerificationScope(); - inline ~NoWeakEmbeddedMapsVerificationScope(); + inline NoWeakObjectVerificationScope(); + inline ~NoWeakObjectVerificationScope(); }; #endif diff --git a/deps/v8/src/hydrogen-alias-analysis.h b/deps/v8/src/hydrogen-alias-analysis.h index 73e116e63e..21a54625ff 100644 --- a/deps/v8/src/hydrogen-alias-analysis.h +++ b/deps/v8/src/hydrogen-alias-analysis.h @@ -88,15 +88,6 @@ class HAliasAnalyzer : public ZoneObject { inline bool NoAlias(HValue* a, HValue* b) { return Query(a, b) == kNoAlias; } - - // Returns the actual value of an instruction. In the case of a chain - // of informative definitions, return the root of the chain. - HValue* ActualValue(HValue* obj) { - while (obj->IsInformativeDefinition()) { // Walk a chain of idefs. - obj = obj->RedefinedOperand(); - } - return obj; - } }; diff --git a/deps/v8/src/hydrogen-canonicalize.cc b/deps/v8/src/hydrogen-canonicalize.cc index 4d96415e6a..d3f72e9339 100644 --- a/deps/v8/src/hydrogen-canonicalize.cc +++ b/deps/v8/src/hydrogen-canonicalize.cc @@ -26,6 +26,7 @@ // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include "hydrogen-canonicalize.h" +#include "hydrogen-redundant-phi.h" namespace v8 { namespace internal { @@ -57,8 +58,15 @@ void HCanonicalizePhase::Run() { } } } + // Perform the actual canonicalization pass.
+ HRedundantPhiEliminationPhase redundant_phi_eliminator(graph()); for (int i = 0; i < blocks->length(); ++i) { + // Eliminate redundant phis in the block first; changes to their inputs + // might have made them redundant, and eliminating them creates more + // opportunities for constant folding and strength reduction. + redundant_phi_eliminator.ProcessBlock(blocks->at(i)); + // Now canonicalize each instruction. for (HInstructionIterator it(blocks->at(i)); !it.Done(); it.Advance()) { HInstruction* instr = it.Current(); HValue* value = instr->Canonicalize(); diff --git a/deps/v8/src/hydrogen-check-elimination.cc b/deps/v8/src/hydrogen-check-elimination.cc new file mode 100644 index 0000000000..f712a39db8 --- /dev/null +++ b/deps/v8/src/hydrogen-check-elimination.cc @@ -0,0 +1,357 @@ +// Copyright 2013 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#include "hydrogen-check-elimination.h" +#include "hydrogen-alias-analysis.h" + +namespace v8 { +namespace internal { + +static const int kMaxTrackedObjects = 10; +typedef UniqueSet<Map>* MapSet; + +// The main data structure used during check elimination, which stores a +// set of known maps for each object. +class HCheckTable { + public: + explicit HCheckTable(Zone* zone) : zone_(zone) { + Kill(); + redundant_ = 0; + narrowed_ = 0; + empty_ = 0; + removed_ = 0; + compares_true_ = 0; + compares_false_ = 0; + transitions_ = 0; + loads_ = 0; + } + + void ReduceCheckMaps(HCheckMaps* instr) { + HValue* object = instr->value()->ActualValue(); + int index = Find(object); + if (index >= 0) { + // Entry found. + MapSet a = known_maps_[index]; + MapSet i = instr->map_set().Copy(zone_); + if (a->IsSubset(i)) { + // The first check is more strict; the second is redundant.
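To make the subset and intersection cases below concrete, a worked sketch with invented maps M1..M3 (a hedged illustration, not text from the patch):

  // table: o -> {M1},     incoming CheckMaps o, {M1, M2}:
  //   {M1} is a subset of {M1, M2}, so the earlier check was at least as
  //   strict and the incoming check is redundant (replaced or removed).
  // table: o -> {M1, M2}, incoming CheckMaps o, {M2, M3}:
  //   the intersection {M2} is non-empty; the check could be narrowed.
  // table: o -> {M1},     incoming CheckMaps o, {M3}:
  //   the intersection is empty; likely megamorphic, so it is left alone.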
+ if (checks_[index] != NULL) { + instr->DeleteAndReplaceWith(checks_[index]); + redundant_++; + } else { + instr->DeleteAndReplaceWith(instr->value()); + removed_++; + } + return; + } + i = i->Intersect(a, zone_); + if (i->size() == 0) { + // Intersection is empty; probably megamorphic, which is likely to + // deopt anyway, so just leave things as they are. + empty_++; + } else { + // TODO(titzer): replace the first check with a more strict check. + narrowed_++; + } + } else { + // No entry; insert a new one. + Insert(object, instr, instr->map_set().Copy(zone_)); + } + } + + void ReduceCheckValue(HCheckValue* instr) { + // Canonicalize HCheckValues; they might have their values load-eliminated. + HValue* value = instr->Canonicalize(); + if (value == NULL) { + instr->DeleteAndReplaceWith(instr->value()); + removed_++; + } else if (value != instr) { + instr->DeleteAndReplaceWith(value); + redundant_++; + } + } + + void ReduceLoadNamedField(HLoadNamedField* instr) { + // Reduce a load of the map field when it is known to be a constant. + if (!IsMapAccess(instr->access())) return; + + HValue* object = instr->object()->ActualValue(); + MapSet maps = FindMaps(object); + if (maps == NULL || maps->size() != 1) return; // Not a constant. + + Unique<Map> map = maps->at(0); + HConstant* constant = HConstant::CreateAndInsertBefore( + instr->block()->graph()->zone(), map, true, instr); + instr->DeleteAndReplaceWith(constant); + loads_++; + } + + void ReduceCheckMapValue(HCheckMapValue* instr) { + if (!instr->map()->IsConstant()) return; // Nothing to learn. + + HValue* object = instr->value()->ActualValue(); + // Match a HCheckMapValue(object, HConstant(map)) + Unique<Map> map = MapConstant(instr->map()); + MapSet maps = FindMaps(object); + if (maps != NULL) { + if (maps->Contains(map)) { + if (maps->size() == 1) { + // Object is known to have exactly this map. + instr->DeleteAndReplaceWith(NULL); + removed_++; + } else { + // Only one map survives the check. + maps->Clear(); + maps->Add(map, zone_); + } + } + } else { + // No prior information. + Insert(object, map); + } + } + + void ReduceStoreNamedField(HStoreNamedField* instr) { + HValue* object = instr->object()->ActualValue(); + if (instr->has_transition()) { + // This store transitions the object to a new map. + Kill(object); + Insert(object, MapConstant(instr->transition())); + } else if (IsMapAccess(instr->access())) { + // This is a store directly to the map field of the object. + Kill(object); + if (!instr->value()->IsConstant()) return; + Insert(object, MapConstant(instr->value())); + } else if (instr->CheckGVNFlag(kChangesMaps)) { + // This store indirectly changes the map of the object. + Kill(instr->object()); + UNREACHABLE(); + } + } + + void ReduceCompareMap(HCompareMap* instr) { + MapSet maps = FindMaps(instr->value()->ActualValue()); + if (maps == NULL) return; + if (maps->Contains(instr->map())) { + // TODO(titzer): replace with goto true branch + if (maps->size() == 1) compares_true_++; + } else { + // TODO(titzer): replace with goto false branch + compares_false_++; + } + } + + void ReduceTransitionElementsKind(HTransitionElementsKind* instr) { + MapSet maps = FindMaps(instr->object()->ActualValue()); + // Can only learn more about an object that already has a known set of maps. + if (maps == NULL) return; + if (maps->Contains(instr->original_map())) { + // If the object has the original map, it will be transitioned. 
+ maps->Remove(instr->original_map()); + maps->Add(instr->transitioned_map(), zone_); + } else { + // Object does not have the given map, thus the transition is redundant. + instr->DeleteAndReplaceWith(instr->object()); + transitions_++; + } + } + + // Kill everything in the table. + void Kill() { + memset(objects_, 0, sizeof(objects_)); + } + + // Kill everything in the table that may alias {object}. + void Kill(HValue* object) { + for (int i = 0; i < kMaxTrackedObjects; i++) { + if (objects_[i] == NULL) continue; + if (aliasing_.MayAlias(objects_[i], object)) objects_[i] = NULL; + } + ASSERT(Find(object) < 0); + } + + void Print() { + for (int i = 0; i < kMaxTrackedObjects; i++) { + if (objects_[i] == NULL) continue; + PrintF(" checkmaps-table @%d: object #%d ", i, objects_[i]->id()); + if (checks_[i] != NULL) { + PrintF("check #%d ", checks_[i]->id()); + } + MapSet list = known_maps_[i]; + PrintF("%d maps { ", list->size()); + for (int j = 0; j < list->size(); j++) { + if (j > 0) PrintF(", "); + PrintF("%" V8PRIxPTR, list->at(j).Hashcode()); + } + PrintF(" }\n"); + } + } + + void PrintStats() { + if (redundant_ > 0) PrintF(" redundant = %2d\n", redundant_); + if (removed_ > 0) PrintF(" removed = %2d\n", removed_); + if (narrowed_ > 0) PrintF(" narrowed = %2d\n", narrowed_); + if (loads_ > 0) PrintF(" loads = %2d\n", loads_); + if (empty_ > 0) PrintF(" empty = %2d\n", empty_); + if (compares_true_ > 0) PrintF(" cmp_true = %2d\n", compares_true_); + if (compares_false_ > 0) PrintF(" cmp_false = %2d\n", compares_false_); + if (transitions_ > 0) PrintF(" transitions = %2d\n", transitions_); + } + + private: + int Find(HValue* object) { + for (int i = 0; i < kMaxTrackedObjects; i++) { + if (objects_[i] == NULL) continue; + if (aliasing_.MustAlias(objects_[i], object)) return i; + } + return -1; + } + + MapSet FindMaps(HValue* object) { + int index = Find(object); + return index < 0 ? NULL : known_maps_[index]; + } + + void Insert(HValue* object, Unique<Map> map) { + MapSet list = new(zone_) UniqueSet<Map>(); + list->Add(map, zone_); + Insert(object, NULL, list); + } + + void Insert(HValue* object, HCheckMaps* check, MapSet maps) { + for (int i = 0; i < kMaxTrackedObjects; i++) { + // TODO(titzer): drop old entries instead of disallowing new ones. + if (objects_[i] == NULL) { + objects_[i] = object; + checks_[i] = check; + known_maps_[i] = maps; + return; + } + } + } + + bool IsMapAccess(HObjectAccess access) { + return access.IsInobject() && access.offset() == JSObject::kMapOffset; + } + + Unique<Map> MapConstant(HValue* value) { + return Unique<Map>::cast(HConstant::cast(value)->GetUnique()); + } + + Zone* zone_; + HValue* objects_[kMaxTrackedObjects]; + HValue* checks_[kMaxTrackedObjects]; + MapSet known_maps_[kMaxTrackedObjects]; + HAliasAnalyzer aliasing_; + int redundant_; + int removed_; + int narrowed_; + int loads_; + int empty_; + int compares_true_; + int compares_false_; + int transitions_; +}; + + +void HCheckEliminationPhase::Run() { + for (int i = 0; i < graph()->blocks()->length(); i++) { + EliminateLocalChecks(graph()->blocks()->at(i)); + } +} + + +// For code de-uglification. +#define TRACE(x) if (FLAG_trace_check_elimination) PrintF x + + +// Eliminate checks local to a block. 
+void HCheckEliminationPhase::EliminateLocalChecks(HBasicBlock* block) { + HCheckTable table(zone()); + TRACE(("-- check-elim B%d ------------------------------------------------\n", + block->block_id())); + + for (HInstructionIterator it(block); !it.Done(); it.Advance()) { + bool changed = false; + HInstruction* instr = it.Current(); + + switch (instr->opcode()) { + case HValue::kCheckMaps: { + table.ReduceCheckMaps(HCheckMaps::cast(instr)); + changed = true; + break; + } + case HValue::kCheckValue: { + table.ReduceCheckValue(HCheckValue::cast(instr)); + changed = true; + break; + } + case HValue::kLoadNamedField: { + table.ReduceLoadNamedField(HLoadNamedField::cast(instr)); + changed = true; + break; + } + case HValue::kStoreNamedField: { + table.ReduceStoreNamedField(HStoreNamedField::cast(instr)); + changed = true; + break; + } + case HValue::kCompareMap: { + table.ReduceCompareMap(HCompareMap::cast(instr)); + changed = true; + break; + } + case HValue::kTransitionElementsKind: { + table.ReduceTransitionElementsKind( + HTransitionElementsKind::cast(instr)); + changed = true; + break; + } + case HValue::kCheckMapValue: { + table.ReduceCheckMapValue(HCheckMapValue::cast(instr)); + changed = true; + break; + } + default: { + // If the instruction changes maps uncontrollably, kill the whole town. + if (instr->CheckGVNFlag(kChangesMaps)) { + table.Kill(); + changed = true; + } + } + // Improvements possible: + // - eliminate HCheckSmi and HCheckHeapObject + } + + if (changed && FLAG_trace_check_elimination) table.Print(); + } + + if (FLAG_trace_check_elimination) table.PrintStats(); +} + + +} } // namespace v8::internal diff --git a/deps/v8/src/marking-thread.h b/deps/v8/src/hydrogen-check-elimination.h index 021cd5b48c..fa01964f6f 100644 --- a/deps/v8/src/marking-thread.h +++ b/deps/v8/src/hydrogen-check-elimination.h @@ -25,42 +25,28 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -#ifndef V8_MARKING_THREAD_H_ -#define V8_MARKING_THREAD_H_ +#ifndef V8_HYDROGEN_CHECK_ELIMINATION_H_ +#define V8_HYDROGEN_CHECK_ELIMINATION_H_ -#include "atomicops.h" -#include "flags.h" -#include "platform.h" -#include "v8utils.h" - -#include "spaces.h" - -#include "heap.h" +#include "hydrogen.h" namespace v8 { namespace internal { -class MarkingThread : public Thread { + +// Remove CheckMaps instructions through flow- and branch-sensitive analysis. 
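The TRACE macro defined above uses the classic double-parenthesis idiom: callers write TRACE((...)) so that the entire PrintF argument list arrives as a single macro argument, which works even without variadic macros. The call at the top of EliminateLocalChecks expands roughly to:

  // TRACE(("-- check-elim B%d ---\n", block->block_id()));
  // expands to:
  if (FLAG_trace_check_elimination)
    PrintF("-- check-elim B%d ---\n", block->block_id());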
+class HCheckEliminationPhase : public HPhase { public: - explicit MarkingThread(Isolate* isolate); - ~MarkingThread() {} + explicit HCheckEliminationPhase(HGraph* graph) + : HPhase("H_Check Elimination", graph) { } void Run(); - void Stop(); - void StartMarking(); - void WaitForMarkingThread(); private: - Isolate* isolate_; - Heap* heap_; - Semaphore start_marking_semaphore_; - Semaphore end_marking_semaphore_; - Semaphore stop_semaphore_; - volatile AtomicWord stop_thread_; - int id_; - static Atomic32 id_counter_; + void EliminateLocalChecks(HBasicBlock* block); }; + } } // namespace v8::internal -#endif // V8_MARKING_THREAD_H_ +#endif // V8_HYDROGEN_CHECK_ELIMINATION_H_ diff --git a/deps/v8/src/hydrogen-dce.cc b/deps/v8/src/hydrogen-dce.cc index 0e7253d5a4..e101ee5bcc 100644 --- a/deps/v8/src/hydrogen-dce.cc +++ b/deps/v8/src/hydrogen-dce.cc @@ -31,56 +31,60 @@ namespace v8 { namespace internal { -bool HDeadCodeEliminationPhase::MarkLive(HValue* ref, HValue* instr) { - if (instr->CheckFlag(HValue::kIsLive)) return false; - instr->SetFlag(HValue::kIsLive); - - if (FLAG_trace_dead_code_elimination) { - HeapStringAllocator allocator; - StringStream stream(&allocator); - if (ref != NULL) { - ref->PrintTo(&stream); - } else { - stream.Add("root "); +void HDeadCodeEliminationPhase::MarkLive( + HValue* instr, ZoneList<HValue*>* worklist) { + if (instr->CheckFlag(HValue::kIsLive)) return; // Already live. + + if (FLAG_trace_dead_code_elimination) PrintLive(NULL, instr); + + // Transitively mark all inputs of live instructions live. + worklist->Add(instr, zone()); + while (!worklist->is_empty()) { + HValue* instr = worklist->RemoveLast(); + instr->SetFlag(HValue::kIsLive); + for (int i = 0; i < instr->OperandCount(); ++i) { + HValue* input = instr->OperandAt(i); + if (!input->CheckFlag(HValue::kIsLive)) { + input->SetFlag(HValue::kIsLive); + worklist->Add(input, zone()); + if (FLAG_trace_dead_code_elimination) PrintLive(instr, input); + } } - stream.Add(" -> "); - instr->PrintTo(&stream); - PrintF("[MarkLive %s]\n", *stream.ToCString()); } +} + - return true; +void HDeadCodeEliminationPhase::PrintLive(HValue* ref, HValue* instr) { + HeapStringAllocator allocator; + StringStream stream(&allocator); + if (ref != NULL) { + ref->PrintTo(&stream); + } else { + stream.Add("root "); + } + stream.Add(" -> "); + instr->PrintTo(&stream); + PrintF("[MarkLive %s]\n", *stream.ToCString()); } void HDeadCodeEliminationPhase::MarkLiveInstructions() { - ZoneList<HValue*> worklist(graph()->blocks()->length(), zone()); + ZoneList<HValue*> worklist(10, zone()); - // Mark initial root instructions for dead code elimination. + // Transitively mark all live instructions, starting from roots. for (int i = 0; i < graph()->blocks()->length(); ++i) { HBasicBlock* block = graph()->blocks()->at(i); for (HInstructionIterator it(block); !it.Done(); it.Advance()) { HInstruction* instr = it.Current(); - if (instr->CannotBeEliminated() && MarkLive(NULL, instr)) { - worklist.Add(instr, zone()); - } + if (instr->CannotBeEliminated()) MarkLive(instr, &worklist); } for (int j = 0; j < block->phis()->length(); j++) { HPhi* phi = block->phis()->at(j); - if (phi->CannotBeEliminated() && MarkLive(NULL, phi)) { - worklist.Add(phi, zone()); - } + if (phi->CannotBeEliminated()) MarkLive(phi, &worklist); } } - // Transitively mark all inputs of live instructions live. 
- while (!worklist.is_empty()) { - HValue* instr = worklist.RemoveLast(); - for (int i = 0; i < instr->OperandCount(); ++i) { - if (MarkLive(instr, instr->OperandAt(i))) { - worklist.Add(instr->OperandAt(i), zone()); - } - } - } + ASSERT(worklist.is_empty()); // Should have processed everything. } @@ -93,10 +97,8 @@ void HDeadCodeEliminationPhase::RemoveDeadInstructions() { for (HInstructionIterator it(block); !it.Done(); it.Advance()) { HInstruction* instr = it.Current(); if (!instr->CheckFlag(HValue::kIsLive)) { - // Instruction has not been marked live; assume it is dead and remove. - // TODO(titzer): we don't remove constants because some special ones - // might be used by later phases and are assumed to be in the graph - if (!instr->IsConstant()) instr->DeleteAndReplaceWith(NULL); + // Instruction has not been marked live, so remove it. + instr->DeleteAndReplaceWith(NULL); } else { // Clear the liveness flag to leave the graph clean for the next DCE. instr->ClearFlag(HValue::kIsLive); diff --git a/deps/v8/src/hydrogen-dce.h b/deps/v8/src/hydrogen-dce.h index 19749f279a..2d73b380e4 100644 --- a/deps/v8/src/hydrogen-dce.h +++ b/deps/v8/src/hydrogen-dce.h @@ -45,7 +45,8 @@ class HDeadCodeEliminationPhase : public HPhase { } private: - bool MarkLive(HValue* ref, HValue* instr); + void MarkLive(HValue* instr, ZoneList<HValue*>* worklist); + void PrintLive(HValue* ref, HValue* instr); void MarkLiveInstructions(); void RemoveDeadInstructions(); }; diff --git a/deps/v8/src/hydrogen-deoptimizing-mark.cc b/deps/v8/src/hydrogen-deoptimizing-mark.cc deleted file mode 100644 index 626848e012..0000000000 --- a/deps/v8/src/hydrogen-deoptimizing-mark.cc +++ /dev/null @@ -1,126 +0,0 @@ -// Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
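Returning to the dead-code-elimination change above: MarkLive now drains the worklist itself and marks each input live before queueing it, so every value enters the worklist at most once. The same pattern in a self-contained sketch (generic modern C++ types, not the V8 API):

  #include <vector>

  struct Node {
    bool live = false;
    std::vector<Node*> inputs;
  };

  // Transitively mark 'root' and everything it (recursively) uses as live.
  void MarkLiveSketch(Node* root, std::vector<Node*>* worklist) {
    if (root->live) return;            // Already marked live.
    root->live = true;
    worklist->push_back(root);
    while (!worklist->empty()) {
      Node* n = worklist->back();
      worklist->pop_back();
      for (Node* input : n->inputs) {
        if (!input->live) {
          input->live = true;          // Mark before queueing so each node
          worklist->push_back(input);  // is pushed at most once.
        }
      }
    }
  }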
- -#include "hydrogen-deoptimizing-mark.h" - -namespace v8 { -namespace internal { - -void HPropagateDeoptimizingMarkPhase::MarkAsDeoptimizing() { - HBasicBlock* block = graph()->entry_block(); - ZoneList<HBasicBlock*> stack(graph()->blocks()->length(), zone()); - while (block != NULL) { - const ZoneList<HBasicBlock*>* dominated_blocks(block->dominated_blocks()); - if (!dominated_blocks->is_empty()) { - if (block->IsDeoptimizing()) { - for (int i = 0; i < dominated_blocks->length(); ++i) { - dominated_blocks->at(i)->MarkAsDeoptimizing(); - } - } - for (int i = 1; i < dominated_blocks->length(); ++i) { - stack.Add(dominated_blocks->at(i), zone()); - } - block = dominated_blocks->at(0); - } else if (!stack.is_empty()) { - // Pop next block from stack. - block = stack.RemoveLast(); - } else { - // All blocks processed. - block = NULL; - } - } -} - - -void HPropagateDeoptimizingMarkPhase::NullifyUnreachableInstructions() { - if (!FLAG_unreachable_code_elimination) return; - for (int i = 0; i < graph()->blocks()->length(); ++i) { - HBasicBlock* block = graph()->blocks()->at(i); - bool nullify = false; - const ZoneList<HBasicBlock*>* predecessors = block->predecessors(); - int predecessors_length = predecessors->length(); - bool all_predecessors_deoptimizing = (predecessors_length > 0); - for (int j = 0; j < predecessors_length; ++j) { - if (!predecessors->at(j)->IsDeoptimizing()) { - all_predecessors_deoptimizing = false; - break; - } - } - if (all_predecessors_deoptimizing) nullify = true; - for (HInstructionIterator it(block); !it.Done(); it.Advance()) { - HInstruction* instr = it.Current(); - // Leave the basic structure of the graph intact. - if (instr->IsBlockEntry()) continue; - if (instr->IsControlInstruction()) continue; - if (instr->IsSimulate()) continue; - if (instr->IsEnterInlined()) continue; - if (instr->IsLeaveInlined()) continue; - if (nullify) { - HInstruction* last_dummy = NULL; - for (int j = 0; j < instr->OperandCount(); ++j) { - HValue* operand = instr->OperandAt(j); - // Insert an HDummyUse for each operand, unless the operand - // is an HDummyUse itself. If it's even from the same block, - // remember it as a potential replacement for the instruction. - if (operand->IsDummyUse()) { - if (operand->block() == instr->block() && - last_dummy == NULL) { - last_dummy = HInstruction::cast(operand); - } - continue; - } - if (operand->IsControlInstruction()) { - // Inserting a dummy use for a value that's not defined anywhere - // will fail. Some instructions define fake inputs on such - // values as control flow dependencies. - continue; - } - HDummyUse* dummy = new(graph()->zone()) HDummyUse(operand); - dummy->InsertBefore(instr); - last_dummy = dummy; - } - if (last_dummy == NULL) last_dummy = graph()->GetConstant1(); - instr->DeleteAndReplaceWith(last_dummy); - continue; - } - if (instr->IsDeoptimize()) { - ASSERT(block->IsDeoptimizing()); - nullify = true; - } - } - } -} - - -void HPropagateDeoptimizingMarkPhase::Run() { - // Skip this phase if there is nothing to be done anyway. 
- if (!graph()->has_soft_deoptimize()) return; - MarkAsDeoptimizing(); - NullifyUnreachableInstructions(); -} - -} } // namespace v8::internal diff --git a/deps/v8/src/hydrogen-escape-analysis.cc b/deps/v8/src/hydrogen-escape-analysis.cc index 997e4f9445..1023019923 100644 --- a/deps/v8/src/hydrogen-escape-analysis.cc +++ b/deps/v8/src/hydrogen-escape-analysis.cc @@ -154,9 +154,8 @@ HValue* HEscapeAnalysisPhase::NewMapCheckAndInsert(HCapturedObject* state, HValue* value = state->map_value(); // TODO(mstarzinger): This will narrow a map check against a set of maps // down to the first element in the set. Revisit and fix this. - Handle<Map> map_object = mapcheck->map_set()->first(); - UniqueValueId map_id = mapcheck->map_unique_ids()->first(); - HCheckValue* check = HCheckValue::New(zone, NULL, value, map_object, map_id); + HCheckValue* check = HCheckValue::New( + zone, NULL, value, mapcheck->first_map(), false); check->InsertBefore(mapcheck); return check; } @@ -307,7 +306,7 @@ void HEscapeAnalysisPhase::PerformScalarReplacement() { number_of_objects_++; block_states_.Clear(); - // Perform actual analysis steps. + // Perform actual analysis step. AnalyzeDataFlow(allocate); cumulative_values_ += number_of_values_; @@ -321,8 +320,13 @@ void HEscapeAnalysisPhase::Run() { // TODO(mstarzinger): We disable escape analysis with OSR for now, because // spill slots might be uninitialized. Needs investigation. if (graph()->has_osr()) return; - CollectCapturedValues(); - PerformScalarReplacement(); + int max_fixpoint_iteration_count = FLAG_escape_analysis_iterations; + for (int i = 0; i < max_fixpoint_iteration_count; i++) { + CollectCapturedValues(); + if (captured_.is_empty()) break; + PerformScalarReplacement(); + captured_.Clear(); + } } diff --git a/deps/v8/src/hydrogen-flow-engine.h b/deps/v8/src/hydrogen-flow-engine.h new file mode 100644 index 0000000000..dfe43ec6c3 --- /dev/null +++ b/deps/v8/src/hydrogen-flow-engine.h @@ -0,0 +1,235 @@ +// Copyright 2013 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +#ifndef V8_HYDROGEN_FLOW_ENGINE_H_ +#define V8_HYDROGEN_FLOW_ENGINE_H_ + +#include "hydrogen.h" +#include "hydrogen-instructions.h" +#include "zone.h" + +namespace v8 { +namespace internal { + +// An example implementation of effects that doesn't collect anything. +class NoEffects : public ZoneObject { + public: + explicit NoEffects(Zone* zone) { } + + inline bool Disabled() { + return true; // Nothing to do. + } + template <class State> + inline void Apply(State* state) { + // do nothing. + } + inline void Process(HInstruction* value, Zone* zone) { + // do nothing. + } + inline void Union(NoEffects* other, Zone* zone) { + // do nothing. + } +}; + + +// An example implementation of state that doesn't track anything. +class NoState { + public: + inline NoState* Copy(HBasicBlock* succ, Zone* zone) { + return this; + } + inline NoState* Process(HInstruction* value, Zone* zone) { + return this; + } + inline NoState* Merge(HBasicBlock* succ, NoState* other, Zone* zone) { + return this; + } +}; + + +// This class implements an engine that can drive flow-sensitive analyses +// over a graph of basic blocks, either one block at a time (local analysis) +// or over the entire graph (global analysis). The flow engine is parameterized +// by the type of the state and the effects collected while walking over the +// graph. +// +// The "State" collects which facts are known while passing over instructions +// in control flow order, and the "Effects" collect summary information about +// which facts could be invalidated on other control flow paths. The effects +// are necessary to correctly handle loops in the control flow graph without +// doing a fixed-point iteration. Thus the flow engine is guaranteed to visit +// each block at most twice; once for state, and optionally once for effects. +// +// The flow engine requires the State and Effects classes to implement methods +// like the example NoState and NoEffects above. It's not necessary to provide +// an effects implementation for local analysis. +template <class State, class Effects> +class HFlowEngine { + public: + HFlowEngine(HGraph* graph, Zone* zone) + : graph_(graph), + zone_(zone), +#if DEBUG + pred_counts_(graph->blocks()->length(), zone), +#endif + block_states_(graph->blocks()->length(), zone), + loop_effects_(graph->blocks()->length(), zone) { + loop_effects_.AddBlock(NULL, graph_->blocks()->length(), zone); + } + + // Local analysis. Iterates over the instructions in the given block. + State* AnalyzeOneBlock(HBasicBlock* block, State* state) { + // Go through all instructions of the current block, updating the state. + for (HInstructionIterator it(block); !it.Done(); it.Advance()) { + state = state->Process(it.Current(), zone_); + } + return state; + } + + // Global analysis. Iterates over all blocks that are dominated by the given + // block, starting with the initial state. Computes effects for nested loops. + void AnalyzeDominatedBlocks(HBasicBlock* root, State* initial) { + InitializeStates(); + SetStateAt(root, initial); + + // Iterate all dominated blocks starting from the given start block. + for (int i = root->block_id(); i < graph_->blocks()->length(); i++) { + HBasicBlock* block = graph_->blocks()->at(i); + + // Skip blocks not dominated by the root node. + if (SkipNonDominatedBlock(root, block)) continue; + State* state = StateAt(block); + + if (block->IsLoopHeader()) { + // Apply loop effects before analyzing loop body. 
+ ComputeLoopEffects(block)->Apply(state); + } else { + // Must have visited all predecessors before this block. + CheckPredecessorCount(block); + } + + // Go through all instructions of the current block, updating the state. + for (HInstructionIterator it(block); !it.Done(); it.Advance()) { + state = state->Process(it.Current(), zone_); + } + + // Propagate the block state forward to all successor blocks. + for (int i = 0; i < block->end()->SuccessorCount(); i++) { + HBasicBlock* succ = block->end()->SuccessorAt(i); + IncrementPredecessorCount(succ); + if (StateAt(succ) == NULL) { + // This is the first state to reach the successor. + SetStateAt(succ, state->Copy(succ, zone_)); + } else { + // Merge the current state with the state already at the successor. + SetStateAt(succ, state->Merge(succ, StateAt(succ), zone_)); + } + } + } + } + + private: + // Computes and caches the loop effects for the loop which has the given + // block as its loop header. + Effects* ComputeLoopEffects(HBasicBlock* block) { + ASSERT(block->IsLoopHeader()); + Effects* effects = loop_effects_[block->block_id()]; + if (effects != NULL) return effects; // Already analyzed this loop. + + effects = new(zone_) Effects(zone_); + loop_effects_[block->block_id()] = effects; + if (effects->Disabled()) return effects; // No effects for this analysis. + + HLoopInformation* loop = block->loop_information(); + int end = loop->GetLastBackEdge()->block_id(); + // Process the blocks between the header and the end. + for (int i = block->block_id(); i <= end; i++) { + HBasicBlock* member = graph_->blocks()->at(i); + if (i != block->block_id() && member->IsLoopHeader()) { + // Recursively compute and cache the effects of the nested loop. + ASSERT(member->loop_information()->parent_loop() == loop); + Effects* nested = ComputeLoopEffects(member); + effects->Union(nested, zone_); + // Skip the nested loop's blocks. + i = member->loop_information()->GetLastBackEdge()->block_id(); + } else { + // Process all the effects of the block. + ASSERT(member->current_loop() == loop); + for (HInstructionIterator it(member); !it.Done(); it.Advance()) { + effects->Process(it.Current(), zone_); + } + } + } + return effects; + } + + inline bool SkipNonDominatedBlock(HBasicBlock* root, HBasicBlock* other) { + if (root->block_id() == 0) return false; // Visit the whole graph. + if (root == other) return false; // Always visit the root. + return !root->Dominates(other); // Only visit dominated blocks. + } + + inline State* StateAt(HBasicBlock* block) { + return block_states_.at(block->block_id()); + } + + inline void SetStateAt(HBasicBlock* block, State* state) { + block_states_.Set(block->block_id(), state); + } + + inline void InitializeStates() { +#if DEBUG + pred_counts_.Rewind(0); + pred_counts_.AddBlock(0, graph_->blocks()->length(), zone_); +#endif + block_states_.Rewind(0); + block_states_.AddBlock(NULL, graph_->blocks()->length(), zone_); + } + + inline void CheckPredecessorCount(HBasicBlock* block) { + ASSERT(block->predecessors()->length() == pred_counts_[block->block_id()]); + } + + inline void IncrementPredecessorCount(HBasicBlock* block) { +#if DEBUG + pred_counts_[block->block_id()]++; +#endif + } + + HGraph* graph_; // The hydrogen graph. + Zone* zone_; // Temporary zone. +#if DEBUG + ZoneList<int> pred_counts_; // Finished predecessors (by block id). +#endif + ZoneList<State*> block_states_; // Block states (by block id). + ZoneList<Effects*> loop_effects_; // Loop effects (by block id). 
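A hedged usage sketch of the engine (MyState and MyEffects are invented placeholders satisfying the contracts above; only NoState and NoEffects are real):

  // Global, loop-aware analysis over all blocks dominated by the entry:
  //   HFlowEngine<MyState, MyEffects> engine(graph, zone);
  //   engine.AnalyzeDominatedBlocks(graph->entry_block(), initial_state);
  //
  // Purely block-local analysis, where no effects are needed:
  //   HFlowEngine<MyState, NoEffects> local(graph, zone);
  //   MyState* out = local.AnalyzeOneBlock(block, in_state);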
+}; + + +} } // namespace v8::internal + +#endif // V8_HYDROGEN_FLOW_ENGINE_H_ diff --git a/deps/v8/src/hydrogen-gvn.cc b/deps/v8/src/hydrogen-gvn.cc index 9a02a1dcf4..e3bf316f37 100644 --- a/deps/v8/src/hydrogen-gvn.cc +++ b/deps/v8/src/hydrogen-gvn.cc @@ -396,30 +396,27 @@ void HGlobalValueNumberingPhase::ComputeBlockSideEffects() { for (int i = graph()->blocks()->length() - 1; i >= 0; --i) { // Compute side effects for the block. HBasicBlock* block = graph()->blocks()->at(i); - int id = block->block_id(); GVNFlagSet side_effects; - for (HInstructionIterator it(block); !it.Done(); it.Advance()) { - HInstruction* instr = it.Current(); - side_effects.Add(instr->ChangesFlags()); - if (instr->IsDeoptimize()) { - block_side_effects_[id].RemoveAll(); - side_effects.RemoveAll(); - break; + if (block->IsReachable() && !block->IsDeoptimizing()) { + int id = block->block_id(); + for (HInstructionIterator it(block); !it.Done(); it.Advance()) { + HInstruction* instr = it.Current(); + side_effects.Add(instr->ChangesFlags()); } - } - block_side_effects_[id].Add(side_effects); + block_side_effects_[id].Add(side_effects); - // Loop headers are part of their loop. - if (block->IsLoopHeader()) { - loop_side_effects_[id].Add(side_effects); - } + // Loop headers are part of their loop. + if (block->IsLoopHeader()) { + loop_side_effects_[id].Add(side_effects); + } - // Propagate loop side effects upwards. - if (block->HasParentLoopHeader()) { - int header_id = block->parent_loop_header()->block_id(); - loop_side_effects_[header_id].Add(block->IsLoopHeader() - ? loop_side_effects_[id] - : side_effects); + // Propagate loop side effects upwards. + if (block->HasParentLoopHeader()) { + int header_id = block->parent_loop_header()->block_id(); + loop_side_effects_[header_id].Add(block->IsLoopHeader() + ? loop_side_effects_[id] + : side_effects); + } } } } @@ -609,7 +606,8 @@ bool HGlobalValueNumberingPhase::ShouldMove(HInstruction* instr, HBasicBlock* loop_header) { // If we've disabled code motion or we're in a block that unconditionally // deoptimizes, don't move any instructions. 
- return AllowCodeMotion() && !instr->block()->IsDeoptimizing(); + return AllowCodeMotion() && !instr->block()->IsDeoptimizing() && + instr->block()->IsReachable(); } diff --git a/deps/v8/src/hydrogen-instructions.cc b/deps/v8/src/hydrogen-instructions.cc index cca95b9b5f..206ab7e2ac 100644 --- a/deps/v8/src/hydrogen-instructions.cc +++ b/deps/v8/src/hydrogen-instructions.cc @@ -509,6 +509,17 @@ const char* HValue::Mnemonic() const { } +bool HValue::CanReplaceWithDummyUses() { + return FLAG_unreachable_code_elimination && + !(block()->IsReachable() || + IsBlockEntry() || + IsControlInstruction() || + IsSimulate() || + IsEnterInlined() || + IsLeaveInlined()); +} + + bool HValue::IsInteger32Constant() { return IsConstant() && HConstant::cast(this)->HasInteger32Value(); } @@ -730,6 +741,10 @@ void HInstruction::InsertBefore(HInstruction* next) { next_ = next; previous_ = prev; SetBlock(next->block()); + if (position() == RelocInfo::kNoPosition && + next->position() != RelocInfo::kNoPosition) { + set_position(next->position()); + } } @@ -764,6 +779,10 @@ void HInstruction::InsertAfter(HInstruction* previous) { if (block->last() == previous) { block->set_last(this); } + if (position() == RelocInfo::kNoPosition && + previous->position() != RelocInfo::kNoPosition) { + set_position(previous->position()); + } } @@ -973,6 +992,9 @@ void HCallNewArray::PrintDataTo(StringStream* stream) { void HCallRuntime::PrintDataTo(StringStream* stream) { stream->Add("%o ", *name()); + if (save_doubles() == kSaveFPRegs) { + stream->Add("[save doubles] "); + } stream->Add("#%d", argument_count()); } @@ -1050,9 +1072,24 @@ Representation HBranch::observed_input_representation(int index) { } +bool HBranch::KnownSuccessorBlock(HBasicBlock** block) { + HValue* value = this->value(); + if (value->EmitAtUses()) { + ASSERT(value->IsConstant()); + ASSERT(!value->representation().IsDouble()); + *block = HConstant::cast(value)->BooleanValue() + ? FirstSuccessor() + : SecondSuccessor(); + return true; + } + *block = NULL; + return false; +} + + void HCompareMap::PrintDataTo(StringStream* stream) { value()->PrintNameTo(stream); - stream->Add(" (%p)", *map()); + stream->Add(" (%p)", *map().handle()); HControlInstruction::PrintDataTo(stream); } @@ -1218,8 +1255,15 @@ static bool IsIdentityOperation(HValue* arg1, HValue* arg2, int32_t identity) { HValue* HAdd::Canonicalize() { - if (IsIdentityOperation(left(), right(), 0)) return left(); - if (IsIdentityOperation(right(), left(), 0)) return right(); + // Adding 0 is an identity operation except in case of -0: -0 + 0 = +0 + if (IsIdentityOperation(left(), right(), 0) && + !left()->representation().IsDouble()) { // Left could be -0. + return left(); + } + if (IsIdentityOperation(right(), left(), 0) && + !left()->representation().IsDouble()) { // Right could be -0. 
+ return right(); + } return this; } @@ -1237,6 +1281,16 @@ HValue* HMul::Canonicalize() { } +bool HMul::MulMinusOne() { + if (left()->EqualsInteger32Constant(-1) || + right()->EqualsInteger32Constant(-1)) { + return true; + } + + return false; +} + + HValue* HMod::Canonicalize() { return this; } @@ -1431,11 +1485,9 @@ void HCheckMaps::HandleSideEffectDominator(GVNFlag side_effect, HStoreNamedField* store = HStoreNamedField::cast(dominator); if (!store->has_transition() || store->object() != value()) return; HConstant* transition = HConstant::cast(store->transition()); - for (int i = 0; i < map_set()->length(); i++) { - if (transition->UniqueValueIdsMatch(map_unique_ids_.at(i))) { - DeleteAndReplaceWith(NULL); - return; - } + if (map_set_.Contains(transition->GetUnique())) { + DeleteAndReplaceWith(NULL); + return; } } } @@ -1443,9 +1495,9 @@ void HCheckMaps::PrintDataTo(StringStream* stream) { value()->PrintNameTo(stream); - stream->Add(" [%p", *map_set()->first()); - for (int i = 1; i < map_set()->length(); ++i) { - stream->Add(",%p", *map_set()->at(i)); + stream->Add(" [%p", *map_set_.at(0).handle()); + for (int i = 1; i < map_set_.size(); ++i) { + stream->Add(",%p", *map_set_.at(i).handle()); } stream->Add("]%s", CanOmitMapChecks() ? "(omitted)" : ""); } @@ -1454,13 +1506,13 @@ void HCheckMaps::PrintDataTo(StringStream* stream) { void HCheckValue::PrintDataTo(StringStream* stream) { value()->PrintNameTo(stream); stream->Add(" "); - object()->ShortPrint(stream); + object().handle()->ShortPrint(stream); } HValue* HCheckValue::Canonicalize() { return (value()->IsConstant() && - HConstant::cast(value())->UniqueValueIdsMatch(object_unique_id_)) + HConstant::cast(value())->GetUnique() == object_) ? NULL : this; } @@ -1555,6 +1607,11 @@ Range* HConstant::InferRange(Zone* zone) { } +int HPhi::position() const { + return block()->first()->position(); +} + + Range* HPhi::InferRange(Zone* zone) { Representation r = representation(); if (r.IsSmiOrInteger32()) { @@ -1624,10 +1681,13 @@ Range* HMul::InferRange(Zone* zone) { Range* a = left()->range(); Range* b = right()->range(); Range* res = a->Copy(zone); - if (!res->MulAndCheckOverflow(r, b)) { - // Clearing the kCanOverflow flag when kAllUsesAreTruncatingToInt32 - // would be wrong, because truncated integer multiplication is too - // precise and therefore not the same as converting to Double and back. + if (!res->MulAndCheckOverflow(r, b) || + (((r.IsInteger32() && CheckFlag(kAllUsesTruncatingToInt32)) || + (r.IsSmi() && CheckFlag(kAllUsesTruncatingToSmi))) && + MulMinusOne())) { + // Truncated int multiplication is too precise and therefore not the + // same as converting to Double and back. + // Handle truncated integer multiplication by -1 specially. ClearFlag(kCanOverflow); } res->set_can_be_minus_zero(!CheckFlag(kAllUsesTruncatingToSmi) && @@ -1649,7 +1709,10 @@ Range* HDiv::InferRange(Zone* zone) { result->set_can_be_minus_zero(!CheckFlag(kAllUsesTruncatingToInt32) && (a->CanBeMinusZero() || (a->CanBeZero() && b->CanBeNegative()))); - if (!a->Includes(kMinInt) || !b->Includes(-1)) { + if (!a->Includes(kMinInt) || + !b->Includes(-1) || + CheckFlag(kAllUsesTruncatingToInt32)) { + // It is safe to clear kCanOverflow when kAllUsesTruncatingToInt32.
ClearFlag(HValue::kCanOverflow); } @@ -2327,23 +2390,38 @@ void HSimulate::ReplayEnvironment(HEnvironment* env) { } +static void ReplayEnvironmentNested(const ZoneList<HValue*>* values, + HCapturedObject* other) { + for (int i = 0; i < values->length(); ++i) { + HValue* value = values->at(i); + if (value->IsCapturedObject()) { + if (HCapturedObject::cast(value)->capture_id() == other->capture_id()) { + values->at(i) = other; + } else { + ReplayEnvironmentNested(HCapturedObject::cast(value)->values(), other); + } + } + } +} + + // Replay captured objects by replacing all captured objects with the // same capture id in the current and all outer environments. void HCapturedObject::ReplayEnvironment(HEnvironment* env) { ASSERT(env != NULL); while (env != NULL) { - for (int i = 0; i < env->length(); ++i) { - HValue* value = env->values()->at(i); - if (value->IsCapturedObject() && - HCapturedObject::cast(value)->capture_id() == this->capture_id()) { - env->SetValueAt(i, this); - } - } + ReplayEnvironmentNested(env->values(), this); env = env->outer(); } } +void HCapturedObject::PrintDataTo(StringStream* stream) { + stream->Add("#%d ", capture_id()); + HDematerializedObject::PrintDataTo(stream); +} + + void HEnterInlined::RegisterReturnTarget(HBasicBlock* return_target, Zone* zone) { ASSERT(return_target->IsInlineReturnTarget()); @@ -2365,8 +2443,7 @@ static bool IsInteger32(double value) { HConstant::HConstant(Handle<Object> handle, Representation r) : HTemplateInstruction<0>(HType::TypeFromValue(handle)), - handle_(handle), - unique_id_(), + object_(Unique<Object>::CreateUninitialized(handle)), has_smi_value_(false), has_int32_value_(false), has_double_value_(false), @@ -2375,29 +2452,28 @@ HConstant::HConstant(Handle<Object> handle, Representation r) is_not_in_new_space_(true), is_cell_(false), boolean_value_(handle->BooleanValue()) { - if (handle_->IsHeapObject()) { + if (handle->IsHeapObject()) { Heap* heap = Handle<HeapObject>::cast(handle)->GetHeap(); is_not_in_new_space_ = !heap->InNewSpace(*handle); } - if (handle_->IsNumber()) { - double n = handle_->Number(); + if (handle->IsNumber()) { + double n = handle->Number(); has_int32_value_ = IsInteger32(n); int32_value_ = DoubleToInt32(n); has_smi_value_ = has_int32_value_ && Smi::IsValid(int32_value_); double_value_ = n; has_double_value_ = true; } else { - is_internalized_string_ = handle_->IsInternalizedString(); + is_internalized_string_ = handle->IsInternalizedString(); } - is_cell_ = !handle_.is_null() && - (handle_->IsCell() || handle_->IsPropertyCell()); + is_cell_ = !handle.is_null() && + (handle->IsCell() || handle->IsPropertyCell()); Initialize(r); } -HConstant::HConstant(Handle<Object> handle, - UniqueValueId unique_id, +HConstant::HConstant(Unique<Object> unique, Representation r, HType type, bool is_internalize_string, @@ -2405,8 +2481,7 @@ HConstant::HConstant(Handle<Object> handle, bool is_cell, bool boolean_value) : HTemplateInstruction<0>(type), - handle_(handle), - unique_id_(unique_id), + object_(unique), has_smi_value_(false), has_int32_value_(false), has_double_value_(false), @@ -2415,36 +2490,17 @@ HConstant::HConstant(Handle<Object> handle, is_not_in_new_space_(is_not_in_new_space), is_cell_(is_cell), boolean_value_(boolean_value) { - ASSERT(!handle.is_null()); + ASSERT(!unique.handle().is_null()); ASSERT(!type.IsTaggedNumber()); Initialize(r); } -HConstant::HConstant(Handle<Map> handle, - UniqueValueId unique_id) - : HTemplateInstruction<0>(HType::Tagged()), - handle_(handle), - unique_id_(unique_id), - 
has_smi_value_(false), - has_int32_value_(false), - has_double_value_(false), - has_external_reference_value_(false), - is_internalized_string_(false), - is_not_in_new_space_(true), - is_cell_(false), - boolean_value_(false) { - ASSERT(!handle.is_null()); - Initialize(Representation::Tagged()); -} - - HConstant::HConstant(int32_t integer_value, Representation r, bool is_not_in_new_space, - Handle<Object> optional_handle) - : handle_(optional_handle), - unique_id_(), + Unique<Object> object) + : object_(object), has_smi_value_(Smi::IsValid(integer_value)), has_int32_value_(true), has_double_value_(true), @@ -2463,9 +2519,8 @@ HConstant::HConstant(int32_t integer_value, HConstant::HConstant(double double_value, Representation r, bool is_not_in_new_space, - Handle<Object> optional_handle) - : handle_(optional_handle), - unique_id_(), + Unique<Object> object) + : object_(object), has_int32_value_(IsInteger32(double_value)), has_double_value_(true), has_external_reference_value_(false), @@ -2483,6 +2538,7 @@ HConstant::HConstant(double double_value, HConstant::HConstant(ExternalReference reference) : HTemplateInstruction<0>(HType::None()), + object_(Unique<Object>(Handle<Object>::null())), has_smi_value_(false), has_int32_value_(false), has_double_value_(false), @@ -2496,14 +2552,6 @@ HConstant::HConstant(ExternalReference reference) } -static void PrepareConstant(Handle<Object> object) { - if (!object->IsJSObject()) return; - Handle<JSObject> js_object = Handle<JSObject>::cast(object); - if (!js_object->map()->is_deprecated()) return; - JSObject::TryMigrateInstance(js_object); -} - - void HConstant::Initialize(Representation r) { if (r.IsNone()) { if (has_smi_value_ && SmiValuesAre31Bits()) { @@ -2515,7 +2563,14 @@ void HConstant::Initialize(Representation r) { } else if (has_external_reference_value_) { r = Representation::External(); } else { - PrepareConstant(handle_); + Handle<Object> object = object_.handle(); + if (object->IsJSObject()) { + // Try to eagerly migrate JSObjects that have deprecated maps. + Handle<JSObject> js_object = Handle<JSObject>::cast(object); + if (js_object->map()->is_deprecated()) { + JSObject::TryMigrateInstance(js_object); + } + } r = Representation::Tagged(); } } @@ -2526,9 +2581,12 @@ void HConstant::Initialize(Representation r) { bool HConstant::EmitAtUses() { ASSERT(IsLinked()); - if (block()->graph()->has_osr()) { - return block()->graph()->IsStandardConstant(this); + if (block()->graph()->has_osr() && + block()->graph()->IsStandardConstant(this)) { + // TODO(titzer): this seems like a hack that should be fixed by custom OSR. 
+ return true; } + if (UseCount() == 0) return true; if (IsCell()) return false; if (representation().IsDouble()) return false; return true; @@ -2541,17 +2599,16 @@ HConstant* HConstant::CopyToRepresentation(Representation r, Zone* zone) const { if (r.IsDouble() && !has_double_value_) return NULL; if (r.IsExternal() && !has_external_reference_value_) return NULL; if (has_int32_value_) { - return new(zone) HConstant(int32_value_, r, is_not_in_new_space_, handle_); + return new(zone) HConstant(int32_value_, r, is_not_in_new_space_, object_); } if (has_double_value_) { - return new(zone) HConstant(double_value_, r, is_not_in_new_space_, handle_); + return new(zone) HConstant(double_value_, r, is_not_in_new_space_, object_); } if (has_external_reference_value_) { return new(zone) HConstant(external_reference_value_); } - ASSERT(!handle_.is_null()); - return new(zone) HConstant(handle_, - unique_id_, + ASSERT(!object_.handle().is_null()); + return new(zone) HConstant(object_, r, type_, is_internalized_string_, @@ -2567,16 +2624,12 @@ Maybe<HConstant*> HConstant::CopyToTruncatedInt32(Zone* zone) { res = new(zone) HConstant(int32_value_, Representation::Integer32(), is_not_in_new_space_, - handle_); + object_); } else if (has_double_value_) { res = new(zone) HConstant(DoubleToInt32(double_value_), Representation::Integer32(), is_not_in_new_space_, - handle_); - } else { - ASSERT(!HasNumberValue()); - Maybe<HConstant*> number = CopyToTruncatedNumber(zone); - if (number.has_value) return number.value->CopyToTruncatedInt32(zone); + object_); } return Maybe<HConstant*>(res != NULL, res); } @@ -2624,6 +2677,12 @@ void HBinaryOperation::InferRepresentation(HInferRepresentationPhase* h_infer) { ASSERT(CheckFlag(kFlexibleRepresentation)); Representation new_rep = RepresentationFromInputs(); UpdateRepresentation(new_rep, h_infer, "inputs"); + + if (representation().IsSmi() && HasNonSmiUse()) { + UpdateRepresentation( + Representation::Integer32(), h_infer, "use requirements"); + } + if (observed_output_representation_.IsNone()) { new_rep = RepresentationFromUses(); UpdateRepresentation(new_rep, h_infer, "uses"); @@ -2631,11 +2690,6 @@ void HBinaryOperation::InferRepresentation(HInferRepresentationPhase* h_infer) { new_rep = RepresentationFromOutput(); UpdateRepresentation(new_rep, h_infer, "output"); } - - if (representation().IsSmi() && HasNonSmiUse()) { - UpdateRepresentation( - Representation::Integer32(), h_infer, "use requirements"); - } } @@ -2662,7 +2716,7 @@ bool HBinaryOperation::IgnoreObservedOutputRepresentation( return ((current_rep.IsInteger32() && CheckUsesForFlag(kTruncatingToInt32)) || (current_rep.IsSmi() && CheckUsesForFlag(kTruncatingToSmi))) && // Mul in Integer32 mode would be too precise. 
- !this->IsMul(); + (!this->IsMul() || HMul::cast(this)->MulMinusOne()); } @@ -2802,6 +2856,9 @@ Range* HShl::InferRange(Zone* zone) { Range* HLoadNamedField::InferRange(Zone* zone) { + if (access().representation().IsByte()) { + return new(zone) Range(0, 255); + } if (access().IsStringLength()) { return new(zone) Range(0, String::kMaxLength); } @@ -2859,15 +2916,23 @@ void HCompareObjectEqAndBranch::PrintDataTo(StringStream* stream) { } -void HCompareHoleAndBranch::PrintDataTo(StringStream* stream) { - object()->PrintNameTo(stream); - HControlInstruction::PrintDataTo(stream); +bool HCompareObjectEqAndBranch::KnownSuccessorBlock(HBasicBlock** block) { + if (left()->IsConstant() && right()->IsConstant()) { + bool comparison_result = + HConstant::cast(left())->Equals(HConstant::cast(right())); + *block = comparison_result + ? FirstSuccessor() + : SecondSuccessor(); + return true; + } + *block = NULL; + return false; } void HCompareHoleAndBranch::InferRepresentation( HInferRepresentationPhase* h_infer) { - ChangeRepresentation(object()->representation()); + ChangeRepresentation(value()->representation()); } @@ -2937,22 +3002,17 @@ HCheckMaps* HCheckMaps::New(Zone* zone, if (map->CanOmitMapChecks() && value->IsConstant() && HConstant::cast(value)->HasMap(map)) { - check_map->omit(info); + // TODO(titzer): collect dependent map checks into a list. + check_map->omit_ = true; + if (map->CanTransition()) { + map->AddDependentCompilationInfo( + DependentCode::kPrototypeCheckGroup, info); + } } return check_map; } -void HCheckMaps::FinalizeUniqueValueId() { - if (!map_unique_ids_.is_empty()) return; - Zone* zone = block()->zone(); - map_unique_ids_.Initialize(map_set_.length(), zone); - for (int i = 0; i < map_set_.length(); i++) { - map_unique_ids_.Add(UniqueValueId(map_set_.at(i)), zone); - } -} - - void HLoadNamedGeneric::PrintDataTo(StringStream* stream) { object()->PrintNameTo(stream); stream->Add("."); @@ -3148,19 +3208,19 @@ void HStoreKeyedGeneric::PrintDataTo(StringStream* stream) { void HTransitionElementsKind::PrintDataTo(StringStream* stream) { object()->PrintNameTo(stream); - ElementsKind from_kind = original_map()->elements_kind(); - ElementsKind to_kind = transitioned_map()->elements_kind(); + ElementsKind from_kind = original_map().handle()->elements_kind(); + ElementsKind to_kind = transitioned_map().handle()->elements_kind(); stream->Add(" %p [%s] -> %p [%s]", - *original_map(), + *original_map().handle(), ElementsAccessor::ForKind(from_kind)->name(), - *transitioned_map(), + *transitioned_map().handle(), ElementsAccessor::ForKind(to_kind)->name()); if (IsSimpleMapChangeTransition(from_kind, to_kind)) stream->Add(" (simple)"); } void HLoadGlobalCell::PrintDataTo(StringStream* stream) { - stream->Add("[%p]", *cell()); + stream->Add("[%p]", *cell().handle()); if (!details_.IsDontDelete()) stream->Add(" (deleteable)"); if (details_.IsReadOnly()) stream->Add(" (read-only)"); } @@ -3188,7 +3248,7 @@ void HInnerAllocatedObject::PrintDataTo(StringStream* stream) { void HStoreGlobalCell::PrintDataTo(StringStream* stream) { - stream->Add("[%p] = ", *cell()); + stream->Add("[%p] = ", *cell().handle()); value()->PrintNameTo(stream); if (!details_.IsDontDelete()) stream->Add(" (deleteable)"); if (details_.IsReadOnly()) stream->Add(" (read-only)"); @@ -3454,8 +3514,8 @@ void HAllocate::CreateFreeSpaceFiller(int32_t free_space_size) { HConstant* filler_map = HConstant::New( zone, context(), - isolate()->factory()->free_space_map(), - UniqueValueId::free_space_map(isolate()->heap())); + 
isolate()->factory()->free_space_map()); + filler_map->FinalizeUniqueness(); // TODO(titzer): should be init'd a'ready filler_map->InsertAfter(free_space_instr); HInstruction* store_map = HStoreNamedField::New(zone, context(), free_space_instr, HObjectAccess::ForMap(), filler_map); @@ -4004,7 +4064,7 @@ Representation HValue::RepresentationFromUseRequirements() { Representation rep = Representation::None(); for (HUseIterator it(uses()); !it.Done(); it.Advance()) { // Ignore the use requirement from never run code - if (it.value()->block()->IsDeoptimizing()) continue; + if (it.value()->block()->IsUnreachable()) continue; // We check for observed_input_representation elsewhere. Representation use_rep = diff --git a/deps/v8/src/hydrogen-instructions.h b/deps/v8/src/hydrogen-instructions.h index 7d33141a4f..80773bf147 100644 --- a/deps/v8/src/hydrogen-instructions.h +++ b/deps/v8/src/hydrogen-instructions.h @@ -36,6 +36,7 @@ #include "deoptimizer.h" #include "small-pointer-list.h" #include "string-stream.h" +#include "unique.h" #include "v8conversions.h" #include "v8utils.h" #include "zone.h" @@ -63,6 +64,7 @@ class LChunkBuilder; #define HYDROGEN_CONCRETE_INSTRUCTION_LIST(V) \ + V(AbnormalExit) \ V(AccessArgumentsAt) \ V(Add) \ V(Allocate) \ @@ -124,11 +126,9 @@ class LChunkBuilder; V(InnerAllocatedObject) \ V(InstanceOf) \ V(InstanceOfKnownGlobal) \ - V(InstanceSize) \ V(InvokeFunction) \ V(IsConstructCallAndBranch) \ V(IsObjectAndBranch) \ - V(IsNumberAndBranch) \ V(IsStringAndBranch) \ V(IsSmiAndBranch) \ V(IsUndetectableAndBranch) \ @@ -143,6 +143,7 @@ class LChunkBuilder; V(LoadKeyedGeneric) \ V(LoadNamedField) \ V(LoadNamedGeneric) \ + V(LoadRoot) \ V(MapEnumLength) \ V(MathFloorOfDiv) \ V(MathMinMax) \ @@ -305,64 +306,6 @@ class Range V8_FINAL : public ZoneObject { }; -class UniqueValueId V8_FINAL { - public: - UniqueValueId() : raw_address_(NULL) { } - - explicit UniqueValueId(Handle<Object> handle) { - ASSERT(!AllowHeapAllocation::IsAllowed()); - static const Address kEmptyHandleSentinel = reinterpret_cast<Address>(1); - if (handle.is_null()) { - raw_address_ = kEmptyHandleSentinel; - } else { - raw_address_ = reinterpret_cast<Address>(*handle); - ASSERT_NE(kEmptyHandleSentinel, raw_address_); - } - ASSERT(IsInitialized()); - } - - bool IsInitialized() const { return raw_address_ != NULL; } - - bool operator==(const UniqueValueId& other) const { - ASSERT(IsInitialized() && other.IsInitialized()); - return raw_address_ == other.raw_address_; - } - - bool operator!=(const UniqueValueId& other) const { - ASSERT(IsInitialized() && other.IsInitialized()); - return raw_address_ != other.raw_address_; - } - - intptr_t Hashcode() const { - ASSERT(IsInitialized()); - return reinterpret_cast<intptr_t>(raw_address_); - } - -#define IMMOVABLE_UNIQUE_VALUE_ID(name) \ - static UniqueValueId name(Heap* heap) { return UniqueValueId(heap->name()); } - - IMMOVABLE_UNIQUE_VALUE_ID(free_space_map) - IMMOVABLE_UNIQUE_VALUE_ID(minus_zero_value) - IMMOVABLE_UNIQUE_VALUE_ID(nan_value) - IMMOVABLE_UNIQUE_VALUE_ID(undefined_value) - IMMOVABLE_UNIQUE_VALUE_ID(null_value) - IMMOVABLE_UNIQUE_VALUE_ID(true_value) - IMMOVABLE_UNIQUE_VALUE_ID(false_value) - IMMOVABLE_UNIQUE_VALUE_ID(the_hole_value) - IMMOVABLE_UNIQUE_VALUE_ID(empty_string) - -#undef IMMOVABLE_UNIQUE_VALUE_ID - - private: - Address raw_address_; - - explicit UniqueValueId(Object* object) { - raw_address_ = reinterpret_cast<Address>(object); - ASSERT(IsInitialized()); - } -}; - - class HType V8_FINAL { public: static HType None() { return 
HType(kNone); } @@ -695,6 +638,8 @@ class HValue : public ZoneObject { flags_(0) {} virtual ~HValue() {} + virtual int position() const { return RelocInfo::kNoPosition; } + HBasicBlock* block() const { return block_; } void SetBlock(HBasicBlock* block); int LoopWeight() const; @@ -777,16 +722,24 @@ class HValue : public ZoneObject { return index == kNoRedefinedOperand ? NULL : OperandAt(index); } + bool CanReplaceWithDummyUses(); + + virtual int argument_delta() const { return 0; } + // A purely informative definition is an idef that will not emit code and // should therefore be removed from the graph in the RestoreActualValues // phase (so that live ranges will be shorter). virtual bool IsPurelyInformativeDefinition() { return false; } - // This method must always return the original HValue SSA definition - // (regardless of any iDef of this value). + // This method must always return the original HValue SSA definition, + // regardless of any chain of iDefs of this value. HValue* ActualValue() { - int index = RedefinedOperandIndex(); - return index == kNoRedefinedOperand ? this : OperandAt(index); + HValue* value = this; + int index; + while ((index = value->RedefinedOperandIndex()) != kNoRedefinedOperand) { + value = value->OperandAt(index); + } + return value; } bool IsInteger32Constant(); @@ -815,6 +768,9 @@ class HValue : public ZoneObject { void SetFlag(Flag f) { flags_ |= (1 << f); } void ClearFlag(Flag f) { flags_ &= ~(1 << f); } bool CheckFlag(Flag f) const { return (flags_ & (1 << f)) != 0; } + void CopyFlag(Flag f, HValue* other) { + if (other->CheckFlag(f)) SetFlag(f); + } // Returns true if the flag specified is set for all uses, false otherwise. bool CheckUsesForFlag(Flag f) const; @@ -898,7 +854,7 @@ class HValue : public ZoneObject { virtual intptr_t Hashcode(); // Compute unique ids upfront that is safe wrt GC and concurrent compilation. - virtual void FinalizeUniqueValueId() { } + virtual void FinalizeUniqueness() { } // Printing support. virtual void PrintTo(StringStream* stream) = 0; @@ -1104,6 +1060,47 @@ class HValue : public ZoneObject { return new(zone) I(p1, p2, p3, p4, p5); \ } +#define DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P0(I) \ + static I* New(Zone* zone, HValue* context) { \ + return new(zone) I(context); \ + } + +#define DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P1(I, P1) \ + static I* New(Zone* zone, HValue* context, P1 p1) { \ + return new(zone) I(context, p1); \ + } + +#define DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(I, P1, P2) \ + static I* New(Zone* zone, HValue* context, P1 p1, P2 p2) { \ + return new(zone) I(context, p1, p2); \ + } + +#define DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P3(I, P1, P2, P3) \ + static I* New(Zone* zone, HValue* context, P1 p1, P2 p2, P3 p3) { \ + return new(zone) I(context, p1, p2, p3); \ + } + +#define DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P4(I, P1, P2, P3, P4) \ + static I* New(Zone* zone, \ + HValue* context, \ + P1 p1, \ + P2 p2, \ + P3 p3, \ + P4 p4) { \ + return new(zone) I(context, p1, p2, p3, p4); \ + } + +#define DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P5(I, P1, P2, P3, P4, P5) \ + static I* New(Zone* zone, \ + HValue* context, \ + P1 p1, \ + P2 p2, \ + P3 p3, \ + P4 p4, \ + P5 p5) { \ + return new(zone) I(context, p1, p2, p3, p4, p5); \ + } + class HInstruction : public HValue { public: @@ -1119,7 +1116,7 @@ class HInstruction : public HValue { void InsertAfter(HInstruction* previous); // The position is a write-once variable. 
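
An aside on the pattern introduced by the macros above: the DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_Pn family lets every call site allocate an instruction through a uniform New(zone, context, ...) entry point while the constructors move to the private section. Expanded by hand (for illustration only) for HReturn, whose hunk below declares ..._P2(HReturn, HValue*, HValue*), the macro produces:

static HReturn* New(Zone* zone, HValue* context, HValue* p1, HValue* p2) {
  // Allocates in the compilation zone and forwards the context first,
  // exactly as the now-private HReturn(context, value, parameter_count)
  // constructor expects.
  return new(zone) HReturn(context, p1, p2);
}

This is why so many hunks in this header merely shuffle a constructor below a DECLARE_* line: the public surface shrinks to the factory, and the zone-allocation discipline is enforced in one place.
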
- int position() const { return position_; } + virtual int position() const V8_OVERRIDE { return position_; } bool has_position() const { return position_ != RelocInfo::kNoPosition; } void set_position(int position) { ASSERT(!has_position()); @@ -1194,6 +1191,11 @@ class HControlInstruction : public HInstruction { virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; + virtual bool KnownSuccessorBlock(HBasicBlock** block) { + *block = NULL; + return false; + } + HBasicBlock* FirstSuccessor() { return SuccessorCount() > 0 ? SuccessorAt(0) : NULL; } @@ -1201,6 +1203,12 @@ class HControlInstruction : public HInstruction { return SuccessorCount() > 1 ? SuccessorAt(1) : NULL; } + void Not() { + HBasicBlock* swap = SuccessorAt(0); + SetSuccessorAt(0, SuccessorAt(1)); + SetSuccessorAt(1, swap); + } + DECLARE_ABSTRACT_INSTRUCTION(ControlInstruction) }; @@ -1277,53 +1285,74 @@ class HDummyUse V8_FINAL : public HTemplateInstruction<1> { }; -class HDeoptimize V8_FINAL : public HTemplateInstruction<0> { +// Inserts an int3/stop break instruction for debugging purposes. +class HDebugBreak V8_FINAL : public HTemplateInstruction<0> { public: - DECLARE_INSTRUCTION_FACTORY_P2(HDeoptimize, const char*, - Deoptimizer::BailoutType); + DECLARE_INSTRUCTION_FACTORY_P0(HDebugBreak); virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE { return Representation::None(); } - const char* reason() const { return reason_; } - Deoptimizer::BailoutType type() { return type_; } - - DECLARE_CONCRETE_INSTRUCTION(Deoptimize) - - private: - explicit HDeoptimize(const char* reason, Deoptimizer::BailoutType type) - : reason_(reason), type_(type) {} - - const char* reason_; - Deoptimizer::BailoutType type_; + DECLARE_CONCRETE_INSTRUCTION(DebugBreak) }; -// Inserts an int3/stop break instruction for debugging purposes. 
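
The new KnownSuccessorBlock() hook above (default: *block = NULL, return false) establishes a small three-way protocol that the following hunks implement for HGoto, HDeoptimize and the constant comparisons. A sketch of how a simplification pass might consume it; the pass and helper names here are illustrative, not V8's:

// true with non-NULL *block: the successor is statically known.
// true with NULL *block:     no successor is reachable (deoptimization).
// false:                     the branch stays dynamic.
void TrySimplify(HControlInstruction* end) {
  HBasicBlock* succ;
  if (!end->KnownSuccessorBlock(&succ)) return;  // dynamic: nothing to do
  if (succ == NULL) {
    MarkUnreachable(end);        // hypothetical helper, e.g. for HDeoptimize
  } else {
    ReplaceWithGoto(end, succ);  // hypothetical helper, e.g. for HGoto or a
  }                              // comparison of two constant operands
}
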
-class HDebugBreak V8_FINAL : public HTemplateInstruction<0> { +class HGoto V8_FINAL : public HTemplateControlInstruction<1, 0> { public: + explicit HGoto(HBasicBlock* target) { + SetSuccessorAt(0, target); + } + + virtual bool KnownSuccessorBlock(HBasicBlock** block) V8_OVERRIDE { + *block = FirstSuccessor(); + return true; + } + virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE { return Representation::None(); } - DECLARE_CONCRETE_INSTRUCTION(DebugBreak) + virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; + + DECLARE_CONCRETE_INSTRUCTION(Goto) }; -class HGoto V8_FINAL : public HTemplateControlInstruction<1, 0> { +class HDeoptimize V8_FINAL : public HTemplateControlInstruction<1, 0> { public: - explicit HGoto(HBasicBlock* target) { - SetSuccessorAt(0, target); + static HInstruction* New(Zone* zone, + HValue* context, + const char* reason, + Deoptimizer::BailoutType type, + HBasicBlock* unreachable_continuation) { + return new(zone) HDeoptimize(reason, type, unreachable_continuation); + } + + virtual bool KnownSuccessorBlock(HBasicBlock** block) V8_OVERRIDE { + *block = NULL; + return true; } virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE { return Representation::None(); } - virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; + const char* reason() const { return reason_; } + Deoptimizer::BailoutType type() { return type_; } - DECLARE_CONCRETE_INSTRUCTION(Goto) + DECLARE_CONCRETE_INSTRUCTION(Deoptimize) + + private: + explicit HDeoptimize(const char* reason, + Deoptimizer::BailoutType type, + HBasicBlock* unreachable_continuation) + : reason_(reason), type_(type) { + SetSuccessorAt(0, unreachable_continuation); + } + + const char* reason_; + Deoptimizer::BailoutType type_; }; @@ -1345,20 +1374,20 @@ class HUnaryControlInstruction : public HTemplateControlInstruction<2, 1> { class HBranch V8_FINAL : public HUnaryControlInstruction { public: - HBranch(HValue* value, - ToBooleanStub::Types expected_input_types = ToBooleanStub::Types(), - HBasicBlock* true_target = NULL, - HBasicBlock* false_target = NULL) - : HUnaryControlInstruction(value, true_target, false_target), - expected_input_types_(expected_input_types) { - SetFlag(kAllowUndefinedAsNaN); - } + DECLARE_INSTRUCTION_FACTORY_P1(HBranch, HValue*); + DECLARE_INSTRUCTION_FACTORY_P2(HBranch, HValue*, + ToBooleanStub::Types); + DECLARE_INSTRUCTION_FACTORY_P4(HBranch, HValue*, + ToBooleanStub::Types, + HBasicBlock*, HBasicBlock*); virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE { return Representation::None(); } virtual Representation observed_input_representation(int index) V8_OVERRIDE; + virtual bool KnownSuccessorBlock(HBasicBlock** block) V8_OVERRIDE; + ToBooleanStub::Types expected_input_types() const { return expected_input_types_; } @@ -1366,24 +1395,28 @@ class HBranch V8_FINAL : public HUnaryControlInstruction { DECLARE_CONCRETE_INSTRUCTION(Branch) private: + HBranch(HValue* value, + ToBooleanStub::Types expected_input_types = ToBooleanStub::Types(), + HBasicBlock* true_target = NULL, + HBasicBlock* false_target = NULL) + : HUnaryControlInstruction(value, true_target, false_target), + expected_input_types_(expected_input_types) { + SetFlag(kAllowUndefinedAsNaN); + } + ToBooleanStub::Types expected_input_types_; }; class HCompareMap V8_FINAL : public HUnaryControlInstruction { public: - HCompareMap(HValue* value, - Handle<Map> map, - HBasicBlock* true_target = NULL, - HBasicBlock* false_target = NULL) - : HUnaryControlInstruction(value, 
true_target, false_target), - map_(map) { - ASSERT(!map.is_null()); - } + DECLARE_INSTRUCTION_FACTORY_P2(HCompareMap, HValue*, Handle<Map>); + DECLARE_INSTRUCTION_FACTORY_P4(HCompareMap, HValue*, Handle<Map>, + HBasicBlock*, HBasicBlock*); virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; - Handle<Map> map() const { return map_; } + Unique<Map> map() const { return map_; } virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE { return Representation::Tagged(); @@ -1395,7 +1428,16 @@ class HCompareMap V8_FINAL : public HUnaryControlInstruction { virtual int RedefinedOperandIndex() { return 0; } private: - Handle<Map> map_; + HCompareMap(HValue* value, + Handle<Map> map, + HBasicBlock* true_target = NULL, + HBasicBlock* false_target = NULL) + : HUnaryControlInstruction(value, true_target, false_target), + map_(Unique<Map>(map)) { + ASSERT(!map.is_null()); + } + + Unique<Map> map_; }; @@ -1426,18 +1468,8 @@ class HContext V8_FINAL : public HTemplateInstruction<0> { class HReturn V8_FINAL : public HTemplateControlInstruction<0, 3> { public: - static HInstruction* New(Zone* zone, - HValue* context, - HValue* value, - HValue* parameter_count) { - return new(zone) HReturn(value, context, parameter_count); - } - - static HInstruction* New(Zone* zone, - HValue* context, - HValue* value) { - return new(zone) HReturn(value, context, 0); - } + DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HReturn, HValue*, HValue*); + DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P1(HReturn, HValue*); virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE { return Representation::Tagged(); @@ -1452,7 +1484,7 @@ class HReturn V8_FINAL : public HTemplateControlInstruction<0, 3> { DECLARE_CONCRETE_INSTRUCTION(Return) private: - HReturn(HValue* value, HValue* context, HValue* parameter_count) { + HReturn(HValue* context, HValue* value, HValue* parameter_count = 0) { SetOperandAt(0, value); SetOperandAt(1, context); SetOperandAt(2, parameter_count); @@ -1460,6 +1492,20 @@ class HReturn V8_FINAL : public HTemplateControlInstruction<0, 3> { }; +class HAbnormalExit V8_FINAL : public HTemplateControlInstruction<0, 0> { + public: + DECLARE_INSTRUCTION_FACTORY_P0(HAbnormalExit); + + virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE { + return Representation::None(); + } + + DECLARE_CONCRETE_INSTRUCTION(AbnormalExit) + private: + HAbnormalExit() {} +}; + + class HUnaryOperation : public HTemplateInstruction<1> { public: HUnaryOperation(HValue* value, HType type = HType::Tagged()) @@ -1478,11 +1524,7 @@ class HUnaryOperation : public HTemplateInstruction<1> { class HThrow V8_FINAL : public HTemplateInstruction<2> { public: - static HThrow* New(Zone* zone, - HValue* context, - HValue* value) { - return new(zone) HThrow(context, value); - } + DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P1(HThrow, HValue*); virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE { return Representation::Tagged(); @@ -1738,8 +1780,7 @@ class HEnvironmentMarker V8_FINAL : public HTemplateInstruction<1> { public: enum Kind { BIND, LOOKUP }; - HEnvironmentMarker(Kind kind, int index) - : kind_(kind), index_(index), next_simulate_(NULL) { } + DECLARE_INSTRUCTION_FACTORY_P2(HEnvironmentMarker, Kind, int); Kind kind() { return kind_; } int index() { return index_; } @@ -1766,6 +1807,9 @@ class HEnvironmentMarker V8_FINAL : public HTemplateInstruction<1> { DECLARE_CONCRETE_INSTRUCTION(EnvironmentMarker); private: + HEnvironmentMarker(Kind kind, int index) + : kind_(kind), 
index_(index), next_simulate_(NULL) { } + Kind kind_; int index_; HSimulate* next_simulate_; @@ -1783,7 +1827,7 @@ class HStackCheck V8_FINAL : public HTemplateInstruction<1> { kBackwardsBranch }; - DECLARE_INSTRUCTION_FACTORY_P2(HStackCheck, HValue*, Type); + DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P1(HStackCheck, Type); HValue* context() { return OperandAt(0); } @@ -1898,13 +1942,24 @@ class HEnterInlined V8_FINAL : public HTemplateInstruction<0> { class HLeaveInlined V8_FINAL : public HTemplateInstruction<0> { public: - HLeaveInlined() { } + HLeaveInlined(HEnterInlined* entry, + int drop_count) + : entry_(entry), + drop_count_(drop_count) { } virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE { return Representation::None(); } + virtual int argument_delta() const V8_OVERRIDE { + return entry_->arguments_pushed() ? -drop_count_ : 0; + } + DECLARE_CONCRETE_INSTRUCTION(LeaveInlined) + + private: + HEnterInlined* entry_; + int drop_count_; }; @@ -1916,6 +1971,7 @@ class HPushArgument V8_FINAL : public HUnaryOperation { return Representation::Tagged(); } + virtual int argument_delta() const V8_OVERRIDE { return 1; } HValue* argument() { return OperandAt(0); } DECLARE_CONCRETE_INSTRUCTION(PushArgument) @@ -1929,10 +1985,7 @@ class HPushArgument V8_FINAL : public HUnaryOperation { class HThisFunction V8_FINAL : public HTemplateInstruction<0> { public: - HThisFunction() { - set_representation(Representation::Tagged()); - SetFlag(kUseGVN); - } + DECLARE_INSTRUCTION_FACTORY_P0(HThisFunction); virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE { return Representation::None(); @@ -1944,6 +1997,11 @@ class HThisFunction V8_FINAL : public HTemplateInstruction<0> { virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; } private: + HThisFunction() { + set_representation(Representation::Tagged()); + SetFlag(kUseGVN); + } + virtual bool IsDeletable() const V8_OVERRIDE { return true; } }; @@ -1973,22 +2031,9 @@ class HOuterContext V8_FINAL : public HUnaryOperation { class HDeclareGlobals V8_FINAL : public HUnaryOperation { public: - HDeclareGlobals(HValue* context, - Handle<FixedArray> pairs, - int flags) - : HUnaryOperation(context), - pairs_(pairs), - flags_(flags) { - set_representation(Representation::Tagged()); - SetAllSideEffects(); - } - - static HDeclareGlobals* New(Zone* zone, - HValue* context, - Handle<FixedArray> pairs, - int flags) { - return new(zone) HDeclareGlobals(context, pairs, flags); - } + DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HDeclareGlobals, + Handle<FixedArray>, + int); HValue* context() { return OperandAt(0); } Handle<FixedArray> pairs() const { return pairs_; } @@ -2001,6 +2046,16 @@ class HDeclareGlobals V8_FINAL : public HUnaryOperation { } private: + HDeclareGlobals(HValue* context, + Handle<FixedArray> pairs, + int flags) + : HUnaryOperation(context), + pairs_(pairs), + flags_(flags) { + set_representation(Representation::Tagged()); + SetAllSideEffects(); + } + Handle<FixedArray> pairs_; int flags_; }; @@ -2008,14 +2063,7 @@ class HDeclareGlobals V8_FINAL : public HUnaryOperation { class HGlobalObject V8_FINAL : public HUnaryOperation { public: - explicit HGlobalObject(HValue* context) : HUnaryOperation(context) { - set_representation(Representation::Tagged()); - SetFlag(kUseGVN); - } - - static HGlobalObject* New(Zone* zone, HValue* context) { - return new(zone) HGlobalObject(context); - } + DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P0(HGlobalObject); DECLARE_CONCRETE_INSTRUCTION(GlobalObject) @@ -2027,6 
+2075,11 @@ class HGlobalObject V8_FINAL : public HUnaryOperation { virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; } private: + explicit HGlobalObject(HValue* context) : HUnaryOperation(context) { + set_representation(Representation::Tagged()); + SetFlag(kUseGVN); + } + virtual bool IsDeletable() const V8_OVERRIDE { return true; } }; @@ -2068,7 +2121,13 @@ class HCall : public HTemplateInstruction<V> { return HType::Tagged(); } - virtual int argument_count() const { return argument_count_; } + virtual int argument_count() const { + return argument_count_; + } + + virtual int argument_delta() const V8_OVERRIDE { + return -argument_count(); + } virtual bool IsCall() V8_FINAL V8_OVERRIDE { return true; } @@ -2117,16 +2176,7 @@ class HBinaryCall : public HCall<2> { class HInvokeFunction V8_FINAL : public HBinaryCall { public: - HInvokeFunction(HValue* context, HValue* function, int argument_count) - : HBinaryCall(context, function, argument_count) { - } - - static HInvokeFunction* New(Zone* zone, - HValue* context, - HValue* function, - int argument_count) { - return new(zone) HInvokeFunction(context, function, argument_count); - } + DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HInvokeFunction, HValue*, int); HInvokeFunction(HValue* context, HValue* function, @@ -2155,6 +2205,10 @@ class HInvokeFunction V8_FINAL : public HBinaryCall { DECLARE_CONCRETE_INSTRUCTION(InvokeFunction) private: + HInvokeFunction(HValue* context, HValue* function, int argument_count) + : HBinaryCall(context, function, argument_count) { + } + Handle<JSFunction> known_function_; int formal_parameter_count_; }; @@ -2162,10 +2216,9 @@ class HInvokeFunction V8_FINAL : public HBinaryCall { class HCallConstantFunction V8_FINAL : public HCall<0> { public: - HCallConstantFunction(Handle<JSFunction> function, int argument_count) - : HCall<0>(argument_count), - function_(function), - formal_parameter_count_(function->shared()->formal_parameter_count()) {} + DECLARE_INSTRUCTION_FACTORY_P2(HCallConstantFunction, + Handle<JSFunction>, + int); Handle<JSFunction> function() const { return function_; } int formal_parameter_count() const { return formal_parameter_count_; } @@ -2184,6 +2237,11 @@ class HCallConstantFunction V8_FINAL : public HCall<0> { DECLARE_CONCRETE_INSTRUCTION(CallConstantFunction) private: + HCallConstantFunction(Handle<JSFunction> function, int argument_count) + : HCall<0>(argument_count), + function_(function), + formal_parameter_count_(function->shared()->formal_parameter_count()) {} + Handle<JSFunction> function_; int formal_parameter_count_; }; @@ -2191,22 +2249,23 @@ class HCallConstantFunction V8_FINAL : public HCall<0> { class HCallKeyed V8_FINAL : public HBinaryCall { public: - HCallKeyed(HValue* context, HValue* key, int argument_count) - : HBinaryCall(context, key, argument_count) { - } + DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HCallKeyed, HValue*, int); HValue* context() { return first(); } HValue* key() { return second(); } DECLARE_CONCRETE_INSTRUCTION(CallKeyed) + + private: + HCallKeyed(HValue* context, HValue* key, int argument_count) + : HBinaryCall(context, key, argument_count) { + } }; class HCallNamed V8_FINAL : public HUnaryCall { public: - HCallNamed(HValue* context, Handle<String> name, int argument_count) - : HUnaryCall(context, argument_count), name_(name) { - } + DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HCallNamed, Handle<String>, int); virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; @@ -2216,42 +2275,33 @@ class HCallNamed V8_FINAL : public HUnaryCall 
{ DECLARE_CONCRETE_INSTRUCTION(CallNamed) private: + HCallNamed(HValue* context, Handle<String> name, int argument_count) + : HUnaryCall(context, argument_count), name_(name) { + } + Handle<String> name_; }; class HCallFunction V8_FINAL : public HBinaryCall { public: - HCallFunction(HValue* context, HValue* function, int argument_count) - : HBinaryCall(context, function, argument_count) { - } - - static HCallFunction* New(Zone* zone, - HValue* context, - HValue* function, - int argument_count) { - return new(zone) HCallFunction(context, function, argument_count); - } + DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HCallFunction, HValue*, int); HValue* context() { return first(); } HValue* function() { return second(); } DECLARE_CONCRETE_INSTRUCTION(CallFunction) + + private: + HCallFunction(HValue* context, HValue* function, int argument_count) + : HBinaryCall(context, function, argument_count) { + } }; class HCallGlobal V8_FINAL : public HUnaryCall { public: - HCallGlobal(HValue* context, Handle<String> name, int argument_count) - : HUnaryCall(context, argument_count), name_(name) { - } - - static HCallGlobal* New(Zone* zone, - HValue* context, - Handle<String> name, - int argument_count) { - return new(zone) HCallGlobal(context, name, argument_count); - } + DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HCallGlobal, Handle<String>, int); virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; @@ -2261,16 +2311,17 @@ class HCallGlobal V8_FINAL : public HUnaryCall { DECLARE_CONCRETE_INSTRUCTION(CallGlobal) private: + HCallGlobal(HValue* context, Handle<String> name, int argument_count) + : HUnaryCall(context, argument_count), name_(name) { + } + Handle<String> name_; }; class HCallKnownGlobal V8_FINAL : public HCall<0> { public: - HCallKnownGlobal(Handle<JSFunction> target, int argument_count) - : HCall<0>(argument_count), - target_(target), - formal_parameter_count_(target->shared()->formal_parameter_count()) { } + DECLARE_INSTRUCTION_FACTORY_P2(HCallKnownGlobal, Handle<JSFunction>, int); virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; @@ -2284,6 +2335,11 @@ class HCallKnownGlobal V8_FINAL : public HCall<0> { DECLARE_CONCRETE_INSTRUCTION(CallKnownGlobal) private: + HCallKnownGlobal(Handle<JSFunction> target, int argument_count) + : HCall<0>(argument_count), + target_(target), + formal_parameter_count_(target->shared()->formal_parameter_count()) { } + Handle<JSFunction> target_; int formal_parameter_count_; }; @@ -2291,23 +2347,26 @@ class HCallKnownGlobal V8_FINAL : public HCall<0> { class HCallNew V8_FINAL : public HBinaryCall { public: - HCallNew(HValue* context, HValue* constructor, int argument_count) - : HBinaryCall(context, constructor, argument_count) {} + DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HCallNew, HValue*, int); HValue* context() { return first(); } HValue* constructor() { return second(); } DECLARE_CONCRETE_INSTRUCTION(CallNew) + + private: + HCallNew(HValue* context, HValue* constructor, int argument_count) + : HBinaryCall(context, constructor, argument_count) {} }; class HCallNewArray V8_FINAL : public HBinaryCall { public: - HCallNewArray(HValue* context, HValue* constructor, int argument_count, - Handle<Cell> type_cell, ElementsKind elements_kind) - : HBinaryCall(context, constructor, argument_count), - elements_kind_(elements_kind), - type_cell_(type_cell) {} + DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P4(HCallNewArray, + HValue*, + int, + Handle<Cell>, + ElementsKind); HValue* context() { return first(); } HValue* constructor() { return second(); } @@ 
-2323,6 +2382,12 @@ class HCallNewArray V8_FINAL : public HBinaryCall { DECLARE_CONCRETE_INSTRUCTION(CallNewArray) private: + HCallNewArray(HValue* context, HValue* constructor, int argument_count, + Handle<Cell> type_cell, ElementsKind elements_kind) + : HBinaryCall(context, constructor, argument_count), + elements_kind_(elements_kind), + type_cell_(type_cell) {} + ElementsKind elements_kind_; Handle<Cell> type_cell_; }; @@ -2330,19 +2395,20 @@ class HCallNewArray V8_FINAL : public HBinaryCall { class HCallRuntime V8_FINAL : public HCall<1> { public: - static HCallRuntime* New(Zone* zone, - HValue* context, - Handle<String> name, - const Runtime::Function* c_function, - int argument_count) { - return new(zone) HCallRuntime(context, name, c_function, argument_count); - } + DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P3(HCallRuntime, + Handle<String>, + const Runtime::Function*, + int); virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; HValue* context() { return OperandAt(0); } const Runtime::Function* function() const { return c_function_; } Handle<String> name() const { return name_; } + SaveFPRegsMode save_doubles() const { return save_doubles_; } + void set_save_doubles(SaveFPRegsMode save_doubles) { + save_doubles_ = save_doubles; + } virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE { return Representation::Tagged(); @@ -2355,12 +2421,14 @@ class HCallRuntime V8_FINAL : public HCall<1> { Handle<String> name, const Runtime::Function* c_function, int argument_count) - : HCall<1>(argument_count), c_function_(c_function), name_(name) { + : HCall<1>(argument_count), c_function_(c_function), name_(name), + save_doubles_(kDontSaveFPRegs) { SetOperandAt(0, context); } const Runtime::Function* c_function_; Handle<String> name_; + SaveFPRegsMode save_doubles_; }; @@ -2509,6 +2577,40 @@ class HUnaryMathOperation V8_FINAL : public HTemplateInstruction<2> { }; +class HLoadRoot V8_FINAL : public HTemplateInstruction<0> { + public: + DECLARE_INSTRUCTION_FACTORY_P1(HLoadRoot, Heap::RootListIndex); + DECLARE_INSTRUCTION_FACTORY_P2(HLoadRoot, Heap::RootListIndex, HType); + + virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE { + return Representation::None(); + } + + Heap::RootListIndex index() const { return index_; } + + DECLARE_CONCRETE_INSTRUCTION(LoadRoot) + + protected: + virtual bool DataEquals(HValue* other) V8_OVERRIDE { + HLoadRoot* b = HLoadRoot::cast(other); + return index_ == b->index_; + } + + private: + HLoadRoot(Heap::RootListIndex index, HType type = HType::Tagged()) + : HTemplateInstruction<0>(type), index_(index) { + SetFlag(kUseGVN); + // TODO(bmeurer): We'll need kDependsOnRoots once we add the + // corresponding HStoreRoot instruction. 
+ SetGVNFlag(kDependsOnCalls); + } + + virtual bool IsDeletable() const V8_OVERRIDE { return true; } + + const Heap::RootListIndex index_; +}; + + class HLoadExternalArrayPointer V8_FINAL : public HUnaryOperation { public: DECLARE_INSTRUCTION_FACTORY_P1(HLoadExternalArrayPointer, HValue*); @@ -2553,7 +2655,6 @@ class HCheckMaps V8_FINAL : public HTemplateInstruction<2> { for (int i = 0; i < maps->length(); i++) { check_map->Add(maps->at(i), zone); } - check_map->map_set_.Sort(); return check_map; } @@ -2568,38 +2669,26 @@ class HCheckMaps V8_FINAL : public HTemplateInstruction<2> { virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; HValue* value() { return OperandAt(0); } - SmallMapList* map_set() { return &map_set_; } - ZoneList<UniqueValueId>* map_unique_ids() { return &map_unique_ids_; } - bool has_migration_target() { + Unique<Map> first_map() const { return map_set_.at(0); } + UniqueSet<Map> map_set() const { return map_set_; } + + bool has_migration_target() const { return has_migration_target_; } - virtual void FinalizeUniqueValueId() V8_OVERRIDE; - DECLARE_CONCRETE_INSTRUCTION(CheckMaps) protected: virtual bool DataEquals(HValue* other) V8_OVERRIDE { - ASSERT_EQ(map_set_.length(), map_unique_ids_.length()); - HCheckMaps* b = HCheckMaps::cast(other); - // Relies on the fact that map_set has been sorted before. - if (map_unique_ids_.length() != b->map_unique_ids_.length()) { - return false; - } - for (int i = 0; i < map_unique_ids_.length(); i++) { - if (map_unique_ids_.at(i) != b->map_unique_ids_.at(i)) { - return false; - } - } - return true; + return this->map_set_.Equals(&HCheckMaps::cast(other)->map_set_); } virtual int RedefinedOperandIndex() { return 0; } private: void Add(Handle<Map> map, Zone* zone) { - map_set_.Add(map, zone); + map_set_.Add(Unique<Map>(map), zone); if (!has_migration_target_ && map->is_migration_target()) { has_migration_target_ = true; SetGVNFlag(kChangesNewSpacePromotion); @@ -2609,10 +2698,9 @@ class HCheckMaps V8_FINAL : public HTemplateInstruction<2> { // Clients should use one of the static New* methods above. HCheckMaps(HValue* value, Zone *zone, HValue* typecheck) : HTemplateInstruction<2>(value->type()), - omit_(false), has_migration_target_(false), map_unique_ids_(0, zone) { + omit_(false), has_migration_target_(false) { SetOperandAt(0, value); // Use the object value for the dependency if NULL is passed. - // TODO(titzer): do GVN flags already express this dependency? SetOperandAt(1, typecheck != NULL ? typecheck : value); set_representation(Representation::Tagged()); SetFlag(kUseGVN); @@ -2621,36 +2709,33 @@ class HCheckMaps V8_FINAL : public HTemplateInstruction<2> { SetGVNFlag(kDependsOnElementsKind); } - void omit(CompilationInfo* info) { - omit_ = true; - for (int i = 0; i < map_set_.length(); i++) { - Handle<Map> map = map_set_.at(i); - if (!map->CanTransition()) continue; - map->AddDependentCompilationInfo(DependentCode::kPrototypeCheckGroup, - info); - } - } - bool omit_; bool has_migration_target_; - SmallMapList map_set_; - ZoneList<UniqueValueId> map_unique_ids_; + UniqueSet<Map> map_set_; }; class HCheckValue V8_FINAL : public HUnaryOperation { public: static HCheckValue* New(Zone* zone, HValue* context, - HValue* value, Handle<JSFunction> target) { - bool in_new_space = zone->isolate()->heap()->InNewSpace(*target); + HValue* value, Handle<JSFunction> func) { + bool in_new_space = zone->isolate()->heap()->InNewSpace(*func); + // NOTE: We create an uninitialized Unique and initialize it later. 
+ // This is because a JSFunction can move due to GC during graph creation. + // TODO(titzer): This is a migration crutch. Replace with some kind of + // Uniqueness scope later. + Unique<JSFunction> target = Unique<JSFunction>::CreateUninitialized(func); HCheckValue* check = new(zone) HCheckValue(value, target, in_new_space); return check; } static HCheckValue* New(Zone* zone, HValue* context, - HValue* value, Handle<Map> map, UniqueValueId id) { - HCheckValue* check = new(zone) HCheckValue(value, map, false); - check->object_unique_id_ = id; - return check; + HValue* value, Unique<HeapObject> target, + bool object_in_new_space) { + return new(zone) HCheckValue(value, target, object_in_new_space); + } + + virtual void FinalizeUniqueness() V8_OVERRIDE { + object_ = Unique<HeapObject>(object_.handle()); } virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE { @@ -2664,11 +2749,7 @@ class HCheckValue V8_FINAL : public HUnaryOperation { virtual void Verify() V8_OVERRIDE; #endif - virtual void FinalizeUniqueValueId() V8_OVERRIDE { - object_unique_id_ = UniqueValueId(object_); - } - - Handle<HeapObject> object() const { return object_; } + Unique<HeapObject> object() const { return object_; } bool object_in_new_space() const { return object_in_new_space_; } DECLARE_CONCRETE_INSTRUCTION(CheckValue) @@ -2676,38 +2757,35 @@ class HCheckValue V8_FINAL : public HUnaryOperation { protected: virtual bool DataEquals(HValue* other) V8_OVERRIDE { HCheckValue* b = HCheckValue::cast(other); - return object_unique_id_ == b->object_unique_id_; + return object_ == b->object_; } private: - HCheckValue(HValue* value, Handle<HeapObject> object, bool in_new_space) + HCheckValue(HValue* value, Unique<HeapObject> object, + bool object_in_new_space) : HUnaryOperation(value, value->type()), - object_(object), object_in_new_space_(in_new_space) { + object_(object), + object_in_new_space_(object_in_new_space) { set_representation(Representation::Tagged()); SetFlag(kUseGVN); } - Handle<HeapObject> object_; - UniqueValueId object_unique_id_; + Unique<HeapObject> object_; bool object_in_new_space_; }; class HCheckInstanceType V8_FINAL : public HUnaryOperation { public: - static HCheckInstanceType* NewIsSpecObject(HValue* value, Zone* zone) { - return new(zone) HCheckInstanceType(value, IS_SPEC_OBJECT); - } - static HCheckInstanceType* NewIsJSArray(HValue* value, Zone* zone) { - return new(zone) HCheckInstanceType(value, IS_JS_ARRAY); - } - static HCheckInstanceType* NewIsString(HValue* value, Zone* zone) { - return new(zone) HCheckInstanceType(value, IS_STRING); - } - static HCheckInstanceType* NewIsInternalizedString( - HValue* value, Zone* zone) { - return new(zone) HCheckInstanceType(value, IS_INTERNALIZED_STRING); - } + enum Check { + IS_SPEC_OBJECT, + IS_JS_ARRAY, + IS_STRING, + IS_INTERNALIZED_STRING, + LAST_INTERVAL_CHECK = IS_JS_ARRAY + }; + + DECLARE_INSTRUCTION_FACTORY_P2(HCheckInstanceType, HValue*, Check); virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; @@ -2735,14 +2813,6 @@ class HCheckInstanceType V8_FINAL : public HUnaryOperation { virtual int RedefinedOperandIndex() { return 0; } private: - enum Check { - IS_SPEC_OBJECT, - IS_JS_ARRAY, - IS_STRING, - IS_INTERNALIZED_STRING, - LAST_INTERVAL_CHECK = IS_JS_ARRAY - }; - const char* GetCheckName(); HCheckInstanceType(HValue* value, Check check) @@ -2784,21 +2854,6 @@ class HCheckSmi V8_FINAL : public HUnaryOperation { }; -class HIsNumberAndBranch V8_FINAL : public HUnaryControlInstruction { - public: - explicit 
HIsNumberAndBranch(HValue* value) - : HUnaryControlInstruction(value, NULL, NULL) { - SetFlag(kFlexibleRepresentation); - } - - virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE { - return Representation::None(); - } - - DECLARE_CONCRETE_INSTRUCTION(IsNumberAndBranch) -}; - - class HCheckHeapObject V8_FINAL : public HUnaryOperation { public: DECLARE_INSTRUCTION_FACTORY_P1(HCheckHeapObject, HValue*); @@ -3090,6 +3145,8 @@ class HPhi V8_FINAL : public HValue { bool IsReceiver() const { return merged_index_ == 0; } bool HasMergedIndex() const { return merged_index_ != kInvalidMergedIndex; } + virtual int position() const V8_OVERRIDE; + int merged_index() const { return merged_index_; } InductionVariableData* induction_variable_data() { @@ -3260,6 +3317,8 @@ class HCapturedObject V8_FINAL : public HDematerializedObject { // Replay effects of this instruction on the given environment. void ReplayEnvironment(HEnvironment* env); + virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; + DECLARE_CONCRETE_INSTRUCTION(CapturedObject) private: @@ -3273,7 +3332,6 @@ class HConstant V8_FINAL : public HTemplateInstruction<0> { DECLARE_INSTRUCTION_FACTORY_P2(HConstant, int32_t, Representation); DECLARE_INSTRUCTION_FACTORY_P1(HConstant, double); DECLARE_INSTRUCTION_FACTORY_P1(HConstant, Handle<Object>); - DECLARE_INSTRUCTION_FACTORY_P2(HConstant, Handle<Map>, UniqueValueId); DECLARE_INSTRUCTION_FACTORY_P1(HConstant, ExternalReference); static HConstant* CreateAndInsertAfter(Zone* zone, @@ -3298,16 +3356,27 @@ class HConstant V8_FINAL : public HTemplateInstruction<0> { return new_constant; } + static HConstant* CreateAndInsertBefore(Zone* zone, + Unique<Object> unique, + bool is_not_in_new_space, + HInstruction* instruction) { + HConstant* new_constant = new(zone) HConstant(unique, + Representation::Tagged(), HType::Tagged(), false, is_not_in_new_space, + false, false); + new_constant->InsertBefore(instruction); + return new_constant; + } + Handle<Object> handle(Isolate* isolate) { - if (handle_.is_null()) { - Factory* factory = isolate->factory(); + if (object_.handle().is_null()) { // Default arguments to is_not_in_new_space depend on this heap number - // to be tenured so that it's guaranteed not be be located in new space. - handle_ = factory->NewNumber(double_value_, TENURED); + // to be tenured so that it's guaranteed not to be located in new space. 
+ object_ = Unique<Object>::CreateUninitialized( + isolate->factory()->NewNumber(double_value_, TENURED)); } AllowDeferredHandleDereference smi_check; - ASSERT(has_int32_value_ || !handle_->IsSmi()); - return handle_; + ASSERT(has_int32_value_ || !object_.handle()->IsSmi()); + return object_.handle(); } bool HasMap(Handle<Map> map) { @@ -3341,16 +3410,18 @@ class HConstant V8_FINAL : public HTemplateInstruction<0> { return false; } - ASSERT(!handle_.is_null()); + ASSERT(!object_.handle().is_null()); Heap* heap = isolate()->heap(); - ASSERT(unique_id_ != UniqueValueId::minus_zero_value(heap)); - ASSERT(unique_id_ != UniqueValueId::nan_value(heap)); - return unique_id_ == UniqueValueId::undefined_value(heap) || - unique_id_ == UniqueValueId::null_value(heap) || - unique_id_ == UniqueValueId::true_value(heap) || - unique_id_ == UniqueValueId::false_value(heap) || - unique_id_ == UniqueValueId::the_hole_value(heap) || - unique_id_ == UniqueValueId::empty_string(heap); + ASSERT(!object_.IsKnownGlobal(heap->minus_zero_value())); + ASSERT(!object_.IsKnownGlobal(heap->nan_value())); + return + object_.IsKnownGlobal(heap->undefined_value()) || + object_.IsKnownGlobal(heap->null_value()) || + object_.IsKnownGlobal(heap->true_value()) || + object_.IsKnownGlobal(heap->false_value()) || + object_.IsKnownGlobal(heap->the_hole_value()) || + object_.IsKnownGlobal(heap->empty_string()) || + object_.IsKnownGlobal(heap->empty_fixed_array()); } bool IsCell() const { @@ -3389,11 +3460,7 @@ class HConstant V8_FINAL : public HTemplateInstruction<0> { if (HasDoubleValue() && FixedDoubleArray::is_the_hole_nan(double_value_)) { return true; } - Heap* heap = isolate()->heap(); - if (!handle_.is_null() && *handle_ == heap->the_hole_value()) { - return true; - } - return false; + return object_.IsKnownGlobal(isolate()->heap()->the_hole_value()); } bool HasNumberValue() const { return has_double_value_; } int32_t NumberValueAsInteger32() const { @@ -3405,12 +3472,12 @@ class HConstant V8_FINAL : public HTemplateInstruction<0> { } bool HasStringValue() const { if (has_double_value_ || has_int32_value_) return false; - ASSERT(!handle_.is_null()); + ASSERT(!object_.handle().is_null()); return type_.IsString(); } Handle<String> StringValue() const { ASSERT(HasStringValue()); - return Handle<String>::cast(handle_); + return Handle<String>::cast(object_.handle()); } bool HasInternalizedStringValue() const { return HasStringValue() && is_internalized_string_; @@ -3434,21 +3501,20 @@ class HConstant V8_FINAL : public HTemplateInstruction<0> { } else if (has_external_reference_value_) { return reinterpret_cast<intptr_t>(external_reference_value_.address()); } else { - ASSERT(!handle_.is_null()); - return unique_id_.Hashcode(); + ASSERT(!object_.handle().is_null()); + return object_.Hashcode(); } } - virtual void FinalizeUniqueValueId() V8_OVERRIDE { + virtual void FinalizeUniqueness() V8_OVERRIDE { if (!has_double_value_ && !has_external_reference_value_) { - ASSERT(!handle_.is_null()); - unique_id_ = UniqueValueId(handle_); + ASSERT(!object_.handle().is_null()); + object_ = Unique<Object>(object_.handle()); } } - bool UniqueValueIdsMatch(UniqueValueId other) { - return !has_double_value_ && !has_external_reference_value_ && - unique_id_ == other; + Unique<Object> GetUnique() const { + return object_; } #ifdef DEBUG @@ -3474,9 +3540,13 @@ class HConstant V8_FINAL : public HTemplateInstruction<0> { external_reference_value_ == other_constant->external_reference_value_; } else { - ASSERT(!handle_.is_null()); - return 
!other_constant->handle_.is_null() && - unique_id_ == other_constant->unique_id_; + if (other_constant->has_int32_value_ || + other_constant->has_double_value_ || + other_constant->has_external_reference_value_) { + return false; + } + ASSERT(!object_.handle().is_null()); + return other_constant->object_ == object_; } } @@ -3486,33 +3556,30 @@ class HConstant V8_FINAL : public HTemplateInstruction<0> { HConstant(int32_t value, Representation r = Representation::None(), bool is_not_in_new_space = true, - Handle<Object> optional_handle = Handle<Object>::null()); + Unique<Object> optional = Unique<Object>(Handle<Object>::null())); HConstant(double value, Representation r = Representation::None(), bool is_not_in_new_space = true, - Handle<Object> optional_handle = Handle<Object>::null()); - HConstant(Handle<Object> handle, - UniqueValueId unique_id, + Unique<Object> optional = Unique<Object>(Handle<Object>::null())); + HConstant(Unique<Object> unique, Representation r, HType type, bool is_internalized_string, bool is_not_in_new_space, bool is_cell, bool boolean_value); - HConstant(Handle<Map> handle, - UniqueValueId unique_id); + explicit HConstant(ExternalReference reference); void Initialize(Representation r); virtual bool IsDeletable() const V8_OVERRIDE { return true; } - // If this is a numerical constant, handle_ either points to to the + // If this is a numerical constant, object_ either points to the // HeapObject the constant originated from or is null. If the - // constant is non-numeric, handle_ always points to a valid + // constant is non-numeric, object_ always points to a valid // constant HeapObject. - Handle<Object> handle_; - UniqueValueId unique_id_; + Unique<Object> object_; // We store the HConstant in the most specific form safely possible. // The two flags, has_int32_value_ and has_double_value_ tell us if @@ -3649,17 +3716,8 @@ class HWrapReceiver V8_FINAL : public HTemplateInstruction<2> { class HApplyArguments V8_FINAL : public HTemplateInstruction<4> { public: - HApplyArguments(HValue* function, - HValue* receiver, - HValue* length, - HValue* elements) { - set_representation(Representation::Tagged()); - SetOperandAt(0, function); - SetOperandAt(1, receiver); - SetOperandAt(2, length); - SetOperandAt(3, elements); - SetAllSideEffects(); - } + DECLARE_INSTRUCTION_FACTORY_P4(HApplyArguments, HValue*, HValue*, HValue*, + HValue*); virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE { // The length is untagged, all other inputs are tagged. 
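
The HConstant changes above, like the HCheckMaps and HCheckValue changes earlier, replace the deleted UniqueValueId with Unique<T> from the newly included unique.h. A minimal sketch of the idea behind such a type, assuming V8's Handle<T> and Address; the real class additionally supports CreateUninitialized() plus a later FinalizeUniqueness() step, as the HCheckValue NOTE earlier explains:

template <typename T>
class UniqueSketch {
 public:
  // Capture the object's address once, at a point where no GC can move it;
  // afterwards, equality and hashing never dereference the handle.
  explicit UniqueSketch(Handle<T> handle)
      : raw_address_(reinterpret_cast<Address>(*handle)), handle_(handle) {}

  bool operator==(const UniqueSketch<T>& other) const {
    return raw_address_ == other.raw_address_;  // pointer identity
  }
  intptr_t Hashcode() const {
    return reinterpret_cast<intptr_t>(raw_address_);
  }
  Handle<T> handle() const { return handle_; }

 private:
  Address raw_address_;
  Handle<T> handle_;
};

Typed wrappers like Unique<Map> are what make the map(), cell() and map_set() accessor changes above type-check, while keeping the GC-safe compare-by-address behavior that UniqueValueId provided.
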
@@ -3674,6 +3732,19 @@ class HApplyArguments V8_FINAL : public HTemplateInstruction<4> { HValue* elements() { return OperandAt(3); } DECLARE_CONCRETE_INSTRUCTION(ApplyArguments) + + private: + HApplyArguments(HValue* function, + HValue* receiver, + HValue* length, + HValue* elements) { + set_representation(Representation::Tagged()); + SetOperandAt(0, function); + SetOperandAt(1, receiver); + SetOperandAt(2, length); + SetOperandAt(3, elements); + SetAllSideEffects(); + } }; @@ -3731,13 +3802,7 @@ class HArgumentsLength V8_FINAL : public HUnaryOperation { class HAccessArgumentsAt V8_FINAL : public HTemplateInstruction<3> { public: - HAccessArgumentsAt(HValue* arguments, HValue* length, HValue* index) { - set_representation(Representation::Tagged()); - SetFlag(kUseGVN); - SetOperandAt(0, arguments); - SetOperandAt(1, length); - SetOperandAt(2, index); - } + DECLARE_INSTRUCTION_FACTORY_P3(HAccessArgumentsAt, HValue*, HValue*, HValue*); virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; @@ -3754,6 +3819,15 @@ class HAccessArgumentsAt V8_FINAL : public HTemplateInstruction<3> { DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt) + private: + HAccessArgumentsAt(HValue* arguments, HValue* length, HValue* index) { + set_representation(Representation::Tagged()); + SetFlag(kUseGVN); + SetOperandAt(0, arguments); + SetOperandAt(1, length); + SetOperandAt(2, index); + } + virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; } }; @@ -3882,13 +3956,14 @@ class HBitwiseBinaryOperation : public HBinaryOperation { } virtual void RepresentationChanged(Representation to) V8_OVERRIDE { - if (!to.IsTagged()) { - ASSERT(to.IsSmiOrInteger32()); - ClearAllSideEffects(); - SetFlag(kUseGVN); - } else { + if (to.IsTagged()) SetGVNFlag(kChangesNewSpacePromotion); + if (to.IsTagged() && + (left()->ToNumberCanBeObserved() || right()->ToNumberCanBeObserved())) { SetAllSideEffects(); ClearFlag(kUseGVN); + } else { + ClearAllSideEffects(); + SetFlag(kUseGVN); } } @@ -3920,12 +3995,9 @@ class HBitwiseBinaryOperation : public HBinaryOperation { class HMathFloorOfDiv V8_FINAL : public HBinaryOperation { public: - static HMathFloorOfDiv* New(Zone* zone, - HValue* context, - HValue* left, - HValue* right) { - return new(zone) HMathFloorOfDiv(context, left, right); - } + DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HMathFloorOfDiv, + HValue*, + HValue*); virtual HValue* EnsureAndPropagateNotMinusZero( BitVector* visited) V8_OVERRIDE; @@ -3961,7 +4033,9 @@ class HArithmeticBinaryOperation : public HBinaryOperation { } virtual void RepresentationChanged(Representation to) V8_OVERRIDE { - if (to.IsTagged()) { + if (to.IsTagged()) SetGVNFlag(kChangesNewSpacePromotion); + if (to.IsTagged() && + (left()->ToNumberCanBeObserved() || right()->ToNumberCanBeObserved())) { SetAllSideEffects(); ClearFlag(kUseGVN); } else { @@ -3971,7 +4045,6 @@ class HArithmeticBinaryOperation : public HBinaryOperation { } DECLARE_ABSTRACT_INSTRUCTION(ArithmeticBinaryOperation) - private: virtual bool IsDeletable() const V8_OVERRIDE { return true; } }; @@ -3979,16 +4052,8 @@ class HArithmeticBinaryOperation : public HBinaryOperation { class HCompareGeneric V8_FINAL : public HBinaryOperation { public: - HCompareGeneric(HValue* context, - HValue* left, - HValue* right, - Token::Value token) - : HBinaryOperation(context, left, right, HType::Boolean()), - token_(token) { - ASSERT(Token::IsCompareOp(token)); - set_representation(Representation::Tagged()); - SetAllSideEffects(); - } + DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P3(HCompareGeneric, 
HValue*, + HValue*, Token::Value); virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE { return index == 0 @@ -4002,19 +4067,28 @@ class HCompareGeneric V8_FINAL : public HBinaryOperation { DECLARE_CONCRETE_INSTRUCTION(CompareGeneric) private: + HCompareGeneric(HValue* context, + HValue* left, + HValue* right, + Token::Value token) + : HBinaryOperation(context, left, right, HType::Boolean()), + token_(token) { + ASSERT(Token::IsCompareOp(token)); + set_representation(Representation::Tagged()); + SetAllSideEffects(); + } + Token::Value token_; }; class HCompareNumericAndBranch : public HTemplateControlInstruction<2, 2> { public: - HCompareNumericAndBranch(HValue* left, HValue* right, Token::Value token) - : token_(token) { - SetFlag(kFlexibleRepresentation); - ASSERT(Token::IsCompareOp(token)); - SetOperandAt(0, left); - SetOperandAt(1, right); - } + DECLARE_INSTRUCTION_FACTORY_P3(HCompareNumericAndBranch, + HValue*, HValue*, Token::Value); + DECLARE_INSTRUCTION_FACTORY_P5(HCompareNumericAndBranch, + HValue*, HValue*, Token::Value, + HBasicBlock*, HBasicBlock*); HValue* left() { return OperandAt(0); } HValue* right() { return OperandAt(1); } @@ -4040,25 +4114,30 @@ class HCompareNumericAndBranch : public HTemplateControlInstruction<2, 2> { DECLARE_CONCRETE_INSTRUCTION(CompareNumericAndBranch) private: + HCompareNumericAndBranch(HValue* left, + HValue* right, + Token::Value token, + HBasicBlock* true_target = NULL, + HBasicBlock* false_target = NULL) + : token_(token) { + SetFlag(kFlexibleRepresentation); + ASSERT(Token::IsCompareOp(token)); + SetOperandAt(0, left); + SetOperandAt(1, right); + SetSuccessorAt(0, true_target); + SetSuccessorAt(1, false_target); + } + Representation observed_input_representation_[2]; Token::Value token_; }; -class HCompareHoleAndBranch V8_FINAL - : public HTemplateControlInstruction<2, 1> { +class HCompareHoleAndBranch V8_FINAL : public HUnaryControlInstruction { public: - // TODO(danno): make this private when the IfBuilder properly constructs - // control flow instructions. - explicit HCompareHoleAndBranch(HValue* object) { - SetFlag(kFlexibleRepresentation); - SetFlag(kAllowUndefinedAsNaN); - SetOperandAt(0, object); - } - DECLARE_INSTRUCTION_FACTORY_P1(HCompareHoleAndBranch, HValue*); - - HValue* object() { return OperandAt(0); } + DECLARE_INSTRUCTION_FACTORY_P3(HCompareHoleAndBranch, HValue*, + HBasicBlock*, HBasicBlock*); virtual void InferRepresentation( HInferRepresentationPhase* h_infer) V8_OVERRIDE; @@ -4067,23 +4146,44 @@ class HCompareHoleAndBranch V8_FINAL return representation(); } - virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; - DECLARE_CONCRETE_INSTRUCTION(CompareHoleAndBranch) + + private: + HCompareHoleAndBranch(HValue* value, + HBasicBlock* true_target = NULL, + HBasicBlock* false_target = NULL) + : HUnaryControlInstruction(value, true_target, false_target) { + SetFlag(kFlexibleRepresentation); + SetFlag(kAllowUndefinedAsNaN); + } }; class HCompareObjectEqAndBranch : public HTemplateControlInstruction<2, 2> { public: - // TODO(danno): make this private when the IfBuilder properly constructs - // control flow instructions. HCompareObjectEqAndBranch(HValue* left, - HValue* right) { + HValue* right, + HBasicBlock* true_target = NULL, + HBasicBlock* false_target = NULL) { + // TODO(danno): make this private when the IfBuilder properly constructs + // control flow instructions. 
+ ASSERT(!left->IsConstant() || + (!HConstant::cast(left)->HasInteger32Value() || + HConstant::cast(left)->HasSmiValue())); + ASSERT(!right->IsConstant() || + (!HConstant::cast(right)->HasInteger32Value() || + HConstant::cast(right)->HasSmiValue())); SetOperandAt(0, left); SetOperandAt(1, right); + SetSuccessorAt(0, true_target); + SetSuccessorAt(1, false_target); } DECLARE_INSTRUCTION_FACTORY_P2(HCompareObjectEqAndBranch, HValue*, HValue*); + DECLARE_INSTRUCTION_FACTORY_P4(HCompareObjectEqAndBranch, HValue*, HValue*, + HBasicBlock*, HBasicBlock*); + + virtual bool KnownSuccessorBlock(HBasicBlock** block) V8_OVERRIDE; HValue* left() { return OperandAt(0); } HValue* right() { return OperandAt(1); } @@ -4104,33 +4204,49 @@ class HCompareObjectEqAndBranch : public HTemplateControlInstruction<2, 2> { class HIsObjectAndBranch V8_FINAL : public HUnaryControlInstruction { public: - explicit HIsObjectAndBranch(HValue* value) - : HUnaryControlInstruction(value, NULL, NULL) { } + DECLARE_INSTRUCTION_FACTORY_P1(HIsObjectAndBranch, HValue*); + DECLARE_INSTRUCTION_FACTORY_P3(HIsObjectAndBranch, HValue*, + HBasicBlock*, HBasicBlock*); virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE { return Representation::Tagged(); } DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch) + + private: + HIsObjectAndBranch(HValue* value, + HBasicBlock* true_target = NULL, + HBasicBlock* false_target = NULL) + : HUnaryControlInstruction(value, true_target, false_target) {} }; + class HIsStringAndBranch V8_FINAL : public HUnaryControlInstruction { public: - explicit HIsStringAndBranch(HValue* value) - : HUnaryControlInstruction(value, NULL, NULL) { } + DECLARE_INSTRUCTION_FACTORY_P1(HIsStringAndBranch, HValue*); + DECLARE_INSTRUCTION_FACTORY_P3(HIsStringAndBranch, HValue*, + HBasicBlock*, HBasicBlock*); virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE { return Representation::Tagged(); } DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch) + + private: + HIsStringAndBranch(HValue* value, + HBasicBlock* true_target = NULL, + HBasicBlock* false_target = NULL) + : HUnaryControlInstruction(value, true_target, false_target) {} }; class HIsSmiAndBranch V8_FINAL : public HUnaryControlInstruction { public: - explicit HIsSmiAndBranch(HValue* value) - : HUnaryControlInstruction(value, NULL, NULL) { } + DECLARE_INSTRUCTION_FACTORY_P1(HIsSmiAndBranch, HValue*); + DECLARE_INSTRUCTION_FACTORY_P3(HIsSmiAndBranch, HValue*, + HBasicBlock*, HBasicBlock*); DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch) @@ -4140,36 +4256,41 @@ class HIsSmiAndBranch V8_FINAL : public HUnaryControlInstruction { protected: virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; } + + private: + HIsSmiAndBranch(HValue* value, + HBasicBlock* true_target = NULL, + HBasicBlock* false_target = NULL) + : HUnaryControlInstruction(value, true_target, false_target) {} }; class HIsUndetectableAndBranch V8_FINAL : public HUnaryControlInstruction { public: - explicit HIsUndetectableAndBranch(HValue* value) - : HUnaryControlInstruction(value, NULL, NULL) { } + DECLARE_INSTRUCTION_FACTORY_P1(HIsUndetectableAndBranch, HValue*); + DECLARE_INSTRUCTION_FACTORY_P3(HIsUndetectableAndBranch, HValue*, + HBasicBlock*, HBasicBlock*); virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE { return Representation::Tagged(); } DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch) + + private: + HIsUndetectableAndBranch(HValue* value, + HBasicBlock* true_target = NULL, + HBasicBlock* false_target = NULL) + : 
HUnaryControlInstruction(value, true_target, false_target) {} }; class HStringCompareAndBranch : public HTemplateControlInstruction<2, 3> { public: - HStringCompareAndBranch(HValue* context, - HValue* left, - HValue* right, - Token::Value token) - : token_(token) { - ASSERT(Token::IsCompareOp(token)); - SetOperandAt(0, context); - SetOperandAt(1, left); - SetOperandAt(2, right); - set_representation(Representation::Tagged()); - SetGVNFlag(kChangesNewSpacePromotion); - } + DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P3(HStringCompareAndBranch, + HValue*, + HValue*, + Token::Value); HValue* context() { return OperandAt(0); } HValue* left() { return OperandAt(1); } @@ -4189,28 +4310,43 @@ class HStringCompareAndBranch : public HTemplateControlInstruction<2, 3> { DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch) private: + HStringCompareAndBranch(HValue* context, + HValue* left, + HValue* right, + Token::Value token) + : token_(token) { + ASSERT(Token::IsCompareOp(token)); + SetOperandAt(0, context); + SetOperandAt(1, left); + SetOperandAt(2, right); + set_representation(Representation::Tagged()); + SetGVNFlag(kChangesNewSpacePromotion); + } + Token::Value token_; }; class HIsConstructCallAndBranch : public HTemplateControlInstruction<2, 0> { public: + DECLARE_INSTRUCTION_FACTORY_P0(HIsConstructCallAndBranch); + virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE { return Representation::None(); } DECLARE_CONCRETE_INSTRUCTION(IsConstructCallAndBranch) + private: + HIsConstructCallAndBranch() {} }; class HHasInstanceTypeAndBranch V8_FINAL : public HUnaryControlInstruction { public: - HHasInstanceTypeAndBranch(HValue* value, InstanceType type) - : HUnaryControlInstruction(value, NULL, NULL), from_(type), to_(type) { } - HHasInstanceTypeAndBranch(HValue* value, InstanceType from, InstanceType to) - : HUnaryControlInstruction(value, NULL, NULL), from_(from), to_(to) { - ASSERT(to == LAST_TYPE); // Others not implemented yet in backend. - } + DECLARE_INSTRUCTION_FACTORY_P2( + HHasInstanceTypeAndBranch, HValue*, InstanceType); + DECLARE_INSTRUCTION_FACTORY_P3( + HHasInstanceTypeAndBranch, HValue*, InstanceType, InstanceType); InstanceType from() { return from_; } InstanceType to() { return to_; } @@ -4224,6 +4360,13 @@ class HHasInstanceTypeAndBranch V8_FINAL : public HUnaryControlInstruction { DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch) private: + HHasInstanceTypeAndBranch(HValue* value, InstanceType type) + : HUnaryControlInstruction(value, NULL, NULL), from_(type), to_(type) { } + HHasInstanceTypeAndBranch(HValue* value, InstanceType from, InstanceType to) + : HUnaryControlInstruction(value, NULL, NULL), from_(from), to_(to) { + ASSERT(to == LAST_TYPE); // Others not implemented yet in backend. + } + InstanceType from_; InstanceType to_; // Inclusive range, not all combinations work. 
}; @@ -4231,23 +4374,22 @@ class HHasInstanceTypeAndBranch V8_FINAL : public HUnaryControlInstruction { class HHasCachedArrayIndexAndBranch V8_FINAL : public HUnaryControlInstruction { public: - explicit HHasCachedArrayIndexAndBranch(HValue* value) - : HUnaryControlInstruction(value, NULL, NULL) { } + DECLARE_INSTRUCTION_FACTORY_P1(HHasCachedArrayIndexAndBranch, HValue*); virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE { return Representation::Tagged(); } DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch) + private: + explicit HHasCachedArrayIndexAndBranch(HValue* value) + : HUnaryControlInstruction(value, NULL, NULL) { } }; class HGetCachedArrayIndex V8_FINAL : public HUnaryOperation { public: - explicit HGetCachedArrayIndex(HValue* value) : HUnaryOperation(value) { - set_representation(Representation::Tagged()); - SetFlag(kUseGVN); - } + DECLARE_INSTRUCTION_FACTORY_P1(HGetCachedArrayIndex, HValue*); virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE { return Representation::Tagged(); @@ -4259,15 +4401,19 @@ class HGetCachedArrayIndex V8_FINAL : public HUnaryOperation { virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; } private: + explicit HGetCachedArrayIndex(HValue* value) : HUnaryOperation(value) { + set_representation(Representation::Tagged()); + SetFlag(kUseGVN); + } + virtual bool IsDeletable() const V8_OVERRIDE { return true; } }; class HClassOfTestAndBranch V8_FINAL : public HUnaryControlInstruction { public: - HClassOfTestAndBranch(HValue* value, Handle<String> class_name) - : HUnaryControlInstruction(value, NULL, NULL), - class_name_(class_name) { } + DECLARE_INSTRUCTION_FACTORY_P2(HClassOfTestAndBranch, HValue*, + Handle<String>); DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch) @@ -4280,15 +4426,17 @@ class HClassOfTestAndBranch V8_FINAL : public HUnaryControlInstruction { Handle<String> class_name() const { return class_name_; } private: + HClassOfTestAndBranch(HValue* value, Handle<String> class_name) + : HUnaryControlInstruction(value, NULL, NULL), + class_name_(class_name) { } + Handle<String> class_name_; }; class HTypeofIsAndBranch V8_FINAL : public HUnaryControlInstruction { public: - HTypeofIsAndBranch(HValue* value, Handle<String> type_literal) - : HUnaryControlInstruction(value, NULL, NULL), - type_literal_(type_literal) { } + DECLARE_INSTRUCTION_FACTORY_P2(HTypeofIsAndBranch, HValue*, Handle<String>); Handle<String> type_literal() { return type_literal_; } virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; @@ -4300,17 +4448,17 @@ class HTypeofIsAndBranch V8_FINAL : public HUnaryControlInstruction { } private: + HTypeofIsAndBranch(HValue* value, Handle<String> type_literal) + : HUnaryControlInstruction(value, NULL, NULL), + type_literal_(type_literal) { } + Handle<String> type_literal_; }; class HInstanceOf V8_FINAL : public HBinaryOperation { public: - HInstanceOf(HValue* context, HValue* left, HValue* right) - : HBinaryOperation(context, left, right, HType::Boolean()) { - set_representation(Representation::Tagged()); - SetAllSideEffects(); - } + DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HInstanceOf, HValue*, HValue*); virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE { return Representation::Tagged(); @@ -4319,20 +4467,21 @@ class HInstanceOf V8_FINAL : public HBinaryOperation { virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; DECLARE_CONCRETE_INSTRUCTION(InstanceOf) + + private: + HInstanceOf(HValue* context, HValue* left, HValue* right) + : 
HBinaryOperation(context, left, right, HType::Boolean()) { + set_representation(Representation::Tagged()); + SetAllSideEffects(); + } }; class HInstanceOfKnownGlobal V8_FINAL : public HTemplateInstruction<2> { public: - HInstanceOfKnownGlobal(HValue* context, - HValue* left, - Handle<JSFunction> right) - : HTemplateInstruction<2>(HType::Boolean()), function_(right) { - SetOperandAt(0, context); - SetOperandAt(1, left); - set_representation(Representation::Tagged()); - SetAllSideEffects(); - } + DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HInstanceOfKnownGlobal, + HValue*, + Handle<JSFunction>); HValue* context() { return OperandAt(0); } HValue* left() { return OperandAt(1); } @@ -4345,27 +4494,17 @@ class HInstanceOfKnownGlobal V8_FINAL : public HTemplateInstruction<2> { DECLARE_CONCRETE_INSTRUCTION(InstanceOfKnownGlobal) private: - Handle<JSFunction> function_; -}; - - -// TODO(mstarzinger): This instruction should be modeled as a load of the map -// field followed by a load of the instance size field once HLoadNamedField is -// flexible enough to accommodate byte-field loads. -class HInstanceSize V8_FINAL : public HTemplateInstruction<1> { - public: - explicit HInstanceSize(HValue* object) { - SetOperandAt(0, object); - set_representation(Representation::Integer32()); - } - - HValue* object() { return OperandAt(0); } - - virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE { - return Representation::Tagged(); + HInstanceOfKnownGlobal(HValue* context, + HValue* left, + Handle<JSFunction> right) + : HTemplateInstruction<2>(HType::Boolean()), function_(right) { + SetOperandAt(0, context); + SetOperandAt(1, left); + set_representation(Representation::Tagged()); + SetAllSideEffects(); } - DECLARE_CONCRETE_INSTRUCTION(InstanceSize) + Handle<JSFunction> function_; }; @@ -4410,10 +4549,7 @@ class HPower V8_FINAL : public HTemplateInstruction<2> { class HRandom V8_FINAL : public HTemplateInstruction<1> { public: - explicit HRandom(HValue* global_object) { - SetOperandAt(0, global_object); - set_representation(Representation::Double()); - } + DECLARE_INSTRUCTION_FACTORY_P1(HRandom, HValue*); HValue* global_object() { return OperandAt(0); } @@ -4424,6 +4560,11 @@ class HRandom V8_FINAL : public HTemplateInstruction<1> { DECLARE_CONCRETE_INSTRUCTION(Random) private: + explicit HRandom(HValue* global_object) { + SetOperandAt(0, global_object); + set_representation(Representation::Double()); + } + virtual bool IsDeletable() const V8_OVERRIDE { return true; } }; @@ -4459,8 +4600,19 @@ class HAdd V8_FINAL : public HArithmeticBinaryOperation { } virtual void RepresentationChanged(Representation to) V8_OVERRIDE { - if (to.IsTagged()) ClearFlag(kAllowUndefinedAsNaN); - HArithmeticBinaryOperation::RepresentationChanged(to); + if (to.IsTagged()) { + SetGVNFlag(kChangesNewSpacePromotion); + ClearFlag(kAllowUndefinedAsNaN); + } + if (to.IsTagged() && + (left()->ToNumberCanBeObserved() || right()->ToNumberCanBeObserved() || + left()->ToStringCanBeObserved() || right()->ToStringCanBeObserved())) { + SetAllSideEffects(); + ClearFlag(kUseGVN); + } else { + ClearAllSideEffects(); + SetFlag(kUseGVN); + } } DECLARE_CONCRETE_INSTRUCTION(Add) @@ -4522,10 +4674,12 @@ class HMul V8_FINAL : public HArithmeticBinaryOperation { HValue* right); static HInstruction* NewImul(Zone* zone, - HValue* context, - HValue* left, - HValue* right) { - HMul* mul = new(zone) HMul(context, left, right); + HValue* context, + HValue* left, + HValue* right) { + HInstruction* instr = HMul::New(zone, context, left, right); 
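+ // HMul::New may simplify the multiply (e.g. constant-fold it) and return + // an instruction that is not an HMul; the guard below covers that case.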
+ if (!instr->IsMul()) return instr; + HMul* mul = HMul::cast(instr); // TODO(mstarzinger): Prevent bailout on minus zero for imul. mul->AssumeRepresentation(Representation::Integer32()); mul->ClearFlag(HValue::kCanOverflow); @@ -4548,6 +4702,8 @@ class HMul V8_FINAL : public HArithmeticBinaryOperation { HArithmeticBinaryOperation::UpdateRepresentation(new_rep, h_infer, reason); } + bool MulMinusOne(); + DECLARE_CONCRETE_INSTRUCTION(Mul) protected: @@ -4884,9 +5040,11 @@ class HSar V8_FINAL : public HBitwiseBinaryOperation { class HRor V8_FINAL : public HBitwiseBinaryOperation { public: - HRor(HValue* context, HValue* left, HValue* right) - : HBitwiseBinaryOperation(context, left, right) { - ChangeRepresentation(Representation::Integer32()); + static HInstruction* New(Zone* zone, + HValue* context, + HValue* left, + HValue* right) { + return new(zone) HRor(context, left, right); } virtual void UpdateRepresentation(Representation new_rep, @@ -4900,6 +5058,12 @@ class HRor V8_FINAL : public HBitwiseBinaryOperation { protected: virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; } + + private: + HRor(HValue* context, HValue* left, HValue* right) + : HBitwiseBinaryOperation(context, left, right) { + ChangeRepresentation(Representation::Integer32()); + } }; @@ -4971,12 +5135,7 @@ class HParameter V8_FINAL : public HTemplateInstruction<0> { class HCallStub V8_FINAL : public HUnaryCall { public: - HCallStub(HValue* context, CodeStub::Major major_key, int argument_count) - : HUnaryCall(context, argument_count), - major_key_(major_key), - transcendental_type_(TranscendentalCache::kNumberOfCaches) { - } - + DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HCallStub, CodeStub::Major, int); CodeStub::Major major_key() { return major_key_; } HValue* context() { return value(); } @@ -4993,6 +5152,12 @@ class HCallStub V8_FINAL : public HUnaryCall { DECLARE_CONCRETE_INSTRUCTION(CallStub) private: + HCallStub(HValue* context, CodeStub::Major major_key, int argument_count) + : HUnaryCall(context, argument_count), + major_key_(major_key), + transcendental_type_(TranscendentalCache::kNumberOfCaches) { + } + CodeStub::Major major_key_; TranscendentalCache::Type transcendental_type_; }; @@ -5036,24 +5201,20 @@ class HUnknownOSRValue V8_FINAL : public HTemplateInstruction<0> { class HLoadGlobalCell V8_FINAL : public HTemplateInstruction<0> { public: - HLoadGlobalCell(Handle<Cell> cell, PropertyDetails details) - : cell_(cell), details_(details), unique_id_() { - set_representation(Representation::Tagged()); - SetFlag(kUseGVN); - SetGVNFlag(kDependsOnGlobalVars); - } + DECLARE_INSTRUCTION_FACTORY_P2(HLoadGlobalCell, Handle<Cell>, + PropertyDetails); - Handle<Cell> cell() const { return cell_; } + Unique<Cell> cell() const { return cell_; } bool RequiresHoleCheck() const; virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; virtual intptr_t Hashcode() V8_OVERRIDE { - return unique_id_.Hashcode(); + return cell_.Hashcode(); } - virtual void FinalizeUniqueValueId() V8_OVERRIDE { - unique_id_ = UniqueValueId(cell_); + virtual void FinalizeUniqueness() V8_OVERRIDE { + cell_ = Unique<Cell>(cell_.handle()); } virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE { @@ -5064,32 +5225,28 @@ class HLoadGlobalCell V8_FINAL : public HTemplateInstruction<0> { protected: virtual bool DataEquals(HValue* other) V8_OVERRIDE { - HLoadGlobalCell* b = HLoadGlobalCell::cast(other); - return unique_id_ == b->unique_id_; + return cell_ == HLoadGlobalCell::cast(other)->cell_; } private: + 
HLoadGlobalCell(Handle<Cell> cell, PropertyDetails details) + : cell_(Unique<Cell>::CreateUninitialized(cell)), details_(details) { + set_representation(Representation::Tagged()); + SetFlag(kUseGVN); + SetGVNFlag(kDependsOnGlobalVars); + } + virtual bool IsDeletable() const V8_OVERRIDE { return !RequiresHoleCheck(); } - Handle<Cell> cell_; + Unique<Cell> cell_; PropertyDetails details_; - UniqueValueId unique_id_; }; class HLoadGlobalGeneric V8_FINAL : public HTemplateInstruction<2> { public: - HLoadGlobalGeneric(HValue* context, - HValue* global_object, - Handle<Object> name, - bool for_typeof) - : name_(name), - for_typeof_(for_typeof) { - SetOperandAt(0, context); - SetOperandAt(1, global_object); - set_representation(Representation::Tagged()); - SetAllSideEffects(); - } + DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P3(HLoadGlobalGeneric, HValue*, + Handle<Object>, bool); HValue* context() { return OperandAt(0); } HValue* global_object() { return OperandAt(1); } @@ -5105,6 +5262,18 @@ class HLoadGlobalGeneric V8_FINAL : public HTemplateInstruction<2> { DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric) private: + HLoadGlobalGeneric(HValue* context, + HValue* global_object, + Handle<Object> name, + bool for_typeof) + : name_(name), + for_typeof_(for_typeof) { + SetOperandAt(0, context); + SetOperandAt(1, global_object); + set_representation(Representation::Tagged()); + SetAllSideEffects(); + } + Handle<Object> name_; bool for_typeof_; }; @@ -5344,7 +5513,7 @@ class HStoreGlobalCell V8_FINAL : public HUnaryOperation { DECLARE_INSTRUCTION_FACTORY_P3(HStoreGlobalCell, HValue*, Handle<PropertyCell>, PropertyDetails); - Handle<PropertyCell> cell() const { return cell_; } + Unique<PropertyCell> cell() const { return cell_; } bool RequiresHoleCheck() { return !details_.IsDontDelete() || details_.IsReadOnly(); } @@ -5352,6 +5521,10 @@ class HStoreGlobalCell V8_FINAL : public HUnaryOperation { return StoringValueNeedsWriteBarrier(value()); } + virtual void FinalizeUniqueness() V8_OVERRIDE { + cell_ = Unique<PropertyCell>(cell_.handle()); + } + virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE { return Representation::Tagged(); } @@ -5364,12 +5537,12 @@ class HStoreGlobalCell V8_FINAL : public HUnaryOperation { Handle<PropertyCell> cell, PropertyDetails details) : HUnaryOperation(value), - cell_(cell), + cell_(Unique<PropertyCell>::CreateUninitialized(cell)), details_(details) { SetGVNFlag(kChangesGlobalVars); } - Handle<PropertyCell> cell_; + Unique<PropertyCell> cell_; PropertyDetails details_; }; @@ -5580,6 +5753,18 @@ class HObjectAccess V8_FINAL { kDouble, HeapNumber::kValueOffset, Representation::Double()); } + static HObjectAccess ForHeapNumberValueLowestBits() { + return HObjectAccess(kDouble, + HeapNumber::kValueOffset, + Representation::Integer32()); + } + + static HObjectAccess ForHeapNumberValueHighestBits() { + return HObjectAccess(kDouble, + HeapNumber::kValueOffset + kIntSize, + Representation::Integer32()); + } + static HObjectAccess ForElementsPointer() { return HObjectAccess(kElementsPointer, JSObject::kElementsOffset); } @@ -5601,12 +5786,9 @@ class HObjectAccess V8_FINAL { ? 
Representation::Smi() : Representation::Tagged()); } - static HObjectAccess ForAllocationSiteTransitionInfo() { - return HObjectAccess(kInobject, AllocationSite::kTransitionInfoOffset); - } - - static HObjectAccess ForAllocationSiteWeakNext() { - return HObjectAccess(kInobject, AllocationSite::kWeakNextOffset); + static HObjectAccess ForAllocationSiteOffset(int offset) { + ASSERT(offset >= HeapObject::kHeaderSize && offset < AllocationSite::kSize); + return HObjectAccess(kInobject, offset); } static HObjectAccess ForAllocationSiteList() { @@ -5669,6 +5851,12 @@ class HObjectAccess V8_FINAL { return HObjectAccess(kMaps, JSObject::kMapOffset); } + static HObjectAccess ForMapInstanceSize() { + return HObjectAccess(kInobject, + Map::kInstanceSizeOffset, + Representation::Byte()); + } + static HObjectAccess ForPropertyCellValue() { return HObjectAccess(kInobject, PropertyCell::kValueOffset); } @@ -5798,7 +5986,9 @@ class HLoadNamedField V8_FINAL : public HTemplateInstruction<1> { SetOperandAt(0, object); Representation representation = access.representation(); - if (representation.IsSmi()) { + if (representation.IsByte()) { + set_representation(Representation::Integer32()); + } else if (representation.IsSmi()) { set_type(HType::Smi()); set_representation(representation); } else if (representation.IsDouble() || @@ -5823,13 +6013,8 @@ class HLoadNamedField V8_FINAL : public HTemplateInstruction<1> { class HLoadNamedGeneric V8_FINAL : public HTemplateInstruction<2> { public: - HLoadNamedGeneric(HValue* context, HValue* object, Handle<Object> name) - : name_(name) { - SetOperandAt(0, context); - SetOperandAt(1, object); - set_representation(Representation::Tagged()); - SetAllSideEffects(); - } + DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HLoadNamedGeneric, HValue*, + Handle<Object>); HValue* context() { return OperandAt(0); } HValue* object() { return OperandAt(1); } @@ -5844,18 +6029,21 @@ class HLoadNamedGeneric V8_FINAL : public HTemplateInstruction<2> { DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric) private: + HLoadNamedGeneric(HValue* context, HValue* object, Handle<Object> name) + : name_(name) { + SetOperandAt(0, context); + SetOperandAt(1, object); + set_representation(Representation::Tagged()); + SetAllSideEffects(); + } + Handle<Object> name_; }; class HLoadFunctionPrototype V8_FINAL : public HUnaryOperation { public: - explicit HLoadFunctionPrototype(HValue* function) - : HUnaryOperation(function) { - set_representation(Representation::Tagged()); - SetFlag(kUseGVN); - SetGVNFlag(kDependsOnCalls); - } + DECLARE_INSTRUCTION_FACTORY_P1(HLoadFunctionPrototype, HValue*); HValue* function() { return OperandAt(0); } @@ -5867,6 +6055,14 @@ class HLoadFunctionPrototype V8_FINAL : public HUnaryOperation { protected: virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; } + + private: + explicit HLoadFunctionPrototype(HValue* function) + : HUnaryOperation(function) { + set_representation(Representation::Tagged()); + SetFlag(kUseGVN); + SetGVNFlag(kDependsOnCalls); + } }; class ArrayInstructionInterface { @@ -6054,14 +6250,8 @@ class HLoadKeyed V8_FINAL class HLoadKeyedGeneric V8_FINAL : public HTemplateInstruction<3> { public: - HLoadKeyedGeneric(HValue* context, HValue* obj, HValue* key) { - set_representation(Representation::Tagged()); - SetOperandAt(0, obj); - SetOperandAt(1, key); - SetOperandAt(2, context); - SetAllSideEffects(); - } - + DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HLoadKeyedGeneric, HValue*, + HValue*); HValue* object() { return OperandAt(0); } HValue* key() { 
return OperandAt(1); } HValue* context() { return OperandAt(2); } @@ -6076,6 +6266,15 @@ class HLoadKeyedGeneric V8_FINAL : public HTemplateInstruction<3> { virtual HValue* Canonicalize() V8_OVERRIDE; DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric) + + private: + HLoadKeyedGeneric(HValue* context, HValue* obj, HValue* key) { + set_representation(Representation::Tagged()); + SetOperandAt(0, obj); + SetOperandAt(1, key); + SetOperandAt(2, context); + SetAllSideEffects(); + } }; @@ -6096,11 +6295,14 @@ class HStoreNamedField V8_FINAL : public HTemplateInstruction<3> { if (index == 0 && access().IsExternalMemory()) { // object must be external in case of external memory access return Representation::External(); - } else if (index == 1 && - (field_representation().IsDouble() || - field_representation().IsSmi() || - field_representation().IsInteger32())) { - return field_representation(); + } else if (index == 1) { + if (field_representation().IsByte() || + field_representation().IsInteger32()) { + return Representation::Integer32(); + } else if (field_representation().IsDouble() || + field_representation().IsSmi()) { + return field_representation(); + } } return Representation::Tagged(); } @@ -6191,19 +6393,9 @@ class HStoreNamedField V8_FINAL : public HTemplateInstruction<3> { class HStoreNamedGeneric V8_FINAL : public HTemplateInstruction<3> { public: - HStoreNamedGeneric(HValue* context, - HValue* object, - Handle<String> name, - HValue* value, - StrictModeFlag strict_mode_flag) - : name_(name), - strict_mode_flag_(strict_mode_flag) { - SetOperandAt(0, object); - SetOperandAt(1, value); - SetOperandAt(2, context); - SetAllSideEffects(); - } - + DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P4(HStoreNamedGeneric, HValue*, + Handle<String>, HValue*, + StrictModeFlag); HValue* object() { return OperandAt(0); } HValue* value() { return OperandAt(1); } HValue* context() { return OperandAt(2); } @@ -6219,6 +6411,19 @@ class HStoreNamedGeneric V8_FINAL : public HTemplateInstruction<3> { DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric) private: + HStoreNamedGeneric(HValue* context, + HValue* object, + Handle<String> name, + HValue* value, + StrictModeFlag strict_mode_flag) + : name_(name), + strict_mode_flag_(strict_mode_flag) { + SetOperandAt(0, object); + SetOperandAt(1, value); + SetOperandAt(2, context); + SetAllSideEffects(); + } + Handle<String> name_; StrictModeFlag strict_mode_flag_; }; @@ -6367,18 +6572,8 @@ class HStoreKeyed V8_FINAL class HStoreKeyedGeneric V8_FINAL : public HTemplateInstruction<4> { public: - HStoreKeyedGeneric(HValue* context, - HValue* object, - HValue* key, - HValue* value, - StrictModeFlag strict_mode_flag) - : strict_mode_flag_(strict_mode_flag) { - SetOperandAt(0, object); - SetOperandAt(1, key); - SetOperandAt(2, value); - SetOperandAt(3, context); - SetAllSideEffects(); - } + DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P4(HStoreKeyedGeneric, HValue*, + HValue*, HValue*, StrictModeFlag); HValue* object() { return OperandAt(0); } HValue* key() { return OperandAt(1); } @@ -6396,6 +6591,19 @@ class HStoreKeyedGeneric V8_FINAL : public HTemplateInstruction<4> { DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric) private: + HStoreKeyedGeneric(HValue* context, + HValue* object, + HValue* key, + HValue* value, + StrictModeFlag strict_mode_flag) + : strict_mode_flag_(strict_mode_flag) { + SetOperandAt(0, object); + SetOperandAt(1, key); + SetOperandAt(2, value); + SetOperandAt(3, context); + SetAllSideEffects(); + } + StrictModeFlag strict_mode_flag_; }; @@ -6417,25 +6625,20 @@ class 
HTransitionElementsKind V8_FINAL : public HTemplateInstruction<2> { HValue* object() { return OperandAt(0); } HValue* context() { return OperandAt(1); } - Handle<Map> original_map() { return original_map_; } - Handle<Map> transitioned_map() { return transitioned_map_; } + Unique<Map> original_map() { return original_map_; } + Unique<Map> transitioned_map() { return transitioned_map_; } ElementsKind from_kind() { return from_kind_; } ElementsKind to_kind() { return to_kind_; } virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; - virtual void FinalizeUniqueValueId() V8_OVERRIDE { - original_map_unique_id_ = UniqueValueId(original_map_); - transitioned_map_unique_id_ = UniqueValueId(transitioned_map_); - } - DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind) protected: virtual bool DataEquals(HValue* other) V8_OVERRIDE { HTransitionElementsKind* instr = HTransitionElementsKind::cast(other); - return original_map_unique_id_ == instr->original_map_unique_id_ && - transitioned_map_unique_id_ == instr->transitioned_map_unique_id_; + return original_map_ == instr->original_map_ && + transitioned_map_ == instr->transitioned_map_; } private: @@ -6443,10 +6646,8 @@ class HTransitionElementsKind V8_FINAL : public HTemplateInstruction<2> { HValue* object, Handle<Map> original_map, Handle<Map> transitioned_map) - : original_map_(original_map), - transitioned_map_(transitioned_map), - original_map_unique_id_(), - transitioned_map_unique_id_(), + : original_map_(Unique<Map>(original_map)), + transitioned_map_(Unique<Map>(transitioned_map)), from_kind_(original_map->elements_kind()), to_kind_(transitioned_map->elements_kind()) { SetOperandAt(0, object); @@ -6460,10 +6661,8 @@ class HTransitionElementsKind V8_FINAL : public HTemplateInstruction<2> { set_representation(Representation::Tagged()); } - Handle<Map> original_map_; - Handle<Map> transitioned_map_; - UniqueValueId original_map_unique_id_; - UniqueValueId transitioned_map_unique_id_; + Unique<Map> original_map_; + Unique<Map> transitioned_map_; ElementsKind from_kind_; ElementsKind to_kind_; }; @@ -6492,14 +6691,26 @@ class HStringAdd V8_FINAL : public HBinaryOperation { HStringAdd(HValue* context, HValue* left, HValue* right, StringAddFlags flags) : HBinaryOperation(context, left, right, HType::String()), flags_(flags) { set_representation(Representation::Tagged()); - SetFlag(kUseGVN); - SetGVNFlag(kDependsOnMaps); - SetGVNFlag(kChangesNewSpacePromotion); + if (MightHaveSideEffects()) { + SetAllSideEffects(); + } else { + SetFlag(kUseGVN); + SetGVNFlag(kDependsOnMaps); + SetGVNFlag(kChangesNewSpacePromotion); + } } - // No side-effects except possible allocation. - // NOTE: this instruction _does not_ call ToString() on its inputs. - virtual bool IsDeletable() const V8_OVERRIDE { return true; } + bool MightHaveSideEffects() const { + return flags_ != STRING_ADD_CHECK_NONE && + (left()->ToStringCanBeObserved() || right()->ToStringCanBeObserved()); + } + + // No side-effects except possible allocation: + // NOTE: this instruction does not call ToString() on its inputs, when flags_ + // is set to STRING_ADD_CHECK_NONE. 
+ virtual bool IsDeletable() const V8_OVERRIDE { + return !MightHaveSideEffects(); + } const StringAddFlags flags_; }; @@ -6507,12 +6718,9 @@ class HStringAdd V8_FINAL : public HBinaryOperation { class HStringCharCodeAt V8_FINAL : public HTemplateInstruction<3> { public: - static HStringCharCodeAt* New(Zone* zone, - HValue* context, - HValue* string, - HValue* index) { - return new(zone) HStringCharCodeAt(context, string, index); - } + DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HStringCharCodeAt, + HValue*, + HValue*); virtual Representation RequiredInputRepresentation(int index) { // The index is supposed to be Integer32. @@ -6616,6 +6824,24 @@ class HMaterializedLiteral : public HTemplateInstruction<V> { class HRegExpLiteral V8_FINAL : public HMaterializedLiteral<1> { public: + DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P4(HRegExpLiteral, + Handle<FixedArray>, + Handle<String>, + Handle<String>, + int); + + HValue* context() { return OperandAt(0); } + Handle<FixedArray> literals() { return literals_; } + Handle<String> pattern() { return pattern_; } + Handle<String> flags() { return flags_; } + + virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE { + return Representation::Tagged(); + } + + DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral) + + private: HRegExpLiteral(HValue* context, Handle<FixedArray> literals, Handle<String> pattern, @@ -6630,18 +6856,6 @@ class HRegExpLiteral V8_FINAL : public HMaterializedLiteral<1> { set_type(HType::JSObject()); } - HValue* context() { return OperandAt(0); } - Handle<FixedArray> literals() { return literals_; } - Handle<String> pattern() { return pattern_; } - Handle<String> flags() { return flags_; } - - virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE { - return Representation::Tagged(); - } - - DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral) - - private: Handle<FixedArray> literals_; Handle<String> pattern_; Handle<String> flags_; @@ -6650,20 +6864,9 @@ class HRegExpLiteral V8_FINAL : public HMaterializedLiteral<1> { class HFunctionLiteral V8_FINAL : public HTemplateInstruction<1> { public: - HFunctionLiteral(HValue* context, - Handle<SharedFunctionInfo> shared, - bool pretenure) - : HTemplateInstruction<1>(HType::JSObject()), - shared_info_(shared), - pretenure_(pretenure), - has_no_literals_(shared->num_literals() == 0), - is_generator_(shared->is_generator()), - language_mode_(shared->language_mode()) { - SetOperandAt(0, context); - set_representation(Representation::Tagged()); - SetGVNFlag(kChangesNewSpacePromotion); - } - + DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HFunctionLiteral, + Handle<SharedFunctionInfo>, + bool); HValue* context() { return OperandAt(0); } virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE { @@ -6679,6 +6882,20 @@ class HFunctionLiteral V8_FINAL : public HTemplateInstruction<1> { LanguageMode language_mode() const { return language_mode_; } private: + HFunctionLiteral(HValue* context, + Handle<SharedFunctionInfo> shared, + bool pretenure) + : HTemplateInstruction<1>(HType::JSObject()), + shared_info_(shared), + pretenure_(pretenure), + has_no_literals_(shared->num_literals() == 0), + is_generator_(shared->is_generator()), + language_mode_(shared->language_mode()) { + SetOperandAt(0, context); + set_representation(Representation::Tagged()); + SetGVNFlag(kChangesNewSpacePromotion); + } + virtual bool IsDeletable() const V8_OVERRIDE { return true; } Handle<SharedFunctionInfo> shared_info_; @@ -6691,11 +6908,7 @@ class HFunctionLiteral V8_FINAL : public 
HTemplateInstruction<1> { class HTypeof V8_FINAL : public HTemplateInstruction<2> { public: - explicit HTypeof(HValue* context, HValue* value) { - SetOperandAt(0, context); - SetOperandAt(1, value); - set_representation(Representation::Tagged()); - } + DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P1(HTypeof, HValue*); HValue* context() { return OperandAt(0); } HValue* value() { return OperandAt(1); } @@ -6709,6 +6922,12 @@ class HTypeof V8_FINAL : public HTemplateInstruction<2> { DECLARE_CONCRETE_INSTRUCTION(Typeof) private: + explicit HTypeof(HValue* context, HValue* value) { + SetOperandAt(0, context); + SetOperandAt(1, value); + set_representation(Representation::Tagged()); + } + virtual bool IsDeletable() const V8_OVERRIDE { return true; } }; @@ -6753,8 +6972,7 @@ class HToFastProperties V8_FINAL : public HUnaryOperation { ASSERT(value->IsCallRuntime()); #ifdef DEBUG const Runtime::Function* function = HCallRuntime::cast(value)->function(); - ASSERT(function->function_id == Runtime::kCreateObjectLiteral || - function->function_id == Runtime::kCreateObjectLiteralShallow); + ASSERT(function->function_id == Runtime::kCreateObjectLiteral); #endif } @@ -6764,9 +6982,7 @@ class HToFastProperties V8_FINAL : public HUnaryOperation { class HValueOf V8_FINAL : public HUnaryOperation { public: - explicit HValueOf(HValue* value) : HUnaryOperation(value) { - set_representation(Representation::Tagged()); - } + DECLARE_INSTRUCTION_FACTORY_P1(HValueOf, HValue*); virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE { return Representation::Tagged(); @@ -6775,16 +6991,17 @@ class HValueOf V8_FINAL : public HUnaryOperation { DECLARE_CONCRETE_INSTRUCTION(ValueOf) private: + explicit HValueOf(HValue* value) : HUnaryOperation(value) { + set_representation(Representation::Tagged()); + } + virtual bool IsDeletable() const V8_OVERRIDE { return true; } }; class HDateField V8_FINAL : public HUnaryOperation { public: - HDateField(HValue* date, Smi* index) - : HUnaryOperation(date), index_(index) { - set_representation(Representation::Tagged()); - } + DECLARE_INSTRUCTION_FACTORY_P2(HDateField, HValue*, Smi*); Smi* index() const { return index_; } @@ -6795,21 +7012,19 @@ class HDateField V8_FINAL : public HUnaryOperation { DECLARE_CONCRETE_INSTRUCTION(DateField) private: + HDateField(HValue* date, Smi* index) + : HUnaryOperation(date), index_(index) { + set_representation(Representation::Tagged()); + } + Smi* index_; }; class HSeqStringSetChar V8_FINAL : public HTemplateInstruction<3> { public: - HSeqStringSetChar(String::Encoding encoding, - HValue* string, - HValue* index, - HValue* value) : encoding_(encoding) { - SetOperandAt(0, string); - SetOperandAt(1, index); - SetOperandAt(2, value); - set_representation(Representation::Tagged()); - } + DECLARE_INSTRUCTION_FACTORY_P4(HSeqStringSetChar, String::Encoding, + HValue*, HValue*, HValue*); String::Encoding encoding() { return encoding_; } HValue* string() { return OperandAt(0); } @@ -6824,6 +7039,16 @@ class HSeqStringSetChar V8_FINAL : public HTemplateInstruction<3> { DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar) private: + HSeqStringSetChar(String::Encoding encoding, + HValue* string, + HValue* index, + HValue* value) : encoding_(encoding) { + SetOperandAt(0, string); + SetOperandAt(1, index); + SetOperandAt(2, value); + set_representation(Representation::Tagged()); + } + String::Encoding encoding_; }; @@ -6867,11 +7092,7 @@ class HCheckMapValue V8_FINAL : public HTemplateInstruction<2> { class HForInPrepareMap V8_FINAL : public 
HTemplateInstruction<2> { public: - static HForInPrepareMap* New(Zone* zone, - HValue* context, - HValue* object) { - return new(zone) HForInPrepareMap(context, object); - } + DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P1(HForInPrepareMap, HValue*); virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE { return Representation::Tagged(); diff --git a/deps/v8/src/hydrogen-load-elimination.cc b/deps/v8/src/hydrogen-load-elimination.cc new file mode 100644 index 0000000000..3337188f9a --- /dev/null +++ b/deps/v8/src/hydrogen-load-elimination.cc @@ -0,0 +1,510 @@ +// Copyright 2013 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#include "hydrogen-alias-analysis.h" +#include "hydrogen-load-elimination.h" +#include "hydrogen-instructions.h" +#include "hydrogen-flow-engine.h" + +namespace v8 { +namespace internal { + +#define GLOBAL true +#define TRACE(x) if (FLAG_trace_load_elimination) PrintF x + +static const int kMaxTrackedFields = 16; +static const int kMaxTrackedObjects = 5; + +// An element in the field approximation list. +class HFieldApproximation : public ZoneObject { + public: // Just a data blob. + HValue* object_; + HLoadNamedField* last_load_; + HValue* last_value_; + HFieldApproximation* next_; + + // Recursively copy the entire linked list of field approximations. + HFieldApproximation* Copy(Zone* zone) { + if (this == NULL) return NULL; + HFieldApproximation* copy = new(zone) HFieldApproximation(); + copy->object_ = this->object_; + copy->last_load_ = this->last_load_; + copy->last_value_ = this->last_value_; + copy->next_ = this->next_->Copy(zone); + return copy; + } +}; + + +// The main datastructure used during load/store elimination. Each in-object +// field is tracked separately. For each field, store a list of known field +// values for known objects. 
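As a reading aid, the per-field chaining described above can be pictured with a minimal standalone C++ sketch (not part of the patch): each tracked in-object offset is bucketed to offset / kPointerSize, and each bucket chains (object, last value) approximations. The Approximation and SketchTable names are invented for illustration, plain pointers replace V8's Zone allocation, and raw pointer equality stands in for HAliasAnalyzer::MustAlias.

#include <cstddef>

static const int kSketchPointerSize = 8;       // assumed word size
static const int kSketchMaxTrackedFields = 16; // mirrors kMaxTrackedFields

struct Approximation {
  const void* object;      // stands in for HValue* object_
  const void* last_value;  // stands in for HValue* last_value_
  Approximation* next;
};

struct SketchTable {
  Approximation* fields[kSketchMaxTrackedFields];

  SketchTable() {
    for (int i = 0; i < kSketchMaxTrackedFields; i++) fields[i] = NULL;
  }

  // Same bucketing rule as the FieldOf() helper later in this file:
  // misaligned or out-of-range offsets are simply not tracked.
  static int FieldOf(int offset) {
    if (offset >= kSketchMaxTrackedFields * kSketchPointerSize) return -1;
    if ((offset % kSketchPointerSize) != 0) return -1;
    return offset / kSketchPointerSize;
  }

  // Returns the last known value for (object, offset), or NULL when the
  // field is untracked or no approximation matches.
  const void* Lookup(const void* object, int offset) const {
    int field = FieldOf(offset);
    if (field < 0) return NULL;
    for (const Approximation* a = fields[field]; a != NULL; a = a->next) {
      if (a->object == object) return a->last_value;
    }
    return NULL;
  }
};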
+class HLoadEliminationTable : public ZoneObject { + public: + HLoadEliminationTable(Zone* zone, HAliasAnalyzer* aliasing) + : zone_(zone), fields_(kMaxTrackedFields, zone), aliasing_(aliasing) { } + + // The main processing of instructions. + HLoadEliminationTable* Process(HInstruction* instr, Zone* zone) { + switch (instr->opcode()) { + case HValue::kLoadNamedField: { + HLoadNamedField* l = HLoadNamedField::cast(instr); + TRACE((" process L%d field %d (o%d)\n", + instr->id(), + FieldOf(l->access()), + l->object()->ActualValue()->id())); + HValue* result = load(l); + if (result != instr) { + // The load can be replaced with a previous load or a value. + TRACE((" replace L%d -> v%d\n", instr->id(), result->id())); + instr->DeleteAndReplaceWith(result); + } + break; + } + case HValue::kStoreNamedField: { + HStoreNamedField* s = HStoreNamedField::cast(instr); + TRACE((" process S%d field %d (o%d) = v%d\n", + instr->id(), + FieldOf(s->access()), + s->object()->ActualValue()->id(), + s->value()->id())); + HValue* result = store(s); + if (result == NULL) { + // The store is redundant. Remove it. + TRACE((" remove S%d\n", instr->id())); + instr->DeleteAndReplaceWith(NULL); + } + break; + } + default: { + if (instr->CheckGVNFlag(kChangesInobjectFields)) { + TRACE((" kill-all i%d\n", instr->id())); + Kill(); + break; + } + if (instr->CheckGVNFlag(kChangesMaps)) { + TRACE((" kill-maps i%d\n", instr->id())); + KillOffset(JSObject::kMapOffset); + } + if (instr->CheckGVNFlag(kChangesElementsKind)) { + TRACE((" kill-elements-kind i%d\n", instr->id())); + KillOffset(JSObject::kMapOffset); + KillOffset(JSObject::kElementsOffset); + } + if (instr->CheckGVNFlag(kChangesElementsPointer)) { + TRACE((" kill-elements i%d\n", instr->id())); + KillOffset(JSObject::kElementsOffset); + } + if (instr->CheckGVNFlag(kChangesOsrEntries)) { + TRACE((" kill-osr i%d\n", instr->id())); + Kill(); + } + } + // Improvements possible: + // - learn from HCheckMaps for field 0 + // - remove unobservable stores (write-after-write) + // - track cells + // - track globals + // - track roots + } + return this; + } + + // Support for global analysis with HFlowEngine: Copy state to successor block. + HLoadEliminationTable* Copy(HBasicBlock* succ, Zone* zone) { + HLoadEliminationTable* copy = + new(zone) HLoadEliminationTable(zone, aliasing_); + copy->EnsureFields(fields_.length()); + for (int i = 0; i < fields_.length(); i++) { + copy->fields_[i] = fields_[i]->Copy(zone); + } + if (FLAG_trace_load_elimination) { + TRACE((" copy-to B%d\n", succ->block_id())); + copy->Print(); + } + return copy; + } + + // Support for global analysis with HFlowEngine: Merge this state with + // the other incoming state. + HLoadEliminationTable* Merge(HBasicBlock* succ, + HLoadEliminationTable* that, Zone* zone) { + if (that->fields_.length() < fields_.length()) { + // Drop fields not in the other table. + fields_.Rewind(that->fields_.length()); + } + for (int i = 0; i < fields_.length(); i++) { + // Merge the field approximations for like fields. + HFieldApproximation* approx = fields_[i]; + HFieldApproximation* prev = NULL; + while (approx != NULL) { + // TODO(titzer): Merging is O(N * M); sort? + HFieldApproximation* other = that->Find(approx->object_, i); + if (other == NULL || !Equal(approx->last_value_, other->last_value_)) { + // Kill an entry that doesn't agree with the other value.
+ if (prev != NULL) { + prev->next_ = approx->next_; + } else { + fields_[i] = approx->next_; + } + approx = approx->next_; + continue; + } + prev = approx; + approx = approx->next_; + } + } + return this; + } + + friend class HLoadEliminationEffects; // Calls Kill() and others. + friend class HLoadEliminationPhase; + + private: + // Process a load instruction, updating internal table state. If a previous + // load or store for this object and field exists, return the new value with + // which the load should be replaced. Otherwise, return {instr}. + HValue* load(HLoadNamedField* instr) { + int field = FieldOf(instr->access()); + if (field < 0) return instr; + + HValue* object = instr->object()->ActualValue(); + HFieldApproximation* approx = FindOrCreate(object, field); + + if (approx->last_value_ == NULL) { + // Load is not redundant. Fill out a new entry. + approx->last_load_ = instr; + approx->last_value_ = instr; + return instr; + } else { + // Eliminate the load. Reuse previously stored value or load instruction. + return approx->last_value_; + } + } + + // Process a store instruction, updating internal table state. If a previous + // store to the same object and field makes this store redundant (e.g. because + // the stored values are the same), return NULL indicating that this store + // instruction is redundant. Otherwise, return {instr}. + HValue* store(HStoreNamedField* instr) { + int field = FieldOf(instr->access()); + if (field < 0) return KillIfMisaligned(instr); + + HValue* object = instr->object()->ActualValue(); + HValue* value = instr->value(); + + // Kill non-equivalent may-alias entries. + KillFieldInternal(object, field, value); + if (instr->has_transition()) { + // A transition store alters the map of the object. + // TODO(titzer): remember the new map (a constant) for the object. + KillFieldInternal(object, FieldOf(JSObject::kMapOffset), NULL); + } + HFieldApproximation* approx = FindOrCreate(object, field); + + if (Equal(approx->last_value_, value)) { + // The store is redundant because the field already has this value. + return NULL; + } else { + // The store is not redundant. Update the entry. + approx->last_load_ = NULL; + approx->last_value_ = value; + return instr; + } + } + + // Kill everything in this table. + void Kill() { + fields_.Rewind(0); + } + + // Kill all entries matching the given offset. + void KillOffset(int offset) { + int field = FieldOf(offset); + if (field >= 0 && field < fields_.length()) { + fields_[field] = NULL; + } + } + + // Kill all entries aliasing the given store. + void KillStore(HStoreNamedField* s) { + int field = FieldOf(s->access()); + if (field >= 0) { + KillFieldInternal(s->object()->ActualValue(), field, s->value()); + } else { + KillIfMisaligned(s); + } + } + + // Kill multiple entries in the case of a misaligned store. + HValue* KillIfMisaligned(HStoreNamedField* instr) { + HObjectAccess access = instr->access(); + if (access.IsInobject()) { + int offset = access.offset(); + if ((offset % kPointerSize) != 0) { + // Kill the field containing the first word of the access. + HValue* object = instr->object()->ActualValue(); + int field = offset / kPointerSize; + KillFieldInternal(object, field, NULL); + + // Kill the next field in case of overlap. 
+ int size = kPointerSize; + if (access.representation().IsByte()) size = 1; + else if (access.representation().IsInteger32()) size = 4; + int next_field = (offset + size - 1) / kPointerSize; + if (next_field != field) KillFieldInternal(object, next_field, NULL); + } + } + return instr; + } + + // Find an entry for the given object and field pair. + HFieldApproximation* Find(HValue* object, int field) { + // Search for a field approximation for this object. + HFieldApproximation* approx = fields_[field]; + while (approx != NULL) { + if (aliasing_->MustAlias(object, approx->object_)) return approx; + approx = approx->next_; + } + return NULL; + } + + // Find or create an entry for the given object and field pair. + HFieldApproximation* FindOrCreate(HValue* object, int field) { + EnsureFields(field + 1); + + // Search for a field approximation for this object. + HFieldApproximation* approx = fields_[field]; + int count = 0; + while (approx != NULL) { + if (aliasing_->MustAlias(object, approx->object_)) return approx; + count++; + approx = approx->next_; + } + + if (count >= kMaxTrackedObjects) { + // Pull the last entry off the end and repurpose it for this object. + approx = ReuseLastApproximation(field); + } else { + // Allocate a new entry. + approx = new(zone_) HFieldApproximation(); + } + + // Insert the entry at the head of the list. + approx->object_ = object; + approx->last_load_ = NULL; + approx->last_value_ = NULL; + approx->next_ = fields_[field]; + fields_[field] = approx; + + return approx; + } + + // Kill all entries for a given field that _may_ alias the given object + // and do _not_ have the given value. + void KillFieldInternal(HValue* object, int field, HValue* value) { + if (field >= fields_.length()) return; // Nothing to do. + + HFieldApproximation* approx = fields_[field]; + HFieldApproximation* prev = NULL; + while (approx != NULL) { + if (aliasing_->MayAlias(object, approx->object_)) { + if (!Equal(approx->last_value_, value)) { + // Kill an aliasing entry that doesn't agree on the value. + if (prev != NULL) { + prev->next_ = approx->next_; + } else { + fields_[field] = approx->next_; + } + approx = approx->next_; + continue; + } + } + prev = approx; + approx = approx->next_; + } + } + + bool Equal(HValue* a, HValue* b) { + if (a == b) return true; + if (a != NULL && b != NULL) return a->Equals(b); + return false; + } + + // Remove the last approximation for a field so that it can be reused. + // We reuse the last entry because it was the first inserted and is thus + // farthest away from the current instruction. + HFieldApproximation* ReuseLastApproximation(int field) { + HFieldApproximation* approx = fields_[field]; + ASSERT(approx != NULL); + + HFieldApproximation* prev = NULL; + while (approx->next_ != NULL) { + prev = approx; + approx = approx->next_; + } + if (prev != NULL) prev->next_ = NULL; + return approx; + } + + // Compute the field index for the given object access; -1 if not tracked. + int FieldOf(HObjectAccess access) { + return access.IsInobject() ? FieldOf(access.offset()) : -1; + } + + // Compute the field index for the given in-object offset; -1 if not tracked. + int FieldOf(int offset) { + if (offset >= kMaxTrackedFields * kPointerSize) return -1; + // TODO(titzer): track misaligned loads in a separate list? + if ((offset % kPointerSize) != 0) return -1; // Ignore misaligned accesses. + return offset / kPointerSize; + } + + // Ensure internal storage for the given number of fields. 
+ void EnsureFields(int num_fields) { + if (fields_.length() < num_fields) { + fields_.AddBlock(NULL, num_fields - fields_.length(), zone_); + } + } + + // Print this table to stdout. + void Print() { + for (int i = 0; i < fields_.length(); i++) { + PrintF(" field %d: ", i); + for (HFieldApproximation* a = fields_[i]; a != NULL; a = a->next_) { + PrintF("[o%d =", a->object_->id()); + if (a->last_load_ != NULL) PrintF(" L%d", a->last_load_->id()); + if (a->last_value_ != NULL) PrintF(" v%d", a->last_value_->id()); + PrintF("] "); + } + PrintF("\n"); + } + } + + Zone* zone_; + ZoneList<HFieldApproximation*> fields_; + HAliasAnalyzer* aliasing_; +}; + + +// Support for HFlowEngine: collect store effects within loops. +class HLoadEliminationEffects : public ZoneObject { + public: + explicit HLoadEliminationEffects(Zone* zone) + : zone_(zone), + maps_stored_(false), + fields_stored_(false), + elements_stored_(false), + stores_(5, zone) { } + + inline bool Disabled() { + return false; // Effects are _not_ disabled. + } + + // Process a possibly side-effecting instruction. + void Process(HInstruction* instr, Zone* zone) { + switch (instr->opcode()) { + case HValue::kStoreNamedField: { + stores_.Add(HStoreNamedField::cast(instr), zone_); + break; + } + case HValue::kOsrEntry: { + // Kill everything. Loads must not be hoisted past the OSR entry. + maps_stored_ = true; + fields_stored_ = true; + elements_stored_ = true; + } + default: { + fields_stored_ |= instr->CheckGVNFlag(kChangesInobjectFields); + maps_stored_ |= instr->CheckGVNFlag(kChangesMaps); + maps_stored_ |= instr->CheckGVNFlag(kChangesElementsKind); + elements_stored_ |= instr->CheckGVNFlag(kChangesElementsKind); + elements_stored_ |= instr->CheckGVNFlag(kChangesElementsPointer); + } + } + } + + // Apply these effects to the given load elimination table. + void Apply(HLoadEliminationTable* table) { + if (fields_stored_) { + table->Kill(); + return; + } + if (maps_stored_) { + table->KillOffset(JSObject::kMapOffset); + } + if (elements_stored_) { + table->KillOffset(JSObject::kElementsOffset); + } + + // Kill non-agreeing fields for each store contained in these effects. + for (int i = 0; i < stores_.length(); i++) { + table->KillStore(stores_[i]); + } + } + + // Union these effects with the other effects. + void Union(HLoadEliminationEffects* that, Zone* zone) { + maps_stored_ |= that->maps_stored_; + fields_stored_ |= that->fields_stored_; + elements_stored_ |= that->elements_stored_; + for (int i = 0; i < that->stores_.length(); i++) { + stores_.Add(that->stores_[i], zone); + } + } + + private: + Zone* zone_; + bool maps_stored_ : 1; + bool fields_stored_ : 1; + bool elements_stored_ : 1; + ZoneList<HStoreNamedField*> stores_; +}; + + +// The main routine of the analysis phase. Use the HFlowEngine for either a +// local or a global analysis. +void HLoadEliminationPhase::Run() { + HFlowEngine<HLoadEliminationTable, HLoadEliminationEffects> + engine(graph(), zone()); + HAliasAnalyzer aliasing; + HLoadEliminationTable* table = + new(zone()) HLoadEliminationTable(zone(), &aliasing); + + if (GLOBAL) { + // Perform a global analysis. + engine.AnalyzeDominatedBlocks(graph()->blocks()->at(0), table); + } else { + // Perform only local analysis. 
+ for (int i = 0; i < graph()->blocks()->length(); i++) { + table->Kill(); + engine.AnalyzeOneBlock(graph()->blocks()->at(i), table); + } + } +} + +} } // namespace v8::internal diff --git a/deps/v8/src/v8preparserdll-main.cc b/deps/v8/src/hydrogen-load-elimination.h index c0344d344a..ef6f71fa11 100644 --- a/deps/v8/src/v8preparserdll-main.cc +++ b/deps/v8/src/hydrogen-load-elimination.h @@ -1,4 +1,4 @@ -// Copyright 2010 the V8 project authors. All rights reserved. +// Copyright 2013 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -25,15 +25,26 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -#include <windows.h> +#ifndef V8_HYDROGEN_LOAD_ELIMINATION_H_ +#define V8_HYDROGEN_LOAD_ELIMINATION_H_ -#include "../include/v8-preparser.h" +#include "hydrogen.h" -extern "C" { -BOOL WINAPI DllMain(HANDLE hinstDLL, - DWORD dwReason, - LPVOID lpvReserved) { - // Do nothing. - return TRUE; -} -} +namespace v8 { +namespace internal { + +class HLoadEliminationPhase : public HPhase { + public: + explicit HLoadEliminationPhase(HGraph* graph) + : HPhase("H_Load elimination", graph) { } + + void Run(); + + private: + void EliminateLoads(HBasicBlock* block); +}; + + +} } // namespace v8::internal + +#endif // V8_HYDROGEN_LOAD_ELIMINATION_H_ diff --git a/deps/v8/src/hydrogen-mark-unreachable.cc b/deps/v8/src/hydrogen-mark-unreachable.cc new file mode 100644 index 0000000000..d7c5ed2b18 --- /dev/null +++ b/deps/v8/src/hydrogen-mark-unreachable.cc @@ -0,0 +1,77 @@ +// Copyright 2013 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
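The new pass whose license header ends here (hydrogen-mark-unreachable.cc, continued below) reduces to a fixed-point propagation of reachability over the block list. A compiler-agnostic sketch of that iteration, not part of the patch and with a simplified Block standing in for HBasicBlock (the real pass additionally consults deoptimizing predecessors, KnownSuccessorBlock() and OSR entries), looks like this:

#include <vector>

// Simplified stand-in for HBasicBlock: predecessor links plus the
// reachability bit that the pass computes.
struct Block {
  std::vector<Block*> predecessors;
  bool reachable;
  bool is_entry;
  Block() : reachable(true), is_entry(false) {}
};

// Iterate until nothing changes: a block stays reachable only if it is the
// entry block or at least one of its predecessors is still reachable.
void MarkUnreachableBlocks(const std::vector<Block*>& blocks) {
  bool changed = true;
  while (changed) {
    changed = false;
    for (size_t i = 0; i < blocks.size(); i++) {
      Block* block = blocks[i];
      if (!block->reachable) continue;
      bool is_reachable = block->is_entry;
      for (size_t j = 0; j < block->predecessors.size(); j++) {
        if (block->predecessors[j]->reachable) {
          is_reachable = true;
          break;
        }
      }
      if (!is_reachable) {
        block->reachable = false;
        changed = true;
      }
    }
  }
}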
+ +#include "hydrogen-mark-unreachable.h" + +namespace v8 { +namespace internal { + + +void HMarkUnreachableBlocksPhase::MarkUnreachableBlocks() { + // If there is unreachable code in the graph, propagate the unreachable marks + // using a fixed-point iteration. + bool changed = true; + const ZoneList<HBasicBlock*>* blocks = graph()->blocks(); + while (changed) { + changed = false; + for (int i = 0; i < blocks->length(); i++) { + HBasicBlock* block = blocks->at(i); + if (!block->IsReachable()) continue; + bool is_reachable = blocks->at(0) == block; + for (HPredecessorIterator it(block); !it.Done(); it.Advance()) { + HBasicBlock* predecessor = it.Current(); + // A block is reachable if one of its predecessors is reachable, + // doesn't deoptimize and either is known to transfer control to the + // block or has a control flow instruction for which the next block + // cannot be determined. + if (predecessor->IsReachable() && !predecessor->IsDeoptimizing()) { + HBasicBlock* pred_succ; + bool known_pred_succ = + predecessor->end()->KnownSuccessorBlock(&pred_succ); + if (!known_pred_succ || pred_succ == block) { + is_reachable = true; + break; + } + } + if (block->is_osr_entry()) { + is_reachable = true; + } + } + if (!is_reachable) { + block->MarkUnreachable(); + changed = true; + } + } + } +} + + +void HMarkUnreachableBlocksPhase::Run() { + MarkUnreachableBlocks(); +} + +} } // namespace v8::internal diff --git a/deps/v8/src/hydrogen-deoptimizing-mark.h b/deps/v8/src/hydrogen-mark-unreachable.h index 7d6e6e4bda..e9459d5208 100644 --- a/deps/v8/src/hydrogen-deoptimizing-mark.h +++ b/deps/v8/src/hydrogen-mark-unreachable.h @@ -25,8 +25,8 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -#ifndef V8_HYDROGEN_DEOPTIMIZING_MARK_H_ -#define V8_HYDROGEN_DEOPTIMIZING_MARK_H_ +#ifndef V8_HYDROGEN_MARK_UNREACHABLE_H_ +#define V8_HYDROGEN_MARK_UNREACHABLE_H_ #include "hydrogen.h" @@ -34,23 +34,20 @@ namespace v8 { namespace internal { -// Mark all blocks that are dominated by an unconditional soft deoptimize to -// prevent code motion across those blocks. -class HPropagateDeoptimizingMarkPhase : public HPhase { +class HMarkUnreachableBlocksPhase : public HPhase { public: - explicit HPropagateDeoptimizingMarkPhase(HGraph* graph) - : HPhase("H_Propagate deoptimizing mark", graph) { } + explicit HMarkUnreachableBlocksPhase(HGraph* graph) + : HPhase("H_Mark unreachable blocks", graph) { } void Run(); private: - void MarkAsDeoptimizing(); - void NullifyUnreachableInstructions(); + void MarkUnreachableBlocks(); - DISALLOW_COPY_AND_ASSIGN(HPropagateDeoptimizingMarkPhase); + DISALLOW_COPY_AND_ASSIGN(HMarkUnreachableBlocksPhase); }; } } // namespace v8::internal -#endif // V8_HYDROGEN_DEOPTIMIZING_MARK_H_ +#endif // V8_HYDROGEN_MARK_UNREACHABLE_H_ diff --git a/deps/v8/src/hydrogen-osr.cc b/deps/v8/src/hydrogen-osr.cc index 6b1df1e7a5..6e39df6aa9 100644 --- a/deps/v8/src/hydrogen-osr.cc +++ b/deps/v8/src/hydrogen-osr.cc @@ -37,19 +37,8 @@ bool HOsrBuilder::HasOsrEntryAt(IterationStatement* statement) { } -// Build a new loop header block and set it as the current block.
-HBasicBlock *HOsrBuilder::BuildLoopEntry() { - HBasicBlock* loop_entry = builder_->CreateLoopHeaderBlock(); - builder_->current_block()->Goto(loop_entry); - builder_->set_current_block(loop_entry); - return loop_entry; -} - - -HBasicBlock* HOsrBuilder::BuildPossibleOsrLoopEntry( - IterationStatement* statement) { - // Check if there is an OSR here first. - if (!HasOsrEntryAt(statement)) return BuildLoopEntry(); +HBasicBlock* HOsrBuilder::BuildOsrLoopEntry(IterationStatement* statement) { + ASSERT(HasOsrEntryAt(statement)); Zone* zone = builder_->zone(); HGraph* graph = builder_->graph(); @@ -63,12 +52,12 @@ HBasicBlock* HOsrBuilder::BuildPossibleOsrLoopEntry( HBasicBlock* non_osr_entry = graph->CreateBasicBlock(); osr_entry_ = graph->CreateBasicBlock(); HValue* true_value = graph->GetConstantTrue(); - HBranch* test = new(zone) HBranch(true_value, ToBooleanStub::Types(), - non_osr_entry, osr_entry_); - builder_->current_block()->Finish(test); + HBranch* test = builder_->New<HBranch>(true_value, ToBooleanStub::Types(), + non_osr_entry, osr_entry_); + builder_->FinishCurrentBlock(test); HBasicBlock* loop_predecessor = graph->CreateBasicBlock(); - non_osr_entry->Goto(loop_predecessor); + builder_->Goto(non_osr_entry, loop_predecessor); builder_->set_current_block(osr_entry_); osr_entry_->set_osr_entry(); @@ -108,12 +97,12 @@ HBasicBlock* HOsrBuilder::BuildPossibleOsrLoopEntry( builder_->Add<HOsrEntry>(osr_entry_id); HContext* context = builder_->Add<HContext>(); environment->BindContext(context); - builder_->current_block()->Goto(loop_predecessor); + builder_->Goto(loop_predecessor); loop_predecessor->SetJoinId(statement->EntryId()); builder_->set_current_block(loop_predecessor); // Create the final loop entry - osr_loop_entry_ = BuildLoopEntry(); + osr_loop_entry_ = builder_->BuildLoopEntry(); return osr_loop_entry_; } diff --git a/deps/v8/src/hydrogen-osr.h b/deps/v8/src/hydrogen-osr.h index 5014a75bda..ae72ce650c 100644 --- a/deps/v8/src/hydrogen-osr.h +++ b/deps/v8/src/hydrogen-osr.h @@ -45,9 +45,10 @@ class HOsrBuilder : public ZoneObject { osr_entry_(NULL), osr_loop_entry_(NULL), osr_values_(NULL) { } + // Creates the loop entry block for the given statement, setting up OSR // entries as necessary, and sets the current block to the new block. - HBasicBlock* BuildPossibleOsrLoopEntry(IterationStatement* statement); + HBasicBlock* BuildOsrLoopEntry(IterationStatement* statement); // Process the hydrogen graph after it has been completed, performing // any OSR-specific cleanups or changes. @@ -61,10 +62,9 @@ class HOsrBuilder : public ZoneObject { return unoptimized_frame_slots_; } - private: - HBasicBlock* BuildLoopEntry(); bool HasOsrEntryAt(IterationStatement* statement); + private: int unoptimized_frame_slots_; HOptimizedGraphBuilder* builder_; HBasicBlock* osr_entry_; diff --git a/deps/v8/src/hydrogen-redundant-phi.cc b/deps/v8/src/hydrogen-redundant-phi.cc index 9c38200577..1263833dac 100644 --- a/deps/v8/src/hydrogen-redundant-phi.cc +++ b/deps/v8/src/hydrogen-redundant-phi.cc @@ -31,37 +31,18 @@ namespace v8 { namespace internal { void HRedundantPhiEliminationPhase::Run() { - // We do a simple fixed point iteration without any work list, because - // machine-generated JavaScript can lead to a very dense Hydrogen graph with - // an enormous work list and will consequently result in OOM. Experiments - // showed that this simple algorithm is good enough, and even e.g. tracking - // the set or range of blocks to consider is not a real improvement. 
- bool need_another_iteration; + // Gather all phis from all blocks first. const ZoneList<HBasicBlock*>* blocks(graph()->blocks()); - ZoneList<HPhi*> redundant_phis(blocks->length(), zone()); - do { - need_another_iteration = false; - for (int i = 0; i < blocks->length(); ++i) { - HBasicBlock* block = blocks->at(i); - for (int j = 0; j < block->phis()->length(); j++) { - HPhi* phi = block->phis()->at(j); - HValue* replacement = phi->GetRedundantReplacement(); - if (replacement != NULL) { - // Remember phi to avoid concurrent modification of the block's phis. - redundant_phis.Add(phi, zone()); - for (HUseIterator it(phi->uses()); !it.Done(); it.Advance()) { - HValue* value = it.value(); - value->SetOperandAt(it.index(), replacement); - need_another_iteration |= value->IsPhi(); - } - } - } - for (int i = 0; i < redundant_phis.length(); i++) { - block->RemovePhi(redundant_phis[i]); - } - redundant_phis.Clear(); + ZoneList<HPhi*> all_phis(blocks->length(), zone()); + for (int i = 0; i < blocks->length(); ++i) { + HBasicBlock* block = blocks->at(i); + for (int j = 0; j < block->phis()->length(); j++) { + all_phis.Add(block->phis()->at(j), zone()); } - } while (need_another_iteration); + } + + // Iteratively reduce all phis in the list. + ProcessPhis(&all_phis); #if DEBUG // Make sure that we *really* removed all redundant phis. @@ -73,4 +54,35 @@ void HRedundantPhiEliminationPhase::Run() { #endif } + +void HRedundantPhiEliminationPhase::ProcessBlock(HBasicBlock* block) { + ProcessPhis(block->phis()); +} + + +void HRedundantPhiEliminationPhase::ProcessPhis(const ZoneList<HPhi*>* phis) { + bool updated; + do { + // Iteratively replace all redundant phis in the given list. + updated = false; + for (int i = 0; i < phis->length(); i++) { + HPhi* phi = phis->at(i); + if (phi->CheckFlag(HValue::kIsDead)) continue; // Already replaced. + + HValue* replacement = phi->GetRedundantReplacement(); + if (replacement != NULL) { + phi->SetFlag(HValue::kIsDead); + for (HUseIterator it(phi->uses()); !it.Done(); it.Advance()) { + HValue* value = it.value(); + value->SetOperandAt(it.index(), replacement); + // Iterate again if used in another non-dead phi.
+ updated |= value->IsPhi() && !value->CheckFlag(HValue::kIsDead); + } + phi->block()->RemovePhi(phi); + } + } + } while (updated); +} + + } } // namespace v8::internal diff --git a/deps/v8/src/hydrogen-redundant-phi.h b/deps/v8/src/hydrogen-redundant-phi.h index 6291fa5b78..960ae69c95 100644 --- a/deps/v8/src/hydrogen-redundant-phi.h +++ b/deps/v8/src/hydrogen-redundant-phi.h @@ -42,8 +42,11 @@ class HRedundantPhiEliminationPhase : public HPhase { : HPhase("H_Redundant phi elimination", graph) { } void Run(); + void ProcessBlock(HBasicBlock* block); private: + void ProcessPhis(const ZoneList<HPhi*>* phis); + DISALLOW_COPY_AND_ASSIGN(HRedundantPhiEliminationPhase); }; diff --git a/deps/v8/src/hydrogen-representation-changes.cc b/deps/v8/src/hydrogen-representation-changes.cc index 960113782f..d0c9b58258 100644 --- a/deps/v8/src/hydrogen-representation-changes.cc +++ b/deps/v8/src/hydrogen-representation-changes.cc @@ -61,6 +61,11 @@ void HRepresentationChangesPhase::InsertRepresentationChangeForUse( if (new_value == NULL) { new_value = new(graph()->zone()) HChange( value, to, is_truncating_to_smi, is_truncating_to_int); + if (use_value->position() != RelocInfo::kNoPosition) { + new_value->set_position(use_value->position()); + } else { + ASSERT(!FLAG_emit_opt_code_positions || !graph()->info()->IsOptimizing()); + } } new_value->InsertBefore(next); diff --git a/deps/v8/src/hydrogen-uint32-analysis.cc b/deps/v8/src/hydrogen-uint32-analysis.cc index 835a198d4d..8de887d6f8 100644 --- a/deps/v8/src/hydrogen-uint32-analysis.cc +++ b/deps/v8/src/hydrogen-uint32-analysis.cc @@ -35,8 +35,17 @@ bool HUint32AnalysisPhase::IsSafeUint32Use(HValue* val, HValue* use) { // Operations that operate on bits are safe. if (use->IsBitwise() || use->IsShl() || use->IsSar() || use->IsShr()) { return true; - } else if (use->IsChange() || use->IsSimulate()) { - // Conversions and deoptimization have special support for unt32. + } else if (use->IsSimulate()) { + // Deoptimization has special support for uint32. + return true; + } else if (use->IsChange()) { + // Conversions have special support for uint32. + // This ASSERT guards that the conversion in question is actually + // implemented. Do not extend the whitelist without adding + // support to LChunkBuilder::DoChange(). 
+ ASSERT(HChange::cast(use)->to().IsDouble() || + HChange::cast(use)->to().IsSmi() || + HChange::cast(use)->to().IsTagged()); return true; } else if (use->IsStoreKeyed()) { HStoreKeyed* store = HStoreKeyed::cast(use); diff --git a/deps/v8/src/hydrogen.cc b/deps/v8/src/hydrogen.cc index 15ef5ed0b6..3b232e6e93 100644 --- a/deps/v8/src/hydrogen.cc +++ b/deps/v8/src/hydrogen.cc @@ -30,21 +30,24 @@ #include <algorithm> #include "v8.h" +#include "allocation-site-scopes.h" #include "codegen.h" #include "full-codegen.h" #include "hashmap.h" #include "hydrogen-bce.h" #include "hydrogen-bch.h" #include "hydrogen-canonicalize.h" +#include "hydrogen-check-elimination.h" #include "hydrogen-dce.h" #include "hydrogen-dehoist.h" -#include "hydrogen-deoptimizing-mark.h" #include "hydrogen-environment-liveness.h" #include "hydrogen-escape-analysis.h" #include "hydrogen-infer-representation.h" #include "hydrogen-infer-types.h" +#include "hydrogen-load-elimination.h" #include "hydrogen-gvn.h" #include "hydrogen-mark-deoptimize.h" +#include "hydrogen-mark-unreachable.h" #include "hydrogen-minus-zero.h" #include "hydrogen-osr.h" #include "hydrogen-range-analysis.h" @@ -94,7 +97,7 @@ HBasicBlock::HBasicBlock(HGraph* graph) parent_loop_header_(NULL), inlined_entry_block_(NULL), is_inline_return_target_(false), - is_deoptimizing_(false), + is_reachable_(true), dominates_loop_successors_(false), is_osr_entry_(false) { } @@ -104,6 +107,11 @@ Isolate* HBasicBlock::isolate() const { } +void HBasicBlock::MarkUnreachable() { + is_reachable_ = false; +} + + void HBasicBlock::AttachLoopInformation() { ASSERT(!IsLoopHeader()); loop_information_ = new(zone()) HLoopInformation(this, zone()); @@ -132,16 +140,25 @@ void HBasicBlock::RemovePhi(HPhi* phi) { } -void HBasicBlock::AddInstruction(HInstruction* instr) { +void HBasicBlock::AddInstruction(HInstruction* instr, int position) { ASSERT(!IsStartBlock() || !IsFinished()); ASSERT(!instr->IsLinked()); ASSERT(!IsFinished()); + if (position != RelocInfo::kNoPosition) { + instr->set_position(position); + } if (first_ == NULL) { ASSERT(last_environment() != NULL); ASSERT(!last_environment()->ast_id().IsNone()); HBlockEntry* entry = new(zone()) HBlockEntry(); entry->InitializeAsFirst(this); + if (position != RelocInfo::kNoPosition) { + entry->set_position(position); + } else { + ASSERT(!FLAG_emit_opt_code_positions || + !graph()->info()->IsOptimizing()); + } first_ = last_ = entry; } instr->InsertAfter(last_); @@ -192,9 +209,9 @@ HSimulate* HBasicBlock::CreateSimulate(BailoutId ast_id, } -void HBasicBlock::Finish(HControlInstruction* end) { +void HBasicBlock::Finish(HControlInstruction* end, int position) { ASSERT(!IsFinished()); - AddInstruction(end); + AddInstruction(end, position); end_ = end; for (HSuccessorIterator it(end); !it.Done(); it.Advance()) { it.Current()->RegisterPredecessor(this); @@ -203,35 +220,44 @@ void HBasicBlock::Finish(HControlInstruction* end) { void HBasicBlock::Goto(HBasicBlock* block, + int position, FunctionState* state, bool add_simulate) { bool drop_extra = state != NULL && state->inlining_kind() == DROP_EXTRA_ON_RETURN; if (block->IsInlineReturnTarget()) { - AddInstruction(new(zone()) HLeaveInlined()); + HEnvironment* env = last_environment(); + int argument_count = env->arguments_environment()->parameter_count(); + AddInstruction(new(zone()) + HLeaveInlined(state->entry(), argument_count), + position); UpdateEnvironment(last_environment()->DiscardInlined(drop_extra)); } - if (add_simulate) AddNewSimulate(BailoutId::None()); + if (add_simulate) 
AddNewSimulate(BailoutId::None(), position); HGoto* instr = new(zone()) HGoto(block); - Finish(instr); + Finish(instr, position); } void HBasicBlock::AddLeaveInlined(HValue* return_value, - FunctionState* state) { + FunctionState* state, + int position) { HBasicBlock* target = state->function_return(); bool drop_extra = state->inlining_kind() == DROP_EXTRA_ON_RETURN; ASSERT(target->IsInlineReturnTarget()); ASSERT(return_value != NULL); - AddInstruction(new(zone()) HLeaveInlined()); + HEnvironment* env = last_environment(); + int argument_count = env->arguments_environment()->parameter_count(); + AddInstruction(new(zone()) HLeaveInlined(state->entry(), argument_count), + position); UpdateEnvironment(last_environment()->DiscardInlined(drop_extra)); last_environment()->Push(return_value); - AddNewSimulate(BailoutId::None()); + AddNewSimulate(BailoutId::None(), position); HGoto* instr = new(zone()) HGoto(target); - Finish(instr); + Finish(instr, position); } @@ -622,10 +648,21 @@ HConstant* HGraph::GetConstant(SetOncePointer<HConstant>* pointer, // Can't pass GetInvalidContext() to HConstant::New, because that will // recursively call GetConstant HConstant* constant = HConstant::New(zone(), NULL, value); - constant->InsertAfter(GetConstantUndefined()); + constant->InsertAfter(entry_block()->first()); pointer->set(constant); + return constant; } - return pointer->get(); + return ReinsertConstantIfNecessary(pointer->get()); +} + + +HConstant* HGraph::ReinsertConstantIfNecessary(HConstant* constant) { + if (!constant->IsLinked()) { + // The constant was removed from the graph. Reinsert. + constant->ClearFlag(HValue::kIsDead); + constant->InsertAfter(entry_block()->first()); + } + return constant; } @@ -648,21 +685,21 @@ HConstant* HGraph::GetConstantMinus1() { HConstant* HGraph::GetConstant##Name() { \ if (!constant_##name##_.is_set()) { \ HConstant* constant = new(zone()) HConstant( \ - isolate()->factory()->name##_value(), \ - UniqueValueId::name##_value(isolate()->heap()), \ + Unique<Object>::CreateImmovable(isolate()->factory()->name##_value()), \ Representation::Tagged(), \ htype, \ false, \ true, \ false, \ boolean_value); \ - constant->InsertAfter(GetConstantUndefined()); \ + constant->InsertAfter(entry_block()->first()); \ constant_##name##_.set(constant); \ } \ - return constant_##name##_.get(); \ + return ReinsertConstantIfNecessary(constant_##name##_.get()); \ } +DEFINE_GET_CONSTANT(Undefined, undefined, HType::Tagged(), false) DEFINE_GET_CONSTANT(True, true, HType::Boolean(), true) DEFINE_GET_CONSTANT(False, false, HType::Boolean(), false) DEFINE_GET_CONSTANT(Hole, the_hole, HType::Tagged(), false) @@ -690,9 +727,8 @@ bool HGraph::IsStandardConstant(HConstant* constant) { } -HGraphBuilder::IfBuilder::IfBuilder(HGraphBuilder* builder, int position) +HGraphBuilder::IfBuilder::IfBuilder(HGraphBuilder* builder) : builder_(builder), - position_(position), finished_(false), deopt_then_(false), deopt_else_(false), @@ -715,7 +751,6 @@ HGraphBuilder::IfBuilder::IfBuilder( HGraphBuilder* builder, HIfContinuation* continuation) : builder_(builder), - position_(RelocInfo::kNoPosition), finished_(false), deopt_then_(false), deopt_else_(false), @@ -726,16 +761,17 @@ HGraphBuilder::IfBuilder::IfBuilder( captured_(false), needs_compare_(false), first_true_block_(NULL), + last_true_block_(NULL), first_false_block_(NULL), split_edge_merge_block_(NULL), merge_block_(NULL) { continuation->Continue(&first_true_block_, - &first_false_block_, - &position_); + &first_false_block_); } -void 
HGraphBuilder::IfBuilder::AddCompare(HControlInstruction* compare) { +HControlInstruction* HGraphBuilder::IfBuilder::AddCompare( + HControlInstruction* compare) { if (split_edge_merge_block_ != NULL) { HEnvironment* env = first_false_block_->last_environment(); HBasicBlock* split_edge = @@ -747,24 +783,26 @@ void HGraphBuilder::IfBuilder::AddCompare(HControlInstruction* compare) { compare->SetSuccessorAt(0, first_true_block_); compare->SetSuccessorAt(1, split_edge); } - split_edge->GotoNoSimulate(split_edge_merge_block_); + builder_->GotoNoSimulate(split_edge, split_edge_merge_block_); } else { compare->SetSuccessorAt(0, first_true_block_); compare->SetSuccessorAt(1, first_false_block_); } - builder_->current_block()->Finish(compare); + builder_->FinishCurrentBlock(compare); needs_compare_ = false; + return compare; } void HGraphBuilder::IfBuilder::Or() { + ASSERT(!needs_compare_); ASSERT(!did_and_); did_or_ = true; HEnvironment* env = first_false_block_->last_environment(); if (split_edge_merge_block_ == NULL) { split_edge_merge_block_ = builder_->CreateBasicBlock(env->Copy()); - first_true_block_->GotoNoSimulate(split_edge_merge_block_); + builder_->GotoNoSimulate(first_true_block_, split_edge_merge_block_); first_true_block_ = split_edge_merge_block_; } builder_->set_current_block(first_false_block_); @@ -773,12 +811,13 @@ void HGraphBuilder::IfBuilder::Or() { void HGraphBuilder::IfBuilder::And() { + ASSERT(!needs_compare_); ASSERT(!did_or_); did_and_ = true; HEnvironment* env = first_false_block_->last_environment(); if (split_edge_merge_block_ == NULL) { split_edge_merge_block_ = builder_->CreateBasicBlock(env->Copy()); - first_false_block_->GotoNoSimulate(split_edge_merge_block_); + builder_->GotoNoSimulate(first_false_block_, split_edge_merge_block_); first_false_block_ = split_edge_merge_block_; } builder_->set_current_block(first_true_block_); @@ -796,7 +835,29 @@ void HGraphBuilder::IfBuilder::CaptureContinuation( HBasicBlock* false_block = did_else_ && (first_false_block_ != NULL) ? builder_->current_block() : first_false_block_; - continuation->Capture(true_block, false_block, position_); + continuation->Capture(true_block, false_block); + captured_ = true; + End(); +} + + +void HGraphBuilder::IfBuilder::JoinContinuation(HIfContinuation* continuation) { + ASSERT(!finished_); + ASSERT(!captured_); + HBasicBlock* true_block = last_true_block_ == NULL + ? first_true_block_ + : last_true_block_; + HBasicBlock* false_block = did_else_ && (first_false_block_ != NULL) + ? 
builder_->current_block() + : first_false_block_; + if (true_block != NULL && !true_block->IsFinished()) { + ASSERT(continuation->IsTrueReachable()); + builder_->GotoNoSimulate(true_block, continuation->true_branch()); + } + if (false_block != NULL && !false_block->IsFinished()) { + ASSERT(continuation->IsFalseReachable()); + builder_->GotoNoSimulate(false_block, continuation->false_branch()); + } captured_ = true; End(); } @@ -814,10 +875,9 @@ void HGraphBuilder::IfBuilder::Then() { HConstant* constant_false = builder_->graph()->GetConstantFalse(); ToBooleanStub::Types boolean_type = ToBooleanStub::Types(); boolean_type.Add(ToBooleanStub::BOOLEAN); - HBranch* branch = - new(zone()) HBranch(constant_false, boolean_type, first_true_block_, - first_false_block_); - builder_->current_block()->Finish(branch); + HBranch* branch = builder()->New<HBranch>( + constant_false, boolean_type, first_true_block_, first_false_block_); + builder_->FinishCurrentBlock(branch); } builder_->set_current_block(first_true_block_); } @@ -845,10 +905,9 @@ void HGraphBuilder::IfBuilder::Deopt(const char* reason) { void HGraphBuilder::IfBuilder::Return(HValue* value) { - HBasicBlock* block = builder_->current_block(); HValue* parameter_count = builder_->graph()->GetConstantMinus1(); - block->FinishExit(builder_->New<HReturn>(value, parameter_count)); - builder_->set_current_block(NULL); + builder_->FinishExitCurrentBlock( + builder_->New<HReturn>(value, parameter_count)); if (did_else_) { first_false_block_ = NULL; } else { @@ -878,17 +937,17 @@ void HGraphBuilder::IfBuilder::End() { HBasicBlock* last_false_block = builder_->current_block(); ASSERT(!last_false_block->IsFinished()); if (deopt_then_) { - last_false_block->GotoNoSimulate(merge_block_); + builder_->GotoNoSimulate(last_false_block, merge_block_); builder_->PadEnvironmentForContinuation(last_true_block_, merge_block_); - last_true_block_->GotoNoSimulate(merge_block_); + builder_->GotoNoSimulate(last_true_block_, merge_block_); } else { - last_true_block_->GotoNoSimulate(merge_block_); + builder_->GotoNoSimulate(last_true_block_, merge_block_); if (deopt_else_) { builder_->PadEnvironmentForContinuation(last_false_block, merge_block_); } - last_false_block->GotoNoSimulate(merge_block_); + builder_->GotoNoSimulate(last_false_block, merge_block_); } builder_->set_current_block(merge_block_); } @@ -936,7 +995,7 @@ HValue* HGraphBuilder::LoopBuilder::BeginBody( phi_ = header_block_->AddNewPhi(env->values()->length()); phi_->AddInput(initial); env->Push(initial); - builder_->current_block()->GotoNoSimulate(header_block_); + builder_->GotoNoSimulate(header_block_); HEnvironment* body_env = env->Copy(); HEnvironment* exit_env = env->Copy(); @@ -948,11 +1007,8 @@ HValue* HGraphBuilder::LoopBuilder::BeginBody( builder_->set_current_block(header_block_); env->Pop(); - HCompareNumericAndBranch* compare = - new(zone()) HCompareNumericAndBranch(phi_, terminating, token); - compare->SetSuccessorAt(0, body_block_); - compare->SetSuccessorAt(1, exit_block_); - builder_->current_block()->Finish(compare); + builder_->FinishCurrentBlock(builder_->New<HCompareNumericAndBranch>( + phi_, terminating, token, body_block_, exit_block_)); builder_->set_current_block(body_block_); if (direction_ == kPreIncrement || direction_ == kPreDecrement) { @@ -976,10 +1032,10 @@ void HGraphBuilder::LoopBuilder::Break() { // It's the first time we saw a break. 
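// (Later breaks reuse this same trampoline: each Break() ends its current block with a GotoNoSimulate into exit_trampoline_block_, so every break path in the body merges into one loop-exit successor.)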
HEnvironment* env = exit_block_->last_environment()->Copy(); exit_trampoline_block_ = builder_->CreateBasicBlock(env); - exit_block_->GotoNoSimulate(exit_trampoline_block_); + builder_->GotoNoSimulate(exit_block_, exit_trampoline_block_); } - builder_->current_block()->GotoNoSimulate(exit_trampoline_block_); + builder_->GotoNoSimulate(exit_trampoline_block_); } @@ -999,7 +1055,7 @@ void HGraphBuilder::LoopBuilder::EndBody() { // Push the new increment value on the expression stack to merge into the phi. builder_->environment()->Push(increment_); HBasicBlock* last_block = builder_->current_block(); - last_block->GotoNoSimulate(header_block_); + builder_->GotoNoSimulate(last_block, header_block_); header_block_->loop_information()->RegisterBackEdge(last_block); if (exit_trampoline_block_ != NULL) { @@ -1017,14 +1073,16 @@ HGraph* HGraphBuilder::CreateGraph() { CompilationPhase phase("H_Block building", info_); set_current_block(graph()->entry_block()); if (!BuildGraph()) return NULL; - graph()->FinalizeUniqueValueIds(); + graph()->FinalizeUniqueness(); return graph_; } HInstruction* HGraphBuilder::AddInstruction(HInstruction* instr) { ASSERT(current_block() != NULL); - current_block()->AddInstruction(instr); + ASSERT(!FLAG_emit_opt_code_positions || + position_ != RelocInfo::kNoPosition || !info_->IsOptimizing()); + current_block()->AddInstruction(instr, position_); if (graph()->IsInsideNoSideEffectsScope()) { instr->SetFlag(HValue::kHasNoObservableSideEffects); } @@ -1032,8 +1090,27 @@ HInstruction* HGraphBuilder::AddInstruction(HInstruction* instr) { } -void HGraphBuilder::AddIncrementCounter(StatsCounter* counter, - HValue* context) { +void HGraphBuilder::FinishCurrentBlock(HControlInstruction* last) { + ASSERT(!FLAG_emit_opt_code_positions || !info_->IsOptimizing() || + position_ != RelocInfo::kNoPosition); + current_block()->Finish(last, position_); + if (last->IsReturn() || last->IsAbnormalExit()) { + set_current_block(NULL); + } +} + + +void HGraphBuilder::FinishExitCurrentBlock(HControlInstruction* instruction) { + ASSERT(!FLAG_emit_opt_code_positions || !info_->IsOptimizing() || + position_ != RelocInfo::kNoPosition); + current_block()->FinishExit(instruction, position_); + if (instruction->IsReturn() || instruction->IsAbnormalExit()) { + set_current_block(NULL); + } +} + + +void HGraphBuilder::AddIncrementCounter(StatsCounter* counter) { if (FLAG_native_code_counters && counter->Enabled()) { HValue* reference = Add<HConstant>(ExternalReference(counter)); HValue* old_value = Add<HLoadNamedField>(reference, @@ -1081,9 +1158,9 @@ void HGraphBuilder::FinishExitWithHardDeoptimization( PadEnvironmentForContinuation(current_block(), continuation); Add<HDeoptimize>(reason, Deoptimizer::EAGER); if (graph()->IsInsideNoSideEffectsScope()) { - current_block()->GotoNoSimulate(continuation); + GotoNoSimulate(continuation); } else { - current_block()->Goto(continuation); + Goto(continuation); } } @@ -1128,7 +1205,6 @@ HValue* HGraphBuilder::BuildCheckForCapacityGrow(HValue* object, HValue* length, HValue* key, bool is_js_array) { - Zone* zone = this->zone(); IfBuilder length_checker(this); Token::Value token = IsHoleyElementsKind(kind) ? 
Token::GTE : Token::EQ; @@ -1144,8 +1220,6 @@ HValue* HGraphBuilder::BuildCheckForCapacityGrow(HValue* object, Token::GTE); capacity_checker.Then(); - HValue* context = environment()->context(); - HValue* max_gap = Add<HConstant>(static_cast<int32_t>(JSObject::kMaxGap)); HValue* max_capacity = Add<HAdd>(current_capacity, max_gap); IfBuilder key_checker(this); @@ -1166,8 +1240,7 @@ HValue* HGraphBuilder::BuildCheckForCapacityGrow(HValue* object, capacity_checker.End(); if (is_js_array) { - HValue* new_length = AddInstruction( - HAdd::New(zone, context, key, graph_->GetConstant1())); + HValue* new_length = AddUncasted<HAdd>(key, graph_->GetConstant1()); new_length->ClearFlag(HValue::kCanOverflow); Add<HStoreNamedField>(object, HObjectAccess::ForArrayLength(kind), @@ -1252,6 +1325,135 @@ void HGraphBuilder::BuildTransitionElementsKind(HValue* object, } + +HValue* HGraphBuilder::BuildNumberToString(HValue* object, + Handle<Type> type) { + NoObservableSideEffectsScope scope(this); + + // Create a joinable continuation. + HIfContinuation found(graph()->CreateBasicBlock(), + graph()->CreateBasicBlock()); + + // Load the number string cache. + HValue* number_string_cache = + Add<HLoadRoot>(Heap::kNumberStringCacheRootIndex); + + // Make the hash mask from the length of the number string cache. It + // contains two elements (number and string) for each cache entry. + HValue* mask = AddLoadFixedArrayLength(number_string_cache); + mask->set_type(HType::Smi()); + mask = Add<HSar>(mask, graph()->GetConstant1()); + mask = Add<HSub>(mask, graph()->GetConstant1()); + + // Check whether object is a smi. + IfBuilder if_objectissmi(this); + if_objectissmi.If<HIsSmiAndBranch>(object); + if_objectissmi.Then(); + { + // Compute hash for smi similar to smi_get_hash(). + HValue* hash = Add<HBitwise>(Token::BIT_AND, object, mask); + + // Load the key. + HValue* key_index = Add<HShl>(hash, graph()->GetConstant1()); + HValue* key = Add<HLoadKeyed>(number_string_cache, key_index, + static_cast<HValue*>(NULL), + FAST_ELEMENTS, ALLOW_RETURN_HOLE); + + // Check if object == key. + IfBuilder if_objectiskey(this); + if_objectiskey.If<HCompareObjectEqAndBranch>(object, key); + if_objectiskey.Then(); + { + // Make the key_index available. + Push(key_index); + } + if_objectiskey.JoinContinuation(&found); + } + if_objectissmi.Else(); + { + if (type->Is(Type::Smi())) { + if_objectissmi.Deopt("Expected smi"); + } else { + // Check if the object is a heap number. + IfBuilder if_objectisnumber(this); + if_objectisnumber.If<HCompareMap>( + object, isolate()->factory()->heap_number_map()); + if_objectisnumber.Then(); + { + // Compute hash for heap number similar to double_get_hash(). + HValue* low = Add<HLoadNamedField>( + object, HObjectAccess::ForHeapNumberValueLowestBits()); + HValue* high = Add<HLoadNamedField>( + object, HObjectAccess::ForHeapNumberValueHighestBits()); + HValue* hash = Add<HBitwise>(Token::BIT_XOR, low, high); + hash = Add<HBitwise>(Token::BIT_AND, hash, mask); + + // Load the key. + HValue* key_index = Add<HShl>(hash, graph()->GetConstant1()); + HValue* key = Add<HLoadKeyed>(number_string_cache, key_index, + static_cast<HValue*>(NULL), + FAST_ELEMENTS, ALLOW_RETURN_HOLE); + + // Check if key is a heap number (the number string cache contains only + // SMIs and heap numbers, so it is sufficient to do a SMI check here). + IfBuilder if_keyisnotsmi(this); + if_keyisnotsmi.IfNot<HIsSmiAndBranch>(key); + if_keyisnotsmi.Then(); + { + // Check if values of key and object match. 
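The cache probe built above can be restated standalone. A minimal sketch of the addressing scheme (helper names here are illustrative, not V8's):

#include <stdint.h>
#include <string.h>

// smi_get_hash(): the small integer itself, masked to the table size.
static uint32_t SmiHash(int32_t value, uint32_t mask) {
  return static_cast<uint32_t>(value) & mask;
}

// double_get_hash(): XOR of the two 32-bit halves of the IEEE-754 bits,
// matching the lowest-bits/highest-bits field loads above.
static uint32_t DoubleHash(double value, uint32_t mask) {
  uint64_t bits;
  memcpy(&bits, &value, sizeof(bits));
  return (static_cast<uint32_t>(bits) ^ static_cast<uint32_t>(bits >> 32)) &
         mask;
}

The backing store is one flat array of (number, string) pairs: for a cache of N entries mask == N - 1, the candidate key sits at index hash << 1, and a hit's cached string at (hash << 1) + 1, which is the same HShl / HAdd arithmetic the builder emits.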
+ IfBuilder if_keyeqobject(this); + if_keyeqobject.If<HCompareNumericAndBranch>( + Add<HLoadNamedField>(key, HObjectAccess::ForHeapNumberValue()), + Add<HLoadNamedField>(object, HObjectAccess::ForHeapNumberValue()), + Token::EQ); + if_keyeqobject.Then(); + { + // Make the key_index available. + Push(key_index); + } + if_keyeqobject.JoinContinuation(&found); + } + if_keyisnotsmi.JoinContinuation(&found); + } + if_objectisnumber.Else(); + { + if (type->Is(Type::Number())) { + if_objectisnumber.Deopt("Expected heap number"); + } + } + if_objectisnumber.JoinContinuation(&found); + } + } + if_objectissmi.JoinContinuation(&found); + + // Check for cache hit. + IfBuilder if_found(this, &found); + if_found.Then(); + { + // Count number to string operation in native code. + AddIncrementCounter(isolate()->counters()->number_to_string_native()); + + // Load the value in case of cache hit. + HValue* key_index = Pop(); + HValue* value_index = Add<HAdd>(key_index, graph()->GetConstant1()); + Push(Add<HLoadKeyed>(number_string_cache, value_index, + static_cast<HValue*>(NULL), + FAST_ELEMENTS, ALLOW_RETURN_HOLE)); + } + if_found.Else(); + { + // Cache miss, fallback to runtime. + Add<HPushArgument>(object); + Push(Add<HCallRuntime>( + isolate()->factory()->empty_string(), + Runtime::FunctionForId(Runtime::kNumberToStringSkipCache), + 1)); + } + if_found.End(); + + return Pop(); +} + + HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess( HValue* checked_object, HValue* key, @@ -1303,7 +1505,7 @@ HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess( HValue* bounds_check = negative_checker.If<HCompareNumericAndBranch>( key, graph()->GetConstant0(), Token::GTE); negative_checker.Then(); - HInstruction* result = AddExternalArrayElementAccess( + HInstruction* result = AddElementAccess( external_elements, key, val, bounds_check, elements_kind, is_store); negative_checker.ElseDeopt("Negative key encountered"); length_checker.End(); @@ -1313,7 +1515,7 @@ HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess( checked_key = Add<HBoundsCheck>(key, length); HLoadExternalArrayPointer* external_elements = Add<HLoadExternalArrayPointer>(elements); - return AddExternalArrayElementAccess( + return AddElementAccess( external_elements, checked_key, val, checked_object, elements_kind, is_store); } @@ -1346,14 +1548,13 @@ HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess( elements_kind, length); } else { HCheckMaps* check_cow_map = Add<HCheckMaps>( - elements, isolate()->factory()->fixed_array_map(), - top_info()); + elements, isolate()->factory()->fixed_array_map(), top_info()); check_cow_map->ClearGVNFlag(kDependsOnElementsKind); } } } - return AddFastElementAccess(elements, checked_key, val, checked_object, - elements_kind, is_store, load_mode, store_mode); + return AddElementAccess(elements, checked_key, val, checked_object, + elements_kind, is_store, load_mode); } @@ -1443,85 +1644,31 @@ HInnerAllocatedObject* HGraphBuilder::BuildJSArrayHeader(HValue* array, } -HInstruction* HGraphBuilder::AddExternalArrayElementAccess( - HValue* external_elements, +HInstruction* HGraphBuilder::AddElementAccess( + HValue* elements, HValue* checked_key, HValue* val, HValue* dependency, ElementsKind elements_kind, - bool is_store) { + bool is_store, + LoadKeyedHoleMode load_mode) { if (is_store) { ASSERT(val != NULL); - switch (elements_kind) { - case EXTERNAL_PIXEL_ELEMENTS: { - val = Add<HClampToUint8>(val); - break; - } - case EXTERNAL_BYTE_ELEMENTS: - case 
EXTERNAL_UNSIGNED_BYTE_ELEMENTS: - case EXTERNAL_SHORT_ELEMENTS: - case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: - case EXTERNAL_INT_ELEMENTS: - case EXTERNAL_UNSIGNED_INT_ELEMENTS: { - break; - } - case EXTERNAL_FLOAT_ELEMENTS: - case EXTERNAL_DOUBLE_ELEMENTS: - break; - case FAST_SMI_ELEMENTS: - case FAST_ELEMENTS: - case FAST_DOUBLE_ELEMENTS: - case FAST_HOLEY_SMI_ELEMENTS: - case FAST_HOLEY_ELEMENTS: - case FAST_HOLEY_DOUBLE_ELEMENTS: - case DICTIONARY_ELEMENTS: - case NON_STRICT_ARGUMENTS_ELEMENTS: - UNREACHABLE(); - break; + if (elements_kind == EXTERNAL_PIXEL_ELEMENTS) { + val = Add<HClampToUint8>(val); } - return Add<HStoreKeyed>(external_elements, checked_key, val, elements_kind); - } else { - ASSERT(val == NULL); - HLoadKeyed* load = Add<HLoadKeyed>(external_elements, - checked_key, - dependency, - elements_kind); - if (FLAG_opt_safe_uint32_operations && - elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) { - graph()->RecordUint32Instruction(load); - } - return load; + return Add<HStoreKeyed>(elements, checked_key, val, elements_kind); } -} - -HInstruction* HGraphBuilder::AddFastElementAccess( - HValue* elements, - HValue* checked_key, - HValue* val, - HValue* load_dependency, - ElementsKind elements_kind, - bool is_store, - LoadKeyedHoleMode load_mode, - KeyedAccessStoreMode store_mode) { - if (is_store) { - ASSERT(val != NULL); - switch (elements_kind) { - case FAST_SMI_ELEMENTS: - case FAST_HOLEY_SMI_ELEMENTS: - case FAST_ELEMENTS: - case FAST_HOLEY_ELEMENTS: - case FAST_DOUBLE_ELEMENTS: - case FAST_HOLEY_DOUBLE_ELEMENTS: - return Add<HStoreKeyed>(elements, checked_key, val, elements_kind); - default: - UNREACHABLE(); - return NULL; - } + ASSERT(!is_store); + ASSERT(val == NULL); + HLoadKeyed* load = Add<HLoadKeyed>( + elements, checked_key, dependency, elements_kind, load_mode); + if (FLAG_opt_safe_uint32_operations && + elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) { + graph()->RecordUint32Instruction(load); } - // It's an element load (!is_store). - return Add<HLoadKeyed>( - elements, checked_key, load_dependency, elements_kind, load_mode); + return load; } @@ -1771,9 +1918,8 @@ HValue* HGraphBuilder::BuildCloneShallowArray(HValue* boilerplate, void HGraphBuilder::BuildCompareNil( HValue* value, Handle<Type> type, - int position, HIfContinuation* continuation) { - IfBuilder if_nil(this, position); + IfBuilder if_nil(this); bool some_case_handled = false; bool some_case_missing = false; @@ -1824,12 +1970,11 @@ void HGraphBuilder::BuildCompareNil( HValue* HGraphBuilder::BuildCreateAllocationMemento(HValue* previous_object, int previous_object_size, HValue* alloc_site) { - // TODO(mvstanton): ASSERT altered to CHECK to diagnose chromium bug 284577 - CHECK(alloc_site != NULL); + ASSERT(alloc_site != NULL); HInnerAllocatedObject* alloc_memento = Add<HInnerAllocatedObject>( previous_object, previous_object_size); - Handle<Map> alloc_memento_map( - isolate()->heap()->allocation_memento_map()); + Handle<Map> alloc_memento_map = + isolate()->factory()->allocation_memento_map(); AddStoreMapConstant(alloc_memento, alloc_memento_map); HObjectAccess access = HObjectAccess::ForAllocationMementoSite(); Add<HStoreNamedField>(alloc_memento, access, alloc_site); @@ -1886,8 +2031,7 @@ HValue* HGraphBuilder::JSArrayBuilder::EmitMapCode() { // No need for a context lookup if the kind_ matches the initial // map, because we can just load the map in that case. 
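// (A JSFunction caches its initial map in the prototype-or-initial-map slot, so when kind_ already matches we can read that field directly instead of going through the native context lookup below.)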
HObjectAccess access = HObjectAccess::ForPrototypeOrInitialMap(); - return builder()->AddInstruction( - builder()->BuildLoadNamedField(constructor_function_, access)); + return builder()->AddLoadNamedField(constructor_function_, access); } HInstruction* native_context = builder()->BuildGetNativeContext(); @@ -1907,8 +2051,7 @@ HValue* HGraphBuilder::JSArrayBuilder::EmitMapCode() { HValue* HGraphBuilder::JSArrayBuilder::EmitInternalMapCode() { // Find the map near the constructor function HObjectAccess access = HObjectAccess::ForPrototypeOrInitialMap(); - return builder()->AddInstruction( - builder()->BuildLoadNamedField(constructor_function_, access)); + return builder()->AddLoadNamedField(constructor_function_, access); } @@ -1983,6 +2126,11 @@ HValue* HGraphBuilder::JSArrayBuilder::AllocateArray(HValue* size_in_bytes, HAllocate* new_object = builder()->Add<HAllocate>(size_in_bytes, HType::JSArray(), NOT_TENURED, JS_ARRAY_TYPE); + // Folded array allocation should be aligned if it has fast double elements. + if (IsFastDoubleElementsKind(kind_)) { + new_object->MakeDoubleAligned(); + } + // Fill in the fields: map, properties, length HValue* map; if (allocation_site_payload_ == NULL) { @@ -2042,6 +2190,9 @@ HOptimizedGraphBuilder::HOptimizedGraphBuilder(CompilationInfo* info) // to know it's the initial state. function_state_= &initial_function_state_; InitializeAstVisitor(info->isolate()); + if (FLAG_emit_opt_code_positions) { + SetSourcePosition(info->shared_info()->start_position()); + } } @@ -2054,8 +2205,8 @@ HBasicBlock* HOptimizedGraphBuilder::CreateJoin(HBasicBlock* first, return first; } else { HBasicBlock* join_block = graph()->CreateBasicBlock(); - first->Goto(join_block); - second->Goto(join_block); + Goto(first, join_block); + Goto(second, join_block); join_block->SetJoinId(join_id); return join_block; } @@ -2066,7 +2217,7 @@ HBasicBlock* HOptimizedGraphBuilder::JoinContinue(IterationStatement* statement, HBasicBlock* exit_block, HBasicBlock* continue_block) { if (continue_block != NULL) { - if (exit_block != NULL) exit_block->Goto(continue_block); + if (exit_block != NULL) Goto(exit_block, continue_block); continue_block->SetJoinId(statement->ContinueId()); return continue_block; } @@ -2079,10 +2230,10 @@ HBasicBlock* HOptimizedGraphBuilder::CreateLoop(IterationStatement* statement, HBasicBlock* body_exit, HBasicBlock* loop_successor, HBasicBlock* break_block) { - if (body_exit != NULL) body_exit->Goto(loop_entry); + if (body_exit != NULL) Goto(body_exit, loop_entry); loop_entry->PostProcessLoopHeader(statement); if (break_block != NULL) { - if (loop_successor != NULL) loop_successor->Goto(break_block); + if (loop_successor != NULL) Goto(loop_successor, break_block); break_block->SetJoinId(statement->ExitId()); return break_block; } @@ -2090,8 +2241,26 @@ HBasicBlock* HOptimizedGraphBuilder::CreateLoop(IterationStatement* statement, } -void HBasicBlock::FinishExit(HControlInstruction* instruction) { - Finish(instruction); +// Build a new loop header block and set it as the current block. +HBasicBlock* HOptimizedGraphBuilder::BuildLoopEntry() { + HBasicBlock* loop_entry = CreateLoopHeaderBlock(); + Goto(loop_entry); + set_current_block(loop_entry); + return loop_entry; +} + + +HBasicBlock* HOptimizedGraphBuilder::BuildLoopEntry( + IterationStatement* statement) { + HBasicBlock* loop_entry = osr()->HasOsrEntryAt(statement) + ? 
osr()->BuildOsrLoopEntry(statement) + : BuildLoopEntry(); + return loop_entry; +} + + +void HBasicBlock::FinishExit(HControlInstruction* instruction, int position) { + Finish(instruction, position); ClearEnvironment(); } @@ -2109,7 +2278,6 @@ HGraph::HGraph(CompilationInfo* info) zone_(info->zone()), is_recursive_(false), use_optimistic_licm_(false), - has_soft_deoptimize_(false), depends_on_empty_array_proto_elements_(false), type_change_checksum_(0), maximum_environment_size_(0), @@ -2137,12 +2305,12 @@ HBasicBlock* HGraph::CreateBasicBlock() { } -void HGraph::FinalizeUniqueValueIds() { +void HGraph::FinalizeUniqueness() { DisallowHeapAllocation no_gc; ASSERT(!isolate()->optimizing_compiler_thread()->IsOptimizerThread()); for (int i = 0; i < blocks()->length(); ++i) { for (HInstructionIterator it(blocks()->at(i)); !it.Done(); it.Advance()) { - it.Current()->FinalizeUniqueValueId(); + it.Current()->FinalizeUniqueness(); } } } @@ -2640,7 +2808,7 @@ void EffectContext::ReturnControl(HControlInstruction* instr, HBasicBlock* empty_false = owner()->graph()->CreateBasicBlock(); instr->SetSuccessorAt(0, empty_true); instr->SetSuccessorAt(1, empty_false); - owner()->current_block()->Finish(instr); + owner()->FinishCurrentBlock(instr); HBasicBlock* join = owner()->CreateJoin(empty_true, empty_false, ast_id); owner()->set_current_block(join); } @@ -2650,7 +2818,7 @@ void EffectContext::ReturnContinuation(HIfContinuation* continuation, BailoutId ast_id) { HBasicBlock* true_branch = NULL; HBasicBlock* false_branch = NULL; - continuation->Continue(&true_branch, &false_branch, NULL); + continuation->Continue(&true_branch, &false_branch); if (!continuation->IsTrueReachable()) { owner()->set_current_block(false_branch); } else if (!continuation->IsFalseReachable()) { @@ -2684,7 +2852,7 @@ void ValueContext::ReturnControl(HControlInstruction* instr, BailoutId ast_id) { HBasicBlock* materialize_true = owner()->graph()->CreateBasicBlock(); instr->SetSuccessorAt(0, materialize_true); instr->SetSuccessorAt(1, materialize_false); - owner()->current_block()->Finish(instr); + owner()->FinishCurrentBlock(instr); owner()->set_current_block(materialize_true); owner()->Push(owner()->graph()->GetConstantTrue()); owner()->set_current_block(materialize_false); @@ -2699,7 +2867,7 @@ void ValueContext::ReturnContinuation(HIfContinuation* continuation, BailoutId ast_id) { HBasicBlock* materialize_true = NULL; HBasicBlock* materialize_false = NULL; - continuation->Continue(&materialize_true, &materialize_false, NULL); + continuation->Continue(&materialize_true, &materialize_false); if (continuation->IsTrueReachable()) { owner()->set_current_block(materialize_true); owner()->Push(owner()->graph()->GetConstantTrue()); @@ -2739,9 +2907,9 @@ void TestContext::ReturnControl(HControlInstruction* instr, BailoutId ast_id) { HBasicBlock* empty_false = owner()->graph()->CreateBasicBlock(); instr->SetSuccessorAt(0, empty_true); instr->SetSuccessorAt(1, empty_false); - owner()->current_block()->Finish(instr); - empty_true->Goto(if_true(), owner()->function_state()); - empty_false->Goto(if_false(), owner()->function_state()); + owner()->FinishCurrentBlock(instr); + owner()->Goto(empty_true, if_true(), owner()->function_state()); + owner()->Goto(empty_false, if_false(), owner()->function_state()); owner()->set_current_block(NULL); } @@ -2750,12 +2918,12 @@ void TestContext::ReturnContinuation(HIfContinuation* continuation, BailoutId ast_id) { HBasicBlock* true_branch = NULL; HBasicBlock* false_branch = NULL; - 
continuation->Continue(&true_branch, &false_branch, NULL); + continuation->Continue(&true_branch, &false_branch); if (continuation->IsTrueReachable()) { - true_branch->Goto(if_true(), owner()->function_state()); + owner()->Goto(true_branch, if_true(), owner()->function_state()); } if (continuation->IsFalseReachable()) { - false_branch->Goto(if_false(), owner()->function_state()); + owner()->Goto(false_branch, if_false(), owner()->function_state()); } owner()->set_current_block(NULL); } @@ -2773,11 +2941,11 @@ void TestContext::BuildBranch(HValue* value) { HBasicBlock* empty_true = builder->graph()->CreateBasicBlock(); HBasicBlock* empty_false = builder->graph()->CreateBasicBlock(); ToBooleanStub::Types expected(condition()->to_boolean_types()); - HBranch* test = new(zone()) HBranch(value, expected, empty_true, empty_false); - builder->current_block()->Finish(test); + builder->FinishCurrentBlock(builder->New<HBranch>( + value, expected, empty_true, empty_false)); - empty_true->Goto(if_true(), builder->function_state()); - empty_false->Goto(if_false(), builder->function_state()); + owner()->Goto(empty_true, if_true(), builder->function_state()); + owner()->Goto(empty_false , if_false(), builder->function_state()); builder->set_current_block(NULL); } @@ -2894,7 +3062,7 @@ bool HOptimizedGraphBuilder::BuildGraph() { // not replayed by the Lithium translation. HEnvironment* initial_env = environment()->CopyWithoutHistory(); HBasicBlock* body_entry = CreateBasicBlock(initial_env); - current_block()->Goto(body_entry); + Goto(body_entry); body_entry->SetJoinId(BailoutId::FunctionEntry()); set_current_block(body_entry); @@ -2906,8 +3074,7 @@ bool HOptimizedGraphBuilder::BuildGraph() { VisitDeclarations(scope->declarations()); Add<HSimulate>(BailoutId::Declarations()); - HValue* context = environment()->context(); - Add<HStackCheck>(context, HStackCheck::kFunctionEntry); + Add<HStackCheck>(HStackCheck::kFunctionEntry); VisitStatements(current_info()->function()->body()); if (HasStackOverflow()) return false; @@ -2932,7 +3099,7 @@ bool HOptimizedGraphBuilder::BuildGraph() { type_info->set_inlined_type_change_checksum(composite_checksum); // Perform any necessary OSR-specific cleanups or changes to the graph. - osr_->FinishGraph(); + osr()->FinishGraph(); return true; } @@ -2957,7 +3124,6 @@ bool HGraph::Optimize(BailoutReason* bailout_reason) { Run<HEnvironmentLivenessAnalysisPhase>(); } - Run<HPropagateDeoptimizingMarkPhase>(); if (!CheckConstPhiUses()) { *bailout_reason = kUnsupportedPhiUseOfConstVariable; return false; @@ -2968,11 +3134,16 @@ bool HGraph::Optimize(BailoutReason* bailout_reason) { return false; } - // Remove dead code and phis - if (FLAG_dead_code_elimination) Run<HDeadCodeEliminationPhase>(); + // Find and mark unreachable code to simplify optimizations, especially gvn, + // where unreachable code could unnecessarily defeat LICM. + Run<HMarkUnreachableBlocksPhase>(); + if (FLAG_check_elimination) Run<HCheckEliminationPhase>(); + if (FLAG_dead_code_elimination) Run<HDeadCodeEliminationPhase>(); if (FLAG_use_escape_analysis) Run<HEscapeAnalysisPhase>(); + if (FLAG_load_elimination) Run<HLoadEliminationPhase>(); + CollectPhis(); if (has_osr()) osr()->FinishOsrValues(); @@ -3006,17 +3177,17 @@ bool HGraph::Optimize(BailoutReason* bailout_reason) { // Eliminate redundant stack checks on backwards branches. 
Run<HStackCheckEliminationPhase>(); - if (FLAG_array_bounds_checks_elimination) { - Run<HBoundsCheckEliminationPhase>(); - } - if (FLAG_array_bounds_checks_hoisting) { - Run<HBoundsCheckHoistingPhase>(); - } + if (FLAG_array_bounds_checks_elimination) Run<HBoundsCheckEliminationPhase>(); + if (FLAG_array_bounds_checks_hoisting) Run<HBoundsCheckHoistingPhase>(); if (FLAG_array_index_dehoisting) Run<HDehoistIndexComputationsPhase>(); if (FLAG_dead_code_elimination) Run<HDeadCodeEliminationPhase>(); RestoreActualValues(); + // Find unreachable code a second time, GVN and other optimizations may have + // made blocks unreachable that were previously reachable. + Run<HMarkUnreachableBlocksPhase>(); + return true; } @@ -3049,12 +3220,6 @@ void HGraph::RestoreActualValues() { } -void HGraphBuilder::PushAndAdd(HInstruction* instr) { - Push(instr); - AddInstruction(instr); -} - - template <class Instruction> HInstruction* HOptimizedGraphBuilder::PreProcessCall(Instruction* call) { int count = call->argument_count(); @@ -3075,10 +3240,6 @@ void HOptimizedGraphBuilder::SetUpScope(Scope* scope) { HInstruction* context = Add<HContext>(); environment()->BindContext(context); - HConstant* undefined_constant = HConstant::cast(Add<HConstant>( - isolate()->factory()->undefined_value())); - graph()->set_undefined_constant(undefined_constant); - // Create an arguments object containing the initial parameters. Set the // initial values of parameters including "this" having parameter index 0. ASSERT_EQ(scope->num_parameters() + 1, environment()->parameter_count()); @@ -3092,6 +3253,7 @@ void HOptimizedGraphBuilder::SetUpScope(Scope* scope) { AddInstruction(arguments_object); graph()->SetArgumentsObject(arguments_object); + HConstant* undefined_constant = graph()->GetConstantUndefined(); // Initialize specials and locals to undefined. 
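// (GetConstantUndefined() now goes through DEFINE_GET_CONSTANT above, so the cached undefined constant is re-linked into the entry block if a dead-code pass unlinked it; the builder no longer keeps a private copy.)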
for (int i = environment()->parameter_count() + 1; i < environment()->length(); @@ -3134,7 +3296,7 @@ void HOptimizedGraphBuilder::VisitBlock(Block* stmt) { } HBasicBlock* break_block = break_info.break_block(); if (break_block != NULL) { - if (current_block() != NULL) current_block()->Goto(break_block); + if (current_block() != NULL) Goto(break_block); break_block->SetJoinId(stmt->ExitId()); set_current_block(break_block); } @@ -3244,7 +3406,7 @@ void HOptimizedGraphBuilder::VisitContinueStatement( HBasicBlock* continue_block = break_scope()->Get( stmt->target(), BreakAndContinueScope::CONTINUE, &drop_extra); Drop(drop_extra); - current_block()->Goto(continue_block); + Goto(continue_block); set_current_block(NULL); } @@ -3257,7 +3419,7 @@ void HOptimizedGraphBuilder::VisitBreakStatement(BreakStatement* stmt) { HBasicBlock* break_block = break_scope()->Get( stmt->target(), BreakAndContinueScope::BREAK, &drop_extra); Drop(drop_extra); - current_block()->Goto(break_block); + Goto(break_block); set_current_block(NULL); } @@ -3280,26 +3442,26 @@ void HOptimizedGraphBuilder::VisitReturnStatement(ReturnStatement* stmt) { if (context->IsTest()) { TestContext* test = TestContext::cast(context); CHECK_ALIVE(VisitForEffect(stmt->expression())); - current_block()->Goto(test->if_true(), state); + Goto(test->if_true(), state); } else if (context->IsEffect()) { CHECK_ALIVE(VisitForEffect(stmt->expression())); - current_block()->Goto(function_return(), state); + Goto(function_return(), state); } else { ASSERT(context->IsValue()); CHECK_ALIVE(VisitForValue(stmt->expression())); HValue* return_value = Pop(); HValue* receiver = environment()->arguments_environment()->Lookup(0); HHasInstanceTypeAndBranch* typecheck = - new(zone()) HHasInstanceTypeAndBranch(return_value, - FIRST_SPEC_OBJECT_TYPE, - LAST_SPEC_OBJECT_TYPE); + New<HHasInstanceTypeAndBranch>(return_value, + FIRST_SPEC_OBJECT_TYPE, + LAST_SPEC_OBJECT_TYPE); HBasicBlock* if_spec_object = graph()->CreateBasicBlock(); HBasicBlock* not_spec_object = graph()->CreateBasicBlock(); typecheck->SetSuccessorAt(0, if_spec_object); typecheck->SetSuccessorAt(1, not_spec_object); - current_block()->Finish(typecheck); - if_spec_object->AddLeaveInlined(return_value, state); - not_spec_object->AddLeaveInlined(receiver, state); + FinishCurrentBlock(typecheck); + AddLeaveInlined(if_spec_object, return_value, state); + AddLeaveInlined(not_spec_object, receiver, state); } } else if (state->inlining_kind() == SETTER_CALL_RETURN) { // Return from an inlined setter call. The returned value is never used, the @@ -3309,11 +3471,11 @@ void HOptimizedGraphBuilder::VisitReturnStatement(ReturnStatement* stmt) { HValue* rhs = environment()->arguments_environment()->Lookup(1); context->ReturnValue(rhs); } else if (context->IsEffect()) { - current_block()->Goto(function_return(), state); + Goto(function_return(), state); } else { ASSERT(context->IsValue()); HValue* rhs = environment()->arguments_environment()->Lookup(1); - current_block()->AddLeaveInlined(rhs, state); + AddLeaveInlined(rhs, state); } } else { // Return from a normal inlined function. 
Visit the subexpression in the @@ -3323,11 +3485,11 @@ void HOptimizedGraphBuilder::VisitReturnStatement(ReturnStatement* stmt) { VisitForControl(stmt->expression(), test->if_true(), test->if_false()); } else if (context->IsEffect()) { CHECK_ALIVE(VisitForEffect(stmt->expression())); - current_block()->Goto(function_return(), state); + Goto(function_return(), state); } else { ASSERT(context->IsValue()); CHECK_ALIVE(VisitForValue(stmt->expression())); - current_block()->AddLeaveInlined(Pop(), state); + AddLeaveInlined(Pop(), state); } } set_current_block(NULL); @@ -3361,8 +3523,6 @@ void HOptimizedGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) { return Bailout(kSwitchStatementMixedOrNonLiteralSwitchLabels); } - HValue* context = environment()->context(); - CHECK_ALIVE(VisitForValue(stmt->tag())); Add<HSimulate>(stmt->EntryId()); HValue* tag_value = Pop(); @@ -3373,13 +3533,11 @@ void HOptimizedGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) { // Test switch's tag value if all clauses are string literals if (stmt->switch_type() == SwitchStatement::STRING_SWITCH) { - string_check = new(zone()) HIsStringAndBranch(tag_value); first_test_block = graph()->CreateBasicBlock(); not_string_block = graph()->CreateBasicBlock(); - - string_check->SetSuccessorAt(0, first_test_block); - string_check->SetSuccessorAt(1, not_string_block); - current_block()->Finish(string_check); + string_check = New<HIsStringAndBranch>( + tag_value, first_test_block, not_string_block); + FinishCurrentBlock(string_check); set_current_block(first_test_block); } @@ -3408,21 +3566,21 @@ void HOptimizedGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) { } HCompareNumericAndBranch* compare_ = - new(zone()) HCompareNumericAndBranch(tag_value, - label_value, - Token::EQ_STRICT); + New<HCompareNumericAndBranch>(tag_value, + label_value, + Token::EQ_STRICT); compare_->set_observed_input_representation( Representation::Smi(), Representation::Smi()); compare = compare_; } else { - compare = new(zone()) HStringCompareAndBranch(context, tag_value, - label_value, - Token::EQ_STRICT); + compare = New<HStringCompareAndBranch>(tag_value, + label_value, + Token::EQ_STRICT); } compare->SetSuccessorAt(0, body_block); compare->SetSuccessorAt(1, next_test_block); - current_block()->Finish(compare); + FinishCurrentBlock(compare); set_current_block(next_test_block); } @@ -3455,6 +3613,13 @@ void HOptimizedGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) { last_block = NULL; // Cleared to indicate we've handled it. } } else { + // If the current test block is deoptimizing due to an unhandled clause + // of the switch, the test instruction is in the next block since the + // deopt must end the current block. 
+ if (curr_test_block->IsDeoptimizing()) { + ASSERT(curr_test_block->end()->SecondSuccessor() == NULL); + curr_test_block = curr_test_block->end()->FirstSuccessor(); + } normal_block = curr_test_block->end()->FirstSuccessor(); curr_test_block = curr_test_block->end()->SecondSuccessor(); } @@ -3496,8 +3661,8 @@ void HOptimizedGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) { last_block, stmt->ExitId())); } else { - if (fall_through_block != NULL) fall_through_block->Goto(break_block); - if (last_block != NULL) last_block->Goto(break_block); + if (fall_through_block != NULL) Goto(fall_through_block, break_block); + if (last_block != NULL) Goto(last_block, break_block); break_block->SetJoinId(stmt->ExitId()); set_current_block(break_block); } @@ -3509,9 +3674,8 @@ void HOptimizedGraphBuilder::VisitLoopBody(IterationStatement* stmt, BreakAndContinueInfo* break_info) { BreakAndContinueScope push(break_info, this); Add<HSimulate>(stmt->StackCheckId()); - HValue* context = environment()->context(); - HStackCheck* stack_check = HStackCheck::cast(Add<HStackCheck>( - context, HStackCheck::kBackwardsBranch)); + HStackCheck* stack_check = + HStackCheck::cast(Add<HStackCheck>(HStackCheck::kBackwardsBranch)); ASSERT(loop_entry->IsLoopHeader()); loop_entry->loop_information()->set_stack_check(stack_check); CHECK_BAILOUT(Visit(stmt->body())); @@ -3523,7 +3687,7 @@ void HOptimizedGraphBuilder::VisitDoWhileStatement(DoWhileStatement* stmt) { ASSERT(current_block() != NULL); ASSERT(current_block()->HasPredecessor()); ASSERT(current_block() != NULL); - HBasicBlock* loop_entry = osr_->BuildPossibleOsrLoopEntry(stmt); + HBasicBlock* loop_entry = BuildLoopEntry(stmt); BreakAndContinueInfo break_info(stmt); CHECK_BAILOUT(VisitLoopBody(stmt, loop_entry, &break_info)); @@ -3562,7 +3726,7 @@ void HOptimizedGraphBuilder::VisitWhileStatement(WhileStatement* stmt) { ASSERT(current_block() != NULL); ASSERT(current_block()->HasPredecessor()); ASSERT(current_block() != NULL); - HBasicBlock* loop_entry = osr_->BuildPossibleOsrLoopEntry(stmt); + HBasicBlock* loop_entry = BuildLoopEntry(stmt); // If the condition is constant true, do not generate a branch. HBasicBlock* loop_successor = NULL; @@ -3604,7 +3768,7 @@ void HOptimizedGraphBuilder::VisitForStatement(ForStatement* stmt) { CHECK_ALIVE(Visit(stmt->init())); } ASSERT(current_block() != NULL); - HBasicBlock* loop_entry = osr_->BuildPossibleOsrLoopEntry(stmt); + HBasicBlock* loop_entry = BuildLoopEntry(stmt); HBasicBlock* loop_successor = NULL; if (stmt->cond() != NULL) { @@ -3687,14 +3851,14 @@ void HOptimizedGraphBuilder::VisitForInStatement(ForInStatement* stmt) { HForInCacheArray::cast(array)->set_index_cache( HForInCacheArray::cast(index_cache)); - HBasicBlock* loop_entry = osr_->BuildPossibleOsrLoopEntry(stmt); + HBasicBlock* loop_entry = BuildLoopEntry(stmt); HValue* index = environment()->ExpressionStackAt(0); HValue* limit = environment()->ExpressionStackAt(1); // Check that we still have more keys. 
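// (The enumeration state lives on the expression stack, with the running index at slot 0 and the key-count limit at slot 1; all five for-in slots are dropped again once this compare routes control to loop_successor.)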
HCompareNumericAndBranch* compare_index = - new(zone()) HCompareNumericAndBranch(index, limit, Token::LT); + New<HCompareNumericAndBranch>(index, limit, Token::LT); compare_index->set_observed_input_representation( Representation::Smi(), Representation::Smi()); @@ -3703,7 +3867,7 @@ void HOptimizedGraphBuilder::VisitForInStatement(ForInStatement* stmt) { compare_index->SetSuccessorAt(0, loop_body); compare_index->SetSuccessorAt(1, loop_successor); - current_block()->Finish(compare_index); + FinishCurrentBlock(compare_index); set_current_block(loop_successor); Drop(5); @@ -3733,9 +3897,7 @@ void HOptimizedGraphBuilder::VisitForInStatement(ForInStatement* stmt) { set_current_block(body_exit); HValue* current_index = Pop(); - HInstruction* new_index = New<HAdd>(current_index, - graph()->GetConstant1()); - PushAndAdd(new_index); + Push(Add<HAdd>(current_index, graph()->GetConstant1())); body_exit = current_block(); } @@ -3782,6 +3944,11 @@ void HOptimizedGraphBuilder::VisitDebuggerStatement(DebuggerStatement* stmt) { } +void HOptimizedGraphBuilder::VisitCaseClause(CaseClause* clause) { + UNREACHABLE(); +} + + static Handle<SharedFunctionInfo> SearchSharedFunctionInfo( Code* unoptimized_code, FunctionLiteral* expr) { int start_position = expr->start_position(); @@ -3812,19 +3979,18 @@ void HOptimizedGraphBuilder::VisitFunctionLiteral(FunctionLiteral* expr) { } // We also have a stack overflow if the recursive compilation did. if (HasStackOverflow()) return; - HValue* context = environment()->context(); HFunctionLiteral* instr = - new(zone()) HFunctionLiteral(context, shared_info, expr->pretenure()); + New<HFunctionLiteral>(shared_info, expr->pretenure()); return ast_context()->ReturnInstruction(instr, expr->id()); } -void HOptimizedGraphBuilder::VisitSharedFunctionInfoLiteral( - SharedFunctionInfoLiteral* expr) { +void HOptimizedGraphBuilder::VisitNativeFunctionLiteral( + NativeFunctionLiteral* expr) { ASSERT(!HasStackOverflow()); ASSERT(current_block() != NULL); ASSERT(current_block()->HasPredecessor()); - return Bailout(kSharedFunctionInfoLiteral); + return Bailout(kNativeFunctionLiteral); } @@ -3938,19 +4104,15 @@ void HOptimizedGraphBuilder::VisitVariableProxy(VariableProxy* expr) { return ast_context()->ReturnInstruction(constant, expr->id()); } else { HLoadGlobalCell* instr = - new(zone()) HLoadGlobalCell(cell, lookup.GetPropertyDetails()); + New<HLoadGlobalCell>(cell, lookup.GetPropertyDetails()); return ast_context()->ReturnInstruction(instr, expr->id()); } } else { - HValue* context = environment()->context(); - HGlobalObject* global_object = new(zone()) HGlobalObject(context); - AddInstruction(global_object); + HGlobalObject* global_object = Add<HGlobalObject>(); HLoadGlobalGeneric* instr = - new(zone()) HLoadGlobalGeneric(context, - global_object, - variable->name(), - ast_context()->is_for_typeof()); - instr->set_position(expr->position()); + New<HLoadGlobalGeneric>(global_object, + variable->name(), + ast_context()->is_for_typeof()); return ast_context()->ReturnInstruction(instr, expr->id()); } } @@ -3993,13 +4155,10 @@ void HOptimizedGraphBuilder::VisitRegExpLiteral(RegExpLiteral* expr) { ASSERT(current_block()->HasPredecessor()); Handle<JSFunction> closure = function_state()->compilation_info()->closure(); Handle<FixedArray> literals(closure->literals()); - HValue* context = environment()->context(); - - HRegExpLiteral* instr = new(zone()) HRegExpLiteral(context, - literals, - expr->pattern(), - expr->flags(), - expr->literal_index()); + HRegExpLiteral* instr = 
New<HRegExpLiteral>(literals, + expr->pattern(), + expr->flags(), + expr->literal_index()); return ast_context()->ReturnInstruction(instr, expr->id()); } @@ -4064,20 +4223,6 @@ static bool LookupAccessorPair(Handle<Map> map, } -static bool LookupGetter(Handle<Map> map, - Handle<String> name, - Handle<JSFunction>* getter, - Handle<JSObject>* holder) { - Handle<AccessorPair> accessors; - if (LookupAccessorPair(map, name, &accessors, holder) && - accessors->getter()->IsJSFunction()) { - *getter = Handle<JSFunction>(JSFunction::cast(accessors->getter())); - return true; - } - return false; -} - - static bool LookupSetter(Handle<Map> map, Handle<String> name, Handle<JSFunction>* setter, @@ -4085,7 +4230,11 @@ static bool LookupSetter(Handle<Map> map, Handle<AccessorPair> accessors; if (LookupAccessorPair(map, name, &accessors, holder) && accessors->setter()->IsJSFunction()) { - *setter = Handle<JSFunction>(JSFunction::cast(accessors->setter())); + Handle<JSFunction> func(JSFunction::cast(accessors->setter())); + CallOptimization call_optimization(func); + // TODO(dcarney): temporary hack unless crankshaft can handle api calls. + if (call_optimization.is_simple_api_call()) return false; + *setter = func; return true; } return false; @@ -4100,7 +4249,7 @@ static bool IsFastLiteral(Handle<JSObject> boilerplate, int* max_properties) { if (boilerplate->map()->is_deprecated()) { Handle<Object> result = JSObject::TryMigrateInstance(boilerplate); - if (result->IsSmi()) return false; + if (result.is_null()) return false; } ASSERT(max_depth >= 0 && *max_properties >= 0); @@ -4166,18 +4315,23 @@ void HOptimizedGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) { // Check whether to use fast or slow deep-copying for boilerplate. int max_properties = kMaxFastLiteralProperties; - Handle<Object> boilerplate(closure->literals()->get( - expr->literal_index()), isolate()); - if (boilerplate->IsJSObject() && - IsFastLiteral(Handle<JSObject>::cast(boilerplate), - kMaxFastLiteralDepth, - &max_properties)) { - Handle<JSObject> boilerplate_object = - Handle<JSObject>::cast(boilerplate); + Handle<Object> literals_cell(closure->literals()->get(expr->literal_index()), + isolate()); + Handle<AllocationSite> site; + Handle<JSObject> boilerplate; + if (!literals_cell->IsUndefined()) { + // Retrieve the boilerplate + site = Handle<AllocationSite>::cast(literals_cell); + boilerplate = Handle<JSObject>(JSObject::cast(site->transition_info()), + isolate()); + } - literal = BuildFastLiteral(boilerplate_object, - Handle<Object>::null(), - DONT_TRACK_ALLOCATION_SITE); + if (!boilerplate.is_null() && + IsFastLiteral(boilerplate, kMaxFastLiteralDepth, &max_properties)) { + AllocationSiteUsageContext usage_context(isolate(), site, false); + usage_context.EnterNewScope(); + literal = BuildFastLiteral(boilerplate, &usage_context); + usage_context.ExitScope(site, boilerplate); } else { NoObservableSideEffectsScope no_effects(this); Handle<FixedArray> closure_literals(closure->literals(), isolate()); @@ -4193,9 +4347,10 @@ void HOptimizedGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) { Add<HPushArgument>(Add<HConstant>(constant_properties)); Add<HPushArgument>(Add<HConstant>(flags)); - Runtime::FunctionId function_id = - (expr->depth() > 1 || expr->may_store_doubles()) - ? 
Runtime::kCreateObjectLiteral : Runtime::kCreateObjectLiteralShallow; + // TODO(mvstanton): Add a flag to turn off creation of any + // AllocationMementos for this call: we are in crankshaft and should have + // learned enough about transition behavior to stop emitting mementos. + Runtime::FunctionId function_id = Runtime::kCreateObjectLiteral; literal = Add<HCallRuntime>(isolate()->factory()->empty_string(), Runtime::FunctionForId(function_id), 4); @@ -4285,51 +4440,50 @@ void HOptimizedGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) { bool uninitialized = false; Handle<Object> literals_cell(literals->get(expr->literal_index()), isolate()); - Handle<Object> raw_boilerplate; + Handle<JSObject> boilerplate_object; if (literals_cell->IsUndefined()) { uninitialized = true; - raw_boilerplate = Runtime::CreateArrayLiteralBoilerplate( + Handle<Object> raw_boilerplate = Runtime::CreateArrayLiteralBoilerplate( isolate(), literals, expr->constant_elements()); if (raw_boilerplate.is_null()) { return Bailout(kArrayBoilerplateCreationFailed); } - site = isolate()->factory()->NewAllocationSite(); - site->set_transition_info(*raw_boilerplate); + boilerplate_object = Handle<JSObject>::cast(raw_boilerplate); + AllocationSiteCreationContext creation_context(isolate()); + site = creation_context.EnterNewScope(); + if (JSObject::DeepWalk(boilerplate_object, &creation_context).is_null()) { + return Bailout(kArrayBoilerplateCreationFailed); + } + creation_context.ExitScope(site, boilerplate_object); literals->set(expr->literal_index(), *site); - if (JSObject::cast(*raw_boilerplate)->elements()->map() == + if (boilerplate_object->elements()->map() == isolate()->heap()->fixed_cow_array_map()) { isolate()->counters()->cow_arrays_created_runtime()->Increment(); } } else { ASSERT(literals_cell->IsAllocationSite()); site = Handle<AllocationSite>::cast(literals_cell); - raw_boilerplate = Handle<Object>(site->transition_info(), isolate()); + boilerplate_object = Handle<JSObject>( + JSObject::cast(site->transition_info()), isolate()); } - ASSERT(!raw_boilerplate.is_null()); - ASSERT(site->IsLiteralSite()); + ASSERT(!boilerplate_object.is_null()); + ASSERT(site->SitePointsToLiteral()); - Handle<JSObject> boilerplate_object = - Handle<JSObject>::cast(raw_boilerplate); ElementsKind boilerplate_elements_kind = - Handle<JSObject>::cast(boilerplate_object)->GetElementsKind(); - - // TODO(mvstanton): This heuristic is only a temporary solution. In the - // end, we want to quit creating allocation site info after a certain number - // of GCs for a call site. - AllocationSiteMode mode = AllocationSite::GetMode( - boilerplate_elements_kind); + boilerplate_object->GetElementsKind(); // Check whether to use fast or slow deep-copying for boilerplate. 
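The fast/slow choice hinges on IsFastLiteral(), a bounded walk of the boilerplate that spends from a shared property budget under a depth limit. A condensed sketch of that shape over a toy node type (hypothetical, not V8's):

#include <vector>

struct ToyBoilerplate {
  int own_properties;
  std::vector<ToyBoilerplate*> nested_literals;
};

static bool FitsFastLiteral(const ToyBoilerplate* node,
                            int max_depth,
                            int* property_budget) {
  if (max_depth == 0) return false;           // nested too deeply
  *property_budget -= node->own_properties;   // budget shared by all nodes
  if (*property_budget < 0) return false;     // too many properties overall
  for (size_t i = 0; i < node->nested_literals.size(); ++i) {
    if (!FitsFastLiteral(node->nested_literals[i], max_depth - 1,
                         property_budget)) {
      return false;
    }
  }
  return true;
}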
int max_properties = kMaxFastLiteralProperties; if (IsFastLiteral(boilerplate_object, kMaxFastLiteralDepth, &max_properties)) { - literal = BuildFastLiteral(boilerplate_object, - site, - mode); + AllocationSiteUsageContext usage_context(isolate(), site, false); + usage_context.EnterNewScope(); + literal = BuildFastLiteral(boilerplate_object, &usage_context); + usage_context.ExitScope(site, boilerplate_object); } else { NoObservableSideEffectsScope no_effects(this); // Boilerplate already exists and constant elements are never accessed, @@ -4341,6 +4495,9 @@ void HOptimizedGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) { Add<HPushArgument>(Add<HConstant>(literal_index)); Add<HPushArgument>(Add<HConstant>(constants)); + // TODO(mvstanton): Consider a flag to turn off creation of any + // AllocationMementos for this call: we are in crankshaft and should have + // learned enough about transition behavior to stop emitting mementos. Runtime::FunctionId function_id = (expr->depth() > 1) ? Runtime::kCreateArrayLiteral : Runtime::kCreateArrayLiteralShallow; literal = Add<HCallRuntime>(isolate()->factory()->empty_string(), @@ -4399,31 +4556,6 @@ void HOptimizedGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) { } -// Sets the lookup result and returns true if the load/store can be inlined. -static bool ComputeLoadStoreField(Handle<Map> type, - Handle<String> name, - LookupResult* lookup, - bool is_store) { - ASSERT(!is_store || !type->is_observed()); - if (!CanInlinePropertyAccess(*type)) { - lookup->NotFound(); - return false; - } - // If we directly find a field, the access can be inlined. - type->LookupDescriptor(NULL, *name, lookup); - if (lookup->IsField()) return true; - - // For a load, we are out of luck if there is no such field. - if (!is_store) return false; - - // 2nd chance: A store into a non-existent field can still be inlined if we - // have a matching transition and some room left in the object. - type->LookupTransition(NULL, *name, lookup); - return lookup->IsTransitionToField(*type) && - (type->unused_property_fields() > 0); -} - - HCheckMaps* HOptimizedGraphBuilder::AddCheckMap(HValue* object, Handle<Map> map) { BuildCheckHeapObject(object); @@ -4519,9 +4651,7 @@ HInstruction* HOptimizedGraphBuilder::BuildStoreNamedGeneric( HValue* object, Handle<String> name, HValue* value) { - HValue* context = environment()->context(); - return new(zone()) HStoreNamedGeneric( - context, + return New<HStoreNamedGeneric>( object, name, value, @@ -4529,6 +4659,28 @@ HInstruction* HOptimizedGraphBuilder::BuildStoreNamedGeneric( } +// Sets the lookup result and returns true if the load/store can be inlined. +static bool ComputeStoreField(Handle<Map> type, + Handle<String> name, + LookupResult* lookup, + bool lookup_transition = true) { + ASSERT(!type->is_observed()); + if (!CanInlinePropertyAccess(*type)) { + lookup->NotFound(); + return false; + } + // If we directly find a field, the access can be inlined. + type->LookupDescriptor(NULL, *name, lookup); + if (lookup->IsField()) return true; + + if (!lookup_transition) return false; + + type->LookupTransition(NULL, *name, lookup); + return lookup->IsTransitionToField(*type) && + (type->unused_property_fields() > 0); +} + + HInstruction* HOptimizedGraphBuilder::BuildStoreNamedMonomorphic( HValue* object, Handle<String> name, @@ -4536,7 +4688,7 @@ HInstruction* HOptimizedGraphBuilder::BuildStoreNamedMonomorphic( Handle<Map> map) { // Handle a store to a known field. 
LookupResult lookup(isolate()); - if (ComputeLoadStoreField(map, name, &lookup, true)) { + if (ComputeStoreField(map, name, &lookup)) { HCheckMaps* checked_object = AddCheckMap(object, map); return BuildStoreNamedField(checked_object, name, value, map, &lookup); } @@ -4546,140 +4698,192 @@ HInstruction* HOptimizedGraphBuilder::BuildStoreNamedMonomorphic( } -static bool CanLoadPropertyFromPrototype(Handle<Map> map, - Handle<Name> name, - LookupResult* lookup) { - if (!CanInlinePropertyAccess(*map)) return false; - map->LookupDescriptor(NULL, *name, lookup); - if (lookup->IsFound()) return false; +bool HOptimizedGraphBuilder::PropertyAccessInfo::IsCompatibleForLoad( + PropertyAccessInfo* info) { + if (!CanInlinePropertyAccess(*map_)) return false; + + if (!LookupDescriptor()) return false; + + if (!lookup_.IsFound()) { + return (!info->lookup_.IsFound() || info->has_holder()) && + map_->prototype() == info->map_->prototype(); + } + + // Mismatch if the other access info found the property in the prototype + // chain. + if (info->has_holder()) return false; + + if (lookup_.IsPropertyCallbacks()) { + return accessor_.is_identical_to(info->accessor_); + } + + if (lookup_.IsConstant()) { + return constant_.is_identical_to(info->constant_); + } + + ASSERT(lookup_.IsField()); + if (!info->lookup_.IsField()) return false; + + Representation r = access_.representation(); + if (!info->access_.representation().IsCompatibleForLoad(r)) return false; + if (info->access_.offset() != access_.offset()) return false; + if (info->access_.IsInobject() != access_.IsInobject()) return false; + info->GeneralizeRepresentation(r); return true; } -HInstruction* HOptimizedGraphBuilder::TryLoadPolymorphicAsMonomorphic( - HValue* object, - SmallMapList* types, - Handle<String> name) { - // Use monomorphic load if property lookup results in the same field index - // for all maps. Requires special map check on the set of all handled maps. - if (types->length() > kMaxLoadPolymorphism) return NULL; +bool HOptimizedGraphBuilder::PropertyAccessInfo::LookupDescriptor() { + map_->LookupDescriptor(NULL, *name_, &lookup_); + return LoadResult(map_); +} - LookupResult lookup(isolate()); - int count; - HObjectAccess access = HObjectAccess::ForMap(); // initial value unused. - for (count = 0; count < types->length(); ++count) { - Handle<Map> map = types->at(count); - if (!ComputeLoadStoreField(map, name, &lookup, false)) break; - HObjectAccess new_access = HObjectAccess::ForField(map, &lookup, name); +bool HOptimizedGraphBuilder::PropertyAccessInfo::LoadResult(Handle<Map> map) { + if (lookup_.IsField()) { + access_ = HObjectAccess::ForField(map, &lookup_, name_); + } else if (lookup_.IsPropertyCallbacks()) { + Handle<Object> callback(lookup_.GetValueFromMap(*map), isolate()); + if (!callback->IsAccessorPair()) return false; + Object* getter = Handle<AccessorPair>::cast(callback)->getter(); + if (!getter->IsJSFunction()) return false; + Handle<JSFunction> accessor = handle(JSFunction::cast(getter)); + CallOptimization call_optimization(accessor); + // TODO(dcarney): temporary hack unless crankshaft can handle api calls. + if (call_optimization.is_simple_api_call()) return false; + accessor_ = accessor; + } else if (lookup_.IsConstant()) { + constant_ = handle(lookup_.GetConstantFromMap(*map), isolate()); + } - if (count == 0) { - // First time through the loop; set access and representation. 
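For reference, the rule that PropertyAccessInfo::IsCompatibleForLoad encodes above is: two receiver maps may share a single inlined load only when the field sits at the same offset, on the same side (in-object slot versus out-of-object properties array), and the two representations can be generalized to a common one. A minimal standalone sketch of that rule, with toy types rather than V8's actual HObjectAccess/Representation API:

    #include <algorithm>

    // Toy lattice: each value subsumes the ones before it.
    enum class Repr { kSmi, kInteger32, kDouble, kTagged };

    struct FieldAccess {
      int offset;      // byte offset of the field
      bool in_object;  // in-object slot vs. out-of-object properties array
      Repr repr;       // representation the field is known to hold
    };

    // Returns true and widens a's representation when a second map's field
    // access can be folded into the same monomorphic load.
    bool CompatibleForLoad(FieldAccess* a, const FieldAccess& b) {
      if (a->offset != b.offset || a->in_object != b.in_object) return false;
      a->repr = std::max(a->repr, b.repr);  // e.g. (kSmi, kDouble) -> kDouble
      return true;
    }

In the diff itself the widened representation is written back through GeneralizeRepresentation, so each subsequent map is compared against the already-generalized form.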
- access = new_access; - } else if (!access.representation().IsCompatibleForLoad( - new_access.representation())) { - // Representations did not match. - break; - } else if (access.offset() != new_access.offset()) { - // Offsets did not match. - break; - } else if (access.IsInobject() != new_access.IsInobject()) { - // In-objectness did not match. - break; + return true; +} + + +bool HOptimizedGraphBuilder::PropertyAccessInfo::LookupInPrototypes() { + Handle<Map> map = map_; + while (map->prototype()->IsJSObject()) { + holder_ = handle(JSObject::cast(map->prototype())); + if (holder_->map()->is_deprecated()) { + JSObject::TryMigrateInstance(holder_); + } + map = Handle<Map>(holder_->map()); + if (!CanInlinePropertyAccess(*map)) { + lookup_.NotFound(); + return false; } - access = access.WithRepresentation( - access.representation().generalize(new_access.representation())); + map->LookupDescriptor(*holder_, *name_, &lookup_); + if (lookup_.IsFound()) return LoadResult(map); } + lookup_.NotFound(); + return true; +} - if (count == types->length()) { - // Everything matched; can use monomorphic load. - BuildCheckHeapObject(object); - HCheckMaps* checked_object = Add<HCheckMaps>(object, types); - return BuildLoadNamedField(checked_object, access); - } - if (count != 0) return NULL; +bool HOptimizedGraphBuilder::PropertyAccessInfo::CanLoadMonomorphic() { + if (!CanInlinePropertyAccess(*map_)) return IsStringLength(); + if (IsJSObjectFieldAccessor()) return true; + if (!LookupDescriptor()) return false; + if (lookup_.IsFound()) return true; + return LookupInPrototypes(); +} - // Second chance: the property is on the prototype and all maps have the - // same prototype. - Handle<Map> map(types->at(0)); - if (!CanLoadPropertyFromPrototype(map, name, &lookup)) return NULL; - Handle<Object> prototype(map->prototype(), isolate()); - for (count = 1; count < types->length(); ++count) { - Handle<Map> test_map(types->at(count)); - if (!CanLoadPropertyFromPrototype(test_map, name, &lookup)) return NULL; - if (test_map->prototype() != *prototype) return NULL; +bool HOptimizedGraphBuilder::PropertyAccessInfo::CanLoadAsMonomorphic( + SmallMapList* types) { + ASSERT(map_.is_identical_to(types->first())); + if (!CanLoadMonomorphic()) return false; + if (types->length() > kMaxLoadPolymorphism) return false; + + if (IsStringLength()) { + for (int i = 1; i < types->length(); ++i) { + if (types->at(i)->instance_type() >= FIRST_NONSTRING_TYPE) return false; + } + return true; + } + + if (IsArrayLength()) { + bool is_fast = IsFastElementsKind(map_->elements_kind()); + for (int i = 1; i < types->length(); ++i) { + Handle<Map> test_map = types->at(i); + if (test_map->instance_type() != JS_ARRAY_TYPE) return false; + if (IsFastElementsKind(test_map->elements_kind()) != is_fast) { + return false; + } + } + return true; } - LookupInPrototypes(map, name, &lookup); - if (!lookup.IsField()) return NULL; + if (IsJSObjectFieldAccessor()) { + InstanceType instance_type = map_->instance_type(); + for (int i = 1; i < types->length(); ++i) { + if (types->at(i)->instance_type() != instance_type) return false; + } + return true; + } - BuildCheckHeapObject(object); - Add<HCheckMaps>(object, types); + for (int i = 1; i < types->length(); ++i) { + PropertyAccessInfo test_info(isolate(), types->at(i), name_); + if (!test_info.IsCompatibleForLoad(this)) return false; + } - Handle<JSObject> holder(lookup.holder()); - Handle<Map> holder_map(holder->map()); - HValue* checked_holder = BuildCheckPrototypeMaps( - 
Handle<JSObject>::cast(prototype), holder); - return BuildLoadNamedField(checked_holder, - HObjectAccess::ForField(holder_map, &lookup, name)); + return true; } -// Returns true if an instance of this map can never find a property with this -// name in its prototype chain. This means all prototypes up to the top are -// fast and don't have the name in them. It would be good if we could optimize -// polymorphic loads where the property is sometimes found in the prototype -// chain. -static bool PrototypeChainCanNeverResolve( - Handle<Map> map, Handle<String> name) { - Isolate* isolate = map->GetIsolate(); - Object* current = map->prototype(); - while (current != isolate->heap()->null_value()) { - if (current->IsJSGlobalProxy() || - current->IsGlobalObject() || - !current->IsJSObject() || - !CanInlinePropertyAccess(JSObject::cast(current)->map()) || - JSObject::cast(current)->IsAccessCheckNeeded()) { - return false; - } +HInstruction* HOptimizedGraphBuilder::BuildLoadMonomorphic( + PropertyAccessInfo* info, + HValue* object, + HInstruction* checked_object, + BailoutId ast_id, + BailoutId return_id, + bool can_inline_accessor) { - LookupResult lookup(isolate); - Map* map = JSObject::cast(current)->map(); - map->LookupDescriptor(NULL, *name, &lookup); - if (lookup.IsFound()) return false; - if (!lookup.IsCacheable()) return false; - current = JSObject::cast(current)->GetPrototype(); + HObjectAccess access = HObjectAccess::ForMap(); // bogus default + if (info->GetJSObjectFieldAccess(&access)) { + return New<HLoadNamedField>(checked_object, access); } - return true; + + HValue* checked_holder = checked_object; + if (info->has_holder()) { + Handle<JSObject> prototype(JSObject::cast(info->map()->prototype())); + checked_holder = BuildCheckPrototypeMaps(prototype, info->holder()); + } + + if (!info->lookup()->IsFound()) return graph()->GetConstantUndefined(); + + if (info->lookup()->IsField()) { + return BuildLoadNamedField(checked_holder, info->access()); + } + + if (info->lookup()->IsPropertyCallbacks()) { + Push(checked_object); + if (FLAG_inline_accessors && + can_inline_accessor && + TryInlineGetter(info->accessor(), ast_id, return_id)) { + return NULL; + } + Add<HPushArgument>(Pop()); + return New<HCallConstantFunction>(info->accessor(), 1); + } + + ASSERT(info->lookup()->IsConstant()); + return New<HConstant>(info->constant()); } void HOptimizedGraphBuilder::HandlePolymorphicLoadNamedField( - int position, BailoutId ast_id, + BailoutId return_id, HValue* object, SmallMapList* types, Handle<String> name) { - HInstruction* instr = TryLoadPolymorphicAsMonomorphic(object, types, name); - if (instr != NULL) { - instr->set_position(position); - return ast_context()->ReturnInstruction(instr, ast_id); - } - // Something did not match; must use a polymorphic load. 
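BuildLoadMonomorphic now funnels every monomorphic case through one decision ladder: special JSObject header fields first, then a prototype-holder guard, then field, accessor, or constant, with a failed lookup yielding undefined. A rough standalone sketch of that ladder (simplified, hypothetical types, not the real PropertyAccessInfo):

    enum class Kind { kHeaderField, kField, kAccessor, kConstant, kNotFound };

    struct Access {
      Kind kind;
      bool on_prototype;  // property found on a holder up the chain
    };

    // Mirrors the order of the checks: header fields need no holder guard;
    // everything else must map-check the chain up to the holder first.
    const char* PlanLoad(const Access& a) {
      if (a.kind == Kind::kHeaderField) return "load fixed-offset header field";
      if (a.on_prototype) { /* emit prototype map checks here */ }
      switch (a.kind) {
        case Kind::kField:    return "inlined field load";
        case Kind::kAccessor: return "inline or call the getter";
        case Kind::kConstant: return "embed the constant";
        default:              return "constant undefined";
      }
    }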
int count = 0; HBasicBlock* join = NULL; for (int i = 0; i < types->length() && count < kMaxLoadPolymorphism; ++i) { - Handle<Map> map = types->at(i); - LookupResult lookup(isolate()); - if (ComputeLoadStoreField(map, name, &lookup, false) || - (lookup.IsCacheable() && - CanInlinePropertyAccess(*map) && - (lookup.IsConstant() || - (!lookup.IsFound() && - PrototypeChainCanNeverResolve(map, name))))) { + PropertyAccessInfo info(isolate(), types->at(i), name); + if (info.CanLoadMonomorphic()) { if (count == 0) { BuildCheckHeapObject(object); join = graph()->CreateBasicBlock(); @@ -4687,37 +4891,24 @@ void HOptimizedGraphBuilder::HandlePolymorphicLoadNamedField( ++count; HBasicBlock* if_true = graph()->CreateBasicBlock(); HBasicBlock* if_false = graph()->CreateBasicBlock(); - HCompareMap* compare = - new(zone()) HCompareMap(object, map, if_true, if_false); - current_block()->Finish(compare); + HCompareMap* compare = New<HCompareMap>( + object, info.map(), if_true, if_false); + FinishCurrentBlock(compare); set_current_block(if_true); - // TODO(verwaest): Merge logic with BuildLoadNamedMonomorphic. - if (lookup.IsField()) { - HObjectAccess access = HObjectAccess::ForField(map, &lookup, name); - HLoadNamedField* load = BuildLoadNamedField(compare, access); - load->set_position(position); - AddInstruction(load); - if (!ast_context()->IsEffect()) Push(load); - } else if (lookup.IsConstant()) { - Handle<Object> constant(lookup.GetConstantFromMap(*map), isolate()); - HConstant* hconstant = Add<HConstant>(constant); - if (!ast_context()->IsEffect()) Push(hconstant); + HInstruction* load = BuildLoadMonomorphic( + &info, object, compare, ast_id, return_id, FLAG_polymorphic_inlining); + if (load == NULL) { + if (HasStackOverflow()) return; } else { - ASSERT(!lookup.IsFound()); - if (map->prototype()->IsJSObject()) { - Handle<JSObject> prototype(JSObject::cast(map->prototype())); - Handle<JSObject> holder = prototype; - while (holder->map()->prototype()->IsJSObject()) { - holder = handle(JSObject::cast(holder->map()->prototype())); - } - BuildCheckPrototypeMaps(prototype, holder); + if (!load->IsLinked()) { + AddInstruction(load); } - if (!ast_context()->IsEffect()) Push(graph()->GetConstantUndefined()); + if (!ast_context()->IsEffect()) Push(load); } - current_block()->Goto(join); + if (current_block() != NULL) Goto(join); set_current_block(if_false); } } @@ -4726,16 +4917,17 @@ void HOptimizedGraphBuilder::HandlePolymorphicLoadNamedField( // know about and do not want to handle ones we've never seen. Otherwise // use a generic IC. if (count == types->length() && FLAG_deoptimize_uncommon_cases) { + // Because the deopt may be the only path in the polymorphic load, make sure + // that the environment stack matches the depth on deopt that it otherwise + // would have had after a successful load. 
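The dummy push referred to in the comment above matters because a join block requires every predecessor to leave the same expression-stack depth; when the deoptimizing arm is the only remaining path, nothing would otherwise stand in for the loaded value. A toy illustration of the invariant (hypothetical helpers, not Hydrogen's API):

    #include <cassert>

    // Depths each arm leaves on the simulated expression stack.
    int SuccessArmDepth(int d) { return d + 1; }  // pushes the loaded value
    int DeoptArmDepth(int d)   { return d + 1; }  // pushes a placeholder constant 0

    int main() {
      int d = 7;  // arbitrary depth before the polymorphic load
      assert(SuccessArmDepth(d) == DeoptArmDepth(d));  // join sees equal depths
      return 0;
    }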
+ if (!ast_context()->IsEffect()) Push(graph()->GetConstant0()); FinishExitWithHardDeoptimization("Unknown map in polymorphic load", join); } else { - HValue* context = environment()->context(); - HInstruction* load = new(zone()) HLoadNamedGeneric(context, object, name); - load->set_position(position); - AddInstruction(load); + HInstruction* load = Add<HLoadNamedGeneric>(object, name); if (!ast_context()->IsEffect()) Push(load); if (join != NULL) { - current_block()->Goto(join); + Goto(join); } else { Add<HSimulate>(ast_id, REMOVABLE_SIMULATE); if (!ast_context()->IsEffect()) ast_context()->ReturnValue(Pop()); @@ -4751,7 +4943,6 @@ void HOptimizedGraphBuilder::HandlePolymorphicLoadNamedField( bool HOptimizedGraphBuilder::TryStorePolymorphicAsMonomorphic( - int position, BailoutId assignment_id, HValue* object, HValue* value, @@ -4761,8 +4952,6 @@ bool HOptimizedGraphBuilder::TryStorePolymorphicAsMonomorphic( // for all maps. Requires special map check on the set of all handled maps. if (types->length() > kMaxStorePolymorphism) return false; - // TODO(verwaest): Merge the checking logic with the code in - // TryLoadPolymorphicAsMonomorphic. LookupResult lookup(isolate()); int count; Representation representation = Representation::None(); @@ -4770,7 +4959,7 @@ bool HOptimizedGraphBuilder::TryStorePolymorphicAsMonomorphic( for (count = 0; count < types->length(); ++count) { Handle<Map> map = types->at(count); // Pass false to ignore transitions. - if (!ComputeLoadStoreField(map, name, &lookup, false)) break; + if (!ComputeStoreField(map, name, &lookup, false)) break; ASSERT(!map->is_observed()); HObjectAccess new_access = HObjectAccess::ForField(map, &lookup, name); @@ -4803,7 +4992,6 @@ bool HOptimizedGraphBuilder::TryStorePolymorphicAsMonomorphic( checked_object, name, value, types->at(count - 1), &lookup), true); if (!ast_context()->IsEffect()) Push(value); - store->set_position(position); AddInstruction(store); Add<HSimulate>(assignment_id); if (!ast_context()->IsEffect()) Drop(1); @@ -4813,14 +5001,13 @@ bool HOptimizedGraphBuilder::TryStorePolymorphicAsMonomorphic( void HOptimizedGraphBuilder::HandlePolymorphicStoreNamedField( - int position, BailoutId assignment_id, HValue* object, HValue* value, SmallMapList* types, Handle<String> name) { if (TryStorePolymorphicAsMonomorphic( - position, assignment_id, object, value, types, name)) { + assignment_id, object, value, types, name)) { return; } @@ -4832,7 +5019,7 @@ void HOptimizedGraphBuilder::HandlePolymorphicStoreNamedField( for (int i = 0; i < types->length() && count < kMaxStorePolymorphism; ++i) { Handle<Map> map = types->at(i); LookupResult lookup(isolate()); - if (ComputeLoadStoreField(map, name, &lookup, true)) { + if (ComputeStoreField(map, name, &lookup)) { if (count == 0) { BuildCheckHeapObject(object); join = graph()->CreateBasicBlock(); @@ -4840,19 +5027,17 @@ void HOptimizedGraphBuilder::HandlePolymorphicStoreNamedField( ++count; HBasicBlock* if_true = graph()->CreateBasicBlock(); HBasicBlock* if_false = graph()->CreateBasicBlock(); - HCompareMap* compare = - new(zone()) HCompareMap(object, map, if_true, if_false); - current_block()->Finish(compare); + HCompareMap* compare = New<HCompareMap>(object, map, if_true, if_false); + FinishCurrentBlock(compare); set_current_block(if_true); HInstruction* instr; CHECK_ALIVE(instr = BuildStoreNamedField( compare, name, value, map, &lookup)); - instr->set_position(position); // Goto will add the HSimulate for the store. 
AddInstruction(instr); if (!ast_context()->IsEffect()) Push(value); - current_block()->Goto(join); + Goto(join); set_current_block(if_false); } @@ -4865,14 +5050,13 @@ void HOptimizedGraphBuilder::HandlePolymorphicStoreNamedField( FinishExitWithHardDeoptimization("Unknown map in polymorphic store", join); } else { HInstruction* instr = BuildStoreNamedGeneric(object, name, value); - instr->set_position(position); AddInstruction(instr); if (join != NULL) { if (!ast_context()->IsEffect()) { Push(value); } - current_block()->Goto(join); + Goto(join); } else { // The HSimulate for the store should not see the stored value in // effect contexts (it is not materialized at expr->id() in the @@ -4926,8 +5110,7 @@ void HOptimizedGraphBuilder::BuildStore(Expression* expr, HValue* key = environment()->ExpressionStackAt(1); HValue* object = environment()->ExpressionStackAt(2); bool has_side_effects = false; - HandleKeyedElementAccess(object, key, value, expr, return_id, - expr->position(), + HandleKeyedElementAccess(object, key, value, expr, true, // is_store &has_side_effects); Drop(3); @@ -4966,7 +5149,7 @@ void HOptimizedGraphBuilder::BuildStore(Expression* expr, Drop(2); Add<HPushArgument>(object); Add<HPushArgument>(value); - instr = new(zone()) HCallConstantFunction(setter, 2); + instr = New<HCallConstantFunction>(setter, 2); } else { Drop(2); CHECK_ALIVE(instr = BuildStoreNamedMonomorphic(object, @@ -4976,15 +5159,13 @@ void HOptimizedGraphBuilder::BuildStore(Expression* expr, } } else if (types != NULL && types->length() > 1) { Drop(2); - return HandlePolymorphicStoreNamedField( - expr->position(), ast_id, object, value, types, name); + return HandlePolymorphicStoreNamedField(ast_id, object, value, types, name); } else { Drop(2); instr = BuildStoreNamedGeneric(object, name, value); } if (!ast_context()->IsEffect()) Push(value); - instr->set_position(expr->position()); AddInstruction(instr); if (instr->HasObservableSideEffects()) { Add<HSimulate>(ast_id, REMOVABLE_SIMULATE); @@ -5013,7 +5194,6 @@ void HOptimizedGraphBuilder::HandlePropertyAssignment(Assignment* expr) { void HOptimizedGraphBuilder::HandleGlobalVariableAssignment( Variable* var, HValue* value, - int position, BailoutId ast_id) { LookupResult lookup(isolate()); GlobalPropertyAccess type = LookupGlobalProperty(var, &lookup, true); @@ -5036,7 +5216,6 @@ void HOptimizedGraphBuilder::HandleGlobalVariableAssignment( } HInstruction* instr = Add<HStoreGlobalCell>(value, cell, lookup.GetPropertyDetails()); - instr->set_position(position); if (instr->HasObservableSideEffects()) { Add<HSimulate>(ast_id, REMOVABLE_SIMULATE); } @@ -5045,7 +5224,7 @@ void HOptimizedGraphBuilder::HandleGlobalVariableAssignment( HStoreGlobalGeneric* instr = Add<HStoreGlobalGeneric>(global_object, var->name(), value, function_strict_mode_flag()); - instr->set_position(position); + USE(instr); ASSERT(instr->HasObservableSideEffects()); Add<HSimulate>(ast_id, REMOVABLE_SIMULATE); } @@ -5074,7 +5253,6 @@ void HOptimizedGraphBuilder::HandleCompoundAssignment(Assignment* expr) { case Variable::UNALLOCATED: HandleGlobalVariableAssignment(var, Top(), - expr->position(), expr->AssignmentId()); break; @@ -5136,22 +5314,21 @@ void HOptimizedGraphBuilder::HandleCompoundAssignment(Assignment* expr) { CHECK_ALIVE(VisitForValue(prop->obj())); HValue* object = Top(); HValue* key = NULL; - if ((!prop->IsStringLength() && - !prop->IsFunctionPrototype() && - !prop->key()->IsPropertyName()) || + if ((!prop->IsFunctionPrototype() && !prop->key()->IsPropertyName()) || 
prop->IsStringAccess()) { CHECK_ALIVE(VisitForValue(prop->key())); key = Top(); } - CHECK_ALIVE(PushLoad(prop, object, key, expr->position())); + CHECK_ALIVE(PushLoad(prop, object, key)); CHECK_ALIVE(VisitForValue(expr->value())); HValue* right = Pop(); HValue* left = Pop(); HInstruction* instr = BuildBinaryOperation(operation, left, right); - PushAndAdd(instr); + AddInstruction(instr); + Push(instr); if (instr->HasObservableSideEffects()) { Add<HSimulate>(operation->id(), REMOVABLE_SIMULATE); } @@ -5207,7 +5384,6 @@ void HOptimizedGraphBuilder::VisitAssignment(Assignment* expr) { CHECK_ALIVE(VisitForValue(expr->value())); HandleGlobalVariableAssignment(var, Top(), - expr->position(), expr->AssignmentId()); return ast_context()->ReturnValue(Pop()); @@ -5306,9 +5482,16 @@ void HOptimizedGraphBuilder::VisitThrow(Throw* expr) { CHECK_ALIVE(VisitForValue(expr->exception())); HValue* value = environment()->Pop(); - HThrow* instr = Add<HThrow>(value); - instr->set_position(expr->position()); + if (!FLAG_emit_opt_code_positions) SetSourcePosition(expr->position()); + Add<HThrow>(value); Add<HSimulate>(expr->id()); + + // If the throw definitely exits the function, we can finish with a dummy + // control flow at this point. This is not the case if the throw is inside + // an inlined function which may be replaced. + if (call_context() == NULL) { + FinishExitCurrentBlock(New<HAbnormalExit>()); + } } @@ -5327,6 +5510,12 @@ HLoadNamedField* HGraphBuilder::BuildLoadNamedField(HValue* object, } +HInstruction* HGraphBuilder::AddLoadNamedField(HValue* object, + HObjectAccess access) { + return AddInstruction(BuildLoadNamedField(object, access)); +} + + HInstruction* HGraphBuilder::BuildLoadStringLength(HValue* object, HValue* checked_string) { if (FLAG_fold_constants && object->IsConstant()) { @@ -5347,93 +5536,14 @@ HInstruction* HOptimizedGraphBuilder::BuildLoadNamedGeneric( Add<HDeoptimize>("Insufficient type feedback for generic named load", Deoptimizer::SOFT); } - HValue* context = environment()->context(); - return new(zone()) HLoadNamedGeneric(context, object, name); -} - - -HInstruction* HOptimizedGraphBuilder::BuildCallGetter( - HValue* object, - Handle<Map> map, - Handle<JSFunction> getter, - Handle<JSObject> holder) { - AddCheckConstantFunction(holder, object, map); - Add<HPushArgument>(object); - return new(zone()) HCallConstantFunction(getter, 1); + return New<HLoadNamedGeneric>(object, name); } -HInstruction* HOptimizedGraphBuilder::BuildLoadNamedMonomorphic( - HValue* object, - Handle<String> name, - Handle<Map> map) { - // Handle a load from a known field. - ASSERT(!map->is_dictionary_map()); - - // Handle access to various length properties - if (name->Equals(isolate()->heap()->length_string())) { - if (map->instance_type() == JS_ARRAY_TYPE) { - HCheckMaps* checked_object = AddCheckMap(object, map); - return New<HLoadNamedField>( - checked_object, HObjectAccess::ForArrayLength(map->elements_kind())); - } - } - - LookupResult lookup(isolate()); - map->LookupDescriptor(NULL, *name, &lookup); - if (lookup.IsField()) { - HCheckMaps* checked_object = AddCheckMap(object, map); - ASSERT(map->IsJSObjectMap()); - return BuildLoadNamedField( - checked_object, HObjectAccess::ForField(map, &lookup, name)); - } - - // Handle a load of a constant known function. 
- if (lookup.IsConstant()) { - AddCheckMap(object, map); - Handle<Object> constant(lookup.GetConstantFromMap(*map), isolate()); - return New<HConstant>(constant); - } - - if (lookup.IsFound()) { - // Cannot handle the property, do a generic load instead. - HValue* context = environment()->context(); - return new(zone()) HLoadNamedGeneric(context, object, name); - } - - // Handle a load from a known field somewhere in the prototype chain. - LookupInPrototypes(map, name, &lookup); - if (lookup.IsField()) { - Handle<JSObject> prototype(JSObject::cast(map->prototype())); - Handle<JSObject> holder(lookup.holder()); - Handle<Map> holder_map(holder->map()); - AddCheckMap(object, map); - HValue* checked_holder = BuildCheckPrototypeMaps(prototype, holder); - return BuildLoadNamedField( - checked_holder, HObjectAccess::ForField(holder_map, &lookup, name)); - } - - // Handle a load of a constant function somewhere in the prototype chain. - if (lookup.IsConstant()) { - Handle<JSObject> prototype(JSObject::cast(map->prototype())); - Handle<JSObject> holder(lookup.holder()); - Handle<Map> holder_map(holder->map()); - AddCheckMap(object, map); - BuildCheckPrototypeMaps(prototype, holder); - Handle<Object> constant(lookup.GetConstantFromMap(*holder_map), isolate()); - return New<HConstant>(constant); - } - - // No luck, do a generic load. - HValue* context = environment()->context(); - return new(zone()) HLoadNamedGeneric(context, object, name); -} - HInstruction* HOptimizedGraphBuilder::BuildLoadKeyedGeneric(HValue* object, HValue* key) { - HValue* context = environment()->context(); - return new(zone()) HLoadKeyedGeneric(context, object, key); + return New<HLoadKeyedGeneric>(object, key); } @@ -5547,8 +5657,6 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess( HValue* key, HValue* val, SmallMapList* maps, - BailoutId ast_id, - int position, bool is_store, KeyedAccessStoreMode store_mode, bool* has_side_effects) { @@ -5560,9 +5668,6 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess( TryBuildConsolidatedElementLoad(object, key, val, maps); if (consolidated_load != NULL) { *has_side_effects |= consolidated_load->HasObservableSideEffects(); - if (position != RelocInfo::kNoPosition) { - consolidated_load->set_position(position); - } return consolidated_load; } } @@ -5619,7 +5724,6 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess( store_mode); } *has_side_effects |= instr->HasObservableSideEffects(); - if (position != RelocInfo::kNoPosition) instr->set_position(position); return is_store ? NULL : instr; } @@ -5632,8 +5736,8 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess( HBasicBlock* this_map = graph()->CreateBasicBlock(); HBasicBlock* other_map = graph()->CreateBasicBlock(); HCompareMap* mapcompare = - new(zone()) HCompareMap(object, map, this_map, other_map); - current_block()->Finish(mapcompare); + New<HCompareMap>(object, map, this_map, other_map); + FinishCurrentBlock(mapcompare); set_current_block(this_map); HInstruction* access = NULL; @@ -5656,12 +5760,11 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess( *has_side_effects |= access->HasObservableSideEffects(); // The caller will use has_side_effects and add a correct Simulate. 
access->SetFlag(HValue::kHasNoObservableSideEffects); - if (position != RelocInfo::kNoPosition) access->set_position(position); if (!is_store) { Push(access); } NoObservableSideEffectsScope scope(this); - current_block()->GotoNoSimulate(join); + GotoNoSimulate(join); set_current_block(other_map); } @@ -5679,8 +5782,6 @@ HValue* HOptimizedGraphBuilder::HandleKeyedElementAccess( HValue* key, HValue* val, Expression* expr, - BailoutId ast_id, - int position, bool is_store, bool* has_side_effects) { ASSERT(!expr->IsPropertyName()); @@ -5702,17 +5803,18 @@ HValue* HOptimizedGraphBuilder::HandleKeyedElementAccess( } } else if (types != NULL && !types->is_empty()) { return HandlePolymorphicElementAccess( - obj, key, val, types, ast_id, position, is_store, + obj, key, val, types, is_store, expr->GetStoreMode(), has_side_effects); } else { if (is_store) { - if (expr->IsAssignment() && expr->AsAssignment()->IsUninitialized()) { + if (expr->IsAssignment() && + expr->AsAssignment()->HasNoTypeInformation()) { Add<HDeoptimize>("Insufficient type feedback for keyed store", Deoptimizer::SOFT); } instr = BuildStoreKeyedGeneric(obj, key, val); } else { - if (expr->AsProperty()->IsUninitialized()) { + if (expr->AsProperty()->HasNoTypeInformation()) { Add<HDeoptimize>("Insufficient type feedback for keyed load", Deoptimizer::SOFT); } @@ -5720,7 +5822,6 @@ HValue* HOptimizedGraphBuilder::HandleKeyedElementAccess( } AddInstruction(instr); } - if (position != RelocInfo::kNoPosition) instr->set_position(position); *has_side_effects = instr->HasObservableSideEffects(); return instr; } @@ -5730,9 +5831,7 @@ HInstruction* HOptimizedGraphBuilder::BuildStoreKeyedGeneric( HValue* object, HValue* key, HValue* value) { - HValue* context = environment()->context(); - return new(zone()) HStoreKeyedGeneric( - context, + return New<HStoreKeyedGeneric>( object, key, value, @@ -5799,7 +5898,7 @@ bool HOptimizedGraphBuilder::TryArgumentsAccess(Property* expr) { HInstruction* elements = Add<HArgumentsElements>(false); HInstruction* length = Add<HArgumentsLength>(elements); HInstruction* checked_key = Add<HBoundsCheck>(key, length); - result = new(zone()) HAccessArgumentsAt(elements, length, checked_key); + result = New<HAccessArgumentsAt>(elements, length, checked_key); } else { EnsureArgumentsArePushedForAccess(); @@ -5809,7 +5908,7 @@ bool HOptimizedGraphBuilder::TryArgumentsAccess(Property* expr) { arguments_environment()->parameter_count() - 1; HInstruction* length = Add<HConstant>(argument_count); HInstruction* checked_key = Add<HBoundsCheck>(key, length); - result = new(zone()) HAccessArgumentsAt(elements, length, checked_key); + result = New<HAccessArgumentsAt>(elements, length, checked_key); } } ast_context()->ReturnInstruction(result, expr->id()); @@ -5819,66 +5918,66 @@ bool HOptimizedGraphBuilder::TryArgumentsAccess(Property* expr) { void HOptimizedGraphBuilder::PushLoad(Property* expr, HValue* object, - HValue* key, - int position) { + HValue* key) { ValueContext for_value(this, ARGUMENTS_NOT_ALLOWED); Push(object); if (key != NULL) Push(key); - BuildLoad(expr, position, expr->LoadId()); + BuildLoad(expr, expr->LoadId()); +} + + +static bool AreStringTypes(SmallMapList* types) { + for (int i = 0; i < types->length(); i++) { + if (types->at(i)->instance_type() >= FIRST_NONSTRING_TYPE) return false; + } + return true; } void HOptimizedGraphBuilder::BuildLoad(Property* expr, - int position, BailoutId ast_id) { HInstruction* instr = NULL; - if (expr->IsStringLength()) { - HValue* string = Pop(); - 
BuildCheckHeapObject(string); - HInstruction* checkstring = - AddInstruction(HCheckInstanceType::NewIsString(string, zone())); - instr = BuildLoadStringLength(string, checkstring); - } else if (expr->IsStringAccess()) { + if (expr->IsStringAccess()) { HValue* index = Pop(); HValue* string = Pop(); - HValue* context = environment()->context(); - HInstruction* char_code = - BuildStringCharCodeAt(string, index); + HInstruction* char_code = BuildStringCharCodeAt(string, index); AddInstruction(char_code); - instr = HStringCharFromCode::New(zone(), context, char_code); + instr = NewUncasted<HStringCharFromCode>(char_code); } else if (expr->IsFunctionPrototype()) { HValue* function = Pop(); BuildCheckHeapObject(function); - instr = new(zone()) HLoadFunctionPrototype(function); + instr = New<HLoadFunctionPrototype>(function); } else if (expr->key()->IsPropertyName()) { Handle<String> name = expr->key()->AsLiteral()->AsPropertyName(); - HValue* object = Top(); + HValue* object = Pop(); SmallMapList* types; - bool monomorphic = ComputeReceiverTypes(expr, object, &types); + ComputeReceiverTypes(expr, object, &types); + ASSERT(types != NULL); + + if (types->length() > 0) { + PropertyAccessInfo info(isolate(), types->first(), name); + if (!info.CanLoadAsMonomorphic(types)) { + return HandlePolymorphicLoadNamedField( + ast_id, expr->LoadId(), object, types, name); + } - if (monomorphic) { - Handle<Map> map = types->first(); - Handle<JSFunction> getter; - Handle<JSObject> holder; - if (LookupGetter(map, name, &getter, &holder)) { - AddCheckConstantFunction(holder, Top(), map); - if (FLAG_inline_accessors && - TryInlineGetter(getter, ast_id, expr->LoadId())) { - return; - } - Add<HPushArgument>(Pop()); - instr = new(zone()) HCallConstantFunction(getter, 1); + BuildCheckHeapObject(object); + HInstruction* checked_object; + if (AreStringTypes(types)) { + checked_object = + Add<HCheckInstanceType>(object, HCheckInstanceType::IS_STRING); } else { - instr = BuildLoadNamedMonomorphic(Pop(), name, map); + checked_object = Add<HCheckMaps>(object, types); } - } else if (types != NULL && types->length() > 1) { - return HandlePolymorphicLoadNamedField( - position, ast_id, Pop(), types, name); + instr = BuildLoadMonomorphic( + &info, object, checked_object, ast_id, expr->LoadId()); + if (instr == NULL) return; + if (instr->IsLinked()) return ast_context()->ReturnValue(instr); } else { - instr = BuildLoadNamedGeneric(Pop(), name, expr); + instr = BuildLoadNamedGeneric(object, name, expr); } } else { @@ -5887,7 +5986,7 @@ void HOptimizedGraphBuilder::BuildLoad(Property* expr, bool has_side_effects = false; HValue* load = HandleKeyedElementAccess( - obj, key, NULL, expr, ast_id, position, + obj, key, NULL, expr, false, // is_store &has_side_effects); if (has_side_effects) { @@ -5901,7 +6000,6 @@ void HOptimizedGraphBuilder::BuildLoad(Property* expr, } return ast_context()->ReturnValue(load); } - instr->set_position(position); return ast_context()->ReturnInstruction(instr, ast_id); } @@ -5914,14 +6012,12 @@ void HOptimizedGraphBuilder::VisitProperty(Property* expr) { if (TryArgumentsAccess(expr)) return; CHECK_ALIVE(VisitForValue(expr->obj())); - if ((!expr->IsStringLength() && - !expr->IsFunctionPrototype() && - !expr->key()->IsPropertyName()) || + if ((!expr->IsFunctionPrototype() && !expr->key()->IsPropertyName()) || expr->IsStringAccess()) { CHECK_ALIVE(VisitForValue(expr->key())); } - BuildLoad(expr, expr->position(), expr->id()); + BuildLoad(expr, expr->id()); } @@ -6015,22 +6111,13 @@ bool 
HOptimizedGraphBuilder::TryCallPolymorphicAsMonomorphic( Handle<String> name) { if (types->length() > kMaxCallPolymorphism) return false; - Handle<Map> map(types->at(0)); - LookupResult lookup(isolate()); - if (!CanLoadPropertyFromPrototype(map, name, &lookup)) return false; - - Handle<Object> prototype(map->prototype(), isolate()); - for (int count = 1; count < types->length(); ++count) { - Handle<Map> test_map(types->at(count)); - if (!CanLoadPropertyFromPrototype(test_map, name, &lookup)) return false; - if (test_map->prototype() != *prototype) return false; - } - - if (!expr->ComputeTarget(map, name)) return false; + PropertyAccessInfo info(isolate(), types->at(0), name); + if (!info.CanLoadAsMonomorphic(types)) return false; + if (!expr->ComputeTarget(info.map(), name)) return false; BuildCheckHeapObject(receiver); Add<HCheckMaps>(receiver, types); - AddCheckPrototypeMaps(expr->holder(), map); + AddCheckPrototypeMaps(expr->holder(), info.map()); if (FLAG_trace_inlining) { Handle<JSFunction> caller = current_info()->closure(); SmartArrayPointer<char> caller_name = @@ -6042,8 +6129,7 @@ bool HOptimizedGraphBuilder::TryCallPolymorphicAsMonomorphic( if (!TryInlineCall(expr)) { int argument_count = expr->arguments()->length() + 1; // Includes receiver. HCallConstantFunction* call = - new(zone()) HCallConstantFunction(expr->target(), argument_count); - call->set_position(expr->position()); + New<HCallConstantFunction>(expr->target(), argument_count); PreProcessCall(call); AddInstruction(call); if (!ast_context()->IsEffect()) Push(call); @@ -6107,11 +6193,9 @@ void HOptimizedGraphBuilder::HandlePolymorphicCallNamed( HBasicBlock* empty_smi_block = graph()->CreateBasicBlock(); HBasicBlock* not_smi_block = graph()->CreateBasicBlock(); number_block = graph()->CreateBasicBlock(); - HIsSmiAndBranch* smicheck = new(zone()) HIsSmiAndBranch(receiver); - smicheck->SetSuccessorAt(0, empty_smi_block); - smicheck->SetSuccessorAt(1, not_smi_block); - current_block()->Finish(smicheck); - empty_smi_block->Goto(number_block); + FinishCurrentBlock(New<HIsSmiAndBranch>( + receiver, empty_smi_block, not_smi_block)); + Goto(empty_smi_block, number_block); set_current_block(not_smi_block); } else { BuildCheckHeapObject(receiver); @@ -6122,27 +6206,24 @@ void HOptimizedGraphBuilder::HandlePolymorphicCallNamed( HUnaryControlInstruction* compare; if (handle_smi && map.is_identical_to(number_marker_map)) { - compare = new(zone()) HCompareMap( - receiver, heap_number_map, if_true, if_false); + compare = New<HCompareMap>(receiver, heap_number_map, if_true, if_false); map = initial_number_map; expr->set_number_check( Handle<JSObject>(JSObject::cast(map->prototype()))); } else if (map.is_identical_to(string_marker_map)) { - compare = new(zone()) HIsStringAndBranch(receiver); - compare->SetSuccessorAt(0, if_true); - compare->SetSuccessorAt(1, if_false); + compare = New<HIsStringAndBranch>(receiver, if_true, if_false); map = initial_string_map; expr->set_string_check( Handle<JSObject>(JSObject::cast(map->prototype()))); } else { - compare = new(zone()) HCompareMap(receiver, map, if_true, if_false); + compare = New<HCompareMap>(receiver, map, if_true, if_false); expr->set_map_check(); } - current_block()->Finish(compare); + FinishCurrentBlock(compare); if (expr->check_type() == NUMBER_CHECK) { - if_true->Goto(number_block); + Goto(if_true, number_block); if_true = number_block; number_block->SetJoinId(expr->id()); } @@ -6164,14 +6245,13 @@ void HOptimizedGraphBuilder::HandlePolymorphicCallNamed( if (HasStackOverflow()) 
return; } else { HCallConstantFunction* call = - new(zone()) HCallConstantFunction(expr->target(), argument_count); - call->set_position(expr->position()); + New<HCallConstantFunction>(expr->target(), argument_count); PreProcessCall(call); AddInstruction(call); if (!ast_context()->IsEffect()) Push(call); } - if (current_block() != NULL) current_block()->Goto(join); + if (current_block() != NULL) Goto(join); set_current_block(if_false); } @@ -6182,18 +6262,17 @@ void HOptimizedGraphBuilder::HandlePolymorphicCallNamed( // Because the deopt may be the only path in the polymorphic call, make sure // that the environment stack matches the depth on deopt that it otherwise // would have had after a successful call. - Drop(argument_count - (ast_context()->IsEffect() ? 0 : 1)); + Drop(argument_count); + if (!ast_context()->IsEffect()) Push(graph()->GetConstant0()); FinishExitWithHardDeoptimization("Unknown map in polymorphic call", join); } else { - HValue* context = environment()->context(); - HCallNamed* call = new(zone()) HCallNamed(context, name, argument_count); - call->set_position(expr->position()); + HCallNamed* call = New<HCallNamed>(name, argument_count); PreProcessCall(call); if (join != NULL) { AddInstruction(call); if (!ast_context()->IsEffect()) Push(call); - current_block()->Goto(join); + Goto(join); } else { return ast_context()->ReturnInstruction(call, expr->id()); } @@ -6282,7 +6361,7 @@ bool HOptimizedGraphBuilder::TryInline(CallKind call_kind, return false; } -#if !V8_TARGET_ARCH_IA32 +#if !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS // Target must be able to use caller's context. CompilationInfo* outer_info = current_info(); if (target->context() != outer_info->closure()->context() || @@ -6431,9 +6510,9 @@ bool HOptimizedGraphBuilder::TryInline(CallKind call_kind, undefined, function_state()->inlining_kind(), undefined_receiver); -#if V8_TARGET_ARCH_IA32 - // IA32 only, overwrite the caller's context in the deoptimization - // environment with the correct one. +#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_MIPS + // IA32, ARM and MIPS only, overwrite the caller's context in the + // deoptimization environment with the correct one. // // TODO(kmillikin): implement the same inlining on other platforms so we // can remove the unsightly ifdefs in this function. @@ -6495,12 +6574,12 @@ bool HOptimizedGraphBuilder::TryInline(CallKind call_kind, // return value will always evaluate to true, in a value context the // return value is the newly allocated receiver. if (call_context()->IsTest()) { - current_block()->Goto(inlined_test_context()->if_true(), state); + Goto(inlined_test_context()->if_true(), state); } else if (call_context()->IsEffect()) { - current_block()->Goto(function_return(), state); + Goto(function_return(), state); } else { ASSERT(call_context()->IsValue()); - current_block()->AddLeaveInlined(implicit_return_value, state); + AddLeaveInlined(implicit_return_value, state); } } else if (state->inlining_kind() == SETTER_CALL_RETURN) { // Falling off the end of an inlined setter call. 
The returned value is @@ -6509,21 +6588,21 @@ bool HOptimizedGraphBuilder::TryInline(CallKind call_kind, if (call_context()->IsTest()) { inlined_test_context()->ReturnValue(implicit_return_value); } else if (call_context()->IsEffect()) { - current_block()->Goto(function_return(), state); + Goto(function_return(), state); } else { ASSERT(call_context()->IsValue()); - current_block()->AddLeaveInlined(implicit_return_value, state); + AddLeaveInlined(implicit_return_value, state); } } else { // Falling off the end of a normal inlined function. This basically means // returning undefined. if (call_context()->IsTest()) { - current_block()->Goto(inlined_test_context()->if_false(), state); + Goto(inlined_test_context()->if_false(), state); } else if (call_context()->IsEffect()) { - current_block()->Goto(function_return(), state); + Goto(function_return(), state); } else { ASSERT(call_context()->IsValue()); - current_block()->AddLeaveInlined(undefined, state); + AddLeaveInlined(undefined, state); } } } @@ -6545,13 +6624,13 @@ bool HOptimizedGraphBuilder::TryInline(CallKind call_kind, entry->RegisterReturnTarget(if_true, zone()); if_true->SetJoinId(ast_id); HBasicBlock* true_target = TestContext::cast(ast_context())->if_true(); - if_true->Goto(true_target, function_state()); + Goto(if_true, true_target, function_state()); } if (if_false->HasPredecessor()) { entry->RegisterReturnTarget(if_false, zone()); if_false->SetJoinId(ast_id); HBasicBlock* false_target = TestContext::cast(ast_context())->if_false(); - if_false->Goto(false_target, function_state()); + Goto(if_false, false_target, function_state()); } set_current_block(NULL); return true; @@ -6654,11 +6733,8 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinFunctionCall(Call* expr, case kMathTan: if (expr->arguments()->length() == 1) { HValue* argument = Pop(); - HValue* context = environment()->context(); Drop(1); // Receiver. - HInstruction* op = - HUnaryMathOperation::New(zone(), context, argument, id); - op->set_position(expr->position()); + HInstruction* op = NewUncasted<HUnaryMathOperation>(argument, id); if (drop_extra) Drop(1); // Optionally drop the function. ast_context()->ReturnInstruction(op, expr->id()); return true; @@ -6669,8 +6745,7 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinFunctionCall(Call* expr, HValue* right = Pop(); HValue* left = Pop(); Drop(1); // Receiver. - HValue* context = environment()->context(); - HInstruction* op = HMul::NewImul(zone(), context, left, right); + HInstruction* op = HMul::NewImul(zone(), context(), left, right); if (drop_extra) Drop(1); // Optionally drop the function. 
ast_context()->ReturnInstruction(op, expr->id()); return true; @@ -6700,7 +6775,6 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall( if (argument_count == 2 && check_type == STRING_CHECK) { HValue* index = Pop(); HValue* string = Pop(); - HValue* context = environment()->context(); ASSERT(!expr->holder().is_null()); BuildCheckPrototypeMaps(Call::GetPrototypeForPrimitiveCheck( STRING_CHECK, expr->holder()->GetIsolate()), @@ -6712,8 +6786,7 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall( return true; } AddInstruction(char_code); - HInstruction* result = - HStringCharFromCode::New(zone(), context, char_code); + HInstruction* result = NewUncasted<HStringCharFromCode>(char_code); ast_context()->ReturnInstruction(result, expr->id()); return true; } @@ -6722,10 +6795,8 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall( if (argument_count == 2 && check_type == RECEIVER_MAP_CHECK) { AddCheckConstantFunction(expr->holder(), receiver, receiver_map); HValue* argument = Pop(); - HValue* context = environment()->context(); Drop(1); // Receiver. - HInstruction* result = - HStringCharFromCode::New(zone(), context, argument); + HInstruction* result = NewUncasted<HStringCharFromCode>(argument); ast_context()->ReturnInstruction(result, expr->id()); return true; } @@ -6744,11 +6815,8 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall( if (argument_count == 2 && check_type == RECEIVER_MAP_CHECK) { AddCheckConstantFunction(expr->holder(), receiver, receiver_map); HValue* argument = Pop(); - HValue* context = environment()->context(); Drop(1); // Receiver. - HInstruction* op = - HUnaryMathOperation::New(zone(), context, argument, id); - op->set_position(expr->position()); + HInstruction* op = NewUncasted<HUnaryMathOperation>(argument, id); ast_context()->ReturnInstruction(op, expr->id()); return true; } @@ -6759,30 +6827,27 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall( HValue* right = Pop(); HValue* left = Pop(); Pop(); // Pop receiver. - HValue* context = environment()->context(); HInstruction* result = NULL; // Use sqrt() if exponent is 0.5 or -0.5. if (right->IsConstant() && HConstant::cast(right)->HasDoubleValue()) { double exponent = HConstant::cast(right)->DoubleValue(); if (exponent == 0.5) { - result = - HUnaryMathOperation::New(zone(), context, left, kMathPowHalf); + result = NewUncasted<HUnaryMathOperation>(left, kMathPowHalf); } else if (exponent == -0.5) { HValue* one = graph()->GetConstant1(); - HInstruction* sqrt = - HUnaryMathOperation::New(zone(), context, left, kMathPowHalf); - AddInstruction(sqrt); + HInstruction* sqrt = AddUncasted<HUnaryMathOperation>( + left, kMathPowHalf); // MathPowHalf doesn't have side effects so there's no need for // an environment simulation here. ASSERT(!sqrt->HasObservableSideEffects()); - result = HDiv::New(zone(), context, one, sqrt); + result = NewUncasted<HDiv>(one, sqrt); } else if (exponent == 2.0) { - result = HMul::New(zone(), context, left, left); + result = NewUncasted<HMul>(left, left); } } if (result == NULL) { - result = HPower::New(zone(), context, left, right); + result = NewUncasted<HPower>(left, right); } ast_context()->ReturnInstruction(result, expr->id()); return true; @@ -6793,7 +6858,7 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall( AddCheckConstantFunction(expr->holder(), receiver, receiver_map); Drop(1); // Receiver. 
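The exponent special-casing just above is plain strength reduction on constant exponents. A standalone sketch of the three rewrites, with std::pow standing in for HPower (note that JS pow semantics differ from a plain sqrt at corner cases like -Infinity and -0, which the dedicated kMathPowHalf operation is responsible for handling):

    #include <cassert>
    #include <cmath>

    double ReducedPow(double x, double exponent) {
      if (exponent == 0.5) return std::sqrt(x);         // Math.pow(x, 0.5)
      if (exponent == -0.5) return 1.0 / std::sqrt(x);  // Math.pow(x, -0.5)
      if (exponent == 2.0) return x * x;                // Math.pow(x, 2)
      return std::pow(x, exponent);                     // general case
    }

    int main() {
      assert(ReducedPow(9.0, 0.5) == 3.0);
      assert(ReducedPow(4.0, -0.5) == 0.5);
      assert(ReducedPow(3.0, 2.0) == 9.0);
      return 0;
    }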
HGlobalObject* global_object = Add<HGlobalObject>(); - HRandom* result = new(zone()) HRandom(global_object); + HRandom* result = New<HRandom>(global_object); ast_context()->ReturnInstruction(result, expr->id()); return true; } @@ -6805,11 +6870,9 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall( HValue* right = Pop(); HValue* left = Pop(); Drop(1); // Receiver. - HValue* context = environment()->context(); HMathMinMax::Operation op = (id == kMathMin) ? HMathMinMax::kMathMin : HMathMinMax::kMathMax; - HInstruction* result = - HMathMinMax::New(zone(), context, left, right, op); + HInstruction* result = NewUncasted<HMathMinMax>(left, right, op); ast_context()->ReturnInstruction(result, expr->id()); return true; } @@ -6820,8 +6883,7 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall( HValue* right = Pop(); HValue* left = Pop(); Drop(1); // Receiver. - HValue* context = environment()->context(); - HInstruction* result = HMul::NewImul(zone(), context, left, right); + HInstruction* result = HMul::NewImul(zone(), context(), left, right); ast_context()->ReturnInstruction(result, expr->id()); return true; } @@ -6872,12 +6934,10 @@ bool HOptimizedGraphBuilder::TryCallApply(Call* expr) { HInstruction* elements = Add<HArgumentsElements>(false); HInstruction* length = Add<HArgumentsLength>(elements); HValue* wrapped_receiver = BuildWrapReceiver(receiver, function); - HInstruction* result = - new(zone()) HApplyArguments(function, - wrapped_receiver, - length, - elements); - result->set_position(expr->position()); + HInstruction* result = New<HApplyArguments>(function, + wrapped_receiver, + length, + elements); ast_context()->ReturnInstruction(result, expr->id()); return true; } else { @@ -6903,19 +6963,15 @@ bool HOptimizedGraphBuilder::TryCallApply(Call* expr) { } Drop(arguments_count - 1); - PushAndAdd(New<HPushArgument>(Pop())); + Push(Add<HPushArgument>(Pop())); for (int i = 1; i < arguments_count; i++) { - PushAndAdd(New<HPushArgument>(arguments_values->at(i))); + Push(Add<HPushArgument>(arguments_values->at(i))); } - HValue* context = environment()->context(); - HInvokeFunction* call = new(zone()) HInvokeFunction( - context, - function, - known_function, - arguments_count); + HInvokeFunction* call = New<HInvokeFunction>(function, + known_function, + arguments_count); Drop(arguments_count); - call->set_position(expr->position()); ast_context()->ReturnInstruction(call, expr->id()); return true; } @@ -6945,9 +7001,7 @@ void HOptimizedGraphBuilder::VisitCall(Call* expr) { CHECK_ALIVE(VisitArgumentList(expr->arguments())); - HValue* context = environment()->context(); - call = new(zone()) HCallKeyed(context, key, argument_count); - call->set_position(expr->position()); + call = New<HCallKeyed>(key, argument_count); Drop(argument_count + 1); // 1 is the key. return ast_context()->ReturnInstruction(call, expr->id()); } @@ -6985,16 +7039,13 @@ void HOptimizedGraphBuilder::VisitCall(Call* expr) { // When the target has a custom call IC generator, use the IC, // because it is likely to generate better code. Also use the IC // when a primitive receiver check is required. 
- HValue* context = environment()->context(); - call = PreProcessCall( - new(zone()) HCallNamed(context, name, argument_count)); + call = PreProcessCall(New<HCallNamed>(name, argument_count)); } else { AddCheckConstantFunction(expr->holder(), receiver, map); if (TryInlineCall(expr)) return; call = PreProcessCall( - new(zone()) HCallConstantFunction(expr->target(), - argument_count)); + New<HCallConstantFunction>(expr->target(), argument_count)); } } else if (types != NULL && types->length() > 1) { ASSERT(expr->check_type() == RECEIVER_MAP_CHECK); @@ -7002,11 +7053,8 @@ void HOptimizedGraphBuilder::VisitCall(Call* expr) { return; } else { - HValue* context = environment()->context(); - call = PreProcessCall( - new(zone()) HCallNamed(context, name, argument_count)); + call = PreProcessCall(New<HCallNamed>(name, argument_count)); } - } else { VariableProxy* proxy = expr->expression()->AsVariableProxy(); if (proxy != NULL && proxy->var()->is_possibly_eval(isolate())) { @@ -7030,9 +7078,8 @@ void HOptimizedGraphBuilder::VisitCall(Call* expr) { if (known_global_function) { // Push the global object instead of the global receiver because // code generated by the full code generator expects it. - HValue* context = environment()->context(); - HGlobalObject* global_object = new(zone()) HGlobalObject(context); - PushAndAdd(global_object); + HGlobalObject* global_object = Add<HGlobalObject>(); + Push(global_object); CHECK_ALIVE(VisitExpressions(expr->arguments())); CHECK_ALIVE(VisitForValue(expr->expression())); @@ -7064,16 +7111,14 @@ void HOptimizedGraphBuilder::VisitCall(Call* expr) { if (CallStubCompiler::HasCustomCallGenerator(expr->target())) { // When the target has a custom call IC generator, use the IC, // because it is likely to generate better code. 
- HValue* context = environment()->context(); - call = PreProcessCall( - new(zone()) HCallNamed(context, var->name(), argument_count)); + call = PreProcessCall(New<HCallNamed>(var->name(), argument_count)); } else { - call = PreProcessCall(new(zone()) HCallKnownGlobal(expr->target(), - argument_count)); + call = PreProcessCall(New<HCallKnownGlobal>( + expr->target(), argument_count)); } } else { HGlobalObject* receiver = Add<HGlobalObject>(); - PushAndAdd(New<HPushArgument>(receiver)); + Push(Add<HPushArgument>(receiver)); CHECK_ALIVE(VisitArgumentList(expr->arguments())); call = New<HCallGlobal>(var->name(), argument_count); @@ -7086,8 +7131,8 @@ void HOptimizedGraphBuilder::VisitCall(Call* expr) { CHECK_ALIVE(VisitForValue(expr->expression())); HValue* function = Top(); HGlobalObject* global = Add<HGlobalObject>(); - HGlobalReceiver* receiver = New<HGlobalReceiver>(global); - PushAndAdd(receiver); + HGlobalReceiver* receiver = Add<HGlobalReceiver>(global); + Push(receiver); CHECK_ALIVE(VisitExpressions(expr->arguments())); Add<HCheckValue>(function, expr->target()); @@ -7113,7 +7158,7 @@ void HOptimizedGraphBuilder::VisitCall(Call* expr) { HValue* function = Top(); HGlobalObject* global_object = Add<HGlobalObject>(); HGlobalReceiver* receiver = Add<HGlobalReceiver>(global_object); - PushAndAdd(New<HPushArgument>(receiver)); + Push(Add<HPushArgument>(receiver)); CHECK_ALIVE(VisitArgumentList(expr->arguments())); call = New<HCallFunction>(function, argument_count); @@ -7121,7 +7166,6 @@ void HOptimizedGraphBuilder::VisitCall(Call* expr) { } } - call->set_position(expr->position()); return ast_context()->ReturnInstruction(call, expr->id()); } @@ -7139,8 +7183,8 @@ void HOptimizedGraphBuilder::VisitCallNew(CallNew* expr) { ASSERT(!HasStackOverflow()); ASSERT(current_block() != NULL); ASSERT(current_block()->HasPredecessor()); + if (!FLAG_emit_opt_code_positions) SetSourcePosition(expr->position()); int argument_count = expr->arguments()->length() + 1; // Plus constructor. 
- HValue* context = environment()->context(); Factory* factory = isolate()->factory(); if (FLAG_inline_construct && @@ -7229,9 +7273,8 @@ void HOptimizedGraphBuilder::VisitCallNew(CallNew* expr) { receiver->DeleteAndReplaceWith(NULL); check->DeleteAndReplaceWith(NULL); environment()->SetExpressionStackAt(receiver_index, function); - HInstruction* call = PreProcessCall( - new(zone()) HCallNew(context, function, argument_count)); - call->set_position(expr->position()); + HInstruction* call = + PreProcessCall(New<HCallNew>(function, argument_count)); return ast_context()->ReturnInstruction(call, expr->id()); } else { // The constructor function is both an operand to the instruction and an @@ -7245,13 +7288,12 @@ void HOptimizedGraphBuilder::VisitCallNew(CallNew* expr) { if (expr->target().is_identical_to(array_function)) { Handle<Cell> cell = expr->allocation_info_cell(); Add<HCheckValue>(constructor, array_function); - call = new(zone()) HCallNewArray(context, constructor, argument_count, - cell, expr->elements_kind()); + call = New<HCallNewArray>(constructor, argument_count, + cell, expr->elements_kind()); } else { - call = new(zone()) HCallNew(context, constructor, argument_count); + call = New<HCallNew>(constructor, argument_count); } Drop(argument_count); - call->set_position(expr->position()); return ast_context()->ReturnInstruction(call, expr->id()); } } @@ -7373,8 +7415,7 @@ void HOptimizedGraphBuilder::VisitVoid(UnaryOperation* expr) { void HOptimizedGraphBuilder::VisitTypeof(UnaryOperation* expr) { CHECK_ALIVE(VisitForTypeOf(expr->expression())); HValue* value = Pop(); - HValue* context = environment()->context(); - HInstruction* instr = new(zone()) HTypeof(context, value); + HInstruction* instr = New<HTypeof>(value); return ast_context()->ReturnInstruction(instr, expr->id()); } @@ -7427,7 +7468,7 @@ HInstruction* HOptimizedGraphBuilder::BuildIncrement( bool returns_original_input, CountOperation* expr) { // The input to the count operation is on top of the expression stack. 
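The hunk that follows switches BuildIncrement from TypeInfo to the new Handle<Type> bounds, but the representation policy is unchanged: when feedback is absent or merely tagged, assume Smi optimistically and let a deoptimization correct it. As a tiny sketch (illustrative enum, not V8's Representation class):

    enum class Rep { kNone, kSmi, kInteger32, kDouble, kTagged };

    // Pick the representation used for ++/-- from recorded type feedback.
    Rep RepForIncrement(Rep feedback) {
      if (feedback == Rep::kNone || feedback == Rep::kTagged) {
        return Rep::kSmi;  // optimistic; overflow deopts and re-learns
      }
      return feedback;
    }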
- TypeInfo info = expr->type(); + Handle<Type> info = expr->type(); Representation rep = Representation::FromType(info); if (rep.IsNone() || rep.IsTagged()) { rep = Representation::Smi(); } @@ -7483,6 +7524,7 @@ void HOptimizedGraphBuilder::VisitCountOperation(CountOperation* expr) { ASSERT(!HasStackOverflow()); ASSERT(current_block() != NULL); ASSERT(current_block()->HasPredecessor()); + if (!FLAG_emit_opt_code_positions) SetSourcePosition(expr->position()); Expression* target = expr->expression(); VariableProxy* proxy = target->AsVariableProxy(); Property* prop = target->AsProperty(); @@ -7515,7 +7557,6 @@ void HOptimizedGraphBuilder::VisitCountOperation(CountOperation* expr) { case Variable::UNALLOCATED: HandleGlobalVariableAssignment(var, after, - expr->position(), expr->AssignmentId()); break; @@ -7567,15 +7608,13 @@ void HOptimizedGraphBuilder::VisitCountOperation(CountOperation* expr) { HValue* object = Top(); HValue* key = NULL; - if ((!prop->IsStringLength() && - !prop->IsFunctionPrototype() && - !prop->key()->IsPropertyName()) || + if ((!prop->IsFunctionPrototype() && !prop->key()->IsPropertyName()) || prop->IsStringAccess()) { CHECK_ALIVE(VisitForValue(prop->key())); key = Top(); } - CHECK_ALIVE(PushLoad(prop, object, key, expr->position())); + CHECK_ALIVE(PushLoad(prop, object, key)); after = BuildIncrement(returns_original_input, expr); @@ -7611,7 +7650,7 @@ HInstruction* HOptimizedGraphBuilder::BuildStringCharCodeAt( } BuildCheckHeapObject(string); HValue* checkstring = - AddInstruction(HCheckInstanceType::NewIsString(string, zone())); + Add<HCheckInstanceType>(string, HCheckInstanceType::IS_STRING); HInstruction* length = BuildLoadStringLength(string, checkstring); AddInstruction(length); HInstruction* checked_index = Add<HBoundsCheck>(index, length); @@ -7619,9 +7658,16 @@ } -// Checks if the given shift amounts have form: (sa) and (32 - sa). +// Checks if the given shift amounts have the following forms: +// (N1) and (N2) with N1 + N2 = 32; (sa) and (32 - sa). static bool ShiftAmountsAllowReplaceByRotate(HValue* sa, HValue* const32_minus_sa) { + if (sa->IsConstant() && const32_minus_sa->IsConstant()) { + const HConstant* c1 = HConstant::cast(sa); + const HConstant* c2 = HConstant::cast(const32_minus_sa); + return c1->HasInteger32Value() && c2->HasInteger32Value() && + (c1->Integer32Value() + c2->Integer32Value() == 32); + } if (!const32_minus_sa->IsSub()) return false; HSub* sub = HSub::cast(const32_minus_sa); if (sa != sub->right()) return false; @@ -7638,10 +7684,10 @@ static bool ShiftAmountsAllowReplaceByRotate(HValue* sa, // directions that can be replaced by one rotate right instruction or not. // Returns the operand and the shift amount for the rotate instruction in the // former case.
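For reference, the pattern ShiftAmountsAllowReplaceByRotate describes is the standard rotate identity, written here as plain, runnable C++ (the helper name is ours, not V8's):

    #include <cstdint>

    // (x >> sa) | (x << (32 - sa)) rotates x right by sa. The matcher accepts
    // either operand order and, with this patch, also a pair of constant
    // shift amounts N1 and N2 with N1 + N2 == 32.
    static inline uint32_t RotateRight32(uint32_t x, uint32_t sa) {
      sa &= 31;  // keeps both shifts well-defined when sa is 0 or 32
      return (x >> sa) | (x << ((32 - sa) & 31));
    }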
-bool HOptimizedGraphBuilder::MatchRotateRight(HValue* left, - HValue* right, - HValue** operand, - HValue** shift_amount) { +bool HGraphBuilder::MatchRotateRight(HValue* left, + HValue* right, + HValue** operand, + HValue** shift_amount) { HShl* shl; HShr* shr; if (left->IsShl() && right->IsShr()) { @@ -7677,6 +7723,18 @@ bool CanBeZero(HValue* right) { } +HValue* HGraphBuilder::EnforceNumberType(HValue* number, + Handle<Type> expected) { + if (expected->Is(Type::Smi())) { + return Add<HForceRepresentation>(number, Representation::Smi()); + } + if (expected->Is(Type::Signed32())) { + return Add<HForceRepresentation>(number, Representation::Integer32()); + } + return number; +} + + HValue* HGraphBuilder::TruncateToNumber(HValue* value, Handle<Type>* expected) { if (value->IsConstant()) { HConstant* constant = HConstant::cast(value); @@ -7687,6 +7745,33 @@ HValue* HGraphBuilder::TruncateToNumber(HValue* value, Handle<Type>* expected) { } } + // We put temporary values on the stack, which don't correspond to anything + // in baseline code. Since nothing is observable we avoid recording those + // pushes with a NoObservableSideEffectsScope. + NoObservableSideEffectsScope no_effects(this); + + Handle<Type> expected_type = *expected; + + // Separate the number type from the rest. + Handle<Type> expected_obj = handle(Type::Intersect( + expected_type, handle(Type::NonNumber(), isolate())), isolate()); + Handle<Type> expected_number = handle(Type::Intersect( + expected_type, handle(Type::Number(), isolate())), isolate()); + + // We expect to get a number. + // (We need to check first, since Type::None->Is(Type::Any()) == true.) + if (expected_obj->Is(Type::None())) { + ASSERT(!expected_number->Is(Type::None())); + return value; + } + + if (expected_obj->Is(Type::Undefined())) { + // This is already done by HChange. + *expected = handle(Type::Union( + expected_number, handle(Type::Double(), isolate())), isolate()); + return value; + } + return value; } @@ -7695,89 +7780,156 @@ HInstruction* HOptimizedGraphBuilder::BuildBinaryOperation( BinaryOperation* expr, HValue* left, HValue* right) { - HValue* context = environment()->context(); Handle<Type> left_type = expr->left()->bounds().lower; Handle<Type> right_type = expr->right()->bounds().lower; Handle<Type> result_type = expr->bounds().lower; Maybe<int> fixed_right_arg = expr->fixed_right_arg(); + + return HGraphBuilder::BuildBinaryOperation(expr->op(), left, right, + left_type, right_type, result_type, fixed_right_arg); } + + +HInstruction* HGraphBuilder::BuildBinaryOperation( + Token::Value op, + HValue* left, + HValue* right, + Handle<Type> left_type, + Handle<Type> right_type, + Handle<Type> result_type, + Maybe<int> fixed_right_arg, + bool binop_stub) { + + Representation left_rep = Representation::FromType(left_type); Representation right_rep = Representation::FromType(right_type); - Representation result_rep = Representation::FromType(result_type); - if (expr->op() != Token::ADD || - (left->type().IsNonString() && right->type().IsNonString())) { - // For addition we can only truncate the arguments to number if we can - // prove that we will not end up in string concatenation mode.
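TruncateToNumber above splits the expected type into its number part and its non-number part before deciding whether truncation is safe. The same reasoning on a toy bitset lattice, purely illustrative (V8's Type is a richer, handle-based structure):

    #include <cstdint>

    enum : uint32_t { kNone = 0u, kNumber = 1u << 0, kString = 1u << 1,
                      kUndefined = 1u << 2, kOtherObject = 1u << 3 };

    inline bool CanOnlyBeNumber(uint32_t expected) {
      uint32_t expected_obj = expected & ~kNumber;    // Intersect(T, NonNumber)
      uint32_t expected_number = expected & kNumber;  // Intersect(T, Number)
      // If no non-numeric part remains, the value must already be a number.
      return expected_obj == kNone && expected_number != kNone;
    }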
- left = TruncateToNumber(left, &left_type); - right = TruncateToNumber(right, &right_type); - } + bool maybe_string_add = op == Token::ADD && + (left_type->Maybe(Type::String()) || + right_type->Maybe(Type::String())); if (left_type->Is(Type::None())) { Add<HDeoptimize>("Insufficient type feedback for LHS of binary operation", Deoptimizer::SOFT); - // TODO(rossberg): we should be able to get rid of non-continuous defaults. + // TODO(rossberg): we should be able to get rid of non-continuous + // defaults. left_type = handle(Type::Any(), isolate()); + } else { + if (!maybe_string_add) left = TruncateToNumber(left, &left_type); + left_rep = Representation::FromType(left_type); } + if (right_type->Is(Type::None())) { Add<HDeoptimize>("Insufficient type feedback for RHS of binary operation", Deoptimizer::SOFT); right_type = handle(Type::Any(), isolate()); + } else { + if (!maybe_string_add) right = TruncateToNumber(right, &right_type); + right_rep = Representation::FromType(right_type); + } + + // Special case for string addition here. + if (op == Token::ADD && + (left_type->Is(Type::String()) || right_type->Is(Type::String()))) { + if (left_type->Is(Type::String())) { + IfBuilder if_isstring(this); + if_isstring.If<HIsStringAndBranch>(left); + if_isstring.Then(); + if_isstring.ElseDeopt("Expected string for LHS of binary operation"); + } else if (left_type->Is(Type::Number())) { + left = BuildNumberToString(left, left_type); + } else { + ASSERT(right_type->Is(Type::String())); + HValue* function = AddLoadJSBuiltin(Builtins::STRING_ADD_RIGHT); + Add<HPushArgument>(left); + Add<HPushArgument>(right); + return NewUncasted<HInvokeFunction>(function, 2); + } + + if (right_type->Is(Type::String())) { + IfBuilder if_isstring(this); + if_isstring.If<HIsStringAndBranch>(right); + if_isstring.Then(); + if_isstring.ElseDeopt("Expected string for RHS of binary operation"); + } else if (right_type->Is(Type::Number())) { + right = BuildNumberToString(right, right_type); + } else { + ASSERT(left_type->Is(Type::String())); + HValue* function = AddLoadJSBuiltin(Builtins::STRING_ADD_LEFT); + Add<HPushArgument>(left); + Add<HPushArgument>(right); + return NewUncasted<HInvokeFunction>(function, 2); + } + + return NewUncasted<HStringAdd>(left, right, STRING_ADD_CHECK_NONE); + } + + if (binop_stub) { + left = EnforceNumberType(left, left_type); + right = EnforceNumberType(right, right_type); } + + Representation result_rep = Representation::FromType(result_type); + + bool is_non_primitive = (left_rep.IsTagged() && !left_rep.IsSmi()) || + (right_rep.IsTagged() && !right_rep.IsSmi()); + HInstruction* instr = NULL; - switch (expr->op()) { - case Token::ADD: - if (left_type->Is(Type::String()) && right_type->Is(Type::String())) { - BuildCheckHeapObject(left); - AddInstruction(HCheckInstanceType::NewIsString(left, zone())); - BuildCheckHeapObject(right); - AddInstruction(HCheckInstanceType::NewIsString(right, zone())); - instr = HStringAdd::New(zone(), context, left, right); - } else { - instr = HAdd::New(zone(), context, left, right); - } - break; - case Token::SUB: - instr = HSub::New(zone(), context, left, right); - break; - case Token::MUL: - instr = HMul::New(zone(), context, left, right); - break; - case Token::MOD: - instr = HMod::New(zone(), context, left, right, fixed_right_arg); - break; - case Token::DIV: - instr = HDiv::New(zone(), context, left, right); - break; - case Token::BIT_XOR: - case Token::BIT_AND: - instr = NewUncasted<HBitwise>(expr->op(), left, right); - break; - case Token::BIT_OR: { - HValue* 
operand, *shift_amount; - if (left_type->Is(Type::Signed32()) && - right_type->Is(Type::Signed32()) && - MatchRotateRight(left, right, &operand, &shift_amount)) { - instr = new(zone()) HRor(context, operand, shift_amount); - } else { - instr = NewUncasted<HBitwise>(expr->op(), left, right); + // Only the stub is allowed to call into the runtime, since otherwise we would + // inline several instructions (including the two pushes) for every tagged + // operation in optimized code, which is more expensive than a stub call. + if (binop_stub && is_non_primitive) { + HValue* function = AddLoadJSBuiltin(BinaryOpIC::TokenToJSBuiltin(op)); + Add<HPushArgument>(left); + Add<HPushArgument>(right); + instr = NewUncasted<HInvokeFunction>(function, 2); + } else { + switch (op) { + case Token::ADD: + instr = NewUncasted<HAdd>(left, right); + break; + case Token::SUB: + instr = NewUncasted<HSub>(left, right); + break; + case Token::MUL: + instr = NewUncasted<HMul>(left, right); + break; + case Token::MOD: + instr = NewUncasted<HMod>(left, right, fixed_right_arg); + break; + case Token::DIV: + instr = NewUncasted<HDiv>(left, right); + break; + case Token::BIT_XOR: + case Token::BIT_AND: + instr = NewUncasted<HBitwise>(op, left, right); + break; + case Token::BIT_OR: { + HValue* operand, *shift_amount; + if (left_type->Is(Type::Signed32()) && + right_type->Is(Type::Signed32()) && + MatchRotateRight(left, right, &operand, &shift_amount)) { + instr = NewUncasted<HRor>(operand, shift_amount); + } else { + instr = NewUncasted<HBitwise>(op, left, right); + } + break; } - break; + case Token::SAR: + instr = NewUncasted<HSar>(left, right); + break; + case Token::SHR: + instr = NewUncasted<HShr>(left, right); + if (FLAG_opt_safe_uint32_operations && instr->IsShr() && + CanBeZero(right)) { + graph()->RecordUint32Instruction(instr); + } + break; + case Token::SHL: + instr = NewUncasted<HShl>(left, right); + break; + default: + UNREACHABLE(); } - case Token::SAR: - instr = HSar::New(zone(), context, left, right); - break; - case Token::SHR: - instr = HShr::New(zone(), context, left, right); - if (FLAG_opt_safe_uint32_operations && instr->IsShr() && - CanBeZero(right)) { - graph()->RecordUint32Instruction(instr); - } - break; - case Token::SHL: - instr = HShl::New(zone(), context, left, right); - break; - default: - UNREACHABLE(); } if (instr->IsBinaryOperation()) { @@ -7785,6 +7937,19 @@ HInstruction* HOptimizedGraphBuilder::BuildBinaryOperation( binop->set_observed_input_representation(1, left_rep); binop->set_observed_input_representation(2, right_rep); binop->initialize_output_representation(result_rep); + if (binop_stub) { + // Stub should not call into stub. + instr->SetFlag(HValue::kCannotBeTagged); + // And should truncate on HForceRepresentation already. + if (left->IsForceRepresentation()) { + left->CopyFlag(HValue::kTruncatingToSmi, instr); + left->CopyFlag(HValue::kTruncatingToInt32, instr); + } + if (right->IsForceRepresentation()) { + right->CopyFlag(HValue::kTruncatingToSmi, instr); + right->CopyFlag(HValue::kTruncatingToInt32, instr); + } + } } return instr; } @@ -7874,9 +8039,9 @@ void HOptimizedGraphBuilder::VisitLogicalExpression(BinaryOperation* expr) { HBasicBlock* eval_right = graph()->CreateBasicBlock(); ToBooleanStub::Types expected(expr->left()->to_boolean_types()); HBranch* test = is_logical_and - ? new(zone()) HBranch(left_value, expected, eval_right, empty_block) - : new(zone()) HBranch(left_value, expected, empty_block, eval_right); - current_block()->Finish(test); + ?
New<HBranch>(left_value, expected, eval_right, empty_block) + : New<HBranch>(left_value, expected, empty_block, eval_right); + FinishCurrentBlock(test); set_current_block(eval_right); Drop(1); // Value of the left subexpression. @@ -7933,10 +8098,10 @@ void HOptimizedGraphBuilder::VisitLogicalExpression(BinaryOperation* expr) { void HOptimizedGraphBuilder::VisitArithmeticExpression(BinaryOperation* expr) { CHECK_ALIVE(VisitForValue(expr->left())); CHECK_ALIVE(VisitForValue(expr->right())); + if (!FLAG_emit_opt_code_positions) SetSourcePosition(expr->position()); HValue* right = Pop(); HValue* left = Pop(); HInstruction* instr = BuildBinaryOperation(expr, left, right); - instr->set_position(expr->position()); return ast_context()->ReturnInstruction(instr, expr->id()); } @@ -7945,9 +8110,9 @@ void HOptimizedGraphBuilder::HandleLiteralCompareTypeof(CompareOperation* expr, Expression* sub_expr, Handle<String> check) { CHECK_ALIVE(VisitForTypeOf(sub_expr)); + if (!FLAG_emit_opt_code_positions) SetSourcePosition(expr->position()); HValue* value = Pop(); - HTypeofIsAndBranch* instr = new(zone()) HTypeofIsAndBranch(value, check); - instr->set_position(expr->position()); + HTypeofIsAndBranch* instr = New<HTypeofIsAndBranch>(value, check); return ast_context()->ReturnControl(instr, expr->id()); } @@ -7969,6 +8134,8 @@ void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) { ASSERT(current_block() != NULL); ASSERT(current_block()->HasPredecessor()); + if (!FLAG_emit_opt_code_positions) SetSourcePosition(expr->position()); + // Check for a few fast cases. The AST visiting behavior must be in sync // with the full codegen: We don't push both left and right values onto // the expression stack when one side is a special-case literal. @@ -7991,9 +8158,7 @@ void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) { HValue* value = Pop(); Literal* literal = expr->right()->AsLiteral(); Handle<String> rhs = Handle<String>::cast(literal->value()); - HClassOfTestAndBranch* instr = - new(zone()) HClassOfTestAndBranch(value, rhs); - instr->set_position(expr->position()); + HClassOfTestAndBranch* instr = New<HClassOfTestAndBranch>(value, rhs); return ast_context()->ReturnControl(instr, expr->id()); } @@ -8007,7 +8172,6 @@ void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) { CHECK_ALIVE(VisitForValue(expr->left())); CHECK_ALIVE(VisitForValue(expr->right())); - HValue* context = environment()->context(); HValue* right = Pop(); HValue* left = Pop(); Token::Value op = expr->op(); @@ -8015,7 +8179,6 @@ void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) { if (IsLiteralCompareBool(isolate(), left, op, right)) { HCompareObjectEqAndBranch* result = New<HCompareObjectEqAndBranch>(left, right); - result->set_position(expr->position()); return ast_context()->ReturnControl(result, expr->id()); } @@ -8046,14 +8209,12 @@ void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) { // If the target is not null we have found a known global function that is // assumed to stay the same for this instanceof. 
if (target.is_null()) { - HInstanceOf* result = new(zone()) HInstanceOf(context, left, right); - result->set_position(expr->position()); + HInstanceOf* result = New<HInstanceOf>(left, right); return ast_context()->ReturnInstruction(result, expr->id()); } else { Add<HCheckValue>(right, target); HInstanceOfKnownGlobal* result = - new(zone()) HInstanceOfKnownGlobal(context, left, target); - result->set_position(expr->position()); + New<HInstanceOfKnownGlobal>(left, target); return ast_context()->ReturnInstruction(result, expr->id()); } @@ -8065,8 +8226,7 @@ void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) { Add<HPushArgument>(right); // TODO(olivf) InvokeFunction produces a check for the parameter count, // even though we are certain to pass the correct number of arguments here. - HInstruction* result = new(zone()) HInvokeFunction(context, function, 2); - result->set_position(expr->position()); + HInstruction* result = New<HInvokeFunction>(function, 2); return ast_context()->ReturnInstruction(result, expr->id()); } @@ -8090,16 +8250,14 @@ void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) { AddCheckMap(right, map); HCompareObjectEqAndBranch* result = New<HCompareObjectEqAndBranch>(left, right); - result->set_position(expr->position()); return ast_context()->ReturnControl(result, expr->id()); } else { BuildCheckHeapObject(left); - AddInstruction(HCheckInstanceType::NewIsSpecObject(left, zone())); + Add<HCheckInstanceType>(left, HCheckInstanceType::IS_SPEC_OBJECT); BuildCheckHeapObject(right); - AddInstruction(HCheckInstanceType::NewIsSpecObject(right, zone())); + Add<HCheckInstanceType>(right, HCheckInstanceType::IS_SPEC_OBJECT); HCompareObjectEqAndBranch* result = - new(zone()) HCompareObjectEqAndBranch(left, right); - result->set_position(expr->position()); + New<HCompareObjectEqAndBranch>(left, right); return ast_context()->ReturnControl(result, expr->id()); } } @@ -8109,26 +8267,30 @@ void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) { } else if (combined_type->Is(Type::InternalizedString()) && Token::IsEqualityOp(op)) { BuildCheckHeapObject(left); - AddInstruction(HCheckInstanceType::NewIsInternalizedString(left, zone())); + Add<HCheckInstanceType>(left, HCheckInstanceType::IS_INTERNALIZED_STRING); BuildCheckHeapObject(right); - AddInstruction(HCheckInstanceType::NewIsInternalizedString(right, zone())); + Add<HCheckInstanceType>(right, HCheckInstanceType::IS_INTERNALIZED_STRING); HCompareObjectEqAndBranch* result = - new(zone()) HCompareObjectEqAndBranch(left, right); - result->set_position(expr->position()); + New<HCompareObjectEqAndBranch>(left, right); + return ast_context()->ReturnControl(result, expr->id()); + } else if (combined_type->Is(Type::String())) { + BuildCheckHeapObject(left); + Add<HCheckInstanceType>(left, HCheckInstanceType::IS_STRING); + BuildCheckHeapObject(right); + Add<HCheckInstanceType>(right, HCheckInstanceType::IS_STRING); + HStringCompareAndBranch* result = + New<HStringCompareAndBranch>(left, right, op); return ast_context()->ReturnControl(result, expr->id()); } else { if (combined_rep.IsTagged() || combined_rep.IsNone()) { - HCompareGeneric* result = - new(zone()) HCompareGeneric(context, left, right, op); + HCompareGeneric* result = New<HCompareGeneric>(left, right, op); result->set_observed_input_representation(1, left_rep); result->set_observed_input_representation(2, right_rep); - result->set_position(expr->position()); return ast_context()->ReturnInstruction(result, expr->id()); } 
else { HCompareNumericAndBranch* result = - new(zone()) HCompareNumericAndBranch(left, right, op); + New<HCompareNumericAndBranch>(left, right, op); result->set_observed_input_representation(left_rep, right_rep); - result->set_position(expr->position()); return ast_context()->ReturnControl(result, expr->id()); } } @@ -8142,6 +8304,7 @@ void HOptimizedGraphBuilder::HandleLiteralCompareNil(CompareOperation* expr, ASSERT(current_block() != NULL); ASSERT(current_block()->HasPredecessor()); ASSERT(expr->op() == Token::EQ || expr->op() == Token::EQ_STRICT); + if (!FLAG_emit_opt_code_positions) SetSourcePosition(expr->position()); CHECK_ALIVE(VisitForValue(sub_expr)); HValue* value = Pop(); if (expr->op() == Token::EQ_STRICT) { @@ -8150,7 +8313,6 @@ void HOptimizedGraphBuilder::HandleLiteralCompareNil(CompareOperation* expr, : graph()->GetConstantUndefined(); HCompareObjectEqAndBranch* instr = New<HCompareObjectEqAndBranch>(value, nil_constant); - instr->set_position(expr->position()); return ast_context()->ReturnControl(instr, expr->id()); } else { ASSERT_EQ(Token::EQ, expr->op()); @@ -8158,7 +8320,7 @@ void HOptimizedGraphBuilder::HandleLiteralCompareNil(CompareOperation* expr, ? handle(Type::Any(), isolate_) : expr->combined_type(); HIfContinuation continuation; - BuildCompareNil(value, type, expr->position(), &continuation); + BuildCompareNil(value, type, &continuation); return ast_context()->ReturnContinuation(&continuation, expr->id()); } } @@ -8171,49 +8333,28 @@ HInstruction* HOptimizedGraphBuilder::BuildThisFunction() { return New<HConstant>( function_state()->compilation_info()->closure()); } else { - return new(zone()) HThisFunction; + return New<HThisFunction>(); } } HInstruction* HOptimizedGraphBuilder::BuildFastLiteral( Handle<JSObject> boilerplate_object, - Handle<Object> allocation_site_object, - AllocationSiteMode mode) { + AllocationSiteContext* site_context) { NoObservableSideEffectsScope no_effects(this); - - Handle<FixedArrayBase> elements(boilerplate_object->elements()); - int object_size = boilerplate_object->map()->instance_size(); - int object_offset = object_size; - InstanceType instance_type = boilerplate_object->map()->instance_type(); - bool create_allocation_site_info = mode == TRACK_ALLOCATION_SITE && - AllocationSite::CanTrack(instance_type); - - // If using allocation sites, then the payload on the site should already - // be filled in as a valid (boilerplate) array. - ASSERT(!create_allocation_site_info || - AllocationSite::cast(*allocation_site_object)->IsLiteralSite()); - - if (create_allocation_site_info) { - object_size += AllocationMemento::kSize; - } - ASSERT(instance_type == JS_ARRAY_TYPE || instance_type == JS_OBJECT_TYPE); + HType type = instance_type == JS_ARRAY_TYPE ? HType::JSArray() : HType::JSObject(); - HValue* object_size_constant = Add<HConstant>(object_size); + HValue* object_size_constant = Add<HConstant>( + boilerplate_object->map()->instance_size()); HInstruction* object = Add<HAllocate>(object_size_constant, type, isolate()->heap()->GetPretenureMode(), instance_type); - BuildEmitObjectHeader(boilerplate_object, object); - if (create_allocation_site_info) { - HInstruction* allocation_site = Add<HConstant>(allocation_site_object); - BuildCreateAllocationMemento(object, object_offset, allocation_site); - } - + Handle<FixedArrayBase> elements(boilerplate_object->elements()); int elements_size = (elements->length() > 0 && elements->map() != isolate()->heap()->fixed_cow_array_map()) ? 
elements->Size() : 0; @@ -8231,15 +8372,15 @@ HInstruction* HOptimizedGraphBuilder::BuildFastLiteral( } BuildInitElementsInObjectHeader(boilerplate_object, object, object_elements); - // Copy object elements if non-COW. if (object_elements != NULL) { - BuildEmitElements(boilerplate_object, elements, object_elements); + BuildEmitElements(boilerplate_object, elements, object_elements, + site_context); } // Copy in-object properties. if (boilerplate_object->map()->NumberOfFields() != 0) { - BuildEmitInObjectProperties(boilerplate_object, object); + BuildEmitInObjectProperties(boilerplate_object, object, site_context); } return object; } @@ -8291,7 +8432,8 @@ void HOptimizedGraphBuilder::BuildInitElementsInObjectHeader( void HOptimizedGraphBuilder::BuildEmitInObjectProperties( Handle<JSObject> boilerplate_object, - HInstruction* object) { + HInstruction* object, + AllocationSiteContext* site_context) { Handle<DescriptorArray> descriptors( boilerplate_object->map()->instance_descriptors()); int limit = boilerplate_object->map()->NumberOfOwnDescriptors(); @@ -8315,9 +8457,10 @@ void HOptimizedGraphBuilder::BuildEmitInObjectProperties( if (value->IsJSObject()) { Handle<JSObject> value_object = Handle<JSObject>::cast(value); + Handle<AllocationSite> current_site = site_context->EnterNewScope(); HInstruction* result = - BuildFastLiteral(value_object, - Handle<Object>::null(), DONT_TRACK_ALLOCATION_SITE); + BuildFastLiteral(value_object, site_context); + site_context->ExitScope(current_site, value_object); Add<HStoreNamedField>(object, access, result); } else { Representation representation = details.representation(); @@ -8326,6 +8469,12 @@ void HOptimizedGraphBuilder::BuildEmitInObjectProperties( if (representation.IsDouble()) { // Allocate a HeapNumber box and store the value into it. HValue* heap_number_constant = Add<HConstant>(HeapNumber::kSize); + // TODO(mvstanton): This heap number alloc does not have a corresponding + // AllocationSite. That is okay because + // 1) it's a child object of another object with a valid allocation site + // 2) we can just use the mode of the parent object for pretenuring + // The todo is to replace GetPretenureMode() with + // site_context->top()->GetPretenureMode().
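Spelled out, the change the TODO above asks for would look like the following (hypothetical: as patched, the allocation that follows still uses the heap-global isolate()->heap()->GetPretenureMode()):

    HInstruction* double_box =
        Add<HAllocate>(heap_number_constant, HType::HeapNumber(),
                       site_context->top()->GetPretenureMode(),  // per-site mode
                       HEAP_NUMBER_TYPE);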
HInstruction* double_box = Add<HAllocate>(heap_number_constant, HType::HeapNumber(), isolate()->heap()->GetPretenureMode(), HEAP_NUMBER_TYPE); @@ -8355,7 +8504,8 @@ void HOptimizedGraphBuilder::BuildEmitInObjectProperties( void HOptimizedGraphBuilder::BuildEmitElements( Handle<JSObject> boilerplate_object, Handle<FixedArrayBase> elements, - HValue* object_elements) { + HValue* object_elements, + AllocationSiteContext* site_context) { ElementsKind kind = boilerplate_object->map()->elements_kind(); int elements_length = elements->length(); HValue* object_elements_length = Add<HConstant>(elements_length); @@ -8365,7 +8515,8 @@ void HOptimizedGraphBuilder::BuildEmitElements( if (elements->IsFixedDoubleArray()) { BuildEmitFixedDoubleArray(elements, kind, object_elements); } else if (elements->IsFixedArray()) { - BuildEmitFixedArray(elements, kind, object_elements); + BuildEmitFixedArray(elements, kind, object_elements, + site_context); } else { UNREACHABLE(); } @@ -8394,7 +8545,8 @@ void HOptimizedGraphBuilder::BuildEmitFixedDoubleArray( void HOptimizedGraphBuilder::BuildEmitFixedArray( Handle<FixedArrayBase> elements, ElementsKind kind, - HValue* object_elements) { + HValue* object_elements, + AllocationSiteContext* site_context) { HInstruction* boilerplate_elements = Add<HConstant>(elements); int elements_length = elements->length(); Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements); @@ -8403,9 +8555,10 @@ void HOptimizedGraphBuilder::BuildEmitFixedArray( HValue* key_constant = Add<HConstant>(i); if (value->IsJSObject()) { Handle<JSObject> value_object = Handle<JSObject>::cast(value); + Handle<AllocationSite> current_site = site_context->EnterNewScope(); HInstruction* result = - BuildFastLiteral(value_object, - Handle<Object>::null(), DONT_TRACK_ALLOCATION_SITE); + BuildFastLiteral(value_object, site_context); + site_context->ExitScope(current_site, value_object); Add<HStoreKeyed>(object_elements, key_constant, result, kind); } else { HInstruction* value_instruction = @@ -8568,7 +8721,7 @@ void HOptimizedGraphBuilder::GenerateIsSmi(CallRuntime* call) { ASSERT(call->arguments()->length() == 1); CHECK_ALIVE(VisitForValue(call->arguments()->at(0))); HValue* value = Pop(); - HIsSmiAndBranch* result = new(zone()) HIsSmiAndBranch(value); + HIsSmiAndBranch* result = New<HIsSmiAndBranch>(value); return ast_context()->ReturnControl(result, call->id()); } @@ -8578,9 +8731,9 @@ void HOptimizedGraphBuilder::GenerateIsSpecObject(CallRuntime* call) { CHECK_ALIVE(VisitForValue(call->arguments()->at(0))); HValue* value = Pop(); HHasInstanceTypeAndBranch* result = - new(zone()) HHasInstanceTypeAndBranch(value, - FIRST_SPEC_OBJECT_TYPE, - LAST_SPEC_OBJECT_TYPE); + New<HHasInstanceTypeAndBranch>(value, + FIRST_SPEC_OBJECT_TYPE, + LAST_SPEC_OBJECT_TYPE); return ast_context()->ReturnControl(result, call->id()); } @@ -8590,7 +8743,7 @@ void HOptimizedGraphBuilder::GenerateIsFunction(CallRuntime* call) { CHECK_ALIVE(VisitForValue(call->arguments()->at(0))); HValue* value = Pop(); HHasInstanceTypeAndBranch* result = - new(zone()) HHasInstanceTypeAndBranch(value, JS_FUNCTION_TYPE); + New<HHasInstanceTypeAndBranch>(value, JS_FUNCTION_TYPE); return ast_context()->ReturnControl(result, call->id()); } @@ -8600,7 +8753,7 @@ void HOptimizedGraphBuilder::GenerateHasCachedArrayIndex(CallRuntime* call) { CHECK_ALIVE(VisitForValue(call->arguments()->at(0))); HValue* value = Pop(); HHasCachedArrayIndexAndBranch* result = - new(zone()) HHasCachedArrayIndexAndBranch(value); + 
New<HHasCachedArrayIndexAndBranch>(value); return ast_context()->ReturnControl(result, call->id()); } @@ -8610,7 +8763,7 @@ void HOptimizedGraphBuilder::GenerateIsArray(CallRuntime* call) { CHECK_ALIVE(VisitForValue(call->arguments()->at(0))); HValue* value = Pop(); HHasInstanceTypeAndBranch* result = - new(zone()) HHasInstanceTypeAndBranch(value, JS_ARRAY_TYPE); + New<HHasInstanceTypeAndBranch>(value, JS_ARRAY_TYPE); return ast_context()->ReturnControl(result, call->id()); } @@ -8620,7 +8773,7 @@ void HOptimizedGraphBuilder::GenerateIsRegExp(CallRuntime* call) { CHECK_ALIVE(VisitForValue(call->arguments()->at(0))); HValue* value = Pop(); HHasInstanceTypeAndBranch* result = - new(zone()) HHasInstanceTypeAndBranch(value, JS_REGEXP_TYPE); + New<HHasInstanceTypeAndBranch>(value, JS_REGEXP_TYPE); return ast_context()->ReturnControl(result, call->id()); } @@ -8629,7 +8782,7 @@ void HOptimizedGraphBuilder::GenerateIsObject(CallRuntime* call) { ASSERT(call->arguments()->length() == 1); CHECK_ALIVE(VisitForValue(call->arguments()->at(0))); HValue* value = Pop(); - HIsObjectAndBranch* result = new(zone()) HIsObjectAndBranch(value); + HIsObjectAndBranch* result = New<HIsObjectAndBranch>(value); return ast_context()->ReturnControl(result, call->id()); } @@ -8643,8 +8796,7 @@ void HOptimizedGraphBuilder::GenerateIsUndetectableObject(CallRuntime* call) { ASSERT(call->arguments()->length() == 1); CHECK_ALIVE(VisitForValue(call->arguments()->at(0))); HValue* value = Pop(); - HIsUndetectableAndBranch* result = - new(zone()) HIsUndetectableAndBranch(value); + HIsUndetectableAndBranch* result = New<HIsUndetectableAndBranch>(value); return ast_context()->ReturnControl(result, call->id()); } @@ -8665,7 +8817,7 @@ void HOptimizedGraphBuilder::GenerateIsConstructCall(CallRuntime* call) { : graph()->GetConstantFalse(); return ast_context()->ReturnValue(value); } else { - return ast_context()->ReturnControl(new(zone()) HIsConstructCallAndBranch, + return ast_context()->ReturnControl(New<HIsConstructCallAndBranch>(), call->id()); } } @@ -8695,8 +8847,8 @@ void HOptimizedGraphBuilder::GenerateArguments(CallRuntime* call) { HInstruction* elements = Add<HArgumentsElements>(false); HInstruction* length = Add<HArgumentsLength>(elements); HInstruction* checked_index = Add<HBoundsCheck>(index, length); - HAccessArgumentsAt* result = - new(zone()) HAccessArgumentsAt(elements, length, checked_index); + HAccessArgumentsAt* result = New<HAccessArgumentsAt>( + elements, length, checked_index); return ast_context()->ReturnInstruction(result, call->id()); } @@ -8713,7 +8865,7 @@ void HOptimizedGraphBuilder::GenerateValueOf(CallRuntime* call) { ASSERT(call->arguments()->length() == 1); CHECK_ALIVE(VisitForValue(call->arguments()->at(0))); HValue* value = Pop(); - HValueOf* result = new(zone()) HValueOf(value); + HValueOf* result = New<HValueOf>(value); return ast_context()->ReturnInstruction(result, call->id()); } @@ -8724,7 +8876,7 @@ void HOptimizedGraphBuilder::GenerateDateField(CallRuntime* call) { Smi* index = Smi::cast(*(call->arguments()->at(1)->AsLiteral()->value())); CHECK_ALIVE(VisitForValue(call->arguments()->at(0))); HValue* date = Pop(); - HDateField* result = new(zone()) HDateField(date, index); + HDateField* result = New<HDateField>(date, index); return ast_context()->ReturnInstruction(result, call->id()); } @@ -8738,7 +8890,7 @@ void HOptimizedGraphBuilder::GenerateOneByteSeqStringSetChar( HValue* value = Pop(); HValue* index = Pop(); HValue* string = Pop(); - HSeqStringSetChar* result = new(zone()) 
HSeqStringSetChar( + HSeqStringSetChar* result = New<HSeqStringSetChar>( String::ONE_BYTE_ENCODING, string, index, value); return ast_context()->ReturnInstruction(result, call->id()); } @@ -8753,7 +8905,7 @@ void HOptimizedGraphBuilder::GenerateTwoByteSeqStringSetChar( HValue* value = Pop(); HValue* index = Pop(); HValue* string = Pop(); - HSeqStringSetChar* result = new(zone()) HSeqStringSetChar( + HSeqStringSetChar* result = New<HSeqStringSetChar>( String::TWO_BYTE_ENCODING, string, index, value); return ast_context()->ReturnInstruction(result, call->id()); } @@ -8766,31 +8918,28 @@ void HOptimizedGraphBuilder::GenerateSetValueOf(CallRuntime* call) { HValue* value = Pop(); HValue* object = Pop(); // Check if object is a not a smi. - HIsSmiAndBranch* smicheck = new(zone()) HIsSmiAndBranch(object); HBasicBlock* if_smi = graph()->CreateBasicBlock(); HBasicBlock* if_heap_object = graph()->CreateBasicBlock(); HBasicBlock* join = graph()->CreateBasicBlock(); - smicheck->SetSuccessorAt(0, if_smi); - smicheck->SetSuccessorAt(1, if_heap_object); - current_block()->Finish(smicheck); - if_smi->Goto(join); + FinishCurrentBlock(New<HIsSmiAndBranch>(object, if_smi, if_heap_object)); + Goto(if_smi, join); // Check if object is a JSValue. set_current_block(if_heap_object); HHasInstanceTypeAndBranch* typecheck = - new(zone()) HHasInstanceTypeAndBranch(object, JS_VALUE_TYPE); + New<HHasInstanceTypeAndBranch>(object, JS_VALUE_TYPE); HBasicBlock* if_js_value = graph()->CreateBasicBlock(); HBasicBlock* not_js_value = graph()->CreateBasicBlock(); typecheck->SetSuccessorAt(0, if_js_value); typecheck->SetSuccessorAt(1, not_js_value); - current_block()->Finish(typecheck); - not_js_value->Goto(join); + FinishCurrentBlock(typecheck); + Goto(not_js_value, join); // Create in-object property store to kValueOffset. set_current_block(if_js_value); Add<HStoreNamedField>(object, HObjectAccess::ForJSObjectOffset(JSValue::kValueOffset), value); - if_js_value->Goto(join); + Goto(if_js_value, join); join->SetJoinId(call->id()); set_current_block(join); return ast_context()->ReturnValue(value); @@ -8814,7 +8963,7 @@ void HOptimizedGraphBuilder::GenerateStringCharFromCode(CallRuntime* call) { ASSERT(call->arguments()->length() == 1); CHECK_ALIVE(VisitForValue(call->arguments()->at(0))); HValue* char_code = Pop(); - HInstruction* result = New<HStringCharFromCode>(char_code); + HInstruction* result = NewUncasted<HStringCharFromCode>(char_code); return ast_context()->ReturnInstruction(result, call->id()); } @@ -8828,7 +8977,7 @@ void HOptimizedGraphBuilder::GenerateStringCharAt(CallRuntime* call) { HValue* string = Pop(); HInstruction* char_code = BuildStringCharCodeAt(string, index); AddInstruction(char_code); - HInstruction* result = New<HStringCharFromCode>(char_code); + HInstruction* result = NewUncasted<HStringCharFromCode>(char_code); return ast_context()->ReturnInstruction(result, call->id()); } @@ -8855,7 +9004,7 @@ void HOptimizedGraphBuilder::GenerateLog(CallRuntime* call) { // Fast support for Math.random(). 
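The GenerateSetValueOf hunk above shows the control-flow half of the refactor: basic blocks are no longer finished or joined directly, the builder does it so the current source position rides along. Plausible hydrogen.cc bodies matching the declarations this diff adds to hydrogen.h, as a sketch rather than the verbatim implementation:

    void HGraphBuilder::FinishCurrentBlock(HControlInstruction* last) {
      current_block()->Finish(last, position());
    }

    void HGraphBuilder::FinishExitCurrentBlock(HControlInstruction* instruction) {
      current_block()->FinishExit(instruction, position());
    }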
void HOptimizedGraphBuilder::GenerateRandomHeapNumber(CallRuntime* call) { HGlobalObject* global_object = Add<HGlobalObject>(); - HRandom* result = new(zone()) HRandom(global_object); + HRandom* result = New<HRandom>(global_object); return ast_context()->ReturnInstruction(result, call->id()); } @@ -8867,9 +9016,7 @@ void HOptimizedGraphBuilder::GenerateStringAdd(CallRuntime* call) { CHECK_ALIVE(VisitForValue(call->arguments()->at(1))); HValue* right = Pop(); HValue* left = Pop(); - HValue* context = environment()->context(); - HInstruction* result = HStringAdd::New( - zone(), context, left, right, STRING_ADD_CHECK_BOTH); + HInstruction* result = New<HStringAdd>(left, right, STRING_ADD_CHECK_BOTH); return ast_context()->ReturnInstruction(result, call->id()); } @@ -8878,8 +9025,7 @@ void HOptimizedGraphBuilder::GenerateStringAdd(CallRuntime* call) { void HOptimizedGraphBuilder::GenerateSubString(CallRuntime* call) { ASSERT_EQ(3, call->arguments()->length()); CHECK_ALIVE(VisitArgumentList(call->arguments())); - HValue* context = environment()->context(); - HCallStub* result = new(zone()) HCallStub(context, CodeStub::SubString, 3); + HCallStub* result = New<HCallStub>(CodeStub::SubString, 3); Drop(3); return ast_context()->ReturnInstruction(result, call->id()); } @@ -8889,9 +9035,7 @@ void HOptimizedGraphBuilder::GenerateSubString(CallRuntime* call) { void HOptimizedGraphBuilder::GenerateStringCompare(CallRuntime* call) { ASSERT_EQ(2, call->arguments()->length()); CHECK_ALIVE(VisitArgumentList(call->arguments())); - HValue* context = environment()->context(); - HCallStub* result = - new(zone()) HCallStub(context, CodeStub::StringCompare, 2); + HCallStub* result = New<HCallStub>(CodeStub::StringCompare, 2); Drop(2); return ast_context()->ReturnInstruction(result, call->id()); } @@ -8901,8 +9045,7 @@ void HOptimizedGraphBuilder::GenerateStringCompare(CallRuntime* call) { void HOptimizedGraphBuilder::GenerateRegExpExec(CallRuntime* call) { ASSERT_EQ(4, call->arguments()->length()); CHECK_ALIVE(VisitArgumentList(call->arguments())); - HValue* context = environment()->context(); - HCallStub* result = new(zone()) HCallStub(context, CodeStub::RegExpExec, 4); + HCallStub* result = New<HCallStub>(CodeStub::RegExpExec, 4); Drop(4); return ast_context()->ReturnInstruction(result, call->id()); } @@ -8912,9 +9055,7 @@ void HOptimizedGraphBuilder::GenerateRegExpExec(CallRuntime* call) { void HOptimizedGraphBuilder::GenerateRegExpConstructResult(CallRuntime* call) { ASSERT_EQ(3, call->arguments()->length()); CHECK_ALIVE(VisitArgumentList(call->arguments())); - HValue* context = environment()->context(); - HCallStub* result = - new(zone()) HCallStub(context, CodeStub::RegExpConstructResult, 3); + HCallStub* result = New<HCallStub>(CodeStub::RegExpConstructResult, 3); Drop(3); return ast_context()->ReturnInstruction(result, call->id()); } @@ -8929,12 +9070,11 @@ void HOptimizedGraphBuilder::GenerateGetFromCache(CallRuntime* call) { // Fast support for number to string. 
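The GenerateNumberToString hunk below drops the NumberToString stub call and instead inlines BuildNumberToString, which probes the heap's number-string cache before falling back to the runtime. The idea reduced to self-contained C++, as a conceptual model only (V8's cache is a fixed-size array keyed by a hash of the number, not a map):

    #include <string>
    #include <unordered_map>

    std::string NumberToStringCached(double n) {
      static std::unordered_map<double, std::string> cache;
      auto it = cache.find(n);
      if (it != cache.end()) return it->second;  // fast path: cache hit
      std::string s = std::to_string(n);         // slow path (a runtime call in V8)
      cache.emplace(n, s);                       // remember for next time
      return s;
    }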
void HOptimizedGraphBuilder::GenerateNumberToString(CallRuntime* call) { ASSERT_EQ(1, call->arguments()->length()); - CHECK_ALIVE(VisitArgumentList(call->arguments())); - HValue* context = environment()->context(); - HCallStub* result = - new(zone()) HCallStub(context, CodeStub::NumberToString, 1); - Drop(1); - return ast_context()->ReturnInstruction(result, call->id()); + CHECK_ALIVE(VisitForValue(call->arguments()->at(0))); + HValue* number = Pop(); + HValue* result = BuildNumberToString( + number, handle(Type::Number(), isolate())); + return ast_context()->ReturnValue(result); } @@ -8953,25 +9093,25 @@ void HOptimizedGraphBuilder::GenerateCallFunction(CallRuntime* call) { // Branch for function proxies, or other non-functions. HHasInstanceTypeAndBranch* typecheck = - new(zone()) HHasInstanceTypeAndBranch(function, JS_FUNCTION_TYPE); + New<HHasInstanceTypeAndBranch>(function, JS_FUNCTION_TYPE); HBasicBlock* if_jsfunction = graph()->CreateBasicBlock(); HBasicBlock* if_nonfunction = graph()->CreateBasicBlock(); HBasicBlock* join = graph()->CreateBasicBlock(); typecheck->SetSuccessorAt(0, if_jsfunction); typecheck->SetSuccessorAt(1, if_nonfunction); - current_block()->Finish(typecheck); + FinishCurrentBlock(typecheck); set_current_block(if_jsfunction); HInstruction* invoke_result = Add<HInvokeFunction>(function, arg_count); Drop(arg_count); Push(invoke_result); - if_jsfunction->Goto(join); + Goto(if_jsfunction, join); set_current_block(if_nonfunction); HInstruction* call_result = Add<HCallFunction>(function, arg_count); Drop(arg_count); Push(call_result); - if_nonfunction->Goto(join); + Goto(if_nonfunction, join); set_current_block(join); join->SetJoinId(call->id()); @@ -8986,7 +9126,7 @@ void HOptimizedGraphBuilder::GenerateMathPow(CallRuntime* call) { CHECK_ALIVE(VisitForValue(call->arguments()->at(1))); HValue* right = Pop(); HValue* left = Pop(); - HInstruction* result = HPower::New(zone(), context(), left, right); + HInstruction* result = NewUncasted<HPower>(left, right); return ast_context()->ReturnInstruction(result, call->id()); } @@ -8994,9 +9134,7 @@ void HOptimizedGraphBuilder::GenerateMathPow(CallRuntime* call) { void HOptimizedGraphBuilder::GenerateMathSin(CallRuntime* call) { ASSERT_EQ(1, call->arguments()->length()); CHECK_ALIVE(VisitArgumentList(call->arguments())); - HValue* context = environment()->context(); - HCallStub* result = - new(zone()) HCallStub(context, CodeStub::TranscendentalCache, 1); + HCallStub* result = New<HCallStub>(CodeStub::TranscendentalCache, 1); result->set_transcendental_type(TranscendentalCache::SIN); Drop(1); return ast_context()->ReturnInstruction(result, call->id()); @@ -9006,9 +9144,7 @@ void HOptimizedGraphBuilder::GenerateMathSin(CallRuntime* call) { void HOptimizedGraphBuilder::GenerateMathCos(CallRuntime* call) { ASSERT_EQ(1, call->arguments()->length()); CHECK_ALIVE(VisitArgumentList(call->arguments())); - HValue* context = environment()->context(); - HCallStub* result = - new(zone()) HCallStub(context, CodeStub::TranscendentalCache, 1); + HCallStub* result = New<HCallStub>(CodeStub::TranscendentalCache, 1); result->set_transcendental_type(TranscendentalCache::COS); Drop(1); return ast_context()->ReturnInstruction(result, call->id()); @@ -9018,9 +9154,7 @@ void HOptimizedGraphBuilder::GenerateMathCos(CallRuntime* call) { void HOptimizedGraphBuilder::GenerateMathTan(CallRuntime* call) { ASSERT_EQ(1, call->arguments()->length()); CHECK_ALIVE(VisitArgumentList(call->arguments())); - HValue* context = environment()->context(); - HCallStub* 
result = - new(zone()) HCallStub(context, CodeStub::TranscendentalCache, 1); + HCallStub* result = New<HCallStub>(CodeStub::TranscendentalCache, 1); result->set_transcendental_type(TranscendentalCache::TAN); Drop(1); return ast_context()->ReturnInstruction(result, call->id()); @@ -9030,9 +9164,7 @@ void HOptimizedGraphBuilder::GenerateMathTan(CallRuntime* call) { void HOptimizedGraphBuilder::GenerateMathLog(CallRuntime* call) { ASSERT_EQ(1, call->arguments()->length()); CHECK_ALIVE(VisitArgumentList(call->arguments())); - HValue* context = environment()->context(); - HCallStub* result = - new(zone()) HCallStub(context, CodeStub::TranscendentalCache, 1); + HCallStub* result = New<HCallStub>(CodeStub::TranscendentalCache, 1); result->set_transcendental_type(TranscendentalCache::LOG); Drop(1); return ast_context()->ReturnInstruction(result, call->id()); @@ -9043,9 +9175,7 @@ void HOptimizedGraphBuilder::GenerateMathSqrt(CallRuntime* call) { ASSERT(call->arguments()->length() == 1); CHECK_ALIVE(VisitForValue(call->arguments()->at(0))); HValue* value = Pop(); - HValue* context = environment()->context(); - HInstruction* result = - HUnaryMathOperation::New(zone(), context, value, kMathSqrt); + HInstruction* result = New<HUnaryMathOperation>(value, kMathSqrt); return ast_context()->ReturnInstruction(result, call->id()); } @@ -9060,7 +9190,7 @@ void HOptimizedGraphBuilder::GenerateGetCachedArrayIndex(CallRuntime* call) { ASSERT(call->arguments()->length() == 1); CHECK_ALIVE(VisitForValue(call->arguments()->at(0))); HValue* value = Pop(); - HGetCachedArrayIndex* result = new(zone()) HGetCachedArrayIndex(value); + HGetCachedArrayIndex* result = New<HGetCachedArrayIndex>(value); return ast_context()->ReturnInstruction(result, call->id()); } @@ -9083,7 +9213,7 @@ void HOptimizedGraphBuilder::GenerateGeneratorThrow(CallRuntime* call) { void HOptimizedGraphBuilder::GenerateDebugBreakInOptimizedCode( CallRuntime* call) { - AddInstruction(new(zone()) HDebugBreak()); + Add<HDebugBreak>(); return ast_context()->ReturnValue(graph()->GetConstant0()); } diff --git a/deps/v8/src/hydrogen.h b/deps/v8/src/hydrogen.h index c1dafa8b5a..b5046bd001 100644 --- a/deps/v8/src/hydrogen.h +++ b/deps/v8/src/hydrogen.h @@ -30,6 +30,7 @@ #include "v8.h" +#include "accessors.h" #include "allocation.h" #include "ast.h" #include "compiler.h" @@ -109,7 +110,7 @@ class HBasicBlock V8_FINAL : public ZoneObject { bool IsFinished() const { return end_ != NULL; } void AddPhi(HPhi* phi); void RemovePhi(HPhi* phi); - void AddInstruction(HInstruction* instr); + void AddInstruction(HInstruction* instr, int position); bool Dominates(HBasicBlock* other) const; int LoopNestingDepth() const; @@ -132,30 +133,18 @@ class HBasicBlock V8_FINAL : public ZoneObject { void SetJoinId(BailoutId ast_id); - void Finish(HControlInstruction* last); - void FinishExit(HControlInstruction* instruction); - void Goto(HBasicBlock* block, - FunctionState* state = NULL, - bool add_simulate = true); - void GotoNoSimulate(HBasicBlock* block) { - Goto(block, NULL, false); - } - int PredecessorIndexOf(HBasicBlock* predecessor) const; HPhi* AddNewPhi(int merged_index); HSimulate* AddNewSimulate(BailoutId ast_id, + int position, RemovableSimulate removable = FIXED_SIMULATE) { HSimulate* instr = CreateSimulate(ast_id, removable); - AddInstruction(instr); + AddInstruction(instr, position); return instr; } void AssignCommonDominator(HBasicBlock* other); void AssignLoopSuccessorDominators(); - // Add the inlined function exit sequence, adding an HLeaveInlined - // 
instruction and updating the bailout environment. - void AddLeaveInlined(HValue* return_value, FunctionState* state); - // If a target block is tagged as an inline function return, all // predecessors should contain the inlined exit sequence: // @@ -169,8 +158,13 @@ class HBasicBlock V8_FINAL : public ZoneObject { } HBasicBlock* inlined_entry_block() { return inlined_entry_block_; } - bool IsDeoptimizing() const { return is_deoptimizing_; } - void MarkAsDeoptimizing() { is_deoptimizing_ = true; } + bool IsDeoptimizing() const { + return end() != NULL && end()->IsDeoptimize(); + } + + void MarkUnreachable(); + bool IsUnreachable() const { return !is_reachable_; } + bool IsReachable() const { return is_reachable_; } bool IsLoopSuccessorDominator() const { return dominates_loop_successors_; @@ -185,14 +179,30 @@ class HBasicBlock V8_FINAL : public ZoneObject { void Verify(); #endif - private: + protected: friend class HGraphBuilder; + HSimulate* CreateSimulate(BailoutId ast_id, RemovableSimulate removable); + void Finish(HControlInstruction* last, int position); + void FinishExit(HControlInstruction* instruction, int position); + void Goto(HBasicBlock* block, + int position, + FunctionState* state = NULL, + bool add_simulate = true); + void GotoNoSimulate(HBasicBlock* block, int position) { + Goto(block, position, NULL, false); + } + + // Add the inlined function exit sequence, adding an HLeaveInlined + // instruction and updating the bailout environment. + void AddLeaveInlined(HValue* return_value, + FunctionState* state, + int position); + + private: void RegisterPredecessor(HBasicBlock* pred); void AddDominatedBlock(HBasicBlock* block); - HSimulate* CreateSimulate(BailoutId ast_id, RemovableSimulate removable); - int block_id_; HGraph* graph_; ZoneList<HPhi*> phis_; @@ -214,7 +224,7 @@ class HBasicBlock V8_FINAL : public ZoneObject { // For blocks marked as inline return target: the block with HEnterInlined. 
HBasicBlock* inlined_entry_block_; bool is_inline_return_target_ : 1; - bool is_deoptimizing_ : 1; + bool is_reachable_ : 1; bool dominates_loop_successors_ : 1; bool is_osr_entry_ : 1; }; @@ -316,7 +326,7 @@ class HGraph V8_FINAL : public ZoneObject { HBasicBlock* entry_block() const { return entry_block_; } HEnvironment* start_environment() const { return start_environment_; } - void FinalizeUniqueValueIds(); + void FinalizeUniqueness(); bool ProcessArgumentsObject(); void OrderBlocks(); void AssignDominators(); @@ -332,10 +342,7 @@ class HGraph V8_FINAL : public ZoneObject { void CollectPhis(); - void set_undefined_constant(HConstant* constant) { - undefined_constant_.set(constant); - } - HConstant* GetConstantUndefined() const { return undefined_constant_.get(); } + HConstant* GetConstantUndefined(); HConstant* GetConstant0(); HConstant* GetConstant1(); HConstant* GetConstantMinus1(); @@ -405,14 +412,6 @@ class HGraph V8_FINAL : public ZoneObject { use_optimistic_licm_ = value; } - bool has_soft_deoptimize() { - return has_soft_deoptimize_; - } - - void set_has_soft_deoptimize(bool value) { - has_soft_deoptimize_ = value; - } - void MarkRecursive() { is_recursive_ = true; } @@ -458,6 +457,7 @@ class HGraph V8_FINAL : public ZoneObject { bool IsInsideNoSideEffectsScope() { return no_side_effects_scope_count_ > 0; } private: + HConstant* ReinsertConstantIfNecessary(HConstant* constant); HConstant* GetConstant(SetOncePointer<HConstant>* pointer, int32_t integer_value); @@ -477,7 +477,7 @@ class HGraph V8_FINAL : public ZoneObject { ZoneList<HValue*> values_; ZoneList<HPhi*>* phi_list_; ZoneList<HInstruction*>* uint32_instructions_; - SetOncePointer<HConstant> undefined_constant_; + SetOncePointer<HConstant> constant_undefined_; SetOncePointer<HConstant> constant_0_; SetOncePointer<HConstant> constant_1_; SetOncePointer<HConstant> constant_minus1_; @@ -495,7 +495,6 @@ class HGraph V8_FINAL : public ZoneObject { bool is_recursive_; bool use_optimistic_licm_; - bool has_soft_deoptimize_; bool depends_on_empty_array_proto_elements_; int type_change_checksum_; int maximum_environment_size_; @@ -941,26 +940,26 @@ class FunctionState V8_FINAL { class HIfContinuation V8_FINAL { public: - HIfContinuation() { continuation_captured_ = false; } + HIfContinuation() : continuation_captured_(false) {} + HIfContinuation(HBasicBlock* true_branch, + HBasicBlock* false_branch) + : continuation_captured_(true), true_branch_(true_branch), + false_branch_(false_branch) {} ~HIfContinuation() { ASSERT(!continuation_captured_); } void Capture(HBasicBlock* true_branch, - HBasicBlock* false_branch, - int position) { + HBasicBlock* false_branch) { ASSERT(!continuation_captured_); true_branch_ = true_branch; false_branch_ = false_branch; - position_ = position; continuation_captured_ = true; } void Continue(HBasicBlock** true_branch, - HBasicBlock** false_branch, - int* position) { + HBasicBlock** false_branch) { ASSERT(continuation_captured_); *true_branch = true_branch_; *false_branch = false_branch_; - if (position != NULL) *position = position_; continuation_captured_ = false; } @@ -970,10 +969,13 @@ class HIfContinuation V8_FINAL { return IsTrueReachable() || IsFalseReachable(); } + HBasicBlock* true_branch() const { return true_branch_; } + HBasicBlock* false_branch() const { return false_branch_; } + + private: bool continuation_captured_; HBasicBlock* true_branch_; HBasicBlock* false_branch_; - int position_; }; @@ -982,7 +984,8 @@ class HGraphBuilder { explicit HGraphBuilder(CompilationInfo* info) : 
info_(info), graph_(NULL), - current_block_(NULL) {} + current_block_(NULL), + position_(RelocInfo::kNoPosition) {} virtual ~HGraphBuilder() {} HBasicBlock* current_block() const { return current_block_; } @@ -1005,6 +1008,34 @@ class HGraphBuilder { // Adding instructions. HInstruction* AddInstruction(HInstruction* instr); + void FinishCurrentBlock(HControlInstruction* last); + void FinishExitCurrentBlock(HControlInstruction* instruction); + + void Goto(HBasicBlock* from, + HBasicBlock* target, + FunctionState* state = NULL, + bool add_simulate = true) { + from->Goto(target, position_, state, add_simulate); + } + void Goto(HBasicBlock* target, + FunctionState* state = NULL, + bool add_simulate = true) { + Goto(current_block(), target, state, add_simulate); + } + void GotoNoSimulate(HBasicBlock* from, HBasicBlock* target) { + Goto(from, target, NULL, false); + } + void GotoNoSimulate(HBasicBlock* target) { + Goto(target, NULL, false); + } + void AddLeaveInlined(HBasicBlock* block, + HValue* return_value, + FunctionState* state) { + block->AddLeaveInlined(return_value, state, position_); + } + void AddLeaveInlined(HValue* return_value, FunctionState* state) { + return AddLeaveInlined(current_block(), return_value, state); + } template<class I> HInstruction* NewUncasted() { return I::New(zone(), context()); } @@ -1199,6 +1230,8 @@ class HGraphBuilder { void AddSimulate(BailoutId id, RemovableSimulate removable = FIXED_SIMULATE); + int position() const { return position_; } + protected: virtual bool BuildGraph() = 0; @@ -1228,6 +1261,8 @@ class HGraphBuilder { ElementsKind to_kind, bool is_jsarray); + HValue* BuildNumberToString(HValue* object, Handle<Type> type); + HInstruction* BuildUncheckedMonomorphicElementAccess( HValue* checked_object, HValue* key, @@ -1238,46 +1273,50 @@ class HGraphBuilder { LoadKeyedHoleMode load_mode, KeyedAccessStoreMode store_mode); - HInstruction* AddExternalArrayElementAccess( - HValue* external_elements, - HValue* checked_key, - HValue* val, - HValue* dependency, - ElementsKind elements_kind, - bool is_store); - - HInstruction* AddFastElementAccess( + HInstruction* AddElementAccess( HValue* elements, HValue* checked_key, HValue* val, HValue* dependency, ElementsKind elements_kind, bool is_store, - LoadKeyedHoleMode load_mode, - KeyedAccessStoreMode store_mode); + LoadKeyedHoleMode load_mode = NEVER_RETURN_HOLE); HLoadNamedField* BuildLoadNamedField(HValue* object, HObjectAccess access); + HInstruction* AddLoadNamedField(HValue* object, HObjectAccess access); HInstruction* BuildLoadStringLength(HValue* object, HValue* checked_value); HStoreNamedField* AddStoreMapConstant(HValue* object, Handle<Map>); HLoadNamedField* AddLoadElements(HValue* object); + + bool MatchRotateRight(HValue* left, + HValue* right, + HValue** operand, + HValue** shift_amount); + + HInstruction* BuildBinaryOperation(Token::Value op, + HValue* left, + HValue* right, + Handle<Type> left_type, + Handle<Type> right_type, + Handle<Type> result_type, + Maybe<int> fixed_right_arg, + bool binop_stub = false); + HLoadNamedField* AddLoadFixedArrayLength(HValue *object); HValue* AddLoadJSBuiltin(Builtins::JavaScript builtin); + HValue* EnforceNumberType(HValue* number, Handle<Type> expected); HValue* TruncateToNumber(HValue* value, Handle<Type>* expected); - void PushAndAdd(HInstruction* instr); - void FinishExitWithHardDeoptimization(const char* reason, HBasicBlock* continuation); - void AddIncrementCounter(StatsCounter* counter, - HValue* context); + void AddIncrementCounter(StatsCounter* 
counter); class IfBuilder V8_FINAL { public: - explicit IfBuilder(HGraphBuilder* builder, - int position = RelocInfo::kNoPosition); + explicit IfBuilder(HGraphBuilder* builder); IfBuilder(HGraphBuilder* builder, HIfContinuation* continuation); @@ -1286,80 +1325,79 @@ class HGraphBuilder { } template<class Condition> - HInstruction* If(HValue *p) { - HControlInstruction* compare = new(zone()) Condition(p); + Condition* If(HValue *p) { + Condition* compare = builder()->New<Condition>(p); AddCompare(compare); return compare; } template<class Condition, class P2> - HInstruction* If(HValue* p1, P2 p2) { - HControlInstruction* compare = new(zone()) Condition(p1, p2); + Condition* If(HValue* p1, P2 p2) { + Condition* compare = builder()->New<Condition>(p1, p2); AddCompare(compare); return compare; } template<class Condition, class P2, class P3> - HInstruction* If(HValue* p1, P2 p2, P3 p3) { - HControlInstruction* compare = new(zone()) Condition(p1, p2, p3); + Condition* If(HValue* p1, P2 p2, P3 p3) { + Condition* compare = builder()->New<Condition>(p1, p2, p3); AddCompare(compare); return compare; } + template<class Condition> + Condition* IfNot(HValue* p) { + Condition* compare = If<Condition>(p); + compare->Not(); + return compare; + } + template<class Condition, class P2> - HInstruction* IfNot(HValue* p1, P2 p2) { - HControlInstruction* compare = new(zone()) Condition(p1, p2); - AddCompare(compare); - HBasicBlock* block0 = compare->SuccessorAt(0); - HBasicBlock* block1 = compare->SuccessorAt(1); - compare->SetSuccessorAt(0, block1); - compare->SetSuccessorAt(1, block0); + Condition* IfNot(HValue* p1, P2 p2) { + Condition* compare = If<Condition>(p1, p2); + compare->Not(); return compare; } template<class Condition, class P2, class P3> - HInstruction* IfNot(HValue* p1, P2 p2, P3 p3) { - HControlInstruction* compare = new(zone()) Condition(p1, p2, p3); - AddCompare(compare); - HBasicBlock* block0 = compare->SuccessorAt(0); - HBasicBlock* block1 = compare->SuccessorAt(1); - compare->SetSuccessorAt(0, block1); - compare->SetSuccessorAt(1, block0); + Condition* IfNot(HValue* p1, P2 p2, P3 p3) { + Condition* compare = If<Condition>(p1, p2, p3); + compare->Not(); return compare; } template<class Condition> - HInstruction* OrIf(HValue *p) { + Condition* OrIf(HValue *p) { Or(); return If<Condition>(p); } template<class Condition, class P2> - HInstruction* OrIf(HValue* p1, P2 p2) { + Condition* OrIf(HValue* p1, P2 p2) { Or(); return If<Condition>(p1, p2); } template<class Condition, class P2, class P3> - HInstruction* OrIf(HValue* p1, P2 p2, P3 p3) { + Condition* OrIf(HValue* p1, P2 p2, P3 p3) { Or(); return If<Condition>(p1, p2, p3); } template<class Condition> - HInstruction* AndIf(HValue *p) { + Condition* AndIf(HValue *p) { And(); return If<Condition>(p); } template<class Condition, class P2> - HInstruction* AndIf(HValue* p1, P2 p2) { + Condition* AndIf(HValue* p1, P2 p2) { And(); return If<Condition>(p1, p2); } template<class Condition, class P2, class P3> - HInstruction* AndIf(HValue* p1, P2 p2, P3 p3) { + Condition* AndIf(HValue* p1, P2 p2, P3 p3) { And(); return If<Condition>(p1, p2, p3); } @@ -1367,8 +1405,50 @@ class HGraphBuilder { void Or(); void And(); + // Captures the current state of this IfBuilder in the specified + // continuation and ends this IfBuilder. void CaptureContinuation(HIfContinuation* continuation); + // Joins the specified continuation from this IfBuilder and ends this + // IfBuilder. 
+ // Joins the specified continuation from this IfBuilder and ends this + // IfBuilder. This appends a Goto instruction from the true branch of + // this IfBuilder to the true branch of the continuation unless the + // true branch of this IfBuilder is already finished. And vice versa + // for the false branch. + // + // The basic idea is as follows: You have several nested IfBuilders + // that you want to join based on two possible outcomes (e.g. success + // and failure). You can do this easily using this method, for example: + // + // HIfContinuation cont(graph()->CreateBasicBlock(), + // graph()->CreateBasicBlock()); + // ... + // IfBuilder if_whatever(this); + // if_whatever.If<Condition>(arg); + // if_whatever.Then(); + // ... + // if_whatever.Else(); + // ... + // if_whatever.JoinContinuation(&cont); + // ... + // IfBuilder if_something(this); + // if_something.If<Condition>(arg1, arg2); + // if_something.Then(); + // ... + // if_something.Else(); + // ... + // if_something.JoinContinuation(&cont); + // ... + // IfBuilder if_finally(this, &cont); + // if_finally.Then(); + // // continues after the then-code of if_whatever or if_something. + // ... + // if_finally.Else(); + // // continues after the else-code of if_whatever or if_something. + // ... + // if_finally.End(); + void JoinContinuation(HIfContinuation* continuation); + void Then(); void Else(); void End(); @@ -1382,12 +1462,11 @@ class HGraphBuilder { void Return(HValue* value); private: - void AddCompare(HControlInstruction* compare); + HControlInstruction* AddCompare(HControlInstruction* compare); - Zone* zone() { return builder_->zone(); } + HGraphBuilder* builder() const { return builder_; } HGraphBuilder* builder_; - int position_; bool finished_ : 1; bool deopt_then_ : 1; bool deopt_else_ : 1; @@ -1548,7 +1627,6 @@ class HGraphBuilder { void BuildCompareNil( HValue* value, Handle<Type> type, - int position, HIfContinuation* continuation); HValue* BuildCreateAllocationMemento(HValue* previous_object, @@ -1563,6 +1641,12 @@ class HGraphBuilder { HInstruction* BuildGetNativeContext(); HInstruction* BuildGetArrayFunction(); + protected: + void SetSourcePosition(int position) { + ASSERT(position != RelocInfo::kNoPosition); + position_ = position; + } + private: HGraphBuilder(); @@ -1572,6 +1656,7 @@ class HGraphBuilder { CompilationInfo* info_; HGraph* graph_; HBasicBlock* current_block_; + int position_; }; @@ -1583,13 +1668,14 @@ inline HInstruction* HGraphBuilder::AddUncasted<HDeoptimize>( if (FLAG_always_opt) return NULL; } if (current_block()->IsDeoptimizing()) return NULL; - HDeoptimize* instr = New<HDeoptimize>(reason, type); - AddInstruction(instr); + HBasicBlock* after_deopt_block = CreateBasicBlock( + current_block()->last_environment()); + HDeoptimize* instr = New<HDeoptimize>(reason, type, after_deopt_block); if (type == Deoptimizer::SOFT) { isolate()->counters()->soft_deopts_inserted()->Increment(); - graph()->set_has_soft_deoptimize(true); } - current_block()->MarkAsDeoptimizing(); + FinishCurrentBlock(instr); + set_current_block(after_deopt_block); return instr; } @@ -1622,7 +1708,7 @@ inline HInstruction* HGraphBuilder::AddUncasted<HReturn>(HValue* value) { int num_parameters = graph()->info()->num_parameters(); HValue* params = AddUncasted<HConstant>(num_parameters); HReturn* return_instruction = New<HReturn>(value, params); - current_block()->FinishExit(return_instruction); + FinishExitCurrentBlock(return_instruction); return return_instruction; }
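The position_ field introduced above lets the builder stamp its current source position on everything it emits, instead of threading an int position parameter through IfBuilder, Goto, AddLeaveInlined and BuildCompareNil. A toy model of that plumbing (illustrative types only, not V8's):

    #include <cassert>
    #include <vector>

    struct ToyBuilder {
      static const int kNoPosition = -1;
      int position_;
      std::vector<int> emitted;  // positions recorded on emitted gotos

      ToyBuilder() : position_(kNoPosition) {}
      void SetSourcePosition(int position) {
        assert(position != kNoPosition);
        position_ = position;
      }
      // Like the new HGraphBuilder::Goto: callers no longer pass a position;
      // the builder supplies the one it is currently visiting.
      void Goto(int /*target*/) { emitted.push_back(position_); }
    };

    int main() {
      ToyBuilder b;
      b.SetSourcePosition(42);
      b.Goto(0);
      assert(b.emitted.back() == 42);
      return 0;
    }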
@@ -1634,13 +1720,29 @@ inline HInstruction* HGraphBuilder::AddUncasted<HReturn>(HConstant* value) { template<> +inline HInstruction* HGraphBuilder::AddUncasted<HCallRuntime>( + Handle<String> name, + const Runtime::Function* c_function, + int argument_count) { + HCallRuntime* instr = New<HCallRuntime>(name, c_function, argument_count); + if (graph()->info()->IsStub()) { + // When compiling code stubs, we don't want to save all double registers + // upon entry to the stub, but instead have the call runtime instruction + // save the double registers only on-demand (in the fallback case). + instr->set_save_doubles(kSaveFPRegs); + } + AddInstruction(instr); + return instr; +} + + +template<> inline HInstruction* HGraphBuilder::NewUncasted<HContext>() { return HContext::New(zone()); } -class HOptimizedGraphBuilder V8_FINAL - : public HGraphBuilder, public AstVisitor { +class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor { public: // A class encapsulating (lazily-allocated) break and continue blocks for // a breakable statement. Separated from BreakAndContinueScope so that it @@ -1707,6 +1809,8 @@ class HOptimizedGraphBuilder V8_FINAL HValue* context() { return environment()->context(); } + HOsrBuilder* osr() const { return osr_; } + void Bailout(BailoutReason reason); HBasicBlock* CreateJoin(HBasicBlock* first, @@ -1725,7 +1829,7 @@ class HOptimizedGraphBuilder V8_FINAL DEFINE_AST_VISITOR_SUBCLASS_MEMBERS(); - private: + protected: // Type of a member function that generates inline code for a native function. typedef void (HOptimizedGraphBuilder::*InlineFunctionGenerator) (CallRuntime* call); @@ -1812,6 +1916,12 @@ class HOptimizedGraphBuilder V8_FINAL HBasicBlock* loop_successor, HBasicBlock* break_block); + // Builds a loop entry. + HBasicBlock* BuildLoopEntry(); + + // Builds a loop entry that respects OSR requirements. + HBasicBlock* BuildLoopEntry(IterationStatement* statement); + HBasicBlock* JoinContinue(IterationStatement* statement, HBasicBlock* exit_block, HBasicBlock* continue_block); @@ -1837,21 +1947,22 @@ class HOptimizedGraphBuilder V8_FINAL env->Bind(index, value); if (IsEligibleForEnvironmentLivenessAnalysis(var, index, value, env)) { HEnvironmentMarker* bind = - new(zone()) HEnvironmentMarker(HEnvironmentMarker::BIND, index); - AddInstruction(bind); + Add<HEnvironmentMarker>(HEnvironmentMarker::BIND, index); + USE(bind); #ifdef DEBUG bind->set_closure(env->closure()); #endif } } + HValue* LookupAndMakeLive(Variable* var) { HEnvironment* env = environment(); int index = env->IndexFor(var); HValue* value = env->Lookup(index); if (IsEligibleForEnvironmentLivenessAnalysis(var, index, value, env)) { HEnvironmentMarker* lookup = - new(zone()) HEnvironmentMarker(HEnvironmentMarker::LOOKUP, index); - AddInstruction(lookup); + Add<HEnvironmentMarker>(HEnvironmentMarker::LOOKUP, index); + USE(lookup); #ifdef DEBUG lookup->set_closure(env->closure()); #endif @@ -1889,6 +2000,7 @@ class HOptimizedGraphBuilder V8_FINAL AST_NODE_LIST(DECLARE_VISIT) #undef DECLARE_VISIT
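BindIfLive and LookupAndMakeLive above keep the marker in a named variable only for the #ifdef DEBUG block; the USE(bind) / USE(lookup) calls exist so that release builds, where that block compiles away, do not warn about an unused variable. The idiom in isolation (V8 keeps USE in globals.h; this sketch only assumes it is a no-op consumer):

    #include <cassert>

    // A no-op that "consumes" a value, silencing -Wunused-variable when
    // the only real use of the variable is compiled out.
    template <typename T>
    static inline void USE(T) {}

    int main() {
      int marker = 1;
      USE(marker);  // keeps release builds warning-free
    #ifdef DEBUG
      assert(marker == 1);  // the debug-only real use
    #endif
      return 0;
    }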
+ private: // Helpers for flow graph construction. enum GlobalPropertyAccess { kUseCell, @@ -1940,27 +2052,113 @@ class HOptimizedGraphBuilder V8_FINAL void HandleGlobalVariableAssignment(Variable* var, HValue* value, - int position, BailoutId ast_id); void HandlePropertyAssignment(Assignment* expr); void HandleCompoundAssignment(Assignment* expr); - void HandlePolymorphicLoadNamedField(int position, + void HandlePolymorphicLoadNamedField(BailoutId ast_id, BailoutId return_id, HValue* object, SmallMapList* types, Handle<String> name); - HInstruction* TryLoadPolymorphicAsMonomorphic(HValue* object, - SmallMapList* types, - Handle<String> name); - void HandlePolymorphicStoreNamedField(int position, - BailoutId assignment_id, + + class PropertyAccessInfo { + public: + PropertyAccessInfo(Isolate* isolate, Handle<Map> map, Handle<String> name) + : lookup_(isolate), + map_(map), + name_(name), + access_(HObjectAccess::ForMap()) { } + + // Checks whether this PropertyAccessInfo can be handled as a monomorphic + // named load. It additionally fills in the fields necessary to generate + // the lookup code. + bool CanLoadMonomorphic(); + + // Checks whether all types behave uniformly when loading name. If all maps + // behave the same, a single monomorphic load instruction can be emitted, + // guarded by a single map-checks instruction that checks whether the + // receiver is an instance of any of the types. + // This method skips the first type in types, assuming that this + // PropertyAccessInfo is built for types->first(). + bool CanLoadAsMonomorphic(SmallMapList* types); + + bool IsJSObjectFieldAccessor() { + int offset; // unused + return Accessors::IsJSObjectFieldAccessor(map_, name_, &offset); + } + + bool GetJSObjectFieldAccess(HObjectAccess* access) { + if (IsStringLength()) { + *access = HObjectAccess::ForStringLength(); + return true; + } else if (IsArrayLength()) { + *access = HObjectAccess::ForArrayLength(map_->elements_kind()); + return true; + } else { + int offset; + if (Accessors::IsJSObjectFieldAccessor(map_, name_, &offset)) { + *access = HObjectAccess::ForJSObjectOffset(offset); + return true; + } + return false; + } + } + + bool has_holder() { return !holder_.is_null(); } + + LookupResult* lookup() { return &lookup_; } + Handle<Map> map() { return map_; } + Handle<JSObject> holder() { return holder_; } + Handle<JSFunction> accessor() { return accessor_; } + Handle<Object> constant() { return constant_; } + HObjectAccess access() { return access_; } + + private: + Isolate* isolate() { return lookup_.isolate(); } + + bool IsStringLength() { + return map_->instance_type() < FIRST_NONSTRING_TYPE && + name_->Equals(isolate()->heap()->length_string()); + } + + bool IsArrayLength() { + return map_->instance_type() == JS_ARRAY_TYPE && + name_->Equals(isolate()->heap()->length_string()); + } + + bool LoadResult(Handle<Map> map); + bool LookupDescriptor(); + bool LookupInPrototypes(); + bool IsCompatibleForLoad(PropertyAccessInfo* other); + + void GeneralizeRepresentation(Representation r) { + access_ = access_.WithRepresentation( + access_.representation().generalize(r)); + } + + LookupResult lookup_; + Handle<Map> map_; + Handle<String> name_; + Handle<JSObject> holder_; + Handle<JSFunction> accessor_; + Handle<Object> constant_; + HObjectAccess access_; + }; + + HInstruction* BuildLoadMonomorphic(PropertyAccessInfo* info, + HValue* object, + HInstruction* checked_object, + BailoutId ast_id, + BailoutId return_id, + bool can_inline_accessor = true); + + void HandlePolymorphicStoreNamedField(BailoutId assignment_id, HValue* object, HValue*
value, SmallMapList* types, Handle<String> name); - bool TryStorePolymorphicAsMonomorphic(int position, - BailoutId assignment_id, + bool TryStorePolymorphicAsMonomorphic(BailoutId assignment_id, HValue* object, HValue* value, SmallMapList* types, @@ -2009,8 +2207,6 @@ class HOptimizedGraphBuilder V8_FINAL HValue* key, HValue* val, SmallMapList* maps, - BailoutId ast_id, - int position, bool is_store, KeyedAccessStoreMode store_mode, bool* has_side_effects); @@ -2019,31 +2215,20 @@ class HOptimizedGraphBuilder V8_FINAL HValue* key, HValue* val, Expression* expr, - BailoutId ast_id, - int position, bool is_store, bool* has_side_effects); HInstruction* BuildLoadNamedGeneric(HValue* object, Handle<String> name, Property* expr); - HInstruction* BuildCallGetter(HValue* object, - Handle<Map> map, - Handle<JSFunction> getter, - Handle<JSObject> holder); - HInstruction* BuildLoadNamedMonomorphic(HValue* object, - Handle<String> name, - Handle<Map> map); HCheckMaps* AddCheckMap(HValue* object, Handle<Map> map); void BuildLoad(Property* property, - int position, BailoutId ast_id); void PushLoad(Property* property, HValue* object, - HValue* key, - int position); + HValue* key); void BuildStoreForEffect(Expression* expression, Property* prop, @@ -2080,8 +2265,7 @@ class HOptimizedGraphBuilder V8_FINAL HInstruction* BuildThisFunction(); HInstruction* BuildFastLiteral(Handle<JSObject> boilerplate_object, - Handle<Object> allocation_site, - AllocationSiteMode mode); + AllocationSiteContext* site_context); void BuildEmitObjectHeader(Handle<JSObject> boilerplate_object, HInstruction* object); @@ -2091,11 +2275,13 @@ class HOptimizedGraphBuilder V8_FINAL HInstruction* object_elements); void BuildEmitInObjectProperties(Handle<JSObject> boilerplate_object, - HInstruction* object); + HInstruction* object, + AllocationSiteContext* site_context); void BuildEmitElements(Handle<JSObject> boilerplate_object, Handle<FixedArrayBase> elements, - HValue* object_elements); + HValue* object_elements, + AllocationSiteContext* site_context); void BuildEmitFixedDoubleArray(Handle<FixedArrayBase> elements, ElementsKind kind, @@ -2103,7 +2289,8 @@ class HOptimizedGraphBuilder V8_FINAL void BuildEmitFixedArray(Handle<FixedArrayBase> elements, ElementsKind kind, - HValue* object_elements); + HValue* object_elements, + AllocationSiteContext* site_context); void AddCheckPrototypeMaps(Handle<JSObject> holder, Handle<Map> receiver_map); @@ -2112,11 +2299,6 @@ class HOptimizedGraphBuilder V8_FINAL HValue* receiver, Handle<Map> receiver_map); - bool MatchRotateRight(HValue* left, - HValue* right, - HValue** operand, - HValue** shift_amount); - // The translation state of the currently-being-translated function. 
FunctionState* function_state_; diff --git a/deps/v8/src/i18n.cc b/deps/v8/src/i18n.cc index 0ae19c8232..dbff6e5f52 100644 --- a/deps/v8/src/i18n.cc +++ b/deps/v8/src/i18n.cc @@ -464,7 +464,7 @@ void SetResolvedNumberSettings(Isolate* isolate, Handle<String> key = isolate->factory()->NewStringFromAscii( CStrVector("minimumSignificantDigits")); - if (resolved->HasLocalProperty(*key)) { + if (JSReceiver::HasLocalProperty(resolved, key)) { JSObject::SetProperty( resolved, isolate->factory()->NewStringFromAscii( @@ -477,7 +477,7 @@ void SetResolvedNumberSettings(Isolate* isolate, key = isolate->factory()->NewStringFromAscii( CStrVector("maximumSignificantDigits")); - if (resolved->HasLocalProperty(*key)) { + if (JSReceiver::HasLocalProperty(resolved, key)) { JSObject::SetProperty( resolved, isolate->factory()->NewStringFromAscii( @@ -855,7 +855,7 @@ icu::SimpleDateFormat* DateFormat::UnpackDateFormat( Handle<JSObject> obj) { Handle<String> key = isolate->factory()->NewStringFromAscii(CStrVector("dateFormat")); - if (obj->HasLocalProperty(*key)) { + if (JSReceiver::HasLocalProperty(obj, key)) { return reinterpret_cast<icu::SimpleDateFormat*>( obj->GetInternalField(0)); } @@ -920,7 +920,7 @@ icu::DecimalFormat* NumberFormat::UnpackNumberFormat( Handle<JSObject> obj) { Handle<String> key = isolate->factory()->NewStringFromAscii(CStrVector("numberFormat")); - if (obj->HasLocalProperty(*key)) { + if (JSReceiver::HasLocalProperty(obj, key)) { return reinterpret_cast<icu::DecimalFormat*>(obj->GetInternalField(0)); } @@ -981,7 +981,7 @@ icu::Collator* Collator::UnpackCollator(Isolate* isolate, Handle<JSObject> obj) { Handle<String> key = isolate->factory()->NewStringFromAscii(CStrVector("collator")); - if (obj->HasLocalProperty(*key)) { + if (JSReceiver::HasLocalProperty(obj, key)) { return reinterpret_cast<icu::Collator*>(obj->GetInternalField(0)); } @@ -1045,7 +1045,7 @@ icu::BreakIterator* BreakIterator::UnpackBreakIterator(Isolate* isolate, Handle<JSObject> obj) { Handle<String> key = isolate->factory()->NewStringFromAscii(CStrVector("breakIterator")); - if (obj->HasLocalProperty(*key)) { + if (JSReceiver::HasLocalProperty(obj, key)) { return reinterpret_cast<icu::BreakIterator*>(obj->GetInternalField(0)); } diff --git a/deps/v8/src/i18n.js b/deps/v8/src/i18n.js index 1798bbba7a..a64c7e6784 100644 --- a/deps/v8/src/i18n.js +++ b/deps/v8/src/i18n.js @@ -258,8 +258,8 @@ function addBoundMethod(obj, methodName, implementation, length) { // DateTimeFormat.format needs to be 0 arg method, but can still // receive optional dateValue param. If one was provided, pass it // along. - if (arguments.length > 0) { - return implementation(that, arguments[0]); + if (%_ArgumentsLength() > 0) { + return implementation(that, %_Arguments(0)); } else { return implementation(that); } @@ -290,7 +290,7 @@ function addBoundMethod(obj, methodName, implementation, length) { * Parameter locales is treated as a priority list. */ function supportedLocalesOf(service, locales, options) { - if (service.match(GetServiceRE()) === null) { + if (IS_NULL(service.match(GetServiceRE()))) { throw new $Error('Internal error, wrong service type: ' + service); } @@ -447,7 +447,7 @@ function resolveLocale(service, requestedLocales, options) { * lookup algorithm.
*/ function lookupMatcher(service, requestedLocales) { - if (service.match(GetServiceRE()) === null) { + if (IS_NULL(service.match(GetServiceRE()))) { throw new $Error('Internal error, wrong service type: ' + service); } @@ -463,7 +463,7 @@ function lookupMatcher(service, requestedLocales) { if (AVAILABLE_LOCALES[service][locale] !== undefined) { // Return the resolved locale and extension. var extensionMatch = requestedLocales[i].match(GetUnicodeExtensionRE()); - var extension = (extensionMatch === null) ? '' : extensionMatch[0]; + var extension = IS_NULL(extensionMatch) ? '' : extensionMatch[0]; return {'locale': locale, 'extension': extension, 'position': i}; } // Truncate locale if possible. @@ -535,7 +535,7 @@ function parseExtension(extension) { * Converts parameter to an Object if possible. */ function toObject(value) { - if (value === undefined || value === null) { + if (IS_NULL_OR_UNDEFINED(value)) { throw new $TypeError('Value cannot be converted to an Object.'); } @@ -733,7 +733,7 @@ function toTitleCaseWord(word) { function canonicalizeLanguageTag(localeID) { // null is typeof 'object' so we have to do an extra check. if (typeof localeID !== 'string' && typeof localeID !== 'object' || - localeID === null) { + IS_NULL(localeID)) { throw new $TypeError('Language ID should be string or object.'); } @@ -978,8 +978,8 @@ function initializeCollator(collator, locales, options) { * @constructor */ %SetProperty(Intl, 'Collator', function() { - var locales = arguments[0]; - var options = arguments[1]; + var locales = %_Arguments(0); + var options = %_Arguments(1); if (!this || this === Intl) { // Constructor is called as a function. @@ -1038,7 +1038,7 @@ function initializeCollator(collator, locales, options) { throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR); } - return supportedLocalesOf('collator', locales, arguments[1]); + return supportedLocalesOf('collator', locales, %_Arguments(1)); }, DONT_ENUM ); @@ -1207,8 +1207,8 @@ function initializeNumberFormat(numberFormat, locales, options) { * @constructor */ %SetProperty(Intl, 'NumberFormat', function() { - var locales = arguments[0]; - var options = arguments[1]; + var locales = %_Arguments(0); + var options = %_Arguments(1); if (!this || this === Intl) { // Constructor is called as a function.
@@ -1286,7 +1286,7 @@ function initializeNumberFormat(numberFormat, locales, options) { throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR); } - return supportedLocalesOf('numberformat', locales, arguments[1]); + return supportedLocalesOf('numberformat', locales, %_Arguments(1)); }, DONT_ENUM ); @@ -1367,7 +1367,7 @@ function toLDMLString(options) { ldmlString += appendToLDMLString(option, {'2-digit': 'ss', 'numeric': 's'}); option = getOption('timeZoneName', 'string', ['short', 'long']); - ldmlString += appendToLDMLString(option, {short: 'v', long: 'vv'}); + ldmlString += appendToLDMLString(option, {short: 'z', long: 'zzzz'}); return ldmlString; } @@ -1440,16 +1440,16 @@ function fromLDMLString(ldmlString) { options = appendToDateTimeObject( options, 'second', match, {s: 'numeric', ss: '2-digit'}); - match = ldmlString.match(/v{1,2}/g); + match = ldmlString.match(/z|zzzz/g); options = appendToDateTimeObject( - options, 'timeZoneName', match, {v: 'short', vv: 'long'}); + options, 'timeZoneName', match, {z: 'short', zzzz: 'long'}); return options; } function appendToDateTimeObject(options, option, match, pairs) { - if (match === null) { + if (IS_NULL(match)) { if (!options.hasOwnProperty(option)) { defineWEProperty(options, option, undefined); } @@ -1606,8 +1606,8 @@ function initializeDateTimeFormat(dateFormat, locales, options) { * @constructor */ %SetProperty(Intl, 'DateTimeFormat', function() { - var locales = arguments[0]; - var options = arguments[1]; + var locales = %_Arguments(0); + var options = %_Arguments(1); if (!this || this === Intl) { // Constructor is called as a function. @@ -1685,7 +1685,7 @@ function initializeDateTimeFormat(dateFormat, locales, options) { throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR); } - return supportedLocalesOf('dateformat', locales, arguments[1]); + return supportedLocalesOf('dateformat', locales, %_Arguments(1)); }, DONT_ENUM ); @@ -1751,7 +1751,7 @@ function canonicalizeTimeZoneID(tzID) { // We expect only _ and / besides ASCII letters. // All inputs should conform to Area/Location from now on. var match = GetTimezoneNameCheckRE().exec(tzID); - if (match === null) { + if (IS_NULL(match)) { throw new $RangeError('Expected Area/Location for time zone, got ' + tzID); } @@ -1812,8 +1812,8 @@ function initializeBreakIterator(iterator, locales, options) { * @constructor */ %SetProperty(Intl, 'v8BreakIterator', function() { - var locales = arguments[0]; - var options = arguments[1]; + var locales = %_Arguments(0); + var options = %_Arguments(1); if (!this || this === Intl) { // Constructor is called as a function.
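The toLDMLString/fromLDMLString hunk above switches the timeZoneName pattern letters from 'v'/'vv' (generic zone names) to 'z'/'zzzz' (short and long specific names such as "PST" / "Pacific Standard Time"), which is how ICU's date formatter distinguishes the two. A minimal sketch of the effect, assuming ICU4C headers and library are available (only the pattern letters come from the hunk):

    #include <unicode/calendar.h>
    #include <unicode/smpdtfmt.h>
    #include <iostream>
    #include <string>

    int main() {
      UErrorCode status = U_ZERO_ERROR;
      // 'zzzz' yields the long specific zone name; the old 'vv' pattern
      // asked for a generic name instead.
      icu::SimpleDateFormat fmt(icu::UnicodeString("HH:mm zzzz"), status);
      if (U_FAILURE(status)) return 1;
      icu::UnicodeString result;
      fmt.format(icu::Calendar::getNow(), result);
      std::string utf8;
      result.toUTF8String(utf8);
      std::cout << utf8 << std::endl;  // e.g. "14:05 Pacific Standard Time"
      return 0;
    }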
@@ -1868,7 +1868,7 @@ function initializeBreakIterator(iterator, locales, options) { throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR); } - return supportedLocalesOf('breakiterator', locales, arguments[1]); + return supportedLocalesOf('breakiterator', locales, %_Arguments(1)); }, DONT_ENUM ); @@ -1971,12 +1971,12 @@ $Object.defineProperty($String.prototype, 'localeCompare', { throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR); } - if (this === undefined || this === null) { + if (IS_NULL_OR_UNDEFINED(this)) { throw new $TypeError('Method invoked on undefined or null value.'); } - var locales = arguments[1]; - var options = arguments[2]; + var locales = %_Arguments(1); + var options = %_Arguments(2); var collator = cachedOrNewService('collator', locales, options); return compare(collator, this, that); }, @@ -2003,8 +2003,8 @@ $Object.defineProperty($Number.prototype, 'toLocaleString', { throw new $TypeError('Method invoked on an object that is not Number.'); } - var locales = arguments[0]; - var options = arguments[1]; + var locales = %_Arguments(0); + var options = %_Arguments(1); var numberFormat = cachedOrNewService('numberformat', locales, options); return formatNumber(numberFormat, this); }, @@ -2049,8 +2049,8 @@ $Object.defineProperty($Date.prototype, 'toLocaleString', { throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR); } - var locales = arguments[0]; - var options = arguments[1]; + var locales = %_Arguments(0); + var options = %_Arguments(1); return toLocaleDateTime( this, locales, options, 'any', 'all', 'dateformatall'); }, @@ -2074,8 +2074,8 @@ $Object.defineProperty($Date.prototype, 'toLocaleDateString', { throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR); } - var locales = arguments[0]; - var options = arguments[1]; + var locales = %_Arguments(0); + var options = %_Arguments(1); return toLocaleDateTime( this, locales, options, 'date', 'date', 'dateformatdate'); }, @@ -2099,8 +2099,8 @@ $Object.defineProperty($Date.prototype, 'toLocaleTimeString', { throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR); } - var locales = arguments[0]; - var options = arguments[1]; + var locales = %_Arguments(0); + var options = %_Arguments(1); return toLocaleDateTime( this, locales, options, 'time', 'time', 'dateformattime'); }, diff --git a/deps/v8/src/ia32/assembler-ia32-inl.h b/deps/v8/src/ia32/assembler-ia32-inl.h index 5a35b207f7..05cc23a71d 100644 --- a/deps/v8/src/ia32/assembler-ia32-inl.h +++ b/deps/v8/src/ia32/assembler-ia32-inl.h @@ -47,6 +47,7 @@ namespace internal { static const byte kCallOpcode = 0xE8; +static const int kNoCodeAgeSequenceLength = 5; // The modes possibly affected by apply must be in kApplyMask. 
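kNoCodeAgeSequenceLength, added above, is 5 because the ia32 code-age prologue is a single call: the 0xE8 opcode byte plus a 32-bit operand, which is also why the next hunk reads the stub handle at pc_ + 1. A standalone sketch of that 5-byte layout for an ordinary near call (NearCallTarget is an illustrative helper, not a V8 function):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    static const unsigned char kCallOpcode = 0xE8;

    // A near call is 0xE8 followed by a 32-bit displacement relative to
    // the end of the 5-byte instruction; the code-age sequence reuses the
    // same operand slot to stash a word it can read back at pc + 1.
    static uintptr_t NearCallTarget(const unsigned char* pc) {
      assert(pc[0] == kCallOpcode);
      int32_t disp;
      std::memcpy(&disp, pc + 1, sizeof(disp));  // the word at pc + 1
      return reinterpret_cast<uintptr_t>(pc) + 5 + disp;
    }

    int main() {
      unsigned char seq[5] = {0xE8, 0x10, 0x00, 0x00, 0x00};  // call +0x10
      assert(NearCallTarget(seq) == reinterpret_cast<uintptr_t>(seq) + 5 + 0x10);
      return 0;
    }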
@@ -190,6 +191,13 @@ void RelocInfo::set_target_cell(Cell* cell, WriteBarrierMode mode) { } +Handle<Object> RelocInfo::code_age_stub_handle(Assembler* origin) { + ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE); + ASSERT(*pc_ == kCallOpcode); + return Memory::Object_Handle_at(pc_ + 1); +} + + Code* RelocInfo::code_age_stub() { ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE); ASSERT(*pc_ == kCallOpcode); @@ -379,7 +387,8 @@ void Assembler::emit(Handle<Object> handle) { void Assembler::emit(uint32_t x, RelocInfo::Mode rmode, TypeFeedbackId id) { if (rmode == RelocInfo::CODE_TARGET && !id.IsNone()) { RecordRelocInfo(RelocInfo::CODE_TARGET_WITH_ID, id.ToInt()); - } else if (!RelocInfo::IsNone(rmode)) { + } else if (!RelocInfo::IsNone(rmode) + && rmode != RelocInfo::CODE_AGE_SEQUENCE) { RecordRelocInfo(rmode); } emit(x); diff --git a/deps/v8/src/ia32/assembler-ia32.cc b/deps/v8/src/ia32/assembler-ia32.cc index e5456da474..0557ed8853 100644 --- a/deps/v8/src/ia32/assembler-ia32.cc +++ b/deps/v8/src/ia32/assembler-ia32.cc @@ -53,6 +53,7 @@ bool CpuFeatures::initialized_ = false; #endif uint64_t CpuFeatures::supported_ = 0; uint64_t CpuFeatures::found_by_runtime_probing_only_ = 0; +uint64_t CpuFeatures::cross_compile_ = 0; ExternalReference ExternalReference::cpu_features() { @@ -1131,30 +1132,21 @@ void Assembler::sub(const Operand& dst, Register src) { void Assembler::test(Register reg, const Immediate& imm) { + if (RelocInfo::IsNone(imm.rmode_) && is_uint8(imm.x_)) { + test_b(reg, imm.x_); + return; + } + EnsureSpace ensure_space(this); - // Only use test against byte for registers that have a byte - // variant: eax, ebx, ecx, and edx. - if (RelocInfo::IsNone(imm.rmode_) && - is_uint8(imm.x_) && - reg.is_byte_register()) { - uint8_t imm8 = imm.x_; - if (reg.is(eax)) { - EMIT(0xA8); - EMIT(imm8); - } else { - emit_arith_b(0xF6, 0xC0, reg, imm8); - } + // This is not using emit_arith because test doesn't support + // sign-extension of 8-bit operands. + if (reg.is(eax)) { + EMIT(0xA9); } else { - // This is not using emit_arith because test doesn't support - // sign-extension of 8-bit operands. - if (reg.is(eax)) { - EMIT(0xA9); - } else { - EMIT(0xF7); - EMIT(0xC0 | reg.code()); - } - emit(imm); + EMIT(0xF7); + EMIT(0xC0 | reg.code()); } + emit(imm); } @@ -1178,6 +1170,9 @@ void Assembler::test(const Operand& op, const Immediate& imm) { test(op.reg(), imm); return; } + if (RelocInfo::IsNone(imm.rmode_) && is_uint8(imm.x_)) { + return test_b(op, imm.x_); + } EnsureSpace ensure_space(this); EMIT(0xF7); emit_operand(eax, op); @@ -1185,9 +1180,26 @@ void Assembler::test(const Operand& op, const Immediate& imm) { } +void Assembler::test_b(Register reg, uint8_t imm8) { + EnsureSpace ensure_space(this); + // Only use test against byte for registers that have a byte + // variant: eax, ebx, ecx, and edx. 
+ if (reg.is(eax)) { + EMIT(0xA8); + EMIT(imm8); + } else if (reg.is_byte_register()) { + emit_arith_b(0xF6, 0xC0, reg, imm8); + } else { + EMIT(0xF7); + EMIT(0xC0 | reg.code()); + emit(imm8); + } +} + + void Assembler::test_b(const Operand& op, uint8_t imm8) { - if (op.is_reg_only() && !op.reg().is_byte_register()) { - test(op, Immediate(imm8)); + if (op.is_reg_only()) { + test_b(op.reg(), imm8); return; } EnsureSpace ensure_space(this); @@ -1402,7 +1414,8 @@ void Assembler::call(Handle<Code> code, TypeFeedbackId ast_id) { positions_recorder()->WriteRecordedPositions(); EnsureSpace ensure_space(this); - ASSERT(RelocInfo::IsCodeTarget(rmode)); + ASSERT(RelocInfo::IsCodeTarget(rmode) + || rmode == RelocInfo::CODE_AGE_SEQUENCE); EMIT(0xE8); emit(code, rmode, ast_id); } @@ -2055,6 +2068,7 @@ void Assembler::xorps(XMMRegister dst, XMMRegister src) { void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) { + ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0xF2); EMIT(0x0F); @@ -2064,6 +2078,7 @@ void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) { void Assembler::andpd(XMMRegister dst, XMMRegister src) { + ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0x66); EMIT(0x0F); @@ -2073,6 +2088,7 @@ void Assembler::andpd(XMMRegister dst, XMMRegister src) { void Assembler::orpd(XMMRegister dst, XMMRegister src) { + ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0x66); EMIT(0x0F); @@ -2235,18 +2251,6 @@ void Assembler::prefetch(const Operand& src, int level) { } -void Assembler::movdbl(XMMRegister dst, const Operand& src) { - EnsureSpace ensure_space(this); - movsd(dst, src); -} - - -void Assembler::movdbl(const Operand& dst, XMMRegister src) { - EnsureSpace ensure_space(this); - movsd(dst, src); -} - - void Assembler::movsd(const Operand& dst, XMMRegister src ) { ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); @@ -2335,11 +2339,19 @@ void Assembler::extractps(Register dst, XMMRegister src, byte imm8) { EMIT(0x0F); EMIT(0x3A); EMIT(0x17); - emit_sse_operand(dst, src); + emit_sse_operand(src, dst); EMIT(imm8); } +void Assembler::andps(XMMRegister dst, XMMRegister src) { + EnsureSpace ensure_space(this); + EMIT(0x0F); + EMIT(0x54); + emit_sse_operand(dst, src); +} + + void Assembler::pand(XMMRegister dst, XMMRegister src) { ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); @@ -2474,6 +2486,11 @@ void Assembler::emit_sse_operand(Register dst, XMMRegister src) { } +void Assembler::emit_sse_operand(XMMRegister dst, Register src) { + EMIT(0xC0 | (dst.code() << 3) | src.code()); +} + + void Assembler::Print() { Disassembler::Decode(isolate(), stdout, buffer_, pc_); } diff --git a/deps/v8/src/ia32/assembler-ia32.h b/deps/v8/src/ia32/assembler-ia32.h index 55eff93190..f46c6478db 100644 --- a/deps/v8/src/ia32/assembler-ia32.h +++ b/deps/v8/src/ia32/assembler-ia32.h @@ -535,32 +535,54 @@ class CpuFeatures : public AllStatic { // Check whether a feature is supported by the target CPU. 
static bool IsSupported(CpuFeature f) { ASSERT(initialized_); + if (Check(f, cross_compile_)) return true; if (f == SSE2 && !FLAG_enable_sse2) return false; if (f == SSE3 && !FLAG_enable_sse3) return false; if (f == SSE4_1 && !FLAG_enable_sse4_1) return false; if (f == CMOV && !FLAG_enable_cmov) return false; - return (supported_ & (static_cast<uint64_t>(1) << f)) != 0; + return Check(f, supported_); } static bool IsFoundByRuntimeProbingOnly(CpuFeature f) { ASSERT(initialized_); - return (found_by_runtime_probing_only_ & - (static_cast<uint64_t>(1) << f)) != 0; + return Check(f, found_by_runtime_probing_only_); } static bool IsSafeForSnapshot(CpuFeature f) { - return (IsSupported(f) && + return Check(f, cross_compile_) || + (IsSupported(f) && (!Serializer::enabled() || !IsFoundByRuntimeProbingOnly(f))); } + static bool VerifyCrossCompiling() { + return cross_compile_ == 0; + } + + static bool VerifyCrossCompiling(CpuFeature f) { + uint64_t mask = flag2set(f); + return cross_compile_ == 0 || + (cross_compile_ & mask) == mask; + } + private: + static bool Check(CpuFeature f, uint64_t set) { + return (set & flag2set(f)) != 0; + } + + static uint64_t flag2set(CpuFeature f) { + return static_cast<uint64_t>(1) << f; + } + #ifdef DEBUG static bool initialized_; #endif static uint64_t supported_; static uint64_t found_by_runtime_probing_only_; + static uint64_t cross_compile_; + friend class ExternalReference; + friend class PlatformFeatureScope; DISALLOW_COPY_AND_ASSIGN(CpuFeatures); }; @@ -852,7 +874,7 @@ class Assembler : public AssemblerBase { void test(Register reg, const Operand& op); void test_b(Register reg, const Operand& op); void test(const Operand& op, const Immediate& imm); - void test_b(Register reg, uint8_t imm8) { test_b(Operand(reg), imm8); } + void test_b(Register reg, uint8_t imm8); void test_b(const Operand& op, uint8_t imm8); void xor_(Register dst, int32_t imm32); @@ -995,6 +1017,10 @@ class Assembler : public AssemblerBase { void cpuid(); + // SSE instructions + void andps(XMMRegister dst, XMMRegister src); + void xorps(XMMRegister dst, XMMRegister src); + // SSE2 instructions void cvttss2si(Register dst, const Operand& src); void cvttsd2si(Register dst, const Operand& src); @@ -1012,7 +1038,6 @@ class Assembler : public AssemblerBase { void mulsd(XMMRegister dst, const Operand& src); void divsd(XMMRegister dst, XMMRegister src); void xorpd(XMMRegister dst, XMMRegister src); - void xorps(XMMRegister dst, XMMRegister src); void sqrtsd(XMMRegister dst, XMMRegister src); void andpd(XMMRegister dst, XMMRegister src); @@ -1050,15 +1075,14 @@ class Assembler : public AssemblerBase { } } - // Use either movsd or movlpd. - void movdbl(XMMRegister dst, const Operand& src); - void movdbl(const Operand& dst, XMMRegister src); - void movd(XMMRegister dst, Register src) { movd(dst, Operand(src)); } void movd(XMMRegister dst, const Operand& src); void movd(Register dst, XMMRegister src) { movd(Operand(dst), src); } void movd(const Operand& dst, XMMRegister src); void movsd(XMMRegister dst, XMMRegister src); + void movsd(XMMRegister dst, const Operand& src); + void movsd(const Operand& dst, XMMRegister src); + void movss(XMMRegister dst, const Operand& src); void movss(const Operand& dst, XMMRegister src); @@ -1136,16 +1160,14 @@ class Assembler : public AssemblerBase { // Avoid overflows for displacements etc. 
static const int kMaximalBufferSize = 512*MB; - byte byte_at(int pos) { return buffer_[pos]; } + byte byte_at(int pos) { return buffer_[pos]; } void set_byte_at(int pos, byte value) { buffer_[pos] = value; } protected: - void movsd(XMMRegister dst, const Operand& src); - void movsd(const Operand& dst, XMMRegister src); - void emit_sse_operand(XMMRegister reg, const Operand& adr); void emit_sse_operand(XMMRegister dst, XMMRegister src); void emit_sse_operand(Register dst, XMMRegister src); + void emit_sse_operand(XMMRegister dst, Register src); byte* addr_at(int pos) { return buffer_ + pos; } diff --git a/deps/v8/src/ia32/builtins-ia32.cc b/deps/v8/src/ia32/builtins-ia32.cc index a1597481aa..e5e6ec50d1 100644 --- a/deps/v8/src/ia32/builtins-ia32.cc +++ b/deps/v8/src/ia32/builtins-ia32.cc @@ -539,10 +539,12 @@ static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) { __ mov(eax, Operand(esp, 8 * kPointerSize)); { FrameScope scope(masm, StackFrame::MANUAL); - __ PrepareCallCFunction(1, ebx); + __ PrepareCallCFunction(2, ebx); + __ mov(Operand(esp, 1 * kPointerSize), + Immediate(ExternalReference::isolate_address(masm->isolate()))); __ mov(Operand(esp, 0), eax); __ CallCFunction( - ExternalReference::get_make_code_young_function(masm->isolate()), 1); + ExternalReference::get_make_code_young_function(masm->isolate()), 2); } __ popad(); __ ret(0); @@ -561,6 +563,44 @@ CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR) #undef DEFINE_CODE_AGE_BUILTIN_GENERATOR +void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) { + // For now, as in GenerateMakeCodeYoungAgainCommon, we are relying on the fact + // that make_code_young doesn't do any garbage collection which allows us to + // save/restore the registers without worrying about which of them contain + // pointers. + __ pushad(); + __ mov(eax, Operand(esp, 8 * kPointerSize)); + __ sub(eax, Immediate(Assembler::kCallInstructionLength)); + { // NOLINT + FrameScope scope(masm, StackFrame::MANUAL); + __ PrepareCallCFunction(2, ebx); + __ mov(Operand(esp, 1 * kPointerSize), + Immediate(ExternalReference::isolate_address(masm->isolate()))); + __ mov(Operand(esp, 0), eax); + __ CallCFunction( + ExternalReference::get_mark_code_as_executed_function(masm->isolate()), + 2); + } + __ popad(); + + // Perform prologue operations usually performed by the young code stub. + __ pop(eax); // Pop return address into scratch register. + __ push(ebp); // Caller's frame pointer. + __ mov(ebp, esp); + __ push(esi); // Callee's context. + __ push(edi); // Callee's JS Function. + __ push(eax); // Push return address after frame prologue. + + // Jump to point after the code-age stub. + __ ret(0); +} + + +void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) { + GenerateMakeCodeYoungAgainCommon(masm); +} + + void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) { // Enter an internal frame. { @@ -628,25 +668,6 @@ void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) { } -void Builtins::Generate_NotifyOSR(MacroAssembler* masm) { - // TODO(kasperl): Do we need to save/restore the XMM registers too? - // TODO(mvstanton): We should save these regs, do this in a future - // checkin. - - // For now, we are relying on the fact that Runtime::NotifyOSR - // doesn't do any garbage collection which allows us to save/restore - // the registers without worrying about which of them contain - // pointers. This seems a bit fragile. 
- __ pushad(); - { - FrameScope scope(masm, StackFrame::INTERNAL); - __ CallRuntime(Runtime::kNotifyOSR, 0); - } - __ popad(); - __ ret(0); -} - - void Builtins::Generate_FunctionCall(MacroAssembler* masm) { Factory* factory = masm->isolate()->factory(); @@ -1063,13 +1084,11 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) { // Lookup the argument in the number to string cache. Label not_cached, argument_is_string; - NumberToStringStub::GenerateLookupNumberStringCache( - masm, - eax, // Input. - ebx, // Result. - ecx, // Scratch 1. - edx, // Scratch 2. - ¬_cached); + __ LookupNumberStringCache(eax, // Input. + ebx, // Result. + ecx, // Scratch 1. + edx, // Scratch 2. + ¬_cached); __ IncrementCounter(counters->string_ctor_cached_number(), 1); __ bind(&argument_is_string); // ----------- S t a t e ------------- @@ -1326,6 +1345,24 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) { } +void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) { + // We check the stack limit as indicator that recompilation might be done. + Label ok; + ExternalReference stack_limit = + ExternalReference::address_of_stack_limit(masm->isolate()); + __ cmp(esp, Operand::StaticVariable(stack_limit)); + __ j(above_equal, &ok, Label::kNear); + { + FrameScope scope(masm, StackFrame::INTERNAL); + __ CallRuntime(Runtime::kStackGuard, 0); + } + __ jmp(masm->isolate()->builtins()->OnStackReplacement(), + RelocInfo::CODE_TARGET); + + __ bind(&ok); + __ ret(0); +} + #undef __ } } // namespace v8::internal diff --git a/deps/v8/src/ia32/code-stubs-ia32.cc b/deps/v8/src/ia32/code-stubs-ia32.cc index a83c1ae91d..b6bbe04b33 100644 --- a/deps/v8/src/ia32/code-stubs-ia32.cc +++ b/deps/v8/src/ia32/code-stubs-ia32.cc @@ -64,6 +64,17 @@ void ToNumberStub::InitializeInterfaceDescriptor( } +void NumberToStringStub::InitializeInterfaceDescriptor( + Isolate* isolate, + CodeStubInterfaceDescriptor* descriptor) { + static Register registers[] = { eax }; + descriptor->register_param_count_ = 1; + descriptor->register_params_ = registers; + descriptor->deoptimization_handler_ = + Runtime::FunctionForId(Runtime::kNumberToString)->entry; +} + + void FastCloneShallowArrayStub::InitializeInterfaceDescriptor( Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { @@ -82,7 +93,7 @@ void FastCloneShallowObjectStub::InitializeInterfaceDescriptor( descriptor->register_param_count_ = 4; descriptor->register_params_ = registers; descriptor->deoptimization_handler_ = - Runtime::FunctionForId(Runtime::kCreateObjectLiteralShallow)->entry; + Runtime::FunctionForId(Runtime::kCreateObjectLiteral)->entry; } @@ -162,7 +173,7 @@ static void InitializeArrayConstructorDescriptor( if (constant_stack_parameter_count != 0) { // stack param count needs (constructor pointer, and single argument) - descriptor->stack_parameter_count_ = &eax; + descriptor->stack_parameter_count_ = eax; } descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count; descriptor->register_params_ = registers; @@ -184,7 +195,7 @@ static void InitializeInternalArrayConstructorDescriptor( if (constant_stack_parameter_count != 0) { // stack param count needs (constructor pointer, and single argument) - descriptor->stack_parameter_count_ = &eax; + descriptor->stack_parameter_count_ = eax; } descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count; descriptor->register_params_ = registers; @@ -283,6 +294,18 @@ void ElementsTransitionAndStoreStub::InitializeInterfaceDescriptor( } +void 
BinaryOpStub::InitializeInterfaceDescriptor( + Isolate* isolate, + CodeStubInterfaceDescriptor* descriptor) { + static Register registers[] = { edx, eax }; + descriptor->register_param_count_ = 2; + descriptor->register_params_ = registers; + descriptor->deoptimization_handler_ = FUNCTION_ADDR(BinaryOpIC_Miss); + descriptor->SetMissHandler( + ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate)); +} + + #define __ ACCESS_MASM(masm) @@ -432,7 +455,7 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) { __ sub(esp, Immediate(kDoubleSize * XMMRegister::kNumRegisters)); for (int i = 0; i < XMMRegister::kNumRegisters; i++) { XMMRegister reg = XMMRegister::from_code(i); - __ movdbl(Operand(esp, i * kDoubleSize), reg); + __ movsd(Operand(esp, i * kDoubleSize), reg); } } const int argument_count = 1; @@ -448,7 +471,7 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) { CpuFeatureScope scope(masm, SSE2); for (int i = 0; i < XMMRegister::kNumRegisters; i++) { XMMRegister reg = XMMRegister::from_code(i); - __ movdbl(reg, Operand(esp, i * kDoubleSize)); + __ movsd(reg, Operand(esp, i * kDoubleSize)); } __ add(esp, Immediate(kDoubleSize * XMMRegister::kNumRegisters)); } @@ -470,18 +493,6 @@ class FloatingPointHelper : public AllStatic { // on FPU stack. static void LoadFloatOperand(MacroAssembler* masm, Register number); - // Code pattern for loading floating point values. Input values must - // be either smi or heap number objects (fp values). Requirements: - // operand_1 on TOS+1 or in edx, operand_2 on TOS+2 or in eax. - // Returns operands as floating point numbers on FPU stack. - static void LoadFloatOperands(MacroAssembler* masm, - Register scratch, - ArgLocation arg_location = ARGS_ON_STACK); - - // Similar to LoadFloatOperand but assumes that both operands are smis. - // Expects operands in edx, eax. - static void LoadFloatSmis(MacroAssembler* masm, Register scratch); - // Test if operands are smi or number objects (fp). Requirements: // operand_1 in eax, operand_2 in edx; falls through on float // operands, jumps to the non_float label otherwise. @@ -489,32 +500,11 @@ class FloatingPointHelper : public AllStatic { Label* non_float, Register scratch); - // Takes the operands in edx and eax and loads them as integers in eax - // and ecx. - static void LoadUnknownsAsIntegers(MacroAssembler* masm, - bool use_sse3, - BinaryOpIC::TypeInfo left_type, - BinaryOpIC::TypeInfo right_type, - Label* operand_conversion_failure); - // Test if operands are numbers (smi or HeapNumber objects), and load // them into xmm0 and xmm1 if they are. Jump to label not_numbers if // either operand is not a number. Operands are in edx and eax. // Leaves operands unchanged. static void LoadSSE2Operands(MacroAssembler* masm, Label* not_numbers); - - // Similar to LoadSSE2Operands but assumes that both operands are smis. - // Expects operands in edx, eax. - static void LoadSSE2Smis(MacroAssembler* masm, Register scratch); - - // Checks that |operand| has an int32 value. If |int32_result| is different - // from |scratch|, it will contain that int32 value. 
- static void CheckSSE2OperandIsInt32(MacroAssembler* masm, - Label* non_int32, - XMMRegister operand, - Register int32_result, - Register scratch, - XMMRegister xmm_scratch); }; @@ -658,1259 +648,6 @@ void DoubleToIStub::Generate(MacroAssembler* masm) { } -void BinaryOpStub::Initialize() { - platform_specific_bit_ = CpuFeatures::IsSupported(SSE3); -} - - -void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) { - __ pop(ecx); // Save return address. - __ push(edx); - __ push(eax); - // Left and right arguments are now on top. - __ push(Immediate(Smi::FromInt(MinorKey()))); - - __ push(ecx); // Push return address. - - // Patch the caller to an appropriate specialized stub and return the - // operation result to the caller of the stub. - __ TailCallExternalReference( - ExternalReference(IC_Utility(IC::kBinaryOp_Patch), - masm->isolate()), - 3, - 1); -} - - -// Prepare for a type transition runtime call when the args are already on -// the stack, under the return address. -void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm) { - __ pop(ecx); // Save return address. - // Left and right arguments are already on top of the stack. - __ push(Immediate(Smi::FromInt(MinorKey()))); - - __ push(ecx); // Push return address. - - // Patch the caller to an appropriate specialized stub and return the - // operation result to the caller of the stub. - __ TailCallExternalReference( - ExternalReference(IC_Utility(IC::kBinaryOp_Patch), - masm->isolate()), - 3, - 1); -} - - -static void BinaryOpStub_GenerateRegisterArgsPop(MacroAssembler* masm) { - __ pop(ecx); - __ pop(eax); - __ pop(edx); - __ push(ecx); -} - - -static void BinaryOpStub_GenerateSmiCode( - MacroAssembler* masm, - Label* slow, - BinaryOpStub::SmiCodeGenerateHeapNumberResults allow_heapnumber_results, - Token::Value op) { - // 1. Move arguments into edx, eax except for DIV and MOD, which need the - // dividend in eax and edx free for the division. Use eax, ebx for those. - Comment load_comment(masm, "-- Load arguments"); - Register left = edx; - Register right = eax; - if (op == Token::DIV || op == Token::MOD) { - left = eax; - right = ebx; - __ mov(ebx, eax); - __ mov(eax, edx); - } - - - // 2. Prepare the smi check of both operands by oring them together. - Comment smi_check_comment(masm, "-- Smi check arguments"); - Label not_smis; - Register combined = ecx; - ASSERT(!left.is(combined) && !right.is(combined)); - switch (op) { - case Token::BIT_OR: - // Perform the operation into eax and smi check the result. Preserve - // eax in case the result is not a smi. - ASSERT(!left.is(ecx) && !right.is(ecx)); - __ mov(ecx, right); - __ or_(right, left); // Bitwise or is commutative. - combined = right; - break; - - case Token::BIT_XOR: - case Token::BIT_AND: - case Token::ADD: - case Token::SUB: - case Token::MUL: - case Token::DIV: - case Token::MOD: - __ mov(combined, right); - __ or_(combined, left); - break; - - case Token::SHL: - case Token::SAR: - case Token::SHR: - // Move the right operand into ecx for the shift operation, use eax - // for the smi check register. - ASSERT(!left.is(ecx) && !right.is(ecx)); - __ mov(ecx, right); - __ or_(right, left); - combined = right; - break; - - default: - break; - } - - // 3. Perform the smi check of the operands. - STATIC_ASSERT(kSmiTag == 0); // Adjust zero check if not the case. - __ JumpIfNotSmi(combined, ¬_smis); - - // 4. Operands are both smis, perform the operation leaving the result in - // eax and check the result if necessary. 
- Comment perform_smi(masm, "-- Perform smi operation"); - Label use_fp_on_smis; - switch (op) { - case Token::BIT_OR: - // Nothing to do. - break; - - case Token::BIT_XOR: - ASSERT(right.is(eax)); - __ xor_(right, left); // Bitwise xor is commutative. - break; - - case Token::BIT_AND: - ASSERT(right.is(eax)); - __ and_(right, left); // Bitwise and is commutative. - break; - - case Token::SHL: - // Remove tags from operands (but keep sign). - __ SmiUntag(left); - __ SmiUntag(ecx); - // Perform the operation. - __ shl_cl(left); - // Check that the *signed* result fits in a smi. - __ cmp(left, 0xc0000000); - __ j(sign, &use_fp_on_smis); - // Tag the result and store it in register eax. - __ SmiTag(left); - __ mov(eax, left); - break; - - case Token::SAR: - // Remove tags from operands (but keep sign). - __ SmiUntag(left); - __ SmiUntag(ecx); - // Perform the operation. - __ sar_cl(left); - // Tag the result and store it in register eax. - __ SmiTag(left); - __ mov(eax, left); - break; - - case Token::SHR: - // Remove tags from operands (but keep sign). - __ SmiUntag(left); - __ SmiUntag(ecx); - // Perform the operation. - __ shr_cl(left); - // Check that the *unsigned* result fits in a smi. - // Neither of the two high-order bits can be set: - // - 0x80000000: high bit would be lost when smi tagging. - // - 0x40000000: this number would convert to negative when - // Smi tagging these two cases can only happen with shifts - // by 0 or 1 when handed a valid smi. - __ test(left, Immediate(0xc0000000)); - __ j(not_zero, &use_fp_on_smis); - // Tag the result and store it in register eax. - __ SmiTag(left); - __ mov(eax, left); - break; - - case Token::ADD: - ASSERT(right.is(eax)); - __ add(right, left); // Addition is commutative. - __ j(overflow, &use_fp_on_smis); - break; - - case Token::SUB: - __ sub(left, right); - __ j(overflow, &use_fp_on_smis); - __ mov(eax, left); - break; - - case Token::MUL: - // If the smi tag is 0 we can just leave the tag on one operand. - STATIC_ASSERT(kSmiTag == 0); // Adjust code below if not the case. - // We can't revert the multiplication if the result is not a smi - // so save the right operand. - __ mov(ebx, right); - // Remove tag from one of the operands (but keep sign). - __ SmiUntag(right); - // Do multiplication. - __ imul(right, left); // Multiplication is commutative. - __ j(overflow, &use_fp_on_smis); - // Check for negative zero result. Use combined = left | right. - __ NegativeZeroTest(right, combined, &use_fp_on_smis); - break; - - case Token::DIV: - // We can't revert the division if the result is not a smi so - // save the left operand. - __ mov(edi, left); - // Check for 0 divisor. - __ test(right, right); - __ j(zero, &use_fp_on_smis); - // Sign extend left into edx:eax. - ASSERT(left.is(eax)); - __ cdq(); - // Divide edx:eax by right. - __ idiv(right); - // Check for the corner case of dividing the most negative smi by - // -1. We cannot use the overflow flag, since it is not set by idiv - // instruction. - STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1); - __ cmp(eax, 0x40000000); - __ j(equal, &use_fp_on_smis); - // Check for negative zero result. Use combined = left | right. - __ NegativeZeroTest(eax, combined, &use_fp_on_smis); - // Check that the remainder is zero. - __ test(edx, edx); - __ j(not_zero, &use_fp_on_smis); - // Tag the result and store it in register eax. - __ SmiTag(eax); - break; - - case Token::MOD: - // Check for 0 divisor. - __ test(right, right); - __ j(zero, ¬_smis); - - // Sign extend left into edx:eax. 
- ASSERT(left.is(eax)); - __ cdq(); - // Divide edx:eax by right. - __ idiv(right); - // Check for negative zero result. Use combined = left | right. - __ NegativeZeroTest(edx, combined, slow); - // Move remainder to register eax. - __ mov(eax, edx); - break; - - default: - UNREACHABLE(); - } - - // 5. Emit return of result in eax. Some operations have registers pushed. - switch (op) { - case Token::ADD: - case Token::SUB: - case Token::MUL: - case Token::DIV: - __ ret(0); - break; - case Token::MOD: - case Token::BIT_OR: - case Token::BIT_AND: - case Token::BIT_XOR: - case Token::SAR: - case Token::SHL: - case Token::SHR: - __ ret(2 * kPointerSize); - break; - default: - UNREACHABLE(); - } - - // 6. For some operations emit inline code to perform floating point - // operations on known smis (e.g., if the result of the operation - // overflowed the smi range). - if (allow_heapnumber_results == BinaryOpStub::NO_HEAPNUMBER_RESULTS) { - __ bind(&use_fp_on_smis); - switch (op) { - // Undo the effects of some operations, and some register moves. - case Token::SHL: - // The arguments are saved on the stack, and only used from there. - break; - case Token::ADD: - // Revert right = right + left. - __ sub(right, left); - break; - case Token::SUB: - // Revert left = left - right. - __ add(left, right); - break; - case Token::MUL: - // Right was clobbered but a copy is in ebx. - __ mov(right, ebx); - break; - case Token::DIV: - // Left was clobbered but a copy is in edi. Right is in ebx for - // division. They should be in eax, ebx for jump to not_smi. - __ mov(eax, edi); - break; - default: - // No other operators jump to use_fp_on_smis. - break; - } - __ jmp(¬_smis); - } else { - ASSERT(allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS); - switch (op) { - case Token::SHL: - case Token::SHR: { - Comment perform_float(masm, "-- Perform float operation on smis"); - __ bind(&use_fp_on_smis); - // Result we want is in left == edx, so we can put the allocated heap - // number in eax. - __ AllocateHeapNumber(eax, ecx, ebx, slow); - // Store the result in the HeapNumber and return. - // It's OK to overwrite the arguments on the stack because we - // are about to return. - if (op == Token::SHR) { - __ mov(Operand(esp, 1 * kPointerSize), left); - __ mov(Operand(esp, 2 * kPointerSize), Immediate(0)); - __ fild_d(Operand(esp, 1 * kPointerSize)); - __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset)); - } else { - ASSERT_EQ(Token::SHL, op); - if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatureScope use_sse2(masm, SSE2); - __ cvtsi2sd(xmm0, left); - __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0); - } else { - __ mov(Operand(esp, 1 * kPointerSize), left); - __ fild_s(Operand(esp, 1 * kPointerSize)); - __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset)); - } - } - __ ret(2 * kPointerSize); - break; - } - - case Token::ADD: - case Token::SUB: - case Token::MUL: - case Token::DIV: { - Comment perform_float(masm, "-- Perform float operation on smis"); - __ bind(&use_fp_on_smis); - // Restore arguments to edx, eax. - switch (op) { - case Token::ADD: - // Revert right = right + left. - __ sub(right, left); - break; - case Token::SUB: - // Revert left = left - right. - __ add(left, right); - break; - case Token::MUL: - // Right was clobbered but a copy is in ebx. - __ mov(right, ebx); - break; - case Token::DIV: - // Left was clobbered but a copy is in edi. Right is in ebx for - // division. 
- __ mov(edx, edi); - __ mov(eax, right); - break; - default: UNREACHABLE(); - break; - } - __ AllocateHeapNumber(ecx, ebx, no_reg, slow); - if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatureScope use_sse2(masm, SSE2); - FloatingPointHelper::LoadSSE2Smis(masm, ebx); - switch (op) { - case Token::ADD: __ addsd(xmm0, xmm1); break; - case Token::SUB: __ subsd(xmm0, xmm1); break; - case Token::MUL: __ mulsd(xmm0, xmm1); break; - case Token::DIV: __ divsd(xmm0, xmm1); break; - default: UNREACHABLE(); - } - __ movdbl(FieldOperand(ecx, HeapNumber::kValueOffset), xmm0); - } else { // SSE2 not available, use FPU. - FloatingPointHelper::LoadFloatSmis(masm, ebx); - switch (op) { - case Token::ADD: __ faddp(1); break; - case Token::SUB: __ fsubp(1); break; - case Token::MUL: __ fmulp(1); break; - case Token::DIV: __ fdivp(1); break; - default: UNREACHABLE(); - } - __ fstp_d(FieldOperand(ecx, HeapNumber::kValueOffset)); - } - __ mov(eax, ecx); - __ ret(0); - break; - } - - default: - break; - } - } - - // 7. Non-smi operands, fall out to the non-smi code with the operands in - // edx and eax. - Comment done_comment(masm, "-- Enter non-smi code"); - __ bind(¬_smis); - switch (op) { - case Token::BIT_OR: - case Token::SHL: - case Token::SAR: - case Token::SHR: - // Right operand is saved in ecx and eax was destroyed by the smi - // check. - __ mov(eax, ecx); - break; - - case Token::DIV: - case Token::MOD: - // Operands are in eax, ebx at this point. - __ mov(edx, eax); - __ mov(eax, ebx); - break; - - default: - break; - } -} - - -void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) { - Label right_arg_changed, call_runtime; - - switch (op_) { - case Token::ADD: - case Token::SUB: - case Token::MUL: - case Token::DIV: - break; - case Token::MOD: - case Token::BIT_OR: - case Token::BIT_AND: - case Token::BIT_XOR: - case Token::SAR: - case Token::SHL: - case Token::SHR: - GenerateRegisterArgsPush(masm); - break; - default: - UNREACHABLE(); - } - - if (op_ == Token::MOD && encoded_right_arg_.has_value) { - // It is guaranteed that the value will fit into a Smi, because if it - // didn't, we wouldn't be here, see BinaryOp_Patch. - __ cmp(eax, Immediate(Smi::FromInt(fixed_right_arg_value()))); - __ j(not_equal, &right_arg_changed); - } - - if (result_type_ == BinaryOpIC::UNINITIALIZED || - result_type_ == BinaryOpIC::SMI) { - BinaryOpStub_GenerateSmiCode( - masm, &call_runtime, NO_HEAPNUMBER_RESULTS, op_); - } else { - BinaryOpStub_GenerateSmiCode( - masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS, op_); - } - - // Code falls through if the result is not returned as either a smi or heap - // number. 
- __ bind(&right_arg_changed); - switch (op_) { - case Token::ADD: - case Token::SUB: - case Token::MUL: - case Token::DIV: - GenerateTypeTransition(masm); - break; - case Token::MOD: - case Token::BIT_OR: - case Token::BIT_AND: - case Token::BIT_XOR: - case Token::SAR: - case Token::SHL: - case Token::SHR: - GenerateTypeTransitionWithSavedArgs(masm); - break; - default: - UNREACHABLE(); - } - - __ bind(&call_runtime); - switch (op_) { - case Token::ADD: - case Token::SUB: - case Token::MUL: - case Token::DIV: - break; - case Token::MOD: - case Token::BIT_OR: - case Token::BIT_AND: - case Token::BIT_XOR: - case Token::SAR: - case Token::SHL: - case Token::SHR: - BinaryOpStub_GenerateRegisterArgsPop(masm); - break; - default: - UNREACHABLE(); - } - - { - FrameScope scope(masm, StackFrame::INTERNAL); - __ push(edx); - __ push(eax); - GenerateCallRuntime(masm); - } - __ ret(0); -} - - -void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) { - Label call_runtime; - ASSERT(left_type_ == BinaryOpIC::STRING && right_type_ == BinaryOpIC::STRING); - ASSERT(op_ == Token::ADD); - // If both arguments are strings, call the string add stub. - // Otherwise, do a transition. - - // Registers containing left and right operands respectively. - Register left = edx; - Register right = eax; - - // Test if left operand is a string. - __ JumpIfSmi(left, &call_runtime, Label::kNear); - __ CmpObjectType(left, FIRST_NONSTRING_TYPE, ecx); - __ j(above_equal, &call_runtime, Label::kNear); - - // Test if right operand is a string. - __ JumpIfSmi(right, &call_runtime, Label::kNear); - __ CmpObjectType(right, FIRST_NONSTRING_TYPE, ecx); - __ j(above_equal, &call_runtime, Label::kNear); - - StringAddStub string_add_stub( - (StringAddFlags)(STRING_ADD_CHECK_NONE | STRING_ADD_ERECT_FRAME)); - GenerateRegisterArgsPush(masm); - __ TailCallStub(&string_add_stub); - - __ bind(&call_runtime); - GenerateTypeTransition(masm); -} - - -static void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm, - Label* alloc_failure, - OverwriteMode mode); - - -// Input: -// edx: left operand (tagged) -// eax: right operand (tagged) -// Output: -// eax: result (tagged) -void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { - Label call_runtime; - ASSERT(Max(left_type_, right_type_) == BinaryOpIC::INT32); - - // Floating point case. - switch (op_) { - case Token::ADD: - case Token::SUB: - case Token::MUL: - case Token::DIV: - case Token::MOD: { - Label not_floats, not_int32, right_arg_changed; - if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatureScope use_sse2(masm, SSE2); - // It could be that only SMIs have been seen at either the left - // or the right operand. For precise type feedback, patch the IC - // again if this changes. - // In theory, we would need the same check in the non-SSE2 case, - // but since we don't support Crankshaft on such hardware we can - // afford not to care about precise type feedback. 
- if (left_type_ == BinaryOpIC::SMI) { - __ JumpIfNotSmi(edx, &not_int32); - } - if (right_type_ == BinaryOpIC::SMI) { - __ JumpIfNotSmi(eax, &not_int32); - } - FloatingPointHelper::LoadSSE2Operands(masm, &not_floats); - FloatingPointHelper::CheckSSE2OperandIsInt32( - masm, &not_int32, xmm0, ebx, ecx, xmm2); - FloatingPointHelper::CheckSSE2OperandIsInt32( - masm, &not_int32, xmm1, edi, ecx, xmm2); - if (op_ == Token::MOD) { - if (encoded_right_arg_.has_value) { - __ cmp(edi, Immediate(fixed_right_arg_value())); - __ j(not_equal, &right_arg_changed); - } - GenerateRegisterArgsPush(masm); - __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION); - } else { - switch (op_) { - case Token::ADD: __ addsd(xmm0, xmm1); break; - case Token::SUB: __ subsd(xmm0, xmm1); break; - case Token::MUL: __ mulsd(xmm0, xmm1); break; - case Token::DIV: __ divsd(xmm0, xmm1); break; - default: UNREACHABLE(); - } - // Check result type if it is currently Int32. - if (result_type_ <= BinaryOpIC::INT32) { - FloatingPointHelper::CheckSSE2OperandIsInt32( - masm, &not_int32, xmm0, ecx, ecx, xmm2); - } - BinaryOpStub_GenerateHeapResultAllocation(masm, &call_runtime, mode_); - __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0); - __ ret(0); - } - } else { // SSE2 not available, use FPU. - FloatingPointHelper::CheckFloatOperands(masm, &not_floats, ebx); - FloatingPointHelper::LoadFloatOperands( - masm, - ecx, - FloatingPointHelper::ARGS_IN_REGISTERS); - if (op_ == Token::MOD) { - // The operands are now on the FPU stack, but we don't need them. - __ fstp(0); - __ fstp(0); - GenerateRegisterArgsPush(masm); - __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION); - } else { - switch (op_) { - case Token::ADD: __ faddp(1); break; - case Token::SUB: __ fsubp(1); break; - case Token::MUL: __ fmulp(1); break; - case Token::DIV: __ fdivp(1); break; - default: UNREACHABLE(); - } - Label after_alloc_failure; - BinaryOpStub_GenerateHeapResultAllocation( - masm, &after_alloc_failure, mode_); - __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset)); - __ ret(0); - __ bind(&after_alloc_failure); - __ fstp(0); // Pop FPU stack before calling runtime. - __ jmp(&call_runtime); - } - } - - __ bind(&not_floats); - __ bind(&not_int32); - __ bind(&right_arg_changed); - GenerateTypeTransition(masm); - break; - } - - case Token::BIT_OR: - case Token::BIT_AND: - case Token::BIT_XOR: - case Token::SAR: - case Token::SHL: - case Token::SHR: { - GenerateRegisterArgsPush(masm); - Label not_floats; - Label not_int32; - Label non_smi_result; - bool use_sse3 = platform_specific_bit_; - FloatingPointHelper::LoadUnknownsAsIntegers( - masm, use_sse3, left_type_, right_type_, &not_floats); - switch (op_) { - case Token::BIT_OR: __ or_(eax, ecx); break; - case Token::BIT_AND: __ and_(eax, ecx); break; - case Token::BIT_XOR: __ xor_(eax, ecx); break; - case Token::SAR: __ sar_cl(eax); break; - case Token::SHL: __ shl_cl(eax); break; - case Token::SHR: __ shr_cl(eax); break; - default: UNREACHABLE(); - } - if (op_ == Token::SHR) { - // Check if result is non-negative and fits in a smi. - __ test(eax, Immediate(0xc0000000)); - __ j(not_zero, &call_runtime); - } else { - // Check if result fits in a smi. - __ cmp(eax, 0xc0000000); - __ j(negative, &non_smi_result, Label::kNear); - } - // Tag smi result and return. - __ SmiTag(eax); - __ ret(2 * kPointerSize); // Drop two pushed arguments from the stack. - - // All ops except SHR return a signed int32 that we load in - // a HeapNumber. - if (op_ != Token::SHR) { - __ bind(&non_smi_result); - // Allocate a heap number if needed.
- __ mov(ebx, eax); // ebx: result - Label skip_allocation; - switch (mode_) { - case OVERWRITE_LEFT: - case OVERWRITE_RIGHT: - // If the operand was an object, we skip the - // allocation of a heap number. - __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ? - 1 * kPointerSize : 2 * kPointerSize)); - __ JumpIfNotSmi(eax, &skip_allocation, Label::kNear); - // Fall through! - case NO_OVERWRITE: - __ AllocateHeapNumber(eax, ecx, edx, &call_runtime); - __ bind(&skip_allocation); - break; - default: UNREACHABLE(); - } - // Store the result in the HeapNumber and return. - if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatureScope use_sse2(masm, SSE2); - __ cvtsi2sd(xmm0, ebx); - __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0); - } else { - __ mov(Operand(esp, 1 * kPointerSize), ebx); - __ fild_s(Operand(esp, 1 * kPointerSize)); - __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset)); - } - __ ret(2 * kPointerSize); // Drop two pushed arguments from the stack. - } - - __ bind(&not_floats); - __ bind(&not_int32); - GenerateTypeTransitionWithSavedArgs(masm); - break; - } - default: UNREACHABLE(); break; - } - - // If an allocation fails, or SHR hits a hard case, use the runtime system to - // get the correct result. - __ bind(&call_runtime); - - switch (op_) { - case Token::ADD: - case Token::SUB: - case Token::MUL: - case Token::DIV: - break; - case Token::MOD: - return; // Handled above. - case Token::BIT_OR: - case Token::BIT_AND: - case Token::BIT_XOR: - case Token::SAR: - case Token::SHL: - case Token::SHR: - BinaryOpStub_GenerateRegisterArgsPop(masm); - break; - default: - UNREACHABLE(); - } - - { - FrameScope scope(masm, StackFrame::INTERNAL); - __ push(edx); - __ push(eax); - GenerateCallRuntime(masm); - } - __ ret(0); -} - - -void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) { - if (op_ == Token::ADD) { - // Handle string addition here, because it is the only operation - // that does not do a ToNumber conversion on the operands. - GenerateAddStrings(masm); - } - - Factory* factory = masm->isolate()->factory(); - - // Convert odd ball arguments to numbers. - Label check, done; - __ cmp(edx, factory->undefined_value()); - __ j(not_equal, &check, Label::kNear); - if (Token::IsBitOp(op_)) { - __ xor_(edx, edx); - } else { - __ mov(edx, Immediate(factory->nan_value())); - } - __ jmp(&done, Label::kNear); - __ bind(&check); - __ cmp(eax, factory->undefined_value()); - __ j(not_equal, &done, Label::kNear); - if (Token::IsBitOp(op_)) { - __ xor_(eax, eax); - } else { - __ mov(eax, Immediate(factory->nan_value())); - } - __ bind(&done); - - GenerateNumberStub(masm); -} - - -void BinaryOpStub::GenerateNumberStub(MacroAssembler* masm) { - Label call_runtime; - - // Floating point case. - switch (op_) { - case Token::ADD: - case Token::SUB: - case Token::MUL: - case Token::DIV: { - Label not_floats; - if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatureScope use_sse2(masm, SSE2); - - // It could be that only SMIs have been seen at either the left - // or the right operand. For precise type feedback, patch the IC - // again if this changes. - // In theory, we would need the same check in the non-SSE2 case, - // but since we don't support Crankshaft on such hardware we can - // afford not to care about precise type feedback.
- if (left_type_ == BinaryOpIC::SMI) { - __ JumpIfNotSmi(edx, &not_floats); - } - if (right_type_ == BinaryOpIC::SMI) { - __ JumpIfNotSmi(eax, &not_floats); - } - FloatingPointHelper::LoadSSE2Operands(masm, &not_floats); - if (left_type_ == BinaryOpIC::INT32) { - FloatingPointHelper::CheckSSE2OperandIsInt32( - masm, &not_floats, xmm0, ecx, ecx, xmm2); - } - if (right_type_ == BinaryOpIC::INT32) { - FloatingPointHelper::CheckSSE2OperandIsInt32( - masm, &not_floats, xmm1, ecx, ecx, xmm2); - } - - switch (op_) { - case Token::ADD: __ addsd(xmm0, xmm1); break; - case Token::SUB: __ subsd(xmm0, xmm1); break; - case Token::MUL: __ mulsd(xmm0, xmm1); break; - case Token::DIV: __ divsd(xmm0, xmm1); break; - default: UNREACHABLE(); - } - BinaryOpStub_GenerateHeapResultAllocation(masm, &call_runtime, mode_); - __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0); - __ ret(0); - } else { // SSE2 not available, use FPU. - FloatingPointHelper::CheckFloatOperands(masm, &not_floats, ebx); - FloatingPointHelper::LoadFloatOperands( - masm, - ecx, - FloatingPointHelper::ARGS_IN_REGISTERS); - switch (op_) { - case Token::ADD: __ faddp(1); break; - case Token::SUB: __ fsubp(1); break; - case Token::MUL: __ fmulp(1); break; - case Token::DIV: __ fdivp(1); break; - default: UNREACHABLE(); - } - Label after_alloc_failure; - BinaryOpStub_GenerateHeapResultAllocation( - masm, &after_alloc_failure, mode_); - __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset)); - __ ret(0); - __ bind(&after_alloc_failure); - __ fstp(0); // Pop FPU stack before calling runtime. - __ jmp(&call_runtime); - } - - __ bind(&not_floats); - GenerateTypeTransition(masm); - break; - } - - case Token::MOD: { - // For MOD we go directly to runtime in the non-smi case. - break; - } - case Token::BIT_OR: - case Token::BIT_AND: - case Token::BIT_XOR: - case Token::SAR: - case Token::SHL: - case Token::SHR: { - GenerateRegisterArgsPush(masm); - Label not_floats; - Label non_smi_result; - // We do not check the input arguments here, as any value is - // unconditionally truncated to an int32 anyway. To get the - // right optimized code, int32 type feedback is just right. - bool use_sse3 = platform_specific_bit_; - FloatingPointHelper::LoadUnknownsAsIntegers( - masm, use_sse3, left_type_, right_type_, &not_floats); - switch (op_) { - case Token::BIT_OR: __ or_(eax, ecx); break; - case Token::BIT_AND: __ and_(eax, ecx); break; - case Token::BIT_XOR: __ xor_(eax, ecx); break; - case Token::SAR: __ sar_cl(eax); break; - case Token::SHL: __ shl_cl(eax); break; - case Token::SHR: __ shr_cl(eax); break; - default: UNREACHABLE(); - } - if (op_ == Token::SHR) { - // Check if result is non-negative and fits in a smi. - __ test(eax, Immediate(0xc0000000)); - __ j(not_zero, &call_runtime); - } else { - // Check if result fits in a smi. - __ cmp(eax, 0xc0000000); - __ j(negative, &non_smi_result, Label::kNear); - } - // Tag smi result and return. - __ SmiTag(eax); - __ ret(2 * kPointerSize); // Drop two pushed arguments from the stack. - - // All ops except SHR return a signed int32 that we load in - // a HeapNumber. - if (op_ != Token::SHR) { - __ bind(&non_smi_result); - // Allocate a heap number if needed. - __ mov(ebx, eax); // ebx: result - Label skip_allocation; - switch (mode_) { - case OVERWRITE_LEFT: - case OVERWRITE_RIGHT: - // If the operand was an object, we skip the - // allocation of a heap number. - __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
- 1 * kPointerSize : 2 * kPointerSize)); - __ JumpIfNotSmi(eax, &skip_allocation, Label::kNear); - // Fall through! - case NO_OVERWRITE: - __ AllocateHeapNumber(eax, ecx, edx, &call_runtime); - __ bind(&skip_allocation); - break; - default: UNREACHABLE(); - } - // Store the result in the HeapNumber and return. - if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatureScope use_sse2(masm, SSE2); - __ cvtsi2sd(xmm0, ebx); - __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0); - } else { - __ mov(Operand(esp, 1 * kPointerSize), ebx); - __ fild_s(Operand(esp, 1 * kPointerSize)); - __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset)); - } - __ ret(2 * kPointerSize); // Drop two pushed arguments from the stack. - } - - __ bind(&not_floats); - GenerateTypeTransitionWithSavedArgs(masm); - break; - } - default: UNREACHABLE(); break; - } - - // If an allocation fails, or SHR or MOD hit a hard case, - // use the runtime system to get the correct result. - __ bind(&call_runtime); - - switch (op_) { - case Token::ADD: - case Token::SUB: - case Token::MUL: - case Token::DIV: - case Token::MOD: - break; - case Token::BIT_OR: - case Token::BIT_AND: - case Token::BIT_XOR: - case Token::SAR: - case Token::SHL: - case Token::SHR: - BinaryOpStub_GenerateRegisterArgsPop(masm); - break; - default: - UNREACHABLE(); - } - - { - FrameScope scope(masm, StackFrame::INTERNAL); - __ push(edx); - __ push(eax); - GenerateCallRuntime(masm); - } - __ ret(0); -} - - -void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) { - Label call_runtime; - - Counters* counters = masm->isolate()->counters(); - __ IncrementCounter(counters->generic_binary_stub_calls(), 1); - - switch (op_) { - case Token::ADD: - case Token::SUB: - case Token::MUL: - case Token::DIV: - break; - case Token::MOD: - case Token::BIT_OR: - case Token::BIT_AND: - case Token::BIT_XOR: - case Token::SAR: - case Token::SHL: - case Token::SHR: - GenerateRegisterArgsPush(masm); - break; - default: - UNREACHABLE(); - } - - BinaryOpStub_GenerateSmiCode( - masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS, op_); - - // Floating point case. - switch (op_) { - case Token::ADD: - case Token::SUB: - case Token::MUL: - case Token::DIV: { - Label not_floats; - if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatureScope use_sse2(masm, SSE2); - FloatingPointHelper::LoadSSE2Operands(masm, &not_floats); - - switch (op_) { - case Token::ADD: __ addsd(xmm0, xmm1); break; - case Token::SUB: __ subsd(xmm0, xmm1); break; - case Token::MUL: __ mulsd(xmm0, xmm1); break; - case Token::DIV: __ divsd(xmm0, xmm1); break; - default: UNREACHABLE(); - } - BinaryOpStub_GenerateHeapResultAllocation(masm, &call_runtime, mode_); - __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0); - __ ret(0); - } else { // SSE2 not available, use FPU. - FloatingPointHelper::CheckFloatOperands(masm, &not_floats, ebx); - FloatingPointHelper::LoadFloatOperands( - masm, - ecx, - FloatingPointHelper::ARGS_IN_REGISTERS); - switch (op_) { - case Token::ADD: __ faddp(1); break; - case Token::SUB: __ fsubp(1); break; - case Token::MUL: __ fmulp(1); break; - case Token::DIV: __ fdivp(1); break; - default: UNREACHABLE(); - } - Label after_alloc_failure; - BinaryOpStub_GenerateHeapResultAllocation( - masm, &after_alloc_failure, mode_); - __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset)); - __ ret(0); - __ bind(&after_alloc_failure); - __ fstp(0); // Pop FPU stack before calling runtime.
- __ jmp(&call_runtime); - } - __ bind(&not_floats); - break; - } - case Token::MOD: { - // For MOD we go directly to runtime in the non-smi case. - break; - } - case Token::BIT_OR: - case Token::BIT_AND: - case Token::BIT_XOR: - case Token::SAR: - case Token::SHL: - case Token::SHR: { - Label non_smi_result; - bool use_sse3 = platform_specific_bit_; - FloatingPointHelper::LoadUnknownsAsIntegers(masm, - use_sse3, - BinaryOpIC::GENERIC, - BinaryOpIC::GENERIC, - &call_runtime); - switch (op_) { - case Token::BIT_OR: __ or_(eax, ecx); break; - case Token::BIT_AND: __ and_(eax, ecx); break; - case Token::BIT_XOR: __ xor_(eax, ecx); break; - case Token::SAR: __ sar_cl(eax); break; - case Token::SHL: __ shl_cl(eax); break; - case Token::SHR: __ shr_cl(eax); break; - default: UNREACHABLE(); - } - if (op_ == Token::SHR) { - // Check if result is non-negative and fits in a smi. - __ test(eax, Immediate(0xc0000000)); - __ j(not_zero, &call_runtime); - } else { - // Check if result fits in a smi. - __ cmp(eax, 0xc0000000); - __ j(negative, &non_smi_result, Label::kNear); - } - // Tag smi result and return. - __ SmiTag(eax); - __ ret(2 * kPointerSize); // Drop the arguments from the stack. - - // All ops except SHR return a signed int32 that we load in - // a HeapNumber. - if (op_ != Token::SHR) { - __ bind(&non_smi_result); - // Allocate a heap number if needed. - __ mov(ebx, eax); // ebx: result - Label skip_allocation; - switch (mode_) { - case OVERWRITE_LEFT: - case OVERWRITE_RIGHT: - // If the operand was an object, we skip the - // allocation of a heap number. - __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ? - 1 * kPointerSize : 2 * kPointerSize)); - __ JumpIfNotSmi(eax, &skip_allocation, Label::kNear); - // Fall through! - case NO_OVERWRITE: - __ AllocateHeapNumber(eax, ecx, edx, &call_runtime); - __ bind(&skip_allocation); - break; - default: UNREACHABLE(); - } - // Store the result in the HeapNumber and return. - if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatureScope use_sse2(masm, SSE2); - __ cvtsi2sd(xmm0, ebx); - __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0); - } else { - __ mov(Operand(esp, 1 * kPointerSize), ebx); - __ fild_s(Operand(esp, 1 * kPointerSize)); - __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset)); - } - __ ret(2 * kPointerSize); - } - break; - } - default: UNREACHABLE(); break; - } - - // If all else fails, use the runtime system to get the correct - // result. - __ bind(&call_runtime); - switch (op_) { - case Token::ADD: - GenerateAddStrings(masm); - // Fall through. - case Token::SUB: - case Token::MUL: - case Token::DIV: - break; - case Token::MOD: - case Token::BIT_OR: - case Token::BIT_AND: - case Token::BIT_XOR: - case Token::SAR: - case Token::SHL: - case Token::SHR: - BinaryOpStub_GenerateRegisterArgsPop(masm); - break; - default: - UNREACHABLE(); - } - - { - FrameScope scope(masm, StackFrame::INTERNAL); - __ push(edx); - __ push(eax); - GenerateCallRuntime(masm); - } - __ ret(0); -} - - -void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) { - ASSERT(op_ == Token::ADD); - Label left_not_string, call_runtime; - - // Registers containing left and right operands respectively. - Register left = edx; - Register right = eax; - - // Test if left operand is a string.
- __ JumpIfSmi(left, &left_not_string, Label::kNear); - __ CmpObjectType(left, FIRST_NONSTRING_TYPE, ecx); - __ j(above_equal, &left_not_string, Label::kNear); - - StringAddStub string_add_left_stub( - (StringAddFlags)(STRING_ADD_CHECK_RIGHT | STRING_ADD_ERECT_FRAME)); - GenerateRegisterArgsPush(masm); - __ TailCallStub(&string_add_left_stub); - - // Left operand is not a string, test right. - __ bind(&left_not_string); - __ JumpIfSmi(right, &call_runtime, Label::kNear); - __ CmpObjectType(right, FIRST_NONSTRING_TYPE, ecx); - __ j(above_equal, &call_runtime, Label::kNear); - - StringAddStub string_add_right_stub( - (StringAddFlags)(STRING_ADD_CHECK_LEFT | STRING_ADD_ERECT_FRAME)); - GenerateRegisterArgsPush(masm); - __ TailCallStub(&string_add_right_stub); - - // Neither argument is a string. - __ bind(&call_runtime); -} - - -static void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm, - Label* alloc_failure, - OverwriteMode mode) { - Label skip_allocation; - switch (mode) { - case OVERWRITE_LEFT: { - // If the argument in edx is already an object, we skip the - // allocation of a heap number. - __ JumpIfNotSmi(edx, &skip_allocation, Label::kNear); - // Allocate a heap number for the result. Keep eax and edx intact - // for the possible runtime call. - __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure); - // Now edx can be overwritten losing one of the arguments as we are - // now done and will not need it any more. - __ mov(edx, ebx); - __ bind(&skip_allocation); - // Use object in edx as a result holder - __ mov(eax, edx); - break; - } - case OVERWRITE_RIGHT: - // If the argument in eax is already an object, we skip the - // allocation of a heap number. - __ JumpIfNotSmi(eax, &skip_allocation, Label::kNear); - // Fall through! - case NO_OVERWRITE: - // Allocate a heap number for the result. Keep eax and edx intact - // for the possible runtime call. - __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure); - // Now eax can be overwritten losing one of the arguments as we are - // now done and will not need it any more. - __ mov(eax, ebx); - __ bind(&skip_allocation); - break; - default: UNREACHABLE(); - } -} - - -void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) { - __ pop(ecx); - __ push(edx); - __ push(eax); - __ push(ecx); -} - - void TranscendentalCacheStub::Generate(MacroAssembler* masm) { // TAGGED case: // Input: @@ -2034,7 +771,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) { __ ret(kPointerSize); } else { // UNTAGGED. CpuFeatureScope scope(masm, SSE2); - __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset)); + __ movsd(xmm1, FieldOperand(eax, HeapNumber::kValueOffset)); __ Ret(); } @@ -2049,7 +786,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) { CpuFeatureScope scope(masm, SSE2); __ AllocateHeapNumber(eax, edi, no_reg, &skip_cache); __ sub(esp, Immediate(kDoubleSize)); - __ movdbl(Operand(esp, 0), xmm1); + __ movsd(Operand(esp, 0), xmm1); __ fld_d(Operand(esp, 0)); __ add(esp, Immediate(kDoubleSize)); } @@ -2062,17 +799,17 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) { __ ret(kPointerSize); } else { // UNTAGGED. CpuFeatureScope scope(masm, SSE2); - __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset)); + __ movsd(xmm1, FieldOperand(eax, HeapNumber::kValueOffset)); __ Ret(); // Skip cache and return answer directly, only in untagged case. 
__ bind(&skip_cache); __ sub(esp, Immediate(kDoubleSize)); - __ movdbl(Operand(esp, 0), xmm1); + __ movsd(Operand(esp, 0), xmm1); __ fld_d(Operand(esp, 0)); GenerateOperation(masm, type_); __ fstp_d(Operand(esp, 0)); - __ movdbl(xmm1, Operand(esp, 0)); + __ movsd(xmm1, Operand(esp, 0)); __ add(esp, Immediate(kDoubleSize)); // We return the value in xmm1 without adding it to the cache, but // we cause a scavenging GC so that future allocations will succeed. @@ -2098,13 +835,13 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) { __ bind(&runtime_call_clear_stack); __ bind(&runtime_call); __ AllocateHeapNumber(eax, edi, no_reg, &skip_cache); - __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm1); + __ movsd(FieldOperand(eax, HeapNumber::kValueOffset), xmm1); { FrameScope scope(masm, StackFrame::INTERNAL); __ push(eax); __ CallRuntime(RuntimeFunction(), 1); } - __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset)); + __ movsd(xmm1, FieldOperand(eax, HeapNumber::kValueOffset)); __ Ret(); } } @@ -2221,79 +958,6 @@ void TranscendentalCacheStub::GenerateOperation( } -// Input: edx, eax are the left and right objects of a bit op. -// Output: eax, ecx are left and right integers for a bit op. -// Warning: can clobber inputs even when it jumps to |conversion_failure|! -void FloatingPointHelper::LoadUnknownsAsIntegers( - MacroAssembler* masm, - bool use_sse3, - BinaryOpIC::TypeInfo left_type, - BinaryOpIC::TypeInfo right_type, - Label* conversion_failure) { - // Check float operands. - Label arg1_is_object, check_undefined_arg1; - Label arg2_is_object, check_undefined_arg2; - Label load_arg2, done; - - // Test if arg1 is a Smi. - if (left_type == BinaryOpIC::SMI) { - __ JumpIfNotSmi(edx, conversion_failure); - } else { - __ JumpIfNotSmi(edx, &arg1_is_object, Label::kNear); - } - - __ SmiUntag(edx); - __ jmp(&load_arg2); - - // If the argument is undefined it converts to zero (ECMA-262, section 9.5). - __ bind(&check_undefined_arg1); - Factory* factory = masm->isolate()->factory(); - __ cmp(edx, factory->undefined_value()); - __ j(not_equal, conversion_failure); - __ mov(edx, Immediate(0)); - __ jmp(&load_arg2); - - __ bind(&arg1_is_object); - __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset)); - __ cmp(ebx, factory->heap_number_map()); - __ j(not_equal, &check_undefined_arg1); - - __ TruncateHeapNumberToI(edx, edx); - - // Here edx has the untagged integer, eax has a Smi or a heap number. - __ bind(&load_arg2); - - // Test if arg2 is a Smi. - if (right_type == BinaryOpIC::SMI) { - __ JumpIfNotSmi(eax, conversion_failure); - } else { - __ JumpIfNotSmi(eax, &arg2_is_object, Label::kNear); - } - - __ SmiUntag(eax); - __ mov(ecx, eax); - __ jmp(&done); - - // If the argument is undefined it converts to zero (ECMA-262, section 9.5). - __ bind(&check_undefined_arg2); - __ cmp(eax, factory->undefined_value()); - __ j(not_equal, conversion_failure); - __ mov(ecx, Immediate(0)); - __ jmp(&done); - - __ bind(&arg2_is_object); - __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset)); - __ cmp(ebx, factory->heap_number_map()); - __ j(not_equal, &check_undefined_arg2); - // Get the untagged integer version of the eax heap number in ecx. 
- - __ TruncateHeapNumberToI(ecx, eax); - - __ bind(&done); - __ mov(eax, edx); -} - - void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm, Register number) { Label load_smi, done; @@ -2320,7 +984,7 @@ void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm, Factory* factory = masm->isolate()->factory(); __ cmp(FieldOperand(edx, HeapObject::kMapOffset), factory->heap_number_map()); __ j(not_equal, not_numbers); // Argument in edx is not a number. - __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset)); + __ movsd(xmm0, FieldOperand(edx, HeapNumber::kValueOffset)); __ bind(&load_eax); // Load operand in eax into xmm1, or branch to not_numbers. __ JumpIfSmi(eax, &load_smi_eax, Label::kNear); @@ -2329,109 +993,20 @@ void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm, __ jmp(not_numbers); // Argument in eax is not a number. __ bind(&load_smi_edx); __ SmiUntag(edx); // Untag smi before converting to float. - __ cvtsi2sd(xmm0, edx); + __ Cvtsi2sd(xmm0, edx); __ SmiTag(edx); // Retag smi for heap number overwriting test. __ jmp(&load_eax); __ bind(&load_smi_eax); __ SmiUntag(eax); // Untag smi before converting to float. - __ cvtsi2sd(xmm1, eax); + __ Cvtsi2sd(xmm1, eax); __ SmiTag(eax); // Retag smi for heap number overwriting test. __ jmp(&done, Label::kNear); __ bind(&load_float_eax); - __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset)); + __ movsd(xmm1, FieldOperand(eax, HeapNumber::kValueOffset)); __ bind(&done); } -void FloatingPointHelper::LoadSSE2Smis(MacroAssembler* masm, - Register scratch) { - const Register left = edx; - const Register right = eax; - __ mov(scratch, left); - ASSERT(!scratch.is(right)); // We're about to clobber scratch. - __ SmiUntag(scratch); - __ cvtsi2sd(xmm0, scratch); - - __ mov(scratch, right); - __ SmiUntag(scratch); - __ cvtsi2sd(xmm1, scratch); -} - - -void FloatingPointHelper::CheckSSE2OperandIsInt32(MacroAssembler* masm, - Label* non_int32, - XMMRegister operand, - Register int32_result, - Register scratch, - XMMRegister xmm_scratch) { - __ cvttsd2si(int32_result, Operand(operand)); - __ cvtsi2sd(xmm_scratch, int32_result); - __ pcmpeqd(xmm_scratch, operand); - __ movmskps(scratch, xmm_scratch); - // Two least significant bits should be both set. 
- __ not_(scratch); - __ test(scratch, Immediate(3)); - __ j(not_zero, non_int32); -} - - -void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm, - Register scratch, - ArgLocation arg_location) { - Label load_smi_1, load_smi_2, done_load_1, done; - if (arg_location == ARGS_IN_REGISTERS) { - __ mov(scratch, edx); - } else { - __ mov(scratch, Operand(esp, 2 * kPointerSize)); - } - __ JumpIfSmi(scratch, &load_smi_1, Label::kNear); - __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset)); - __ bind(&done_load_1); - - if (arg_location == ARGS_IN_REGISTERS) { - __ mov(scratch, eax); - } else { - __ mov(scratch, Operand(esp, 1 * kPointerSize)); - } - __ JumpIfSmi(scratch, &load_smi_2, Label::kNear); - __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset)); - __ jmp(&done, Label::kNear); - - __ bind(&load_smi_1); - __ SmiUntag(scratch); - __ push(scratch); - __ fild_s(Operand(esp, 0)); - __ pop(scratch); - __ jmp(&done_load_1); - - __ bind(&load_smi_2); - __ SmiUntag(scratch); - __ push(scratch); - __ fild_s(Operand(esp, 0)); - __ pop(scratch); - - __ bind(&done); -} - - -void FloatingPointHelper::LoadFloatSmis(MacroAssembler* masm, - Register scratch) { - const Register left = edx; - const Register right = eax; - __ mov(scratch, left); - ASSERT(!scratch.is(right)); // We're about to clobber scratch. - __ SmiUntag(scratch); - __ push(scratch); - __ fild_s(Operand(esp, 0)); - - __ mov(scratch, right); - __ SmiUntag(scratch); - __ mov(Operand(esp, 0), scratch); - __ fild_s(Operand(esp, 0)); - __ pop(scratch); -} - - void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm, Label* non_float, Register scratch) { @@ -2470,7 +1045,7 @@ void MathPowStub::Generate(MacroAssembler* masm) { // Save 1 in double_result - we need this several times later on. __ mov(scratch, Immediate(1)); - __ cvtsi2sd(double_result, scratch); + __ Cvtsi2sd(double_result, scratch); if (exponent_type_ == ON_STACK) { Label base_is_smi, unpack_exponent; @@ -2485,12 +1060,12 @@ void MathPowStub::Generate(MacroAssembler* masm) { factory->heap_number_map()); __ j(not_equal, &call_runtime); - __ movdbl(double_base, FieldOperand(base, HeapNumber::kValueOffset)); + __ movsd(double_base, FieldOperand(base, HeapNumber::kValueOffset)); __ jmp(&unpack_exponent, Label::kNear); __ bind(&base_is_smi); __ SmiUntag(base); - __ cvtsi2sd(double_base, base); + __ Cvtsi2sd(double_base, base); __ bind(&unpack_exponent); __ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear); @@ -2501,7 +1076,7 @@ void MathPowStub::Generate(MacroAssembler* masm) { __ cmp(FieldOperand(exponent, HeapObject::kMapOffset), factory->heap_number_map()); __ j(not_equal, &call_runtime); - __ movdbl(double_exponent, + __ movsd(double_exponent, FieldOperand(exponent, HeapNumber::kValueOffset)); } else if (exponent_type_ == TAGGED) { __ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear); @@ -2509,7 +1084,7 @@ void MathPowStub::Generate(MacroAssembler* masm) { __ jmp(&int_exponent); __ bind(&exponent_not_smi); - __ movdbl(double_exponent, + __ movsd(double_exponent, FieldOperand(exponent, HeapNumber::kValueOffset)); } @@ -2604,9 +1179,9 @@ void MathPowStub::Generate(MacroAssembler* masm) { __ fnclex(); // Clear flags to catch exceptions later. // Transfer (B)ase and (E)xponent onto the FPU register stack. 
__ sub(esp, Immediate(kDoubleSize)); - __ movdbl(Operand(esp, 0), double_exponent); + __ movsd(Operand(esp, 0), double_exponent); __ fld_d(Operand(esp, 0)); // E - __ movdbl(Operand(esp, 0), double_base); + __ movsd(Operand(esp, 0), double_base); __ fld_d(Operand(esp, 0)); // B, E // Exponent is in st(1) and base is in st(0) @@ -2629,7 +1204,7 @@ void MathPowStub::Generate(MacroAssembler* masm) { __ test_b(eax, 0x5F); // We check for all but precision exception. __ j(not_zero, &fast_power_failed, Label::kNear); __ fstp_d(Operand(esp, 0)); - __ movdbl(double_result, Operand(esp, 0)); + __ movsd(double_result, Operand(esp, 0)); __ add(esp, Immediate(kDoubleSize)); __ jmp(&done); @@ -2683,7 +1258,7 @@ void MathPowStub::Generate(MacroAssembler* masm) { // and may not have contained the exponent value in the first place when the // exponent is a smi. We reset it with exponent value before bailing out. __ j(not_equal, &done); - __ cvtsi2sd(double_exponent, exponent); + __ Cvtsi2sd(double_exponent, exponent); // Returning or bailing out. Counters* counters = masm->isolate()->counters(); @@ -2696,7 +1271,7 @@ void MathPowStub::Generate(MacroAssembler* masm) { // as heap number in exponent. __ bind(&done); __ AllocateHeapNumber(eax, scratch, base, &call_runtime); - __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), double_result); + __ movsd(FieldOperand(eax, HeapNumber::kValueOffset), double_result); __ IncrementCounter(counters->math_pow(), 1); __ ret(2 * kPointerSize); } else { @@ -2704,8 +1279,8 @@ void MathPowStub::Generate(MacroAssembler* masm) { { AllowExternalCallThatCantCauseGC scope(masm); __ PrepareCallCFunction(4, scratch); - __ movdbl(Operand(esp, 0 * kDoubleSize), double_base); - __ movdbl(Operand(esp, 1 * kDoubleSize), double_exponent); + __ movsd(Operand(esp, 0 * kDoubleSize), double_base); + __ movsd(Operand(esp, 1 * kDoubleSize), double_exponent); __ CallCFunction( ExternalReference::power_double_double_function(masm->isolate()), 4); } @@ -2713,7 +1288,7 @@ void MathPowStub::Generate(MacroAssembler* masm) { // Store it into the (fixed) result register. __ sub(esp, Immediate(kDoubleSize)); __ fstp_d(Operand(esp, 0)); - __ movdbl(double_result, Operand(esp, 0)); + __ movsd(double_result, Operand(esp, 0)); __ add(esp, Immediate(kDoubleSize)); __ bind(&done); @@ -2756,8 +1331,7 @@ void StringLengthStub::Generate(MacroAssembler* masm) { __ j(not_equal, &miss); } - StubCompiler::GenerateLoadStringLength(masm, edx, eax, ebx, &miss, - support_wrapper_); + StubCompiler::GenerateLoadStringLength(masm, edx, eax, ebx, &miss); __ bind(&miss); StubCompiler::TailCallBuiltin( masm, BaseLoadStoreStubCompiler::MissBuiltin(kind())); @@ -3495,7 +2069,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { __ call(edx); // Drop arguments and come back to JS mode. - __ LeaveApiExitFrame(); + __ LeaveApiExitFrame(true); // Check the result. Label success; @@ -3768,106 +2342,6 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) { } -void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm, - Register object, - Register result, - Register scratch1, - Register scratch2, - Label* not_found) { - // Use of registers. Register result is used as a temporary. - Register number_string_cache = result; - Register mask = scratch1; - Register scratch = scratch2; - - // Load the number string cache. - __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex); - // Make the hash mask from the length of the number string cache. 
It - // contains two elements (number and string) for each cache entry. - __ mov(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset)); - __ shr(mask, kSmiTagSize + 1); // Untag length and divide it by two. - __ sub(mask, Immediate(1)); // Make mask. - - // Calculate the entry in the number string cache. The hash value in the - // number string cache for smis is just the smi value, and the hash for - // doubles is the xor of the upper and lower words. See - // Heap::GetNumberStringCache. - Label smi_hash_calculated; - Label load_result_from_cache; - Label not_smi; - STATIC_ASSERT(kSmiTag == 0); - __ JumpIfNotSmi(object, &not_smi, Label::kNear); - __ mov(scratch, object); - __ SmiUntag(scratch); - __ jmp(&smi_hash_calculated, Label::kNear); - __ bind(&not_smi); - __ cmp(FieldOperand(object, HeapObject::kMapOffset), - masm->isolate()->factory()->heap_number_map()); - __ j(not_equal, not_found); - STATIC_ASSERT(8 == kDoubleSize); - __ mov(scratch, FieldOperand(object, HeapNumber::kValueOffset)); - __ xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4)); - // Object is heap number and hash is now in scratch. Calculate cache index. - __ and_(scratch, mask); - Register index = scratch; - Register probe = mask; - __ mov(probe, - FieldOperand(number_string_cache, - index, - times_twice_pointer_size, - FixedArray::kHeaderSize)); - __ JumpIfSmi(probe, not_found); - if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatureScope fscope(masm, SSE2); - __ movdbl(xmm0, FieldOperand(object, HeapNumber::kValueOffset)); - __ movdbl(xmm1, FieldOperand(probe, HeapNumber::kValueOffset)); - __ ucomisd(xmm0, xmm1); - } else { - __ fld_d(FieldOperand(object, HeapNumber::kValueOffset)); - __ fld_d(FieldOperand(probe, HeapNumber::kValueOffset)); - __ FCmp(); - } - __ j(parity_even, not_found); // Bail out if NaN is involved. - __ j(not_equal, not_found); // The cache did not contain this value. - __ jmp(&load_result_from_cache, Label::kNear); - - __ bind(&smi_hash_calculated); - // Object is smi and hash is now in scratch. Calculate cache index. - __ and_(scratch, mask); - // Check if the entry is the smi we are looking for. - __ cmp(object, - FieldOperand(number_string_cache, - index, - times_twice_pointer_size, - FixedArray::kHeaderSize)); - __ j(not_equal, not_found); - - // Get the result from the cache. - __ bind(&load_result_from_cache); - __ mov(result, - FieldOperand(number_string_cache, - index, - times_twice_pointer_size, - FixedArray::kHeaderSize + kPointerSize)); - Counters* counters = masm->isolate()->counters(); - __ IncrementCounter(counters->number_to_string_native(), 1); -} - - -void NumberToStringStub::Generate(MacroAssembler* masm) { - Label runtime; - - __ mov(ebx, Operand(esp, kPointerSize)); - - // Generate code to lookup number in the number string cache. - GenerateLookupNumberStringCache(masm, ebx, eax, ecx, edx, &runtime); - __ ret(1 * kPointerSize); - - __ bind(&runtime); - // Handle number to string in the runtime system if not found in the cache. - __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1); -} - - static int NegativeComparisonResult(Condition cc) { ASSERT(cc != equal); ASSERT((cc == less) || (cc == less_equal) @@ -4205,6 +2679,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) { // Cache the called function in a global property cell. Cache states // are uninitialized, monomorphic (indicated by a JSFunction), and // megamorphic.
+ // eax : number of arguments to the construct function // ebx : cache cell for call target // edi : the function to call Isolate* isolate = masm->isolate(); @@ -4224,9 +2699,8 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) { // If we didn't have a matching function, and we didn't find the megamorph // sentinel, then we have in the cell either some other function or an // AllocationSite. Do a map check on the object in ecx. - Handle<Map> allocation_site_map( - masm->isolate()->heap()->allocation_site_map(), - masm->isolate()); + Handle<Map> allocation_site_map = + masm->isolate()->factory()->allocation_site_map(); __ cmp(FieldOperand(ecx, 0), Immediate(allocation_site_map)); __ j(not_equal, &miss); @@ -4265,6 +2739,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) { { FrameScope scope(masm, StackFrame::INTERNAL); + // Arguments register must be smi-tagged to call out. __ SmiTag(eax); __ push(eax); __ push(edi); @@ -4444,6 +2919,12 @@ void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) { RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate); ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate); CreateAllocationSiteStub::GenerateAheadOfTime(isolate); + if (Serializer::enabled()) { + PlatformFeatureScope sse2(SSE2); + BinaryOpStub::GenerateAheadOfTime(isolate); + } else { + BinaryOpStub::GenerateAheadOfTime(isolate); + } } @@ -4508,6 +2989,8 @@ void CEntryStub::GenerateCore(MacroAssembler* masm, // stack alignment is known to be correct. This function takes one argument // which is passed on the stack, and we know that the stack has been // prepared to pass at least one argument. + __ mov(Operand(esp, 1 * kPointerSize), + Immediate(ExternalReference::isolate_address(masm->isolate()))); __ mov(Operand(esp, 0 * kPointerSize), eax); // Result. __ call(FUNCTION_ADDR(Runtime::PerformGC), RelocInfo::RUNTIME_ENTRY); } @@ -5455,33 +3938,11 @@ void StringAddStub::Generate(MacroAssembler* masm) { __ Drop(2); // Just jump to runtime to add the two strings. __ bind(&call_runtime); - if ((flags_ & STRING_ADD_ERECT_FRAME) != 0) { - GenerateRegisterArgsPop(masm, ecx); - // Build a frame - { - FrameScope scope(masm, StackFrame::INTERNAL); - GenerateRegisterArgsPush(masm); - __ CallRuntime(Runtime::kStringAdd, 2); - } - __ ret(0); - } else { - __ TailCallRuntime(Runtime::kStringAdd, 2, 1); - } + __ TailCallRuntime(Runtime::kStringAdd, 2, 1); if (call_builtin.is_linked()) { __ bind(&call_builtin); - if ((flags_ & STRING_ADD_ERECT_FRAME) != 0) { - GenerateRegisterArgsPop(masm, ecx); - // Build a frame - { - FrameScope scope(masm, StackFrame::INTERNAL); - GenerateRegisterArgsPush(masm); - __ InvokeBuiltin(builtin_id, CALL_FUNCTION); - } - __ ret(0); - } else { - __ InvokeBuiltin(builtin_id, JUMP_FUNCTION); - } + __ InvokeBuiltin(builtin_id, JUMP_FUNCTION); } } @@ -5517,12 +3978,7 @@ void StringAddStub::GenerateConvertArgument(MacroAssembler* masm, // Check the number to string cache. __ bind(&not_string); // Puts the cached result into scratch1.
- NumberToStringStub::GenerateLookupNumberStringCache(masm, - arg, - scratch1, - scratch2, - scratch3, - slow); + __ LookupNumberStringCache(arg, scratch1, scratch2, scratch3, slow); __ mov(arg, scratch1); __ mov(Operand(esp, stack_offset), arg); __ bind(&done); @@ -6253,24 +4709,24 @@ void ICCompareStub::GenerateNumbers(MacroAssembler* masm) { __ cmp(FieldOperand(eax, HeapObject::kMapOffset), masm->isolate()->factory()->heap_number_map()); __ j(not_equal, &maybe_undefined1, Label::kNear); - __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset)); + __ movsd(xmm1, FieldOperand(eax, HeapNumber::kValueOffset)); __ jmp(&left, Label::kNear); __ bind(&right_smi); __ mov(ecx, eax); // Can't clobber eax because we can still jump away. __ SmiUntag(ecx); - __ cvtsi2sd(xmm1, ecx); + __ Cvtsi2sd(xmm1, ecx); __ bind(&left); __ JumpIfSmi(edx, &left_smi, Label::kNear); __ cmp(FieldOperand(edx, HeapObject::kMapOffset), masm->isolate()->factory()->heap_number_map()); __ j(not_equal, &maybe_undefined2, Label::kNear); - __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset)); + __ movsd(xmm0, FieldOperand(edx, HeapNumber::kValueOffset)); __ jmp(&done); __ bind(&left_smi); __ mov(ecx, edx); // Can't clobber edx because we can still jump away. __ SmiUntag(ecx); - __ cvtsi2sd(xmm0, ecx); + __ Cvtsi2sd(xmm0, ecx); __ bind(&done); // Compare operands. @@ -7300,9 +5756,8 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm, __ inc(edx); __ mov(ecx, FieldOperand(ebx, Cell::kValueOffset)); if (FLAG_debug_code) { - Handle<Map> allocation_site_map( - masm->isolate()->heap()->allocation_site_map(), - masm->isolate()); + Handle<Map> allocation_site_map = + masm->isolate()->factory()->allocation_site_map(); __ cmp(FieldOperand(ecx, 0), Immediate(allocation_site_map)); __ Assert(equal, kExpectedAllocationSiteInCell); } @@ -7447,8 +5902,8 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) { __ cmp(ebx, Immediate(undefined_sentinel)); __ j(equal, &no_info); __ mov(edx, FieldOperand(ebx, Cell::kValueOffset)); - __ cmp(FieldOperand(edx, 0), Immediate(Handle<Map>( - masm->isolate()->heap()->allocation_site_map()))); + __ cmp(FieldOperand(edx, 0), Immediate( + masm->isolate()->factory()->allocation_site_map())); __ j(not_equal, &no_info); __ mov(edx, FieldOperand(edx, AllocationSite::kTransitionInfoOffset)); diff --git a/deps/v8/src/ia32/code-stubs-ia32.h b/deps/v8/src/ia32/code-stubs-ia32.h index 5c8eca37b5..006651c9c8 100644 --- a/deps/v8/src/ia32/code-stubs-ia32.h +++ b/deps/v8/src/ia32/code-stubs-ia32.h @@ -217,30 +217,6 @@ class StringCompareStub: public PlatformCodeStub { }; -class NumberToStringStub: public PlatformCodeStub { - public: - NumberToStringStub() { } - - // Generate code to do a lookup in the number string cache. If the number in - // the register object is found in the cache the generated code falls through - // with the result in the result register. The object and the result register - // can be the same. If the number is not found in the cache the code jumps to - // the label not_found with only the content of register object unchanged. 
- static void GenerateLookupNumberStringCache(MacroAssembler* masm, - Register object, - Register result, - Register scratch1, - Register scratch2, - Label* not_found); - - private: - Major MajorKey() { return NumberToString; } - int MinorKey() { return 0; } - - void Generate(MacroAssembler* masm); -}; - - class NameDictionaryLookupStub: public PlatformCodeStub { public: enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP }; @@ -468,7 +444,7 @@ class RecordWriteStub: public PlatformCodeStub { // Save all XMM registers except XMM0. for (int i = XMMRegister::kNumRegisters - 1; i > 0; i--) { XMMRegister reg = XMMRegister::from_code(i); - masm->movdbl(Operand(esp, (i - 1) * kDoubleSize), reg); + masm->movsd(Operand(esp, (i - 1) * kDoubleSize), reg); } } } @@ -480,7 +456,7 @@ class RecordWriteStub: public PlatformCodeStub { // Restore all XMM registers except XMM0. for (int i = XMMRegister::kNumRegisters - 1; i > 0; i--) { XMMRegister reg = XMMRegister::from_code(i); - masm->movdbl(reg, Operand(esp, (i - 1) * kDoubleSize)); + masm->movsd(reg, Operand(esp, (i - 1) * kDoubleSize)); } masm->add(esp, Immediate(kDoubleSize * (XMMRegister::kNumRegisters - 1))); diff --git a/deps/v8/src/ia32/codegen-ia32.cc b/deps/v8/src/ia32/codegen-ia32.cc index 84a4d238bd..d09a85f8b1 100644 --- a/deps/v8/src/ia32/codegen-ia32.cc +++ b/deps/v8/src/ia32/codegen-ia32.cc @@ -117,7 +117,7 @@ UnaryMathFunction CreateExpFunction() { CpuFeatureScope use_sse2(&masm, SSE2); XMMRegister input = xmm1; XMMRegister result = xmm2; - __ movdbl(input, Operand(esp, 1 * kPointerSize)); + __ movsd(input, Operand(esp, 1 * kPointerSize)); __ push(eax); __ push(ebx); @@ -125,7 +125,7 @@ UnaryMathFunction CreateExpFunction() { __ pop(ebx); __ pop(eax); - __ movdbl(Operand(esp, 1 * kPointerSize), result); + __ movsd(Operand(esp, 1 * kPointerSize), result); __ fld_d(Operand(esp, 1 * kPointerSize)); __ Ret(); } @@ -155,9 +155,9 @@ UnaryMathFunction CreateSqrtFunction() { // Move double input into registers. { CpuFeatureScope use_sse2(&masm, SSE2); - __ movdbl(xmm0, Operand(esp, 1 * kPointerSize)); + __ movsd(xmm0, Operand(esp, 1 * kPointerSize)); __ sqrtsd(xmm0, xmm0); - __ movdbl(Operand(esp, 1 * kPointerSize), xmm0); + __ movsd(Operand(esp, 1 * kPointerSize), xmm0); // Load result into floating point register as return value. __ fld_d(Operand(esp, 1 * kPointerSize)); __ Ret(); @@ -462,10 +462,10 @@ OS::MemMoveFunction CreateMemMoveFunction() { Label medium_handlers, f9_16, f17_32, f33_48, f49_63; __ bind(&f9_16); - __ movdbl(xmm0, Operand(src, 0)); - __ movdbl(xmm1, Operand(src, count, times_1, -8)); - __ movdbl(Operand(dst, 0), xmm0); - __ movdbl(Operand(dst, count, times_1, -8), xmm1); + __ movsd(xmm0, Operand(src, 0)); + __ movsd(xmm1, Operand(src, count, times_1, -8)); + __ movsd(Operand(dst, 0), xmm0); + __ movsd(Operand(dst, count, times_1, -8), xmm1); MemMoveEmitPopAndReturn(&masm); __ bind(&f17_32); @@ -666,8 +666,7 @@ void ElementsTransitionGenerator::GenerateMapChangeElementsTransition( // ----------------------------------- if (mode == TRACK_ALLOCATION_SITE) { ASSERT(allocation_memento_found != NULL); - __ TestJSArrayForAllocationMemento(edx, edi); - __ j(equal, allocation_memento_found); + __ JumpIfJSArrayHasAllocationMemento(edx, edi, allocation_memento_found); } // Set transitioned map. 
@@ -694,8 +693,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble( Label loop, entry, convert_hole, gc_required, only_change_map; if (mode == TRACK_ALLOCATION_SITE) { - __ TestJSArrayForAllocationMemento(edx, edi); - __ j(equal, fail); + __ JumpIfJSArrayHasAllocationMemento(edx, edi, fail); } // Check for empty arrays, which only require a map transition and no changes @@ -743,7 +741,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble( XMMRegister the_hole_nan = xmm1; if (CpuFeatures::IsSupported(SSE2)) { CpuFeatureScope use_sse2(masm, SSE2); - __ movdbl(the_hole_nan, + __ movsd(the_hole_nan, Operand::StaticVariable(canonical_the_hole_nan_reference)); } __ jmp(&entry); @@ -768,8 +766,8 @@ void ElementsTransitionGenerator::GenerateSmiToDouble( __ SmiUntag(ebx); if (CpuFeatures::IsSupported(SSE2)) { CpuFeatureScope fscope(masm, SSE2); - __ cvtsi2sd(xmm0, ebx); - __ movdbl(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize), + __ Cvtsi2sd(xmm0, ebx); + __ movsd(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize), xmm0); } else { __ push(ebx); @@ -789,7 +787,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble( if (CpuFeatures::IsSupported(SSE2)) { CpuFeatureScope use_sse2(masm, SSE2); - __ movdbl(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize), + __ movsd(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize), the_hole_nan); } else { __ fld_d(Operand::StaticVariable(canonical_the_hole_nan_reference)); @@ -833,8 +831,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject( Label loop, entry, convert_hole, gc_required, only_change_map, success; if (mode == TRACK_ALLOCATION_SITE) { - __ TestJSArrayForAllocationMemento(edx, edi); - __ j(equal, fail); + __ JumpIfJSArrayHasAllocationMemento(edx, edi, fail); } // Check for empty arrays, which only require a map transition and no changes @@ -899,9 +896,9 @@ void ElementsTransitionGenerator::GenerateDoubleToObject( // edx: new heap number if (CpuFeatures::IsSupported(SSE2)) { CpuFeatureScope fscope(masm, SSE2); - __ movdbl(xmm0, + __ movsd(xmm0, FieldOperand(edi, ebx, times_4, FixedDoubleArray::kHeaderSize)); - __ movdbl(FieldOperand(edx, HeapNumber::kValueOffset), xmm0); + __ movsd(FieldOperand(edx, HeapNumber::kValueOffset), xmm0); } else { __ mov(esi, FieldOperand(edi, ebx, times_4, FixedDoubleArray::kHeaderSize)); __ mov(FieldOperand(edx, HeapNumber::kValueOffset), esi); @@ -1081,20 +1078,20 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm, Label done; - __ movdbl(double_scratch, ExpConstant(0)); + __ movsd(double_scratch, ExpConstant(0)); __ xorpd(result, result); __ ucomisd(double_scratch, input); __ j(above_equal, &done); __ ucomisd(input, ExpConstant(1)); - __ movdbl(result, ExpConstant(2)); + __ movsd(result, ExpConstant(2)); __ j(above_equal, &done); - __ movdbl(double_scratch, ExpConstant(3)); - __ movdbl(result, ExpConstant(4)); + __ movsd(double_scratch, ExpConstant(3)); + __ movsd(result, ExpConstant(4)); __ mulsd(double_scratch, input); __ addsd(double_scratch, result); __ movd(temp2, double_scratch); __ subsd(double_scratch, result); - __ movdbl(result, ExpConstant(6)); + __ movsd(result, ExpConstant(6)); __ mulsd(double_scratch, ExpConstant(5)); __ subsd(double_scratch, input); __ subsd(result, double_scratch); @@ -1111,7 +1108,7 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm, __ shl(temp1, 20); __ movd(input, temp1); __ pshufd(input, input, static_cast<uint8_t>(0xe1)); // Order: 11 10 00 01 - __ movdbl(double_scratch, Operand::StaticArray( + 
__ movsd(double_scratch, Operand::StaticArray( temp2, times_8, ExternalReference::math_exp_log_table())); __ por(input, double_scratch); __ mulsd(result, input); @@ -1120,7 +1117,6 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm, #undef __ -static const int kNoCodeAgeSequenceLength = 5; static byte* GetNoCodeAgeSequence(uint32_t* length) { static bool initialized = false; @@ -1153,7 +1149,7 @@ bool Code::IsYoungSequence(byte* sequence) { void Code::GetCodeAgeAndParity(byte* sequence, Age* age, MarkingParity* parity) { if (IsYoungSequence(sequence)) { - *age = kNoAge; + *age = kNoAgeCodeAge; *parity = NO_MARKING_PARITY; } else { sequence++; // Skip the kCallOpcode byte @@ -1165,16 +1161,17 @@ void Code::GetCodeAgeAndParity(byte* sequence, Age* age, } -void Code::PatchPlatformCodeAge(byte* sequence, +void Code::PatchPlatformCodeAge(Isolate* isolate, + byte* sequence, Code::Age age, MarkingParity parity) { uint32_t young_length; byte* young_sequence = GetNoCodeAgeSequence(&young_length); - if (age == kNoAge) { + if (age == kNoAgeCodeAge) { CopyBytes(sequence, young_sequence, young_length); CPU::FlushICache(sequence, young_length); } else { - Code* stub = GetCodeAgeStub(age, parity); + Code* stub = GetCodeAgeStub(isolate, age, parity); CodePatcher patcher(sequence, young_length); patcher.masm()->call(stub->instruction_start(), RelocInfo::NONE32); } diff --git a/deps/v8/src/ia32/deoptimizer-ia32.cc b/deps/v8/src/ia32/deoptimizer-ia32.cc index 13a70afe52..e339b3ad11 100644 --- a/deps/v8/src/ia32/deoptimizer-ia32.cc +++ b/deps/v8/src/ia32/deoptimizer-ia32.cc @@ -177,87 +177,6 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) { } -static const byte kJnsInstruction = 0x79; -static const byte kJnsOffset = 0x11; -static const byte kCallInstruction = 0xe8; -static const byte kNopByteOne = 0x66; -static const byte kNopByteTwo = 0x90; - -// The back edge bookkeeping code matches the pattern: -// -// sub <profiling_counter>, <delta> -// jns ok -// call <interrupt stub> -// ok: -// -// The patched back edge looks like this: -// -// sub <profiling_counter>, <delta> ;; Not changed -// nop -// nop -// call <on-stack replacment> -// ok: - -void Deoptimizer::PatchInterruptCodeAt(Code* unoptimized_code, - Address pc_after, - Code* replacement_code) { - // Turn the jump into nops. - Address call_target_address = pc_after - kIntSize; - *(call_target_address - 3) = kNopByteOne; - *(call_target_address - 2) = kNopByteTwo; - // Replace the call address. - Assembler::set_target_address_at(call_target_address, - replacement_code->entry()); - - unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch( - unoptimized_code, call_target_address, replacement_code); -} - - -void Deoptimizer::RevertInterruptCodeAt(Code* unoptimized_code, - Address pc_after, - Code* interrupt_code) { - // Restore the original jump. - Address call_target_address = pc_after - kIntSize; - *(call_target_address - 3) = kJnsInstruction; - *(call_target_address - 2) = kJnsOffset; - // Restore the original call address. 
- Assembler::set_target_address_at(call_target_address, - interrupt_code->entry()); - - interrupt_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch( - unoptimized_code, call_target_address, interrupt_code); -} - - -#ifdef DEBUG -Deoptimizer::InterruptPatchState Deoptimizer::GetInterruptPatchState( - Isolate* isolate, - Code* unoptimized_code, - Address pc_after) { - Address call_target_address = pc_after - kIntSize; - ASSERT_EQ(kCallInstruction, *(call_target_address - 1)); - if (*(call_target_address - 3) == kNopByteOne) { - ASSERT_EQ(kNopByteTwo, *(call_target_address - 2)); - Code* osr_builtin = - isolate->builtins()->builtin(Builtins::kOnStackReplacement); - ASSERT_EQ(osr_builtin->entry(), - Assembler::target_address_at(call_target_address)); - return PATCHED_FOR_OSR; - } else { - // Get the interrupt stub code object to match against from cache. - Code* interrupt_builtin = - isolate->builtins()->builtin(Builtins::kInterruptCheck); - ASSERT_EQ(interrupt_builtin->entry(), - Assembler::target_address_at(call_target_address)); - ASSERT_EQ(kJnsInstruction, *(call_target_address - 3)); - ASSERT_EQ(kJnsOffset, *(call_target_address - 2)); - return NOT_PATCHED; - } -} -#endif // DEBUG - - void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) { // Set the register values. The values are not important as there are no // callee saved registers in JavaScript frames, so all registers are @@ -283,16 +202,14 @@ void Deoptimizer::SetPlatformCompiledStubRegisters( FrameDescription* output_frame, CodeStubInterfaceDescriptor* descriptor) { intptr_t handler = reinterpret_cast<intptr_t>(descriptor->deoptimization_handler_); - int params = descriptor->register_param_count_; - if (descriptor->stack_parameter_count_ != NULL) { - params++; - } + int params = descriptor->environment_length(); output_frame->SetRegister(eax.code(), params); output_frame->SetRegister(ebx.code(), handler); } void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) { + if (!CpuFeatures::IsSupported(SSE2)) return; for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) { double double_value = input_->GetDoubleRegister(i); output_frame->SetDoubleRegister(i, double_value); @@ -330,7 +247,7 @@ void Deoptimizer::EntryGenerator::Generate() { for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) { XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i); int offset = i * kDoubleSize; - __ movdbl(Operand(esp, offset), xmm_reg); + __ movsd(Operand(esp, offset), xmm_reg); } } @@ -382,8 +299,8 @@ void Deoptimizer::EntryGenerator::Generate() { for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) { int dst_offset = i * kDoubleSize + double_regs_offset; int src_offset = i * kDoubleSize; - __ movdbl(xmm0, Operand(esp, src_offset)); - __ movdbl(Operand(ebx, dst_offset), xmm0); + __ movsd(xmm0, Operand(esp, src_offset)); + __ movsd(Operand(ebx, dst_offset), xmm0); } } @@ -468,7 +385,7 @@ void Deoptimizer::EntryGenerator::Generate() { for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) { XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i); int src_offset = i * kDoubleSize + double_regs_offset; - __ movdbl(xmm_reg, Operand(ebx, src_offset)); + __ movsd(xmm_reg, Operand(ebx, src_offset)); } } diff --git a/deps/v8/src/ia32/disasm-ia32.cc b/deps/v8/src/ia32/disasm-ia32.cc index 01fa999645..13cf6bc49a 100644 --- a/deps/v8/src/ia32/disasm-ia32.cc +++ b/deps/v8/src/ia32/disasm-ia32.cc @@ -942,13 +942,13 @@ int 
DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer, case SHORT_IMMEDIATE_INSTR: { byte* addr = reinterpret_cast<byte*>(*reinterpret_cast<int32_t*>(data+1)); - AppendToBuffer("%s eax, %s", idesc.mnem, NameOfAddress(addr)); + AppendToBuffer("%s eax,%s", idesc.mnem, NameOfAddress(addr)); data += 5; break; } case BYTE_IMMEDIATE_INSTR: { - AppendToBuffer("%s al, 0x%x", idesc.mnem, data[1]); + AppendToBuffer("%s al,0x%x", idesc.mnem, data[1]); data += 2; break; } @@ -1042,6 +1042,14 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer, NameOfXMMRegister(regop), NameOfXMMRegister(rm)); data++; + } else if (f0byte == 0x54) { + data += 2; + int mod, regop, rm; + get_modrm(*data, &mod, &regop, &rm); + AppendToBuffer("andps %s,%s", + NameOfXMMRegister(regop), + NameOfXMMRegister(rm)); + data++; } else if (f0byte == 0x57) { data += 2; int mod, regop, rm; @@ -1239,8 +1247,8 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer, get_modrm(*data, &mod, &regop, &rm); int8_t imm8 = static_cast<int8_t>(data[1]); AppendToBuffer("extractps %s,%s,%d", - NameOfCPURegister(regop), - NameOfXMMRegister(rm), + NameOfCPURegister(rm), + NameOfXMMRegister(regop), static_cast<int>(imm8)); data += 2; } else if (*data == 0x22) { diff --git a/deps/v8/src/ia32/full-codegen-ia32.cc b/deps/v8/src/ia32/full-codegen-ia32.cc index 6d39cc1e6e..704fb4e7d2 100644 --- a/deps/v8/src/ia32/full-codegen-ia32.cc +++ b/deps/v8/src/ia32/full-codegen-ia32.cc @@ -158,10 +158,7 @@ void FullCodeGenerator::Generate() { FrameScope frame_scope(masm_, StackFrame::MANUAL); info->set_prologue_offset(masm_->pc_offset()); - __ push(ebp); // Caller's frame pointer. - __ mov(ebp, esp); - __ push(esi); // Callee's context. - __ push(edi); // Callee's JS Function.
+ __ Prologue(BUILD_FUNCTION_FRAME); info->AddNoFrameRange(0, masm_->pc_offset()); { Comment cmnt(masm_, "[ Allocate locals"); @@ -1586,21 +1583,15 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) { : ObjectLiteral::kNoFlags; int properties_count = constant_properties->length() / 2; if ((FLAG_track_double_fields && expr->may_store_doubles()) || - expr->depth() > 1) { - __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset)); - __ push(FieldOperand(edi, JSFunction::kLiteralsOffset)); - __ push(Immediate(Smi::FromInt(expr->literal_index()))); - __ push(Immediate(constant_properties)); - __ push(Immediate(Smi::FromInt(flags))); - __ CallRuntime(Runtime::kCreateObjectLiteral, 4); - } else if (Serializer::enabled() || flags != ObjectLiteral::kFastElements || + expr->depth() > 1 || Serializer::enabled() || + flags != ObjectLiteral::kFastElements || properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) { __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset)); __ push(FieldOperand(edi, JSFunction::kLiteralsOffset)); __ push(Immediate(Smi::FromInt(expr->literal_index()))); __ push(Immediate(constant_properties)); __ push(Immediate(Smi::FromInt(flags))); - __ CallRuntime(Runtime::kCreateObjectLiteralShallow, 4); + __ CallRuntime(Runtime::kCreateObjectLiteral, 4); } else { __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset)); __ mov(eax, FieldOperand(edi, JSFunction::kLiteralsOffset)); @@ -3316,7 +3307,7 @@ void FullCodeGenerator::EmitRandomHeapNumber(CallRuntime* expr) { __ cvtss2sd(xmm1, xmm1); __ xorps(xmm0, xmm1); __ subsd(xmm0, xmm1); - __ movdbl(FieldOperand(edi, HeapNumber::kValueOffset), xmm0); + __ movsd(FieldOperand(edi, HeapNumber::kValueOffset), xmm0); } else { // 0x4130000000000000 is 1.0 x 2^20 as a double. __ mov(FieldOperand(edi, HeapNumber::kExponentOffset), @@ -3555,8 +3546,8 @@ void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); ASSERT_EQ(args->length(), 1); - // Load the argument on the stack and call the stub. - VisitForStackValue(args->at(0)); + // Load the argument into eax and call the stub. 
+ VisitForAccumulatorValue(args->at(0)); NumberToStringStub stub; __ CallStub(&stub); @@ -4897,6 +4888,79 @@ FullCodeGenerator::NestedStatement* FullCodeGenerator::TryFinally::Exit( #undef __ + +static const byte kJnsInstruction = 0x79; +static const byte kJnsOffset = 0x11; +static const byte kCallInstruction = 0xe8; +static const byte kNopByteOne = 0x66; +static const byte kNopByteTwo = 0x90; + + +void BackEdgeTable::PatchAt(Code* unoptimized_code, + Address pc, + BackEdgeState target_state, + Code* replacement_code) { + Address call_target_address = pc - kIntSize; + Address jns_instr_address = call_target_address - 3; + Address jns_offset_address = call_target_address - 2; + + switch (target_state) { + case INTERRUPT: + // sub <profiling_counter>, <delta> ;; Not changed + // jns ok + // call <interrupt stub> + // ok: + *jns_instr_address = kJnsInstruction; + *jns_offset_address = kJnsOffset; + break; + case ON_STACK_REPLACEMENT: + case OSR_AFTER_STACK_CHECK: + // sub <profiling_counter>, <delta> ;; Not changed + // nop + // nop + // call <on-stack replacement> + // ok: + *jns_instr_address = kNopByteOne; + *jns_offset_address = kNopByteTwo; + break; + } + + Assembler::set_target_address_at(call_target_address, + replacement_code->entry()); + unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch( + unoptimized_code, call_target_address, replacement_code); +} + + +BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState( + Isolate* isolate, + Code* unoptimized_code, + Address pc) { + Address call_target_address = pc - kIntSize; + Address jns_instr_address = call_target_address - 3; + ASSERT_EQ(kCallInstruction, *(call_target_address - 1)); + + if (*jns_instr_address == kJnsInstruction) { + ASSERT_EQ(kJnsOffset, *(call_target_address - 2)); + ASSERT_EQ(isolate->builtins()->InterruptCheck()->entry(), + Assembler::target_address_at(call_target_address)); + return INTERRUPT; + } + + ASSERT_EQ(kNopByteOne, *jns_instr_address); + ASSERT_EQ(kNopByteTwo, *(call_target_address - 2)); + + if (Assembler::target_address_at(call_target_address) == + isolate->builtins()->OnStackReplacement()->entry()) { + return ON_STACK_REPLACEMENT; + } + + ASSERT_EQ(isolate->builtins()->OsrAfterStackCheck()->entry(), + Assembler::target_address_at(call_target_address)); + return OSR_AFTER_STACK_CHECK; +} + + } } // namespace v8::internal #endif // V8_TARGET_ARCH_IA32 diff --git a/deps/v8/src/ia32/ic-ia32.cc b/deps/v8/src/ia32/ic-ia32.cc index 327ac57623..f8e4ea53d0 100644 --- a/deps/v8/src/ia32/ic-ia32.cc +++ b/deps/v8/src/ia32/ic-ia32.cc @@ -1304,7 +1304,7 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm) { // Probe the stub cache.
Code::Flags flags = Code::ComputeFlags( - Code::STUB, MONOMORPHIC, Code::kNoExtraICState, + Code::HANDLER, MONOMORPHIC, Code::kNoExtraICState, Code::NORMAL, Code::LOAD_IC); masm->isolate()->stub_cache()->GenerateProbe( masm, flags, edx, ecx, ebx, eax); @@ -1423,7 +1423,7 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm, // ----------------------------------- Code::Flags flags = Code::ComputeFlags( - Code::STUB, MONOMORPHIC, strict_mode, + Code::HANDLER, MONOMORPHIC, strict_mode, Code::NORMAL, Code::STORE_IC); masm->isolate()->stub_cache()->GenerateProbe( masm, flags, edx, ecx, ebx, no_reg); diff --git a/deps/v8/src/ia32/lithium-codegen-ia32.cc b/deps/v8/src/ia32/lithium-codegen-ia32.cc index d50b780d71..46c87e1d62 100644 --- a/deps/v8/src/ia32/lithium-codegen-ia32.cc +++ b/deps/v8/src/ia32/lithium-codegen-ia32.cc @@ -120,24 +120,6 @@ void LCodeGen::Abort(BailoutReason reason) { } -void LCodeGen::Comment(const char* format, ...) { - if (!FLAG_code_comments) return; - char buffer[4 * KB]; - StringBuilder builder(buffer, ARRAY_SIZE(buffer)); - va_list arguments; - va_start(arguments, format); - builder.AddFormattedList(format, arguments); - va_end(arguments); - - // Copy the string before recording it in the assembler to avoid - // issues when the stack allocated buffer goes out of scope. - size_t length = builder.position(); - Vector<char> copy = Vector<char>::New(length + 1); - OS::MemCopy(copy.start(), builder.Finalize(), copy.length()); - masm()->RecordComment(copy.start()); -} - - #ifdef _MSC_VER void LCodeGen::MakeSureStackPagesMapped(int offset) { const int kPageSize = 4 * KB; @@ -206,15 +188,8 @@ bool LCodeGen::GeneratePrologue() { if (NeedsEagerFrame()) { ASSERT(!frame_is_built_); frame_is_built_ = true; - __ push(ebp); // Caller's frame pointer. - __ mov(ebp, esp); + __ Prologue(info()->IsStub() ? BUILD_STUB_FRAME : BUILD_FUNCTION_FRAME); info()->AddNoFrameRange(0, masm_->pc_offset()); - __ push(esi); // Callee's context. - if (info()->IsStub()) { - __ push(Immediate(Smi::FromInt(StackFrame::STUB))); - } else { - __ push(edi); // Callee's JS function. - } } if (info()->IsOptimizing() && @@ -275,7 +250,7 @@ bool LCodeGen::GeneratePrologue() { BitVector* doubles = chunk()->allocated_double_registers(); BitVector::Iterator save_iterator(doubles); while (!save_iterator.Done()) { - __ movdbl(MemOperand(esp, count * kDoubleSize), + __ movsd(MemOperand(esp, count * kDoubleSize), XMMRegister::FromAllocationIndex(save_iterator.Current())); save_iterator.Advance(); count++; @@ -340,12 +315,41 @@ void LCodeGen::GenerateOsrPrologue() { osr_pc_offset_ = masm()->pc_offset(); + // Move state of dynamic frame alignment into edx. + __ mov(edx, Immediate(kNoAlignmentPadding)); + + if (support_aligned_spilled_doubles_ && dynamic_frame_alignment_) { + Label do_not_pad, align_loop; + // Align ebp + 4 to a multiple of 2 * kPointerSize. + __ test(ebp, Immediate(kPointerSize)); + __ j(zero, &do_not_pad, Label::kNear); + __ push(Immediate(0)); + __ mov(ebx, esp); + __ mov(edx, Immediate(kAlignmentPaddingPushed)); + + // Move all parts of the frame over one word. The frame consists of: + // unoptimized frame slots, alignment state, context, frame pointer, return + // address, receiver, and the arguments. 
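// For reference, the align_loop emitted below is a one-word slide of the live
// frame toward lower addresses, making room for the alignment padding word.
// A minimal standalone C++ analogue, assuming a flat array of frame words
// (hypothetical helper, not taken from the patch):

#include <cstddef>
#include <cstdint>

void SlideFrameWords(intptr_t* base, size_t count, intptr_t zap_value) {
  for (size_t i = 0; i < count; ++i) {
    base[i] = base[i + 1];  // copy the word one slot above over the current one
  }
  base[count] = zap_value;  // zap the vacated slot, as kAlignmentZapValue does
}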
+ __ mov(ecx, Immediate(scope()->num_parameters() + + 5 + graph()->osr()->UnoptimizedFrameSlots())); + + __ bind(&align_loop); + __ mov(eax, Operand(ebx, 1 * kPointerSize)); + __ mov(Operand(ebx, 0), eax); + __ add(Operand(ebx), Immediate(kPointerSize)); + __ dec(ecx); + __ j(not_zero, &align_loop, Label::kNear); + __ mov(Operand(ebx, 0), Immediate(kAlignmentZapValue)); + __ sub(Operand(ebp), Immediate(kPointerSize)); + __ bind(&do_not_pad); + } + // Save the first local, which is overwritten by the alignment state. Operand alignment_loc = MemOperand(ebp, -3 * kPointerSize); __ push(alignment_loc); - // Set the dynamic frame alignment state to "not aligned". - __ mov(alignment_loc, Immediate(kNoAlignmentPadding)); + // Set the dynamic frame alignment state. + __ mov(alignment_loc, edx); // Adjust the frame size, subsuming the unoptimized frame into the // optimized frame. @@ -355,44 +359,27 @@ void LCodeGen::GenerateOsrPrologue() { } -bool LCodeGen::GenerateBody() { - ASSERT(is_generating()); - bool emit_instructions = true; - for (current_instruction_ = 0; - !is_aborted() && current_instruction_ < instructions_->length(); - current_instruction_++) { - LInstruction* instr = instructions_->at(current_instruction_); - - // Don't emit code for basic blocks with a replacement. - if (instr->IsLabel()) { - emit_instructions = !LLabel::cast(instr)->HasReplacement(); - } - if (!emit_instructions) continue; - - if (FLAG_code_comments && instr->HasInterestingComment(this)) { - Comment(";;; <@%d,#%d> %s", - current_instruction_, - instr->hydrogen_value()->id(), - instr->Mnemonic()); - } - - if (!CpuFeatures::IsSupported(SSE2)) FlushX87StackIfNecessary(instr); - - RecordAndUpdatePosition(instr->position()); +void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) { + if (!CpuFeatures::IsSupported(SSE2)) FlushX87StackIfNecessary(instr); +} - instr->CompileToNative(this); - if (!CpuFeatures::IsSupported(SSE2)) { - if (instr->IsGoto()) { - x87_stack_.LeavingBlock(current_block_, LGoto::cast(instr)); - } else if (FLAG_debug_code && FLAG_enable_slow_asserts && - !instr->IsGap() && !instr->IsReturn()) { - __ VerifyX87StackDepth(x87_stack_.depth()); +void LCodeGen::GenerateBodyInstructionPost(LInstruction* instr) { + if (!CpuFeatures::IsSupported(SSE2)) { + if (instr->IsGoto()) { + x87_stack_.LeavingBlock(current_block_, LGoto::cast(instr)); + } else if (FLAG_debug_code && FLAG_enable_slow_asserts && + !instr->IsGap() && !instr->IsReturn()) { + if (instr->ClobbersDoubleRegisters()) { + if (instr->HasDoubleRegisterResult()) { + ASSERT_EQ(1, x87_stack_.depth()); + } else { + ASSERT_EQ(0, x87_stack_.depth()); + } } + __ VerifyX87StackDepth(x87_stack_.depth()); } } - EnsureSpaceForLazyDeopt(); - return !is_aborted(); } @@ -453,8 +440,9 @@ bool LCodeGen::GenerateDeferredCode() { X87Stack copy(code->x87_stack()); x87_stack_ = copy; - int pos = instructions_->at(code->instruction_index())->position(); - RecordAndUpdatePosition(pos); + HValue* value = + instructions_->at(code->instruction_index())->hydrogen_value(); + RecordAndWritePosition(value->position()); Comment(";;; <@%d,#%d> " "-------------------- Deferred %s --------------------", @@ -532,6 +520,16 @@ void LCodeGen::X87LoadForUsage(X87Register reg) { } +void LCodeGen::X87LoadForUsage(X87Register reg1, X87Register reg2) { + ASSERT(x87_stack_.Contains(reg1)); + ASSERT(x87_stack_.Contains(reg2)); + x87_stack_.Fxch(reg1, 1); + x87_stack_.Fxch(reg2); + x87_stack_.pop(); + x87_stack_.pop(); +} + + void LCodeGen::X87Stack::Fxch(X87Register reg, int 
other_slot) { ASSERT(is_mutable_); ASSERT(Contains(reg) && stack_depth_ > other_slot); @@ -931,8 +929,6 @@ void LCodeGen::CallCodeGeneric(Handle<Code> code, LInstruction* instr, SafepointMode safepoint_mode) { ASSERT(instr != NULL); - LPointerMap* pointers = instr->pointer_map(); - RecordPosition(pointers->position()); __ call(code, mode); RecordSafepointWithLazyDeopt(instr, safepoint_mode); @@ -954,13 +950,12 @@ void LCodeGen::CallCode(Handle<Code> code, void LCodeGen::CallRuntime(const Runtime::Function* fun, int argc, - LInstruction* instr) { + LInstruction* instr, + SaveFPRegsMode save_doubles) { ASSERT(instr != NULL); ASSERT(instr->HasPointerMap()); - LPointerMap* pointers = instr->pointer_map(); - RecordPosition(pointers->position()); - __ CallRuntime(fun, argc); + __ CallRuntime(fun, argc, save_doubles); RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT); @@ -1122,26 +1117,31 @@ void LCodeGen::DeoptimizeIf(Condition cc, void LCodeGen::RegisterDependentCodeForEmbeddedMaps(Handle<Code> code) { ZoneList<Handle<Map> > maps(1, zone()); + ZoneList<Handle<JSObject> > objects(1, zone()); int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT); for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) { - RelocInfo::Mode mode = it.rinfo()->rmode(); - if (mode == RelocInfo::EMBEDDED_OBJECT && - it.rinfo()->target_object()->IsMap()) { - Handle<Map> map(Map::cast(it.rinfo()->target_object())); - if (map->CanTransition()) { + if (Code::IsWeakEmbeddedObject(code->kind(), it.rinfo()->target_object())) { + if (it.rinfo()->target_object()->IsMap()) { + Handle<Map> map(Map::cast(it.rinfo()->target_object())); maps.Add(map, zone()); + } else if (it.rinfo()->target_object()->IsJSObject()) { + Handle<JSObject> object(JSObject::cast(it.rinfo()->target_object())); + objects.Add(object, zone()); } } } #ifdef VERIFY_HEAP - // This disables verification of weak embedded maps after full GC. + // This disables verification of weak embedded objects after full GC. // AddDependentCode can cause a GC, which would observe the state where // this code is not yet in the depended code lists of the embedded maps. 
- NoWeakEmbeddedMapsVerificationScope disable_verification_of_embedded_maps; + NoWeakObjectVerificationScope disable_verification_of_embedded_objects; #endif for (int i = 0; i < maps.length(); i++) { maps.at(i)->AddDependentCode(DependentCode::kWeaklyEmbeddedGroup, code); } + for (int i = 0; i < objects.length(); i++) { + AddWeakObjectToCodeDependency(isolate()->heap(), objects.at(i), code); + } } @@ -1246,7 +1246,7 @@ void LCodeGen::RecordSafepoint(LPointerMap* pointers, void LCodeGen::RecordSafepoint(Safepoint::DeoptMode mode) { - LPointerMap empty_pointers(RelocInfo::kNoPosition, zone()); + LPointerMap empty_pointers(zone()); RecordSafepoint(&empty_pointers, mode); } @@ -1258,17 +1258,10 @@ void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers, } -void LCodeGen::RecordPosition(int position) { +void LCodeGen::RecordAndWritePosition(int position) { if (position == RelocInfo::kNoPosition) return; masm()->positions_recorder()->RecordPosition(position); -} - - -void LCodeGen::RecordAndUpdatePosition(int position) { - if (position >= 0 && position != old_position_) { - masm()->positions_recorder()->RecordPosition(position); - old_position_ = position; - } + masm()->positions_recorder()->WriteRecordedPositions(); } @@ -1336,11 +1329,6 @@ void LCodeGen::DoCallStub(LCallStub* instr) { CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); break; } - case CodeStub::NumberToString: { - NumberToStringStub stub; - CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); - break; - } case CodeStub::StringCompare: { StringCompareStub stub; CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); @@ -1733,9 +1721,9 @@ void LCodeGen::DoMulI(LMulI* instr) { case 9: __ lea(left, Operand(left, left, times_8, 0)); break; - case 16: - __ shl(left, 4); - break; + case 16: + __ shl(left, 4); + break; default: __ imul(left, left, constant); break; @@ -1967,9 +1955,10 @@ void LCodeGen::DoConstantD(LConstantD* instr) { __ movd(res, Operand(temp)); __ psllq(res, 32); if (lower != 0) { + XMMRegister xmm_scratch = double_scratch0(); __ Set(temp, Immediate(lower)); - __ movd(xmm0, Operand(temp)); - __ por(res, xmm0); + __ movd(xmm_scratch, Operand(temp)); + __ por(res, xmm_scratch); } } } @@ -2178,7 +2167,7 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) { __ jmp(&return_right, Label::kNear); __ bind(&check_zero); - XMMRegister xmm_scratch = xmm0; + XMMRegister xmm_scratch = double_scratch0(); __ xorps(xmm_scratch, xmm_scratch); __ ucomisd(left_reg, xmm_scratch); __ j(not_equal, &return_left, Label::kNear); // left == right != 0. @@ -2208,8 +2197,6 @@ void LCodeGen::DoArithmeticD(LArithmeticD* instr) { XMMRegister left = ToDoubleRegister(instr->left()); XMMRegister right = ToDoubleRegister(instr->right()); XMMRegister result = ToDoubleRegister(instr->result()); - // Modulo uses a fixed result register. - ASSERT(instr->op() == Token::MOD || left.is(result)); switch (instr->op()) { case Token::ADD: __ addsd(left, right); @@ -2229,17 +2216,17 @@ void LCodeGen::DoArithmeticD(LArithmeticD* instr) { case Token::MOD: { // Pass two doubles as arguments on the stack. __ PrepareCallCFunction(4, eax); - __ movdbl(Operand(esp, 0 * kDoubleSize), left); - __ movdbl(Operand(esp, 1 * kDoubleSize), right); + __ movsd(Operand(esp, 0 * kDoubleSize), left); + __ movsd(Operand(esp, 1 * kDoubleSize), right); __ CallCFunction( ExternalReference::double_fp_operation(Token::MOD, isolate()), 4); // Return value is in st(0) on ia32. - // Store it into the (fixed) result register. 
+ // Store it into the result register. __ sub(Operand(esp), Immediate(kDoubleSize)); __ fstp_d(Operand(esp, 0)); - __ movdbl(result, Operand(esp, 0)); + __ movsd(result, Operand(esp, 0)); __ add(Operand(esp), Immediate(kDoubleSize)); break; } @@ -2272,6 +2259,8 @@ void LCodeGen::DoArithmeticD(LArithmeticD* instr) { __ PrepareCallCFunction(4, eax); X87Mov(Operand(esp, 1 * kDoubleSize), right); X87Mov(Operand(esp, 0), left); + X87Free(right); + ASSERT(left.is(result)); X87PrepareToWrite(result); __ CallCFunction( ExternalReference::double_fp_operation(Token::MOD, isolate()), @@ -2301,14 +2290,6 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) { } -int LCodeGen::GetNextEmittedBlock() const { - for (int i = current_block_ + 1; i < graph()->blocks()->length(); ++i) { - if (!chunk_->GetLabel(i)->HasReplacement()) return i; - } - return -1; -} - - template<class InstrType> void LCodeGen::EmitBranch(InstrType instr, Condition cc) { int left_block = instr->TrueDestination(chunk_); @@ -2340,25 +2321,6 @@ void LCodeGen::EmitFalseBranch(InstrType instr, Condition cc) { } -void LCodeGen::DoIsNumberAndBranch(LIsNumberAndBranch* instr) { - Representation r = instr->hydrogen()->value()->representation(); - if (r.IsSmiOrInteger32() || r.IsDouble()) { - EmitBranch(instr, no_condition); - } else { - ASSERT(r.IsTagged()); - Register reg = ToRegister(instr->value()); - HType type = instr->hydrogen()->value()->type(); - if (type.IsTaggedNumber()) { - EmitBranch(instr, no_condition); - } - __ JumpIfSmi(reg, instr->TrueLabel(chunk_)); - __ cmp(FieldOperand(reg, HeapObject::kMapOffset), - factory()->heap_number_map()); - EmitBranch(instr, equal); - } -} - - void LCodeGen::DoBranch(LBranch* instr) { Representation r = instr->hydrogen()->value()->representation(); if (r.IsSmiOrInteger32()) { @@ -2369,8 +2331,9 @@ void LCodeGen::DoBranch(LBranch* instr) { ASSERT(!info()->IsStub()); CpuFeatureScope scope(masm(), SSE2); XMMRegister reg = ToDoubleRegister(instr->value()); - __ xorps(xmm0, xmm0); - __ ucomisd(reg, xmm0); + XMMRegister xmm_scratch = double_scratch0(); + __ xorps(xmm_scratch, xmm_scratch); + __ ucomisd(reg, xmm_scratch); EmitBranch(instr, not_equal); } else { ASSERT(r.IsTagged()); @@ -2390,8 +2353,9 @@ void LCodeGen::DoBranch(LBranch* instr) { } else if (type.IsHeapNumber()) { ASSERT(!info()->IsStub()); CpuFeatureScope scope(masm(), SSE2); - __ xorps(xmm0, xmm0); - __ ucomisd(xmm0, FieldOperand(reg, HeapNumber::kValueOffset)); + XMMRegister xmm_scratch = double_scratch0(); + __ xorps(xmm_scratch, xmm_scratch); + __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset)); EmitBranch(instr, not_equal); } else if (type.IsString()) { ASSERT(!info()->IsStub()); @@ -2476,8 +2440,9 @@ void LCodeGen::DoBranch(LBranch* instr) { __ j(not_equal, ¬_heap_number, Label::kNear); if (CpuFeatures::IsSafeForSnapshot(SSE2)) { CpuFeatureScope scope(masm(), SSE2); - __ xorps(xmm0, xmm0); - __ ucomisd(xmm0, FieldOperand(reg, HeapNumber::kValueOffset)); + XMMRegister xmm_scratch = double_scratch0(); + __ xorps(xmm_scratch, xmm_scratch); + __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset)); } else { __ fldz(); __ fld_d(FieldOperand(reg, HeapNumber::kValueOffset)); @@ -2521,6 +2486,10 @@ Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) { case Token::EQ_STRICT: cond = equal; break; + case Token::NE: + case Token::NE_STRICT: + cond = not_equal; + break; case Token::LT: cond = is_unsigned ? 
below : less; break; @@ -2556,10 +2525,15 @@ void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) { EmitGoto(next_block); } else { if (instr->is_double()) { - CpuFeatureScope scope(masm(), SSE2); + if (CpuFeatures::IsSafeForSnapshot(SSE2)) { + CpuFeatureScope scope(masm(), SSE2); + __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right)); + } else { + X87LoadForUsage(ToX87Register(right), ToX87Register(left)); + __ FCmp(); + } // Don't base result on EFLAGS when a NaN is involved. Instead // jump to the false block. - __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right)); __ j(parity_even, instr->FalseLabel(chunk_)); } else { if (right->IsConstantOperand()) { @@ -2626,7 +2600,7 @@ void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) { if (use_sse2) { CpuFeatureScope scope(masm(), SSE2); XMMRegister input_reg = ToDoubleRegister(instr->object()); - __ movdbl(MemOperand(esp, 0), input_reg); + __ movsd(MemOperand(esp, 0), input_reg); } else { __ fstp_d(MemOperand(esp, 0)); } @@ -3016,14 +2990,6 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr, } -void LCodeGen::DoInstanceSize(LInstanceSize* instr) { - Register object = ToRegister(instr->object()); - Register result = ToRegister(instr->result()); - __ mov(result, FieldOperand(object, HeapObject::kMapOffset)); - __ movzx_b(result, FieldOperand(result, Map::kInstanceSizeOffset)); -} - - void LCodeGen::DoCmpT(LCmpT* instr) { Token::Value op = instr->op(); @@ -3096,7 +3062,7 @@ void LCodeGen::DoReturn(LReturn* instr) { BitVector::Iterator save_iterator(doubles); int count = 0; while (!save_iterator.Done()) { - __ movdbl(XMMRegister::FromAllocationIndex(save_iterator.Current()), + __ movsd(XMMRegister::FromAllocationIndex(save_iterator.Current()), MemOperand(esp, count * kDoubleSize)); save_iterator.Advance(); count++; @@ -3131,7 +3097,7 @@ void LCodeGen::DoReturn(LReturn* instr) { void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) { Register result = ToRegister(instr->result()); - __ mov(result, Operand::ForCell(instr->hydrogen()->cell())); + __ mov(result, Operand::ForCell(instr->hydrogen()->cell().handle())); if (instr->hydrogen()->RequiresHoleCheck()) { __ cmp(result, factory()->the_hole_value()); DeoptimizeIf(equal, instr->environment()); @@ -3154,7 +3120,7 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) { void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) { Register value = ToRegister(instr->value()); - Handle<PropertyCell> cell_handle = instr->hydrogen()->cell(); + Handle<PropertyCell> cell_handle = instr->hydrogen()->cell().handle(); // If the cell we are storing to contains the hole it could have // been deleted from the property dictionary. In that case, we need @@ -3245,12 +3211,15 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) { if (access.IsExternalMemory()) { Register result = ToRegister(instr->result()); - if (instr->object()->IsConstantOperand()) { - ExternalReference external_reference = ToExternalReference( - LConstantOperand::cast(instr->object())); - __ mov(result, MemOperand::StaticVariable(external_reference)); + MemOperand operand = instr->object()->IsConstantOperand() + ? 
MemOperand::StaticVariable(ToExternalReference( + LConstantOperand::cast(instr->object()))) + : MemOperand(ToRegister(instr->object()), offset); + if (access.representation().IsByte()) { + ASSERT(instr->hydrogen()->representation().IsInteger32()); + __ movzx_b(result, operand); } else { - __ mov(result, MemOperand(ToRegister(instr->object()), offset)); + __ mov(result, operand); } return; } @@ -3261,7 +3230,7 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) { if (CpuFeatures::IsSupported(SSE2)) { CpuFeatureScope scope(masm(), SSE2); XMMRegister result = ToDoubleRegister(instr->result()); - __ movdbl(result, FieldOperand(object, offset)); + __ movsd(result, FieldOperand(object, offset)); } else { X87Mov(ToX87Register(instr->result()), FieldOperand(object, offset)); } @@ -3269,11 +3238,15 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) { } Register result = ToRegister(instr->result()); - if (access.IsInobject()) { - __ mov(result, FieldOperand(object, offset)); - } else { + if (!access.IsInobject()) { __ mov(result, FieldOperand(object, JSObject::kPropertiesOffset)); - __ mov(result, FieldOperand(result, offset)); + object = result; + } + if (access.representation().IsByte()) { + ASSERT(instr->hydrogen()->representation().IsInteger32()); + __ movzx_b(result, FieldOperand(object, offset)); + } else { + __ mov(result, FieldOperand(object, offset)); } } @@ -3349,6 +3322,12 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) { } +void LCodeGen::DoLoadRoot(LLoadRoot* instr) { + Register result = ToRegister(instr->result()); + __ LoadRoot(result, instr->index()); +} + + void LCodeGen::DoLoadExternalArrayPointer( LLoadExternalArrayPointer* instr) { Register result = ToRegister(instr->result()); @@ -3405,7 +3384,7 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) { } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) { if (CpuFeatures::IsSupported(SSE2)) { CpuFeatureScope scope(masm(), SSE2); - __ movdbl(ToDoubleRegister(instr->result()), operand); + __ movsd(ToDoubleRegister(instr->result()), operand); } else { X87Mov(ToX87Register(instr->result()), operand); } @@ -3476,7 +3455,7 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) { if (CpuFeatures::IsSupported(SSE2)) { CpuFeatureScope scope(masm(), SSE2); XMMRegister result = ToDoubleRegister(instr->result()); - __ movdbl(result, double_load_operand); + __ movsd(result, double_load_operand); } else { X87Mov(ToX87Register(instr->result()), double_load_operand); } @@ -3693,7 +3672,6 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) { __ bind(&invoke); ASSERT(instr->HasPointerMap()); LPointerMap* pointers = instr->pointer_map(); - RecordPosition(pointers->position()); SafepointGenerator safepoint_generator( this, pointers, Safepoint::kLazyDeopt); ParameterCount actual(eax); @@ -3778,9 +3756,6 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function, bool can_invoke_directly = dont_adapt_arguments || formal_parameter_count == arity; - LPointerMap* pointers = instr->pointer_map(); - RecordPosition(pointers->position()); - if (can_invoke_directly) { if (edi_state == EDI_UNINITIALIZED) { __ LoadHeapObject(edi, function); @@ -3805,6 +3780,7 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function, RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT); } else { // We need to adapt arguments. 
+ LPointerMap* pointers = instr->pointer_map(); SafepointGenerator generator( this, pointers, Safepoint::kLazyDeopt); ParameterCount count(arity); @@ -3903,11 +3879,11 @@ void LCodeGen::DoMathAbs(LMathAbs* instr) { CpuFeatureScope scope(masm(), SSE2); if (r.IsDouble()) { - XMMRegister scratch = xmm0; + XMMRegister scratch = double_scratch0(); XMMRegister input_reg = ToDoubleRegister(instr->value()); __ xorps(scratch, scratch); __ subsd(scratch, input_reg); - __ pand(input_reg, scratch); + __ andps(input_reg, scratch); } else if (r.IsSmiOrInteger32()) { EmitIntegerMathAbs(instr); } else { // Tagged case. @@ -3924,7 +3900,7 @@ void LCodeGen::DoMathAbs(LMathAbs* instr) { void LCodeGen::DoMathFloor(LMathFloor* instr) { CpuFeatureScope scope(masm(), SSE2); - XMMRegister xmm_scratch = xmm0; + XMMRegister xmm_scratch = double_scratch0(); Register output_reg = ToRegister(instr->result()); XMMRegister input_reg = ToDoubleRegister(instr->value()); @@ -3977,7 +3953,7 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) { __ bind(&negative_sign); // Truncate, then compare and compensate. __ cvttsd2si(output_reg, Operand(input_reg)); - __ cvtsi2sd(xmm_scratch, output_reg); + __ Cvtsi2sd(xmm_scratch, output_reg); __ ucomisd(input_reg, xmm_scratch); __ j(equal, &done, Label::kNear); __ sub(output_reg, Immediate(1)); @@ -3992,14 +3968,14 @@ void LCodeGen::DoMathRound(LMathRound* instr) { CpuFeatureScope scope(masm(), SSE2); Register output_reg = ToRegister(instr->result()); XMMRegister input_reg = ToDoubleRegister(instr->value()); - XMMRegister xmm_scratch = xmm0; + XMMRegister xmm_scratch = double_scratch0(); XMMRegister input_temp = ToDoubleRegister(instr->temp()); ExternalReference one_half = ExternalReference::address_of_one_half(); ExternalReference minus_one_half = ExternalReference::address_of_minus_one_half(); Label done, round_to_zero, below_one_half, do_not_compensate; - __ movdbl(xmm_scratch, Operand::StaticVariable(one_half)); + __ movsd(xmm_scratch, Operand::StaticVariable(one_half)); __ ucomisd(xmm_scratch, input_reg); __ j(above, &below_one_half); @@ -4013,7 +3989,7 @@ void LCodeGen::DoMathRound(LMathRound* instr) { __ jmp(&done); __ bind(&below_one_half); - __ movdbl(xmm_scratch, Operand::StaticVariable(minus_one_half)); + __ movsd(xmm_scratch, Operand::StaticVariable(minus_one_half)); __ ucomisd(xmm_scratch, input_reg); __ j(below_equal, &round_to_zero); @@ -4027,7 +4003,7 @@ void LCodeGen::DoMathRound(LMathRound* instr) { __ RecordComment("D2I conversion overflow"); DeoptimizeIf(equal, instr->environment()); - __ cvtsi2sd(xmm_scratch, output_reg); + __ Cvtsi2sd(xmm_scratch, output_reg); __ ucomisd(xmm_scratch, input_temp); __ j(equal, &done); __ sub(output_reg, Immediate(1)); @@ -4059,7 +4035,7 @@ void LCodeGen::DoMathSqrt(LMathSqrt* instr) { void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) { CpuFeatureScope scope(masm(), SSE2); - XMMRegister xmm_scratch = xmm0; + XMMRegister xmm_scratch = double_scratch0(); XMMRegister input_reg = ToDoubleRegister(instr->value()); Register scratch = ToRegister(instr->temp()); ASSERT(ToDoubleRegister(instr->result()).is(input_reg)); @@ -4178,8 +4154,7 @@ void LCodeGen::DoRandom(LRandom* instr) { // by computing: // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)). XMMRegister result = ToDoubleRegister(instr->result()); - // We use xmm0 as fixed scratch register here. - XMMRegister scratch4 = xmm0; + XMMRegister scratch4 = double_scratch0(); __ mov(scratch3, Immediate(0x49800000)); // 1.0 x 2^20 as single. 
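// For reference, the ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20) trick
// described above, in portable C++. The assembler splices the disjoint bit
// ranges together in an XMM register; 0x4130000000000000 is 1.0 x 2^20 as a
// double, per the comment in EmitRandomHeapNumber. A minimal sketch, not part
// of the patch:

#include <cstdint>
#include <cstring>

double RandomBitsToDouble(uint32_t random_bits) {
  uint64_t bits = 0x4130000000000000ULL | random_bits;  // bits land in the low mantissa
  double d;
  std::memcpy(&d, &bits, sizeof d);  // reinterpret the bit pattern as a double
  return d - 1048576.0;  // subtract 1.0 x 2^20; the result is uniform in [0, 1)
}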
__ movd(scratch4, scratch3); __ movd(result, random); @@ -4193,29 +4168,29 @@ void LCodeGen::DoMathLog(LMathLog* instr) { CpuFeatureScope scope(masm(), SSE2); ASSERT(instr->value()->Equals(instr->result())); XMMRegister input_reg = ToDoubleRegister(instr->value()); + XMMRegister xmm_scratch = double_scratch0(); Label positive, done, zero; - __ xorps(xmm0, xmm0); - __ ucomisd(input_reg, xmm0); + __ xorps(xmm_scratch, xmm_scratch); + __ ucomisd(input_reg, xmm_scratch); __ j(above, &positive, Label::kNear); __ j(equal, &zero, Label::kNear); ExternalReference nan = ExternalReference::address_of_canonical_non_hole_nan(); - __ movdbl(input_reg, Operand::StaticVariable(nan)); + __ movsd(input_reg, Operand::StaticVariable(nan)); __ jmp(&done, Label::kNear); __ bind(&zero); - __ push(Immediate(0xFFF00000)); - __ push(Immediate(0)); - __ movdbl(input_reg, Operand(esp, 0)); - __ add(Operand(esp), Immediate(kDoubleSize)); + ExternalReference ninf = + ExternalReference::address_of_negative_infinity(); + __ movsd(input_reg, Operand::StaticVariable(ninf)); __ jmp(&done, Label::kNear); __ bind(&positive); __ fldln2(); __ sub(Operand(esp), Immediate(kDoubleSize)); - __ movdbl(Operand(esp, 0), input_reg); + __ movsd(Operand(esp, 0), input_reg); __ fld_d(Operand(esp, 0)); __ fyl2x(); __ fstp_d(Operand(esp, 0)); - __ movdbl(input_reg, Operand(esp, 0)); + __ movsd(input_reg, Operand(esp, 0)); __ add(Operand(esp), Immediate(kDoubleSize)); __ bind(&done); } @@ -4225,10 +4200,11 @@ void LCodeGen::DoMathExp(LMathExp* instr) { CpuFeatureScope scope(masm(), SSE2); XMMRegister input = ToDoubleRegister(instr->value()); XMMRegister result = ToDoubleRegister(instr->result()); + XMMRegister temp0 = double_scratch0(); Register temp1 = ToRegister(instr->temp1()); Register temp2 = ToRegister(instr->temp2()); - MathExpGenerator::EmitMathExp(masm(), input, result, xmm0, temp1, temp2); + MathExpGenerator::EmitMathExp(masm(), input, result, temp0, temp1, temp2); } @@ -4273,7 +4249,6 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) { Handle<JSFunction> known_function = instr->hydrogen()->known_function(); if (known_function.is_null()) { LPointerMap* pointers = instr->pointer_map(); - RecordPosition(pointers->position()); SafepointGenerator generator( this, pointers, Safepoint::kLazyDeopt); ParameterCount count(instr->arity()); @@ -4409,7 +4384,7 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) { void LCodeGen::DoCallRuntime(LCallRuntime* instr) { - CallRuntime(instr->function(), instr->arity(), instr); + CallRuntime(instr->function(), instr->arity(), instr, instr->save_doubles()); } @@ -4441,11 +4416,16 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) { ToExternalReference(LConstantOperand::cast(instr->object()))) : MemOperand(ToRegister(instr->object()), offset); if (instr->value()->IsConstantOperand()) { + ASSERT(!representation.IsByte()); LConstantOperand* operand_value = LConstantOperand::cast(instr->value()); __ mov(operand, Immediate(ToInteger32(operand_value))); } else { Register value = ToRegister(instr->value()); - __ mov(operand, value); + if (representation.IsByte()) { + __ mov_b(operand, value); + } else { + __ mov(operand, value); + } } return; } @@ -4480,7 +4460,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) { if (CpuFeatures::IsSupported(SSE2)) { CpuFeatureScope scope(masm(), SSE2); XMMRegister value = ToDoubleRegister(instr->value()); - __ movdbl(FieldOperand(object, offset), value); + __ movsd(FieldOperand(object, offset), value); } else { X87Register value = 
ToX87Register(instr->value()); X87Mov(FieldOperand(object, offset), value); @@ -4518,17 +4498,28 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) { __ mov(write_register, FieldOperand(object, JSObject::kPropertiesOffset)); } + MemOperand operand = FieldOperand(write_register, offset); if (instr->value()->IsConstantOperand()) { LConstantOperand* operand_value = LConstantOperand::cast(instr->value()); if (operand_value->IsRegister()) { - __ mov(FieldOperand(write_register, offset), ToRegister(operand_value)); + Register value = ToRegister(operand_value); + if (representation.IsByte()) { + __ mov_b(operand, value); + } else { + __ mov(operand, value); + } } else { Handle<Object> handle_value = ToHandle(operand_value); ASSERT(!instr->hydrogen()->NeedsWriteBarrier()); - __ mov(FieldOperand(write_register, offset), handle_value); + __ mov(operand, handle_value); } } else { - __ mov(FieldOperand(write_register, offset), ToRegister(instr->value())); + Register value = ToRegister(instr->value()); + if (representation.IsByte()) { + __ mov_b(operand, value); + } else { + __ mov(operand, value); + } } if (instr->hydrogen()->NeedsWriteBarrier()) { @@ -4609,8 +4600,9 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) { if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { if (CpuFeatures::IsSafeForSnapshot(SSE2)) { CpuFeatureScope scope(masm(), SSE2); - __ cvtsd2ss(xmm0, ToDoubleRegister(instr->value())); - __ movss(operand, xmm0); + XMMRegister xmm_scratch = double_scratch0(); + __ cvtsd2ss(xmm_scratch, ToDoubleRegister(instr->value())); + __ movss(operand, xmm_scratch); } else { __ fld(0); __ fstp_s(operand); @@ -4618,7 +4610,7 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) { } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) { if (CpuFeatures::IsSafeForSnapshot(SSE2)) { CpuFeatureScope scope(masm(), SSE2); - __ movdbl(operand, ToDoubleRegister(instr->value())); + __ movsd(operand, ToDoubleRegister(instr->value())); } else { X87Mov(operand, ToX87Register(instr->value())); } @@ -4676,11 +4668,11 @@ void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) { __ ucomisd(value, value); __ j(parity_odd, &have_value); // NaN. - __ movdbl(value, Operand::StaticVariable(canonical_nan_reference)); + __ movsd(value, Operand::StaticVariable(canonical_nan_reference)); __ bind(&have_value); } - __ movdbl(double_store_operand, value); + __ movsd(double_store_operand, value); } else { // Can't use SSE2 in the serializer if (instr->hydrogen()->IsConstantHoleStore()) { @@ -4803,8 +4795,10 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) { void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) { Register object = ToRegister(instr->object()); Register temp = ToRegister(instr->temp()); - __ TestJSArrayForAllocationMemento(object, temp); + Label no_memento_found; + __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found); DeoptimizeIf(equal, instr->environment()); + __ bind(&no_memento_found); } @@ -4825,9 +4819,8 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) { __ j(not_equal, ¬_applicable, branch_distance); if (is_simple_map_transition) { Register new_map_reg = ToRegister(instr->new_map_temp()); - Handle<Map> map = instr->hydrogen()->transitioned_map(); __ mov(FieldOperand(object_reg, HeapObject::kMapOffset), - Immediate(map)); + Immediate(to_map)); // Write barrier. 
ASSERT_NE(instr->temp(), NULL); __ RecordWriteForMap(object_reg, to_map, new_map_reg, @@ -4978,7 +4971,7 @@ void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) { ASSERT(output->IsDoubleRegister()); if (CpuFeatures::IsSupported(SSE2)) { CpuFeatureScope scope(masm(), SSE2); - __ cvtsi2sd(ToDoubleRegister(output), ToOperand(input)); + __ Cvtsi2sd(ToDoubleRegister(output), ToOperand(input)); } else if (input->IsRegister()) { Register input_reg = ToRegister(input); __ push(input_reg); @@ -5001,14 +4994,32 @@ void LCodeGen::DoInteger32ToSmi(LInteger32ToSmi* instr) { void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) { - CpuFeatureScope scope(masm(), SSE2); LOperand* input = instr->value(); LOperand* output = instr->result(); - LOperand* temp = instr->temp(); + if (CpuFeatures::IsSupported(SSE2)) { + CpuFeatureScope scope(masm(), SSE2); + LOperand* temp = instr->temp(); - __ LoadUint32(ToDoubleRegister(output), - ToRegister(input), - ToDoubleRegister(temp)); + __ LoadUint32(ToDoubleRegister(output), + ToRegister(input), + ToDoubleRegister(temp)); + } else { + X87Register res = ToX87Register(output); + X87PrepareToWrite(res); + __ LoadUint32NoSSE2(ToRegister(input)); + X87CommitWrite(res); + } +} + + +void LCodeGen::DoUint32ToSmi(LUint32ToSmi* instr) { + Register input = ToRegister(instr->value()); + if (!instr->hydrogen()->value()->HasRange() || + !instr->hydrogen()->value()->range()->IsInSmiRange()) { + __ test(input, Immediate(0xc0000000)); + DeoptimizeIf(not_zero, instr->environment()); + } + __ SmiTag(input); } @@ -5073,6 +5084,7 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr, Label slow; Register reg = ToRegister(value); Register tmp = reg.is(eax) ? ecx : eax; + XMMRegister xmm_scratch = double_scratch0(); // Preserve the value of all registers. PushSafepointRegistersScope scope(this); @@ -5087,7 +5099,7 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr, __ xor_(reg, 0x80000000); if (CpuFeatures::IsSupported(SSE2)) { CpuFeatureScope feature_scope(masm(), SSE2); - __ cvtsi2sd(xmm0, Operand(reg)); + __ Cvtsi2sd(xmm_scratch, Operand(reg)); } else { __ push(reg); __ fild_s(Operand(esp, 0)); @@ -5096,7 +5108,7 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr, } else { if (CpuFeatures::IsSupported(SSE2)) { CpuFeatureScope feature_scope(masm(), SSE2); - __ LoadUint32(xmm0, reg, + __ LoadUint32(xmm_scratch, reg, ToDoubleRegister(LNumberTagU::cast(instr)->temp())); } else { // There's no fild variant for unsigned values, so zero-extend to a 64-bit @@ -5132,12 +5144,12 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr, instr->pointer_map(), 0, Safepoint::kNoLazyDeopt); if (!reg.is(eax)) __ mov(reg, eax); - // Done. Put the value in xmm0 into the value of the allocated heap + // Done. Put the value in xmm_scratch into the value of the allocated heap // number. 
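// For reference, LoadUint32 exists because ia32 has no unsigned
// integer-to-double conversion; the usual fix-up in portable C++ (a minimal
// sketch with a hypothetical helper name):

#include <cstdint>

double Uint32ToDouble(uint32_t value) {
  // cvtsi2sd only understands signed 32-bit integers...
  double d = static_cast<double>(static_cast<int32_t>(value));
  // ...so values with the top bit set come out exactly 2^32 too small.
  if (static_cast<int32_t>(value) < 0) d += 4294967296.0;
  return d;
}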
__ bind(&done); if (CpuFeatures::IsSupported(SSE2)) { CpuFeatureScope feature_scope(masm(), SSE2); - __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), xmm0); + __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), xmm_scratch); } else { __ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset)); } @@ -5181,7 +5193,7 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) { if (use_sse2) { CpuFeatureScope scope(masm(), SSE2); XMMRegister input_reg = ToDoubleRegister(instr->value()); - __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), input_reg); + __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), input_reg); } else { __ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset)); } @@ -5308,7 +5320,7 @@ void LCodeGen::EmitNumberUntagD(Register input_reg, bool deoptimize_on_minus_zero, LEnvironment* env, NumberUntagDMode mode) { - Label load_smi, done; + Label convert, load_smi, done; if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) { // Smi check. @@ -5317,28 +5329,17 @@ void LCodeGen::EmitNumberUntagD(Register input_reg, // Heap number map check. __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset), factory()->heap_number_map()); - if (!can_convert_undefined_to_nan) { - DeoptimizeIf(not_equal, env); + if (can_convert_undefined_to_nan) { + __ j(not_equal, &convert, Label::kNear); } else { - Label heap_number, convert; - __ j(equal, &heap_number, Label::kNear); - - // Convert undefined (and hole) to NaN. - __ cmp(input_reg, factory()->undefined_value()); DeoptimizeIf(not_equal, env); - - __ bind(&convert); - ExternalReference nan = - ExternalReference::address_of_canonical_non_hole_nan(); - __ movdbl(result_reg, Operand::StaticVariable(nan)); - __ jmp(&done, Label::kNear); - - __ bind(&heap_number); } + // Heap number to XMM conversion. - __ movdbl(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset)); + __ movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset)); + if (deoptimize_on_minus_zero) { - XMMRegister xmm_scratch = xmm0; + XMMRegister xmm_scratch = double_scratch0(); __ xorps(xmm_scratch, xmm_scratch); __ ucomisd(result_reg, xmm_scratch); __ j(not_zero, &done, Label::kNear); @@ -5347,6 +5348,19 @@ void LCodeGen::EmitNumberUntagD(Register input_reg, DeoptimizeIf(not_zero, env); } __ jmp(&done, Label::kNear); + + if (can_convert_undefined_to_nan) { + __ bind(&convert); + + // Convert undefined (and hole) to NaN. + __ cmp(input_reg, factory()->undefined_value()); + DeoptimizeIf(not_equal, env); + + ExternalReference nan = + ExternalReference::address_of_canonical_non_hole_nan(); + __ movsd(result_reg, Operand::StaticVariable(nan)); + __ jmp(&done, Label::kNear); + } } else { ASSERT(mode == NUMBER_CANDIDATE_IS_SMI); } @@ -5356,7 +5370,7 @@ void LCodeGen::EmitNumberUntagD(Register input_reg, // input register since we avoid dependencies. __ mov(temp_reg, input_reg); __ SmiUntag(temp_reg); // Untag smi before converting to float. - __ cvtsi2sd(result_reg, Operand(temp_reg)); + __ Cvtsi2sd(result_reg, Operand(temp_reg)); __ bind(&done); } @@ -5364,25 +5378,36 @@ void LCodeGen::EmitNumberUntagD(Register input_reg, void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) { Register input_reg = ToRegister(instr->value()); - if (instr->truncating()) { - Label heap_number, slow_case; + Label no_heap_number, check_bools, check_false; // Heap number map check. 
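// The deferred truncation path below gives oddballs JS ToInt32-like handling:
// undefined and false truncate to 0, true to 1, heap numbers are truncated,
// and anything else deoptimizes. A sketch of the value mapping it implements,
// with hypothetical stand-ins for the tagged inputs:

#include <cstdint>

int32_t TruncateForDeferredTaggedToI(bool is_undefined, bool is_true,
                                     bool is_false, double heap_number_value) {
  if (is_undefined || is_false) return 0;          // undefined/false -> 0
  if (is_true) return 1;                           // true -> 1
  return static_cast<int32_t>(heap_number_value);  // heap number -> truncated
}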
__ cmp(FieldOperand(input_reg, HeapObject::kMapOffset), factory()->heap_number_map()); - __ j(equal, &heap_number, Label::kNear); + __ j(not_equal, &no_heap_number, Label::kNear); + __ TruncateHeapNumberToI(input_reg, input_reg); + __ jmp(done); - // Check for undefined. Undefined is converted to zero for truncating - // conversions. + __ bind(&no_heap_number); + // Check for Oddballs. Undefined/False is converted to zero and True to one + // for truncating conversions. __ cmp(input_reg, factory()->undefined_value()); + __ j(not_equal, &check_bools, Label::kNear); + __ Set(input_reg, Immediate(0)); + __ jmp(done); + + __ bind(&check_bools); + __ cmp(input_reg, factory()->true_value()); + __ j(not_equal, &check_false, Label::kNear); + __ Set(input_reg, Immediate(1)); + __ jmp(done); + + __ bind(&check_false); + __ cmp(input_reg, factory()->false_value()); __ RecordComment("Deferred TaggedToI: cannot truncate"); DeoptimizeIf(not_equal, instr->environment()); - __ mov(input_reg, 0); + __ Set(input_reg, Immediate(0)); __ jmp(done); - - __ bind(&heap_number); - __ TruncateHeapNumberToI(input_reg, input_reg); } else { Label bailout; XMMRegister scratch = (instr->temp() != NULL) @@ -5417,12 +5442,16 @@ void LCodeGen::DoTaggedToI(LTaggedToI* instr) { Register input_reg = ToRegister(input); ASSERT(input_reg.is(ToRegister(instr->result()))); - DeferredTaggedToI* deferred = - new(zone()) DeferredTaggedToI(this, instr, x87_stack_); + if (instr->hydrogen()->value()->representation().IsSmi()) { + __ SmiUntag(input_reg); + } else { + DeferredTaggedToI* deferred = + new(zone()) DeferredTaggedToI(this, instr, x87_stack_); - __ JumpIfNotSmi(input_reg, deferred->entry()); - __ SmiUntag(input_reg); - __ bind(deferred->exit()); + __ JumpIfNotSmi(input_reg, deferred->entry()); + __ SmiUntag(input_reg); + __ bind(deferred->exit()); + } } @@ -5487,7 +5516,8 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) { if (CpuFeatures::IsSafeForSnapshot(SSE2)) { CpuFeatureScope scope(masm(), SSE2); XMMRegister input_reg = ToDoubleRegister(input); - __ DoubleToI(result_reg, input_reg, xmm0, + XMMRegister xmm_scratch = double_scratch0(); + __ DoubleToI(result_reg, input_reg, xmm_scratch, instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear); } else { X87Register input_reg = ToX87Register(input); @@ -5514,7 +5544,8 @@ void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) { if (CpuFeatures::IsSafeForSnapshot(SSE2)) { CpuFeatureScope scope(masm(), SSE2); XMMRegister input_reg = ToDoubleRegister(input); - __ DoubleToI(result_reg, input_reg, xmm0, + XMMRegister xmm_scratch = double_scratch0(); + __ DoubleToI(result_reg, input_reg, xmm_scratch, instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear); } else { X87Register input_reg = ToX87Register(input); @@ -5594,7 +5625,7 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) { void LCodeGen::DoCheckValue(LCheckValue* instr) { - Handle<HeapObject> object = instr->hydrogen()->object(); + Handle<HeapObject> object = instr->hydrogen()->object().handle(); if (instr->hydrogen()->object_in_new_space()) { Register reg = ToRegister(instr->value()); Handle<Cell> cell = isolate()->factory()->NewCell(object); @@ -5649,22 +5680,21 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) { ASSERT(input->IsRegister()); Register reg = ToRegister(input); - SmallMapList* map_set = instr->hydrogen()->map_set(); - DeferredCheckMaps* deferred = NULL; if (instr->hydrogen()->has_migration_target()) { deferred = new(zone()) DeferredCheckMaps(this, instr, reg, x87_stack_); __ 
bind(deferred->check_maps()); } + UniqueSet<Map> map_set = instr->hydrogen()->map_set(); Label success; - for (int i = 0; i < map_set->length() - 1; i++) { - Handle<Map> map = map_set->at(i); + for (int i = 0; i < map_set.size() - 1; i++) { + Handle<Map> map = map_set.at(i).handle(); __ CompareMap(reg, map, &success); __ j(equal, &success); } - Handle<Map> map = map_set->last(); + Handle<Map> map = map_set.at(map_set.size() - 1).handle(); __ CompareMap(reg, map, &success); if (instr->hydrogen()->has_migration_target()) { __ j(not_equal, deferred->entry()); @@ -5679,8 +5709,9 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) { void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { CpuFeatureScope scope(masm(), SSE2); XMMRegister value_reg = ToDoubleRegister(instr->unclamped()); + XMMRegister xmm_scratch = double_scratch0(); Register result_reg = ToRegister(instr->result()); - __ ClampDoubleToUint8(value_reg, xmm0, result_reg); + __ ClampDoubleToUint8(value_reg, xmm_scratch, result_reg); } @@ -5696,6 +5727,8 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) { ASSERT(instr->unclamped()->Equals(instr->result())); Register input_reg = ToRegister(instr->unclamped()); + XMMRegister temp_xmm_reg = ToDoubleRegister(instr->temp_xmm()); + XMMRegister xmm_scratch = double_scratch0(); Label is_smi, done, heap_number; __ JumpIfSmi(input_reg, &is_smi); @@ -5714,8 +5747,8 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) { // Heap number __ bind(&heap_number); - __ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset)); - __ ClampDoubleToUint8(xmm0, xmm1, input_reg); + __ movsd(xmm_scratch, FieldOperand(input_reg, HeapNumber::kValueOffset)); + __ ClampDoubleToUint8(xmm_scratch, temp_xmm_reg, input_reg); __ jmp(&done, Label::kNear); // smi @@ -6146,14 +6179,13 @@ void LCodeGen::EmitIsConstructCall(Register temp) { } -void LCodeGen::EnsureSpaceForLazyDeopt() { +void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) { if (!info()->IsStub()) { // Ensure that we have enough space after the previous lazy-bailout // instruction for patching the code here. 
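// The check below enforces a simple padding rule: if the current pc is within
// space_needed bytes of the previous lazy-deopt site, nops close the gap so a
// later patch cannot overwrite its neighbour. As arithmetic (minimal sketch):

int LazyDeoptPaddingBytes(int current_pc, int last_lazy_deopt_pc,
                          int space_needed) {
  int gap = current_pc - last_lazy_deopt_pc;
  return gap < space_needed ? space_needed - gap : 0;  // nop bytes to emit
}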
int current_pc = masm()->pc_offset(); - int patch_size = Deoptimizer::patch_size(); - if (current_pc < last_lazy_deopt_pc_ + patch_size) { - int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc; + if (current_pc < last_lazy_deopt_pc_ + space_needed) { + int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc; __ Nop(padding_size); } } @@ -6162,7 +6194,7 @@ void LCodeGen::EnsureSpaceForLazyDeopt() { void LCodeGen::DoLazyBailout(LLazyBailout* instr) { - EnsureSpaceForLazyDeopt(); + EnsureSpaceForLazyDeopt(Deoptimizer::patch_size()); ASSERT(instr->HasEnvironment()); LEnvironment* env = instr->environment(); RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt); @@ -6233,7 +6265,7 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) { CallCode(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET, instr); - EnsureSpaceForLazyDeopt(); + EnsureSpaceForLazyDeopt(Deoptimizer::patch_size()); __ bind(&done); RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt); safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); @@ -6246,7 +6278,7 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) { ExternalReference::address_of_stack_limit(isolate()); __ cmp(esp, Operand::StaticVariable(stack_limit)); __ j(below, deferred_stack_check->entry()); - EnsureSpaceForLazyDeopt(); + EnsureSpaceForLazyDeopt(Deoptimizer::patch_size()); __ bind(instr->done_label()); deferred_stack_check->SetExit(instr->done_label()); RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt); diff --git a/deps/v8/src/ia32/lithium-codegen-ia32.h b/deps/v8/src/ia32/lithium-codegen-ia32.h index 769917f7e2..78bc69de91 100644 --- a/deps/v8/src/ia32/lithium-codegen-ia32.h +++ b/deps/v8/src/ia32/lithium-codegen-ia32.h @@ -33,6 +33,7 @@ #include "checks.h" #include "deoptimizer.h" #include "ia32/lithium-gap-resolver-ia32.h" +#include "lithium-codegen.h" #include "safepoint-table.h" #include "scopes.h" #include "v8utils.h" @@ -45,45 +46,28 @@ class LDeferredCode; class LGapNode; class SafepointGenerator; -class LCodeGen V8_FINAL BASE_EMBEDDED { +class LCodeGen: public LCodeGenBase { public: LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info) - : zone_(info->zone()), - chunk_(static_cast<LPlatformChunk*>(chunk)), - masm_(assembler), - info_(info), - current_block_(-1), - current_instruction_(-1), - instructions_(chunk->instructions()), + : LCodeGenBase(chunk, assembler, info), deoptimizations_(4, info->zone()), jump_table_(4, info->zone()), deoptimization_literals_(8, info->zone()), inlined_function_count_(0), scope_(info->scope()), - status_(UNUSED), translations_(info->zone()), deferred_(8, info->zone()), dynamic_frame_alignment_(false), support_aligned_spilled_doubles_(false), osr_pc_offset_(-1), - last_lazy_deopt_pc_(0), frame_is_built_(false), x87_stack_(assembler), safepoints_(info->zone()), resolver_(this), - expected_safepoint_kind_(Safepoint::kSimple), - old_position_(RelocInfo::kNoPosition) { + expected_safepoint_kind_(Safepoint::kSimple) { PopulateDeoptimizationLiteralsWithInlinedFunctions(); } - // Simple accessors. 
- MacroAssembler* masm() const { return masm_; } - CompilationInfo* info() const { return info_; } - Isolate* isolate() const { return info_->isolate(); } - Factory* factory() const { return isolate()->factory(); } - Heap* heap() const { return isolate()->heap(); } - Zone* zone() const { return zone_; } - int LookupDestination(int block_id) const { return chunk()->LookupDestination(block_id); } @@ -129,12 +113,17 @@ class LCodeGen V8_FINAL BASE_EMBEDDED { X87Register left, X87Register right, X87Register result); void X87LoadForUsage(X87Register reg); + void X87LoadForUsage(X87Register reg1, X87Register reg2); void X87PrepareToWrite(X87Register reg) { x87_stack_.PrepareToWrite(reg); } void X87CommitWrite(X87Register reg) { x87_stack_.CommitWrite(reg); } void X87Fxch(X87Register reg, int other_slot = 0) { x87_stack_.Fxch(reg, other_slot); } + void X87Free(X87Register reg) { + x87_stack_.Free(reg); + } + bool X87StackEmpty() { return x87_stack_.depth() == 0; @@ -188,27 +177,13 @@ class LCodeGen V8_FINAL BASE_EMBEDDED { #undef DECLARE_DO private: - enum Status { - UNUSED, - GENERATING, - DONE, - ABORTED - }; - - bool is_unused() const { return status_ == UNUSED; } - bool is_generating() const { return status_ == GENERATING; } - bool is_done() const { return status_ == DONE; } - bool is_aborted() const { return status_ == ABORTED; } - StrictModeFlag strict_mode_flag() const { return info()->is_classic_mode() ? kNonStrictMode : kStrictMode; } - LPlatformChunk* chunk() const { return chunk_; } Scope* scope() const { return scope_; } - HGraph* graph() const { return chunk()->graph(); } - int GetNextEmittedBlock() const; + XMMRegister double_scratch0() const { return xmm0; } void EmitClassOfTest(Label* if_true, Label* if_false, @@ -220,14 +195,14 @@ class LCodeGen V8_FINAL BASE_EMBEDDED { int GetStackSlotCount() const { return chunk()->spill_slot_count(); } void Abort(BailoutReason reason); - void FPRINTF_CHECKING Comment(const char* format, ...); void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); } // Code generation passes. Returns true if code generation should // continue. 
+ void GenerateBodyInstructionPre(LInstruction* instr) V8_OVERRIDE; + void GenerateBodyInstructionPost(LInstruction* instr) V8_OVERRIDE; bool GeneratePrologue(); - bool GenerateBody(); bool GenerateDeferredCode(); bool GenerateJumpTable(); bool GenerateSafepointTable(); @@ -251,7 +226,8 @@ class LCodeGen V8_FINAL BASE_EMBEDDED { void CallRuntime(const Runtime::Function* fun, int argc, - LInstruction* instr); + LInstruction* instr, + SaveFPRegsMode save_doubles = kDontSaveFPRegs); void CallRuntime(Runtime::FunctionId id, int argc, @@ -331,9 +307,8 @@ class LCodeGen V8_FINAL BASE_EMBEDDED { void RecordSafepointWithRegisters(LPointerMap* pointers, int arguments, Safepoint::DeoptMode mode); - void RecordPosition(int position); - void RecordAndUpdatePosition(int position); + void RecordAndWritePosition(int position) V8_OVERRIDE; static Condition TokenToCondition(Token::Value op, bool is_unsigned); void EmitGoto(int block); @@ -395,7 +370,7 @@ class LCodeGen V8_FINAL BASE_EMBEDDED { int* offset, AllocationSiteMode mode); - void EnsureSpaceForLazyDeopt(); + void EnsureSpaceForLazyDeopt(int space_needed) V8_OVERRIDE; void DoLoadKeyedExternalArray(LLoadKeyed* instr); void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr); void DoLoadKeyedFixedArray(LLoadKeyed* instr); @@ -425,26 +400,16 @@ class LCodeGen V8_FINAL BASE_EMBEDDED { void MakeSureStackPagesMapped(int offset); #endif - Zone* zone_; - LPlatformChunk* const chunk_; - MacroAssembler* const masm_; - CompilationInfo* const info_; - - int current_block_; - int current_instruction_; - const ZoneList<LInstruction*>* instructions_; ZoneList<LEnvironment*> deoptimizations_; ZoneList<Deoptimizer::JumpTableEntry> jump_table_; ZoneList<Handle<Object> > deoptimization_literals_; int inlined_function_count_; Scope* const scope_; - Status status_; TranslationBuffer translations_; ZoneList<LDeferredCode*> deferred_; bool dynamic_frame_alignment_; bool support_aligned_spilled_doubles_; int osr_pc_offset_; - int last_lazy_deopt_pc_; bool frame_is_built_; class X87Stack { @@ -505,8 +470,6 @@ class LCodeGen V8_FINAL BASE_EMBEDDED { Safepoint::Kind expected_safepoint_kind_; - int old_position_; - class PushSafepointRegistersScope V8_FINAL BASE_EMBEDDED { public: explicit PushSafepointRegistersScope(LCodeGen* codegen) diff --git a/deps/v8/src/ia32/lithium-gap-resolver-ia32.cc b/deps/v8/src/ia32/lithium-gap-resolver-ia32.cc index b5bc18bdc9..2b2126af9d 100644 --- a/deps/v8/src/ia32/lithium-gap-resolver-ia32.cc +++ b/deps/v8/src/ia32/lithium-gap-resolver-ia32.cc @@ -326,7 +326,7 @@ void LGapResolver::EmitMove(int index) { } else { __ push(Immediate(upper)); __ push(Immediate(lower)); - __ movdbl(dst, Operand(esp, 0)); + __ movsd(dst, Operand(esp, 0)); __ add(esp, Immediate(kDoubleSize)); } } else { @@ -360,7 +360,7 @@ void LGapResolver::EmitMove(int index) { } else { ASSERT(destination->IsDoubleStackSlot()); Operand dst = cgen_->ToOperand(destination); - __ movdbl(dst, src); + __ movsd(dst, src); } } else { // load from the register onto the stack, store in destination, which must @@ -378,12 +378,12 @@ void LGapResolver::EmitMove(int index) { Operand src = cgen_->ToOperand(source); if (destination->IsDoubleRegister()) { XMMRegister dst = cgen_->ToDoubleRegister(destination); - __ movdbl(dst, src); + __ movsd(dst, src); } else { // We rely on having xmm0 available as a fixed scratch register. 
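// ia32 has no memory-to-memory double move, so the gap resolver spills through
// xmm0; the register/slot swap below has the same shape as this sketch, with
// hypothetical stand-ins for the two locations:

void SwapDoubleViaScratch(double* reg, double* slot, double* xmm0_scratch) {
  *xmm0_scratch = *slot;  // movsd xmm0, other
  *slot = *reg;           // movsd other, reg
  *reg = *xmm0_scratch;   // movsd reg, xmm0
}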
Operand dst = cgen_->ToOperand(destination); - __ movdbl(xmm0, src); - __ movdbl(dst, xmm0); + __ movsd(xmm0, src); + __ movsd(dst, xmm0); } } else { // load from the stack slot on top of the floating point stack, and then @@ -486,9 +486,9 @@ void LGapResolver::EmitSwap(int index) { : destination); Operand other = cgen_->ToOperand(source->IsDoubleRegister() ? destination : source); - __ movdbl(xmm0, other); - __ movdbl(other, reg); - __ movdbl(reg, Operand(xmm0)); + __ movsd(xmm0, other); + __ movsd(other, reg); + __ movsd(reg, Operand(xmm0)); } else if (source->IsDoubleStackSlot() && destination->IsDoubleStackSlot()) { CpuFeatureScope scope(cgen_->masm(), SSE2); // Double-width memory-to-memory. Spill on demand to use a general @@ -499,12 +499,12 @@ void LGapResolver::EmitSwap(int index) { Operand src1 = cgen_->HighOperand(source); Operand dst0 = cgen_->ToOperand(destination); Operand dst1 = cgen_->HighOperand(destination); - __ movdbl(xmm0, dst0); // Save destination in xmm0. + __ movsd(xmm0, dst0); // Save destination in xmm0. __ mov(tmp, src0); // Then use tmp to copy source to destination. __ mov(dst0, tmp); __ mov(tmp, src1); __ mov(dst1, tmp); - __ movdbl(src0, xmm0); + __ movsd(src0, xmm0); } else { // No other combinations are possible. diff --git a/deps/v8/src/ia32/lithium-ia32.cc b/deps/v8/src/ia32/lithium-ia32.cc index ca1e60d644..fdddef3f47 100644 --- a/deps/v8/src/ia32/lithium-ia32.cc +++ b/deps/v8/src/ia32/lithium-ia32.cc @@ -386,9 +386,9 @@ void LAccessArgumentsAt::PrintDataTo(StringStream* stream) { } -int LPlatformChunk::GetNextSpillIndex(bool is_double) { +int LPlatformChunk::GetNextSpillIndex(RegisterKind kind) { // Skip a slot if for a double-width slot. - if (is_double) { + if (kind == DOUBLE_REGISTERS) { spill_slot_count_++; spill_slot_count_ |= 1; num_double_slots_++; @@ -397,11 +397,12 @@ int LPlatformChunk::GetNextSpillIndex(bool is_double) { } -LOperand* LPlatformChunk::GetNextSpillSlot(bool is_double) { - int index = GetNextSpillIndex(is_double); - if (is_double) { +LOperand* LPlatformChunk::GetNextSpillSlot(RegisterKind kind) { + int index = GetNextSpillIndex(kind); + if (kind == DOUBLE_REGISTERS) { return LDoubleStackSlot::Create(index, zone()); } else { + ASSERT(kind == GENERAL_REGISTERS); return LStackSlot::Create(index, zone()); } } @@ -479,7 +480,7 @@ LPlatformChunk* LChunkBuilder::Build() { // Reserve the first spill slot for the state of dynamic alignment. if (info()->IsOptimizing()) { - int alignment_state_index = chunk_->GetNextSpillIndex(false); + int alignment_state_index = chunk_->GetNextSpillIndex(GENERAL_REGISTERS); ASSERT_EQ(alignment_state_index, 0); USE(alignment_state_index); } @@ -488,7 +489,7 @@ LPlatformChunk* LChunkBuilder::Build() { // which will be subsumed into this frame. if (graph()->has_osr()) { for (int i = graph()->osr()->UnoptimizedFrameSlots(); i > 0; i--) { - chunk_->GetNextSpillIndex(false); + chunk_->GetNextSpillIndex(GENERAL_REGISTERS); } } @@ -560,29 +561,34 @@ LOperand* LChunkBuilder::UseAtStart(HValue* value) { } +static inline bool CanBeImmediateConstant(HValue* value) { + return value->IsConstant() && HConstant::cast(value)->NotInNewSpace(); +} + + LOperand* LChunkBuilder::UseOrConstant(HValue* value) { - return value->IsConstant() + return CanBeImmediateConstant(value) ? chunk_->DefineConstantOperand(HConstant::cast(value)) : Use(value); } LOperand* LChunkBuilder::UseOrConstantAtStart(HValue* value) { - return value->IsConstant() + return CanBeImmediateConstant(value) ? 
chunk_->DefineConstantOperand(HConstant::cast(value)) : UseAtStart(value); } LOperand* LChunkBuilder::UseRegisterOrConstant(HValue* value) { - return value->IsConstant() + return CanBeImmediateConstant(value) ? chunk_->DefineConstantOperand(HConstant::cast(value)) : UseRegister(value); } LOperand* LChunkBuilder::UseRegisterOrConstantAtStart(HValue* value) { - return value->IsConstant() + return CanBeImmediateConstant(value) ? chunk_->DefineConstantOperand(HConstant::cast(value)) : UseRegisterAtStart(value); } @@ -707,7 +713,7 @@ LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr, LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) { ASSERT(!instr->HasPointerMap()); - instr->set_pointer_map(new(zone()) LPointerMap(position_, zone())); + instr->set_pointer_map(new(zone()) LPointerMap(zone())); return instr; } @@ -762,52 +768,44 @@ LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) { LInstruction* LChunkBuilder::DoShift(Token::Value op, HBitwiseBinaryOperation* instr) { - if (instr->representation().IsTagged()) { - ASSERT(instr->left()->representation().IsSmiOrTagged()); - ASSERT(instr->right()->representation().IsSmiOrTagged()); - - LOperand* context = UseFixed(instr->context(), esi); - LOperand* left = UseFixed(instr->left(), edx); - LOperand* right = UseFixed(instr->right(), eax); - LArithmeticT* result = new(zone()) LArithmeticT(op, context, left, right); - return MarkAsCall(DefineFixed(result, eax), instr); - } - - ASSERT(instr->representation().IsSmiOrInteger32()); - ASSERT(instr->left()->representation().Equals(instr->representation())); - ASSERT(instr->right()->representation().Equals(instr->representation())); - LOperand* left = UseRegisterAtStart(instr->left()); + if (instr->representation().IsSmiOrInteger32()) { + ASSERT(instr->left()->representation().Equals(instr->representation())); + ASSERT(instr->right()->representation().Equals(instr->representation())); + LOperand* left = UseRegisterAtStart(instr->left()); - HValue* right_value = instr->right(); - LOperand* right = NULL; - int constant_value = 0; - bool does_deopt = false; - if (right_value->IsConstant()) { - HConstant* constant = HConstant::cast(right_value); - right = chunk_->DefineConstantOperand(constant); - constant_value = constant->Integer32Value() & 0x1f; - // Left shifts can deoptimize if we shift by > 0 and the result cannot be - // truncated to smi. - if (instr->representation().IsSmi() && constant_value > 0) { - does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToSmi); + HValue* right_value = instr->right(); + LOperand* right = NULL; + int constant_value = 0; + bool does_deopt = false; + if (right_value->IsConstant()) { + HConstant* constant = HConstant::cast(right_value); + right = chunk_->DefineConstantOperand(constant); + constant_value = constant->Integer32Value() & 0x1f; + // Left shifts can deoptimize if we shift by > 0 and the result cannot be + // truncated to smi. + if (instr->representation().IsSmi() && constant_value > 0) { + does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToSmi); + } + } else { + right = UseFixed(right_value, ecx); } - } else { - right = UseFixed(right_value, ecx); - } - // Shift operations can only deoptimize if we do a logical shift by 0 and - // the result cannot be truncated to int32. 
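// A model of the GetNextSpillIndex arithmetic shown earlier: ia32 spill slots
// are 4 bytes, so a double-width request bumps the count and forces it odd
// before the index is handed out, keeping the 8-byte pair aligned in the
// frame. The struct below reproduces only the counting, nothing else.
#include <cassert>

enum RegisterKind { GENERAL_REGISTERS, DOUBLE_REGISTERS };

struct SpillAllocatorModel {
  int spill_slot_count;
  SpillAllocatorModel() : spill_slot_count(0) {}
  int GetNextSpillIndex(RegisterKind kind) {
    if (kind == DOUBLE_REGISTERS) {
      spill_slot_count++;     // skip a slot if necessary...
      spill_slot_count |= 1;  // ...so the double starts at an odd index
    }
    return spill_slot_count++;
  }
};

int main() {
  SpillAllocatorModel m;
  int d = m.GetNextSpillIndex(DOUBLE_REGISTERS);   // 1
  int g = m.GetNextSpillIndex(GENERAL_REGISTERS);  // 2
  assert(d == 1 && g == 2);
  return 0;
}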
- if (op == Token::SHR && constant_value == 0) { - if (FLAG_opt_safe_uint32_operations) { - does_deopt = !instr->CheckFlag(HInstruction::kUint32); - } else { - does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToInt32); + // Shift operations can only deoptimize if we do a logical shift by 0 and + // the result cannot be truncated to int32. + if (op == Token::SHR && constant_value == 0) { + if (FLAG_opt_safe_uint32_operations) { + does_deopt = !instr->CheckFlag(HInstruction::kUint32); + } else { + does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToInt32); + } } - } - LInstruction* result = - DefineSameAsFirst(new(zone()) LShiftI(op, left, right, does_deopt)); - return does_deopt ? AssignEnvironment(result) : result; + LInstruction* result = + DefineSameAsFirst(new(zone()) LShiftI(op, left, right, does_deopt)); + return does_deopt ? AssignEnvironment(result) : result; + } else { + return DoArithmeticT(op, instr); + } } @@ -816,21 +814,22 @@ LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op, ASSERT(instr->representation().IsDouble()); ASSERT(instr->left()->representation().IsDouble()); ASSERT(instr->right()->representation().IsDouble()); - ASSERT(op != Token::MOD); - LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand()); - LOperand* right = UseRegisterAtStart(instr->BetterRightOperand()); - LArithmeticD* result = new(zone()) LArithmeticD(op, left, right); - return DefineSameAsFirst(result); + if (op == Token::MOD) { + LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand()); + LOperand* right = UseRegisterAtStart(instr->BetterRightOperand()); + LArithmeticD* result = new(zone()) LArithmeticD(op, left, right); + return MarkAsCall(DefineSameAsFirst(result), instr); + } else { + LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand()); + LOperand* right = UseRegisterAtStart(instr->BetterRightOperand()); + LArithmeticD* result = new(zone()) LArithmeticD(op, left, right); + return DefineSameAsFirst(result); + } } LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op, - HArithmeticBinaryOperation* instr) { - ASSERT(op == Token::ADD || - op == Token::DIV || - op == Token::MOD || - op == Token::MUL || - op == Token::SUB); + HBinaryOperation* instr) { HValue* left = instr->left(); HValue* right = instr->right(); ASSERT(left->representation().IsTagged()); @@ -914,10 +913,31 @@ void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) { void LChunkBuilder::VisitInstruction(HInstruction* current) { HInstruction* old_current = current_instruction_; current_instruction_ = current; - if (current->has_position()) position_ = current->position(); - LInstruction* instr = current->CompileToLithium(this); + + LInstruction* instr = NULL; + if (current->CanReplaceWithDummyUses()) { + HValue* first_operand = current->OperandCount() == 0 + ? graph()->GetConstant1() + : current->OperandAt(0); + instr = DefineAsRegister(new(zone()) LDummyUse(UseAny(first_operand))); + for (int i = 1; i < current->OperandCount(); ++i) { + LInstruction* dummy = + new(zone()) LDummyUse(UseAny(current->OperandAt(i))); + dummy->set_hydrogen_value(current); + chunk_->AddInstruction(dummy, current_block_); + } + } else { + instr = current->CompileToLithium(this); + } + + argument_count_ += current->argument_delta(); + ASSERT(argument_count_ >= 0); if (instr != NULL) { + // Associate the hydrogen instruction first, since we may need it for + // the ClobbersRegisters() or ClobbersDoubleRegisters() calls below. 
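// Why a logical shift by zero is the one dangerous SHR case above: any
// nonzero shift of a uint32 fits in int32, but x >>> 0 keeps all 32 bits, so
// an input with the top bit set has no int32 representation and must deopt
// unless every use truncates. Shift counts are masked to five bits, matching
// the & 0x1f in the builder and ia32's own SHR semantics.
#include <cstdint>
#include <cstdio>

int main() {
  uint32_t x = 0x80000000u;
  std::printf("x >> 1 fits int32: %d\n", (x >> 1) <= 0x7fffffffu);  // 1
  std::printf("x >> 0 fits int32: %d\n", (x >> 0) <= 0x7fffffffu);  // 0
  int count = 33;
  std::printf("masked shift count: %d\n", count & 0x1f);            // 1
  return 0;
}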
+ instr->set_hydrogen_value(current); + #if DEBUG // Make sure that the lithium instruction has either no fixed register // constraints in temps or the result OR no uses that are only used at @@ -947,7 +967,6 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) { } #endif - instr->set_position(position_); if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) { instr = AssignPointerMap(instr); } @@ -964,7 +983,6 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) { clobber->set_hydrogen_value(current); chunk_->AddInstruction(clobber, current_block_); } - instr->set_hydrogen_value(current); chunk_->AddInstruction(instr, current_block_); } current_instruction_ = old_current; @@ -1061,21 +1079,15 @@ LInstruction* LChunkBuilder::DoGoto(HGoto* instr) { LInstruction* LChunkBuilder::DoBranch(HBranch* instr) { - HValue* value = instr->value(); - if (value->EmitAtUses()) { - ASSERT(value->IsConstant()); - ASSERT(!value->representation().IsDouble()); - HBasicBlock* successor = HConstant::cast(value)->BooleanValue() - ? instr->FirstSuccessor() - : instr->SecondSuccessor(); - return new(zone()) LGoto(successor); - } + LInstruction* goto_instr = CheckElideControlInstruction(instr); + if (goto_instr != NULL) return goto_instr; ToBooleanStub::Types expected = instr->expected_input_types(); // Tagged values that are not known smis or booleans require a // deoptimization environment. If the instruction is generic no // environment is needed since all cases are handled. + HValue* value = instr->value(); Representation rep = value->representation(); HType type = value->type(); if (!rep.IsTagged() || type.IsSmi() || type.IsBoolean()) { @@ -1141,12 +1153,6 @@ LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal( } -LInstruction* LChunkBuilder::DoInstanceSize(HInstanceSize* instr) { - LOperand* object = UseRegisterAtStart(instr->object()); - return DefineAsRegister(new(zone()) LInstanceSize(object)); -} - - LInstruction* LChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) { LOperand* receiver = UseRegister(instr->receiver()); LOperand* function = UseRegisterAtStart(instr->function()); @@ -1171,7 +1177,6 @@ LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) { LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) { - ++argument_count_; LOperand* argument = UseAny(instr->argument()); return new(zone()) LPushArgument(argument); } @@ -1238,7 +1243,6 @@ LInstruction* LChunkBuilder::DoGlobalReceiver(HGlobalReceiver* instr) { LInstruction* LChunkBuilder::DoCallConstantFunction( HCallConstantFunction* instr) { - argument_count_ -= instr->argument_count(); return MarkAsCall(DefineFixed(new(zone()) LCallConstantFunction, eax), instr); } @@ -1246,7 +1250,6 @@ LInstruction* LChunkBuilder::DoCallConstantFunction( LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) { LOperand* context = UseFixed(instr->context(), esi); LOperand* function = UseFixed(instr->function(), edi); - argument_count_ -= instr->argument_count(); LInvokeFunction* result = new(zone()) LInvokeFunction(context, function); return MarkAsCall(DefineFixed(result, eax), instr, CANNOT_DEOPTIMIZE_EAGERLY); } @@ -1356,7 +1359,6 @@ LInstruction* LChunkBuilder::DoCallKeyed(HCallKeyed* instr) { ASSERT(instr->key()->representation().IsTagged()); LOperand* context = UseFixed(instr->context(), esi); LOperand* key = UseFixed(instr->key(), ecx); - argument_count_ -= instr->argument_count(); LCallKeyed* result = new(zone()) LCallKeyed(context, key); return MarkAsCall(DefineFixed(result, eax), instr); } 
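// An assumed shape for CheckElideControlInstruction, inferred from the inline
// code DoBranch used to contain: when the branch input is a constant emitted
// at its uses, the two-way branch collapses into a goto to the statically
// known successor. Types below are stand-ins, not V8 classes.
#include <cstdio>

struct Block { int id; };

struct BranchModel {
  bool input_is_constant;
  bool constant_bool_value;
  Block* true_successor;
  Block* false_successor;
};

// Returns the known successor when the branch can be elided, NULL otherwise.
Block* TryElideBranch(const BranchModel& b) {
  if (!b.input_is_constant) return 0;
  return b.constant_bool_value ? b.true_successor : b.false_successor;
}

int main() {
  Block t = {1}, f = {2};
  BranchModel b = {true, false, &t, &f};
  Block* target = TryElideBranch(b);
  std::printf("goto block %d\n", target ? target->id : -1);  // goto block 2
  return 0;
}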
@@ -1364,7 +1366,6 @@ LInstruction* LChunkBuilder::DoCallKeyed(HCallKeyed* instr) { LInstruction* LChunkBuilder::DoCallNamed(HCallNamed* instr) { LOperand* context = UseFixed(instr->context(), esi); - argument_count_ -= instr->argument_count(); LCallNamed* result = new(zone()) LCallNamed(context); return MarkAsCall(DefineFixed(result, eax), instr); } @@ -1372,14 +1373,12 @@ LInstruction* LChunkBuilder::DoCallNamed(HCallNamed* instr) { LInstruction* LChunkBuilder::DoCallGlobal(HCallGlobal* instr) { LOperand* context = UseFixed(instr->context(), esi); - argument_count_ -= instr->argument_count(); LCallGlobal* result = new(zone()) LCallGlobal(context); return MarkAsCall(DefineFixed(result, eax), instr); } LInstruction* LChunkBuilder::DoCallKnownGlobal(HCallKnownGlobal* instr) { - argument_count_ -= instr->argument_count(); return MarkAsCall(DefineFixed(new(zone()) LCallKnownGlobal, eax), instr); } @@ -1387,7 +1386,6 @@ LInstruction* LChunkBuilder::DoCallKnownGlobal(HCallKnownGlobal* instr) { LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) { LOperand* context = UseFixed(instr->context(), esi); LOperand* constructor = UseFixed(instr->constructor(), edi); - argument_count_ -= instr->argument_count(); LCallNew* result = new(zone()) LCallNew(context, constructor); return MarkAsCall(DefineFixed(result, eax), instr); } @@ -1396,7 +1394,6 @@ LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) { LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) { LOperand* context = UseFixed(instr->context(), esi); LOperand* constructor = UseFixed(instr->constructor(), edi); - argument_count_ -= instr->argument_count(); LCallNewArray* result = new(zone()) LCallNewArray(context, constructor); return MarkAsCall(DefineFixed(result, eax), instr); } @@ -1405,14 +1402,12 @@ LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) { LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) { LOperand* context = UseFixed(instr->context(), esi); LOperand* function = UseFixed(instr->function(), edi); - argument_count_ -= instr->argument_count(); LCallFunction* result = new(zone()) LCallFunction(context, function); return MarkAsCall(DefineFixed(result, eax), instr); } LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) { - argument_count_ -= instr->argument_count(); LOperand* context = UseFixed(instr->context(), esi); return MarkAsCall(DefineFixed(new(zone()) LCallRuntime(context), eax), instr); } @@ -1442,29 +1437,19 @@ LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) { if (instr->representation().IsSmiOrInteger32()) { ASSERT(instr->left()->representation().Equals(instr->representation())); ASSERT(instr->right()->representation().Equals(instr->representation())); + ASSERT(instr->CheckFlag(HValue::kTruncatingToInt32)); LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand()); LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand()); return DefineSameAsFirst(new(zone()) LBitI(left, right)); } else { - ASSERT(instr->representation().IsSmiOrTagged()); - ASSERT(instr->left()->representation().IsSmiOrTagged()); - ASSERT(instr->right()->representation().IsSmiOrTagged()); - - LOperand* context = UseFixed(instr->context(), esi); - LOperand* left = UseFixed(instr->left(), edx); - LOperand* right = UseFixed(instr->right(), eax); - LArithmeticT* result = - new(zone()) LArithmeticT(instr->op(), context, left, right); - return MarkAsCall(DefineFixed(result, eax), instr); + return DoArithmeticT(instr->op(), instr); } } LInstruction* LChunkBuilder::DoDiv(HDiv* 
instr) { - if (instr->representation().IsDouble()) { - return DoArithmeticD(Token::DIV, instr); - } else if (instr->representation().IsSmiOrInteger32()) { + if (instr->representation().IsSmiOrInteger32()) { ASSERT(instr->left()->representation().Equals(instr->representation())); ASSERT(instr->right()->representation().Equals(instr->representation())); if (instr->HasPowerOf2Divisor()) { @@ -1481,8 +1466,9 @@ LInstruction* LChunkBuilder::DoDiv(HDiv* instr) { LOperand* divisor = UseRegister(instr->right()); LDivI* result = new(zone()) LDivI(dividend, divisor, temp); return AssignEnvironment(DefineFixed(result, eax)); + } else if (instr->representation().IsDouble()) { + return DoArithmeticD(Token::DIV, instr); } else { - ASSERT(instr->representation().IsTagged()); return DoArithmeticT(Token::DIV, instr); } } @@ -1584,17 +1570,10 @@ LInstruction* LChunkBuilder::DoMod(HMod* instr) { ? AssignEnvironment(result) : result; } - } else if (instr->representation().IsSmiOrTagged()) { - return DoArithmeticT(Token::MOD, instr); + } else if (instr->representation().IsDouble()) { + return DoArithmeticD(Token::MOD, instr); } else { - ASSERT(instr->representation().IsDouble()); - // We call a C function for double modulo. It can't trigger a GC. We need - // to use fixed result register for the call. - // TODO(fschneider): Allow any register as input registers. - LArithmeticD* mod = new(zone()) LArithmeticD(Token::MOD, - UseFixedDouble(left, xmm2), - UseFixedDouble(right, xmm1)); - return MarkAsCall(DefineFixedDouble(mod, xmm1), instr); + return DoArithmeticT(Token::MOD, instr); } } @@ -1618,7 +1597,6 @@ LInstruction* LChunkBuilder::DoMul(HMul* instr) { } else if (instr->representation().IsDouble()) { return DoArithmeticD(Token::MUL, instr); } else { - ASSERT(instr->representation().IsTagged()); return DoArithmeticT(Token::MUL, instr); } } @@ -1639,7 +1617,6 @@ LInstruction* LChunkBuilder::DoSub(HSub* instr) { } else if (instr->representation().IsDouble()) { return DoArithmeticD(Token::SUB, instr); } else { - ASSERT(instr->representation().IsSmiOrTagged()); return DoArithmeticT(Token::SUB, instr); } } @@ -1671,7 +1648,6 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) { } else if (instr->representation().IsDouble()) { return DoArithmeticD(Token::ADD, instr); } else { - ASSERT(instr->representation().IsSmiOrTagged()); return DoArithmeticT(Token::ADD, instr); } } @@ -1752,9 +1728,12 @@ LInstruction* LChunkBuilder::DoCompareNumericAndBranch( ASSERT(instr->right()->representation().IsDouble()); LOperand* left; LOperand* right; - if (instr->left()->IsConstant() && instr->right()->IsConstant()) { - left = UseRegisterOrConstantAtStart(instr->left()); - right = UseRegisterOrConstantAtStart(instr->right()); + if (CanBeImmediateConstant(instr->left()) && + CanBeImmediateConstant(instr->right())) { + // The code generator requires either both inputs to be constant + // operands, or neither. 
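// The deleted comment above notes that double modulo goes through a C
// function; that is why the Token::MOD double case is now routed through
// DoArithmeticD and marked as a call. Behaviorally the helper matches fmod:
#include <cmath>
#include <cstdio>

int main() {
  std::printf("%.1f\n", std::fmod(7.5, 2.0));   // 1.5
  std::printf("%.1f\n", std::fmod(-7.5, 2.0));  // -1.5 (sign of the dividend)
  return 0;
}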
+ left = UseConstant(instr->left()); + right = UseConstant(instr->right()); } else { left = UseRegisterAtStart(instr->left()); right = UseRegisterAtStart(instr->right()); @@ -1766,6 +1745,8 @@ LInstruction* LChunkBuilder::DoCompareNumericAndBranch( LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch( HCompareObjectEqAndBranch* instr) { + LInstruction* goto_instr = CheckElideControlInstruction(instr); + if (goto_instr != NULL) return goto_instr; LOperand* left = UseRegisterAtStart(instr->left()); LOperand* right = UseOrConstantAtStart(instr->right()); return new(zone()) LCmpObjectEqAndBranch(left, right); @@ -1774,8 +1755,8 @@ LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch( LInstruction* LChunkBuilder::DoCompareHoleAndBranch( HCompareHoleAndBranch* instr) { - LOperand* object = UseRegisterAtStart(instr->object()); - return new(zone()) LCmpHoleAndBranch(object); + LOperand* value = UseRegisterAtStart(instr->value()); + return new(zone()) LCmpHoleAndBranch(value); } @@ -1909,6 +1890,13 @@ LInstruction* LChunkBuilder::DoBoundsCheckBaseIndexInformation( } +LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) { + // The control instruction marking the end of a block that completed + // abruptly (e.g., threw an exception). There is nothing specific to do. + return NULL; +} + + LInstruction* LChunkBuilder::DoThrow(HThrow* instr) { LOperand* context = UseFixed(instr->context(), esi); LOperand* value = UseFixed(instr->value(), eax); @@ -1944,7 +1932,6 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) { // building a stack frame. if (from.IsTagged()) { if (to.IsDouble()) { - info()->MarkAsDeferredCalling(); LOperand* value = UseRegister(instr->value()); // Temp register only necessary for minus zero check. LOperand* temp = TempRegister(); @@ -2015,8 +2002,9 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) { } else if (to.IsSmi()) { HValue* val = instr->value(); LOperand* value = UseRegister(val); - LInstruction* result = - DefineSameAsFirst(new(zone()) LInteger32ToSmi(value)); + LInstruction* result = val->CheckFlag(HInstruction::kUint32) + ? DefineSameAsFirst(new(zone()) LUint32ToSmi(value)) + : DefineSameAsFirst(new(zone()) LInteger32ToSmi(value)); if (val->HasRange() && val->range()->IsInSmiRange()) { return result; } @@ -2050,12 +2038,6 @@ LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) { } -LInstruction* LChunkBuilder::DoIsNumberAndBranch(HIsNumberAndBranch* instr) { - return new(zone()) - LIsNumberAndBranch(UseRegisterOrConstantAtStart(instr->value())); -} - - LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) { LOperand* value = UseRegisterAtStart(instr->value()); LOperand* temp = TempRegister(); @@ -2234,6 +2216,11 @@ LInstruction* LChunkBuilder::DoLoadFunctionPrototype( } +LInstruction* LChunkBuilder::DoLoadRoot(HLoadRoot* instr) { + return DefineAsRegister(new(zone()) LLoadRoot); +} + + LInstruction* LChunkBuilder::DoLoadExternalArrayPointer( HLoadExternalArrayPointer* instr) { LOperand* input = UseRegisterAtStart(instr->value()); @@ -2435,7 +2422,11 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) { !(FLAG_track_double_fields && instr->field_representation().IsDouble()); LOperand* val; - if (needs_write_barrier) { + if (instr->field_representation().IsByte()) { + // mov_b requires a byte register (i.e. any of eax, ebx, ecx, edx). + // Just force the value to be in eax and we're safe here. 
+ val = UseFixed(instr->value(), eax); + } else if (needs_write_barrier) { val = UseTempRegister(instr->value()); } else if (can_be_constant) { val = UseRegisterOrConstant(instr->value()); @@ -2582,7 +2573,6 @@ LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) { LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) { LOperand* context = UseFixed(instr->context(), esi); - argument_count_ -= instr->argument_count(); LCallStub* result = new(zone()) LCallStub(context); return MarkAsCall(DefineFixed(result, eax), instr); } @@ -2711,7 +2701,7 @@ LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) { if (env->entry()->arguments_pushed()) { int argument_count = env->arguments_environment()->parameter_count(); pop = new(zone()) LDrop(argument_count); - argument_count_ -= argument_count; + ASSERT(instr->argument_delta() == -argument_count); } HEnvironment* outer = current_block_->last_environment()-> diff --git a/deps/v8/src/ia32/lithium-ia32.h b/deps/v8/src/ia32/lithium-ia32.h index 3a609c991a..752fdd4f6a 100644 --- a/deps/v8/src/ia32/lithium-ia32.h +++ b/deps/v8/src/ia32/lithium-ia32.h @@ -107,7 +107,6 @@ class LCodeGen; V(InnerAllocatedObject) \ V(InstanceOf) \ V(InstanceOfKnownGlobal) \ - V(InstanceSize) \ V(InstructionGap) \ V(Integer32ToDouble) \ V(Integer32ToSmi) \ @@ -116,7 +115,6 @@ class LCodeGen; V(IsObjectAndBranch) \ V(IsStringAndBranch) \ V(IsSmiAndBranch) \ - V(IsNumberAndBranch) \ V(IsUndetectableAndBranch) \ V(Label) \ V(LazyBailout) \ @@ -130,6 +128,7 @@ class LCodeGen; V(LoadKeyedGeneric) \ V(LoadNamedField) \ V(LoadNamedGeneric) \ + V(LoadRoot) \ V(MapEnumLength) \ V(MathAbs) \ V(MathCos) \ @@ -184,6 +183,7 @@ class LCodeGen; V(Typeof) \ V(TypeofIsAndBranch) \ V(Uint32ToDouble) \ + V(Uint32ToSmi) \ V(UnknownOSRValue) \ V(ValueOf) \ V(WrapReceiver) @@ -215,7 +215,6 @@ class LInstruction : public ZoneObject { : environment_(NULL), hydrogen_value_(NULL), bit_field_(IsCallBits::encode(false)) { - set_position(RelocInfo::kNoPosition); } virtual ~LInstruction() {} @@ -256,15 +255,6 @@ class LInstruction : public ZoneObject { LPointerMap* pointer_map() const { return pointer_map_.get(); } bool HasPointerMap() const { return pointer_map_.is_set(); } - // The 31 bits PositionBits is used to store the int position value. And the - // position value may be RelocInfo::kNoPosition (-1). The accessor always - // +1/-1 so that the encoded value of position in bit_field_ is always >= 0 - // and can fit into the 31 bits PositionBits. 
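// The deleted PositionBits scheme above stored position + 1 in a 31-bit field
// so that RelocInfo::kNoPosition (-1) encodes as 0 and every stored value is
// non-negative. A standalone model with a simplified BitField stand-in:
#include <cassert>
#include <cstdint>

template <class T, int shift, int size>
struct BitFieldModel {
  static const uint32_t kMask = ((1u << size) - 1) << shift;
  static uint32_t update(uint32_t previous, T value) {
    return (previous & ~kMask) |
           ((static_cast<uint32_t>(value) << shift) & kMask);
  }
  static T decode(uint32_t value) {
    return static_cast<T>((value & kMask) >> shift);
  }
};

typedef BitFieldModel<int, 1, 31> PositionBits;  // bit 0 is IsCallBits
const int kNoPosition = -1;

int main() {
  uint32_t bit_field = 0;
  bit_field = PositionBits::update(bit_field, kNoPosition + 1);  // encodes 0
  assert(PositionBits::decode(bit_field) - 1 == kNoPosition);
  bit_field = PositionBits::update(bit_field, 42 + 1);
  assert(PositionBits::decode(bit_field) - 1 == 42);
  return 0;
}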
- void set_position(int pos) { - bit_field_ = PositionBits::update(bit_field_, pos + 1); - } - int position() { return PositionBits::decode(bit_field_) - 1; } - void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; } HValue* hydrogen_value() const { return hydrogen_value_; } @@ -310,7 +300,6 @@ class LInstruction : public ZoneObject { virtual LOperand* TempAt(int i) = 0; class IsCallBits: public BitField<bool, 0, 1> {}; - class PositionBits: public BitField<int, 1, 31> {}; LEnvironment* environment_; SetOncePointer<LPointerMap> pointer_map_; @@ -922,19 +911,6 @@ class LIsObjectAndBranch V8_FINAL : public LControlInstruction<1, 1> { }; -class LIsNumberAndBranch V8_FINAL : public LControlInstruction<1, 0> { - public: - explicit LIsNumberAndBranch(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(IsNumberAndBranch, "is-number-and-branch") - DECLARE_HYDROGEN_ACCESSOR(IsNumberAndBranch) -}; - - class LIsStringAndBranch V8_FINAL : public LControlInstruction<1, 1> { public: LIsStringAndBranch(LOperand* value, LOperand* temp) { @@ -1144,19 +1120,6 @@ class LInstanceOfKnownGlobal V8_FINAL : public LTemplateInstruction<1, 2, 1> { }; -class LInstanceSize V8_FINAL : public LTemplateInstruction<1, 1, 0> { - public: - explicit LInstanceSize(LOperand* object) { - inputs_[0] = object; - } - - LOperand* object() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(InstanceSize, "instance-size") - DECLARE_HYDROGEN_ACCESSOR(InstanceSize) -}; - - class LBoundsCheck V8_FINAL : public LTemplateInstruction<0, 2, 0> { public: LBoundsCheck(LOperand* index, LOperand* length) { @@ -1309,7 +1272,7 @@ class LCmpMapAndBranch V8_FINAL : public LControlInstruction<1, 0> { DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch") DECLARE_HYDROGEN_ACCESSOR(CompareMap) - Handle<Map> map() const { return hydrogen()->map(); } + Handle<Map> map() const { return hydrogen()->map().handle(); } }; @@ -1605,6 +1568,15 @@ class LLoadFunctionPrototype V8_FINAL : public LTemplateInstruction<1, 1, 1> { }; +class LLoadRoot V8_FINAL : public LTemplateInstruction<1, 0, 0> { + public: + DECLARE_CONCRETE_INSTRUCTION(LoadRoot, "load-root") + DECLARE_HYDROGEN_ACCESSOR(LoadRoot) + + Heap::RootListIndex index() const { return hydrogen()->index(); } +}; + + class LLoadExternalArrayPointer V8_FINAL : public LTemplateInstruction<1, 1, 0> { public: @@ -1634,11 +1606,6 @@ class LLoadKeyed V8_FINAL : public LTemplateInstruction<1, 2, 0> { return hydrogen()->is_external(); } - virtual bool ClobbersDoubleRegisters() const V8_OVERRIDE { - return !CpuFeatures::IsSupported(SSE2) && - !IsDoubleOrFloatElementsKind(hydrogen()->elements_kind()); - } - DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed") DECLARE_HYDROGEN_ACCESSOR(LoadKeyed) @@ -2061,8 +2028,13 @@ class LCallRuntime V8_FINAL : public LTemplateInstruction<1, 1, 0> { DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime") DECLARE_HYDROGEN_ACCESSOR(CallRuntime) + virtual bool ClobbersDoubleRegisters() const V8_OVERRIDE { + return save_doubles() == kDontSaveFPRegs; + } + const Runtime::Function* function() const { return hydrogen()->function(); } int arity() const { return hydrogen()->argument_count(); } + SaveFPRegsMode save_doubles() const { return hydrogen()->save_doubles(); } }; @@ -2105,6 +2077,19 @@ class LUint32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 1> { }; +class LUint32ToSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> { + public: + explicit LUint32ToSmi(LOperand* value) { 
+ inputs_[0] = value; + } + + LOperand* value() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(Uint32ToSmi, "uint32-to-smi") + DECLARE_HYDROGEN_ACCESSOR(Change) +}; + + class LNumberTagI V8_FINAL : public LTemplateInstruction<1, 1, 0> { public: explicit LNumberTagI(LOperand* value) { @@ -2189,7 +2174,7 @@ class LTaggedToI V8_FINAL : public LTemplateInstruction<1, 1, 1> { LOperand* temp() { return temps_[0]; } DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i") - DECLARE_HYDROGEN_ACCESSOR(UnaryOperation) + DECLARE_HYDROGEN_ACCESSOR(Change) bool truncating() { return hydrogen()->CanTruncateToInt32(); } }; @@ -2364,8 +2349,10 @@ class LTransitionElementsKind V8_FINAL : public LTemplateInstruction<0, 2, 2> { virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; - Handle<Map> original_map() { return hydrogen()->original_map(); } - Handle<Map> transitioned_map() { return hydrogen()->transitioned_map(); } + Handle<Map> original_map() { return hydrogen()->original_map().handle(); } + Handle<Map> transitioned_map() { + return hydrogen()->transitioned_map().handle(); + } ElementsKind from_kind() { return hydrogen()->from_kind(); } ElementsKind to_kind() { return hydrogen()->to_kind(); } }; @@ -2515,12 +2502,13 @@ class LClampIToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 0> { class LClampTToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 1> { public: - LClampTToUint8(LOperand* value, LOperand* temp) { + LClampTToUint8(LOperand* value, LOperand* temp_xmm) { inputs_[0] = value; - temps_[0] = temp; + temps_[0] = temp_xmm; } LOperand* unclamped() { return inputs_[0]; } + LOperand* temp_xmm() { return temps_[0]; } DECLARE_CONCRETE_INSTRUCTION(ClampTToUint8, "clamp-t-to-uint8") }; @@ -2742,8 +2730,8 @@ class LPlatformChunk V8_FINAL : public LChunk { : LChunk(info, graph), num_double_slots_(0) { } - int GetNextSpillIndex(bool is_double); - LOperand* GetNextSpillSlot(bool is_double); + int GetNextSpillIndex(RegisterKind kind); + LOperand* GetNextSpillSlot(RegisterKind kind); int num_double_slots() const { return num_double_slots_; } @@ -2765,13 +2753,14 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED { next_block_(NULL), argument_count_(0), allocator_(allocator), - position_(RelocInfo::kNoPosition), instruction_pending_deoptimization_environment_(NULL), pending_deoptimization_ast_id_(BailoutId::None()) { } // Build the sequence for the graph. LPlatformChunk* Build(); + LInstruction* CheckElideControlInstruction(HControlInstruction* instr); + // Declare methods that deal with the individual node types. 
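// Why the new LUint32ToSmi above still needs a range check unless analysis
// proves otherwise: an ia32 smi carries a 31-bit signed payload behind one
// tag bit, so any uint32 of 2^30 or more cannot be tagged and would deopt.
// Model using the usual ia32 smi parameters (assumed here):
#include <cstdint>
#include <cstdio>

const int kSmiTagSize = 1;
const uint32_t kSmiMaxValue = (1u << 30) - 1;

bool TryUint32ToSmi(uint32_t value, int32_t* smi_out) {
  if (value > kSmiMaxValue) return false;                 // would deoptimize
  *smi_out = static_cast<int32_t>(value << kSmiTagSize);  // tag: shift left 1
  return true;
}

int main() {
  int32_t smi = 0;
  std::printf("%d\n", TryUint32ToSmi(5u, &smi));           // 1 (smi == 10)
  std::printf("%d\n", TryUint32ToSmi(0x80000000u, &smi));  // 0
  return 0;
}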
#define DECLARE_DO(type) LInstruction* Do##type(H##type* node); HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO) @@ -2907,7 +2896,7 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED { LInstruction* DoArithmeticD(Token::Value op, HArithmeticBinaryOperation* instr); LInstruction* DoArithmeticT(Token::Value op, - HArithmeticBinaryOperation* instr); + HBinaryOperation* instr); LOperand* GetStoreKeyedValueOperand(HStoreKeyed* instr); @@ -2921,7 +2910,6 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED { HBasicBlock* next_block_; int argument_count_; LAllocator* allocator_; - int position_; LInstruction* instruction_pending_deoptimization_environment_; BailoutId pending_deoptimization_ast_id_; diff --git a/deps/v8/src/ia32/macro-assembler-ia32.cc b/deps/v8/src/ia32/macro-assembler-ia32.cc index b65d328435..025bd891c2 100644 --- a/deps/v8/src/ia32/macro-assembler-ia32.cc +++ b/deps/v8/src/ia32/macro-assembler-ia32.cc @@ -33,6 +33,7 @@ #include "codegen.h" #include "cpu-profiler.h" #include "debug.h" +#include "isolate-inl.h" #include "runtime.h" #include "serialize.h" @@ -232,7 +233,7 @@ void MacroAssembler::TruncateDoubleToI(Register result_reg, j(not_equal, &done, Label::kNear); sub(esp, Immediate(kDoubleSize)); - movdbl(MemOperand(esp, 0), input_reg); + movsd(MemOperand(esp, 0), input_reg); SlowTruncateToI(result_reg, esp, 0); add(esp, Immediate(kDoubleSize)); bind(&done); @@ -253,8 +254,8 @@ void MacroAssembler::X87TOSToI(Register result_reg, Label::Distance dst) { Label done; sub(esp, Immediate(kPointerSize)); - fist_s(MemOperand(esp, 0)); fld(0); + fist_s(MemOperand(esp, 0)); fild_s(MemOperand(esp, 0)); pop(result_reg); FCmp(); @@ -283,7 +284,7 @@ void MacroAssembler::DoubleToI(Register result_reg, Label::Distance dst) { ASSERT(!input_reg.is(scratch)); cvttsd2si(result_reg, Operand(input_reg)); - cvtsi2sd(scratch, Operand(result_reg)); + Cvtsi2sd(scratch, Operand(result_reg)); ucomisd(scratch, input_reg); j(not_equal, conversion_failed, dst); j(parity_even, conversion_failed, dst); // NaN. @@ -344,7 +345,7 @@ void MacroAssembler::TruncateHeapNumberToI(Register result_reg, } } else if (CpuFeatures::IsSupported(SSE2)) { CpuFeatureScope scope(this, SSE2); - movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset)); + movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset)); cvttsd2si(result_reg, Operand(xmm0)); cmp(result_reg, 0x80000000u); j(not_equal, &done, Label::kNear); @@ -361,7 +362,7 @@ void MacroAssembler::TruncateHeapNumberToI(Register result_reg, if (input_reg.is(result_reg)) { // Input is clobbered. Restore number from double scratch. 
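// The DoubleToI sequence above (cvttsd2si, then Cvtsi2sd and ucomisd) detects
// a lossy conversion by round-tripping: if converting the truncated integer
// back to double does not reproduce the input, or the input was NaN, the
// conversion fails. A portable model; the explicit range guard stands in for
// the hardware's 0x80000000 "integer indefinite" result:
#include <cmath>
#include <cstdio>

bool DoubleToInt32Exact(double input, int* out) {
  if (input != input) return false;  // NaN: the parity_even branch
  if (input < -2147483648.0 || input > 2147483647.0) return false;
  int truncated = static_cast<int>(input);                     // cvttsd2si
  if (static_cast<double>(truncated) != input) return false;   // lost precision
  if (input == 0.0 && std::signbit(input)) return false;       // minus zero
  *out = truncated;
  return true;
}

int main() {
  int v = 0;
  std::printf("%d\n", DoubleToInt32Exact(42.0, &v));  // 1
  std::printf("%d\n", DoubleToInt32Exact(42.5, &v));  // 0
  return 0;
}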
sub(esp, Immediate(kDoubleSize)); - movdbl(MemOperand(esp, 0), xmm0); + movsd(MemOperand(esp, 0), xmm0); SlowTruncateToI(result_reg, esp, 0); add(esp, Immediate(kDoubleSize)); } else { @@ -390,9 +391,9 @@ void MacroAssembler::TaggedToI(Register result_reg, ASSERT(!temp.is(no_xmm_reg)); CpuFeatureScope scope(this, SSE2); - movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset)); + movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset)); cvttsd2si(result_reg, Operand(xmm0)); - cvtsi2sd(temp, Operand(result_reg)); + Cvtsi2sd(temp, Operand(result_reg)); ucomisd(xmm0, temp); RecordComment("Deferred TaggedToI: lost precision"); j(not_equal, lost_precision, Label::kNear); @@ -445,25 +446,36 @@ void MacroAssembler::TaggedToI(Register result_reg, } - -static double kUint32Bias = - static_cast<double>(static_cast<uint32_t>(0xFFFFFFFF)) + 1; - - void MacroAssembler::LoadUint32(XMMRegister dst, Register src, XMMRegister scratch) { Label done; cmp(src, Immediate(0)); - movdbl(scratch, - Operand(reinterpret_cast<int32_t>(&kUint32Bias), RelocInfo::NONE32)); - cvtsi2sd(dst, src); + ExternalReference uint32_bias = + ExternalReference::address_of_uint32_bias(); + movsd(scratch, Operand::StaticVariable(uint32_bias)); + Cvtsi2sd(dst, src); j(not_sign, &done, Label::kNear); addsd(dst, scratch); bind(&done); } +void MacroAssembler::LoadUint32NoSSE2(Register src) { + Label done; + push(src); + fild_s(Operand(esp, 0)); + cmp(src, Immediate(0)); + j(not_sign, &done, Label::kNear); + ExternalReference uint32_bias = + ExternalReference::address_of_uint32_bias(); + fld_d(Operand::StaticVariable(uint32_bias)); + faddp(1); + bind(&done); + add(esp, Immediate(kPointerSize)); +} + + void MacroAssembler::RecordWriteArray(Register object, Register value, Register index, @@ -676,6 +688,12 @@ void MacroAssembler::DebugBreak() { #endif +void MacroAssembler::Cvtsi2sd(XMMRegister dst, const Operand& src) { + xorps(dst, dst); + cvtsi2sd(dst, src); +} + + void MacroAssembler::Set(Register dst, const Immediate& x) { if (x.is_zero()) { xor_(dst, dst); // Shorter than mov. 
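// LoadUint32 above compensates for cvtsi2sd reading its operand as a signed
// int32: inputs with the top bit set convert 2^32 too low, so the sign branch
// adds the uint32 bias back. Portable model of the same arithmetic:
#include <cstdint>
#include <cstdio>

double LoadUint32Model(uint32_t src) {
  double dst = static_cast<double>(static_cast<int32_t>(src));  // Cvtsi2sd
  if (static_cast<int32_t>(src) < 0) {  // the j(not_sign, &done) fall-through
    dst += 4294967296.0;                // addsd dst, uint32_bias (2^32)
  }
  return dst;
}

int main() {
  std::printf("%.1f\n", LoadUint32Model(7u));           // 7.0
  std::printf("%.1f\n", LoadUint32Model(0xFFFFFFFFu));  // 4294967295.0
  return 0;
}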
@@ -799,9 +817,9 @@ void MacroAssembler::StoreNumberToDoubleElements( ExternalReference::address_of_canonical_non_hole_nan(); if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) { CpuFeatureScope use_sse2(this, SSE2); - movdbl(scratch2, FieldOperand(maybe_number, HeapNumber::kValueOffset)); + movsd(scratch2, FieldOperand(maybe_number, HeapNumber::kValueOffset)); bind(&have_double_value); - movdbl(FieldOperand(elements, key, times_4, + movsd(FieldOperand(elements, key, times_4, FixedDoubleArray::kHeaderSize - elements_offset), scratch2); } else { @@ -821,7 +839,7 @@ void MacroAssembler::StoreNumberToDoubleElements( bind(&is_nan); if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) { CpuFeatureScope use_sse2(this, SSE2); - movdbl(scratch2, Operand::StaticVariable(canonical_nan_reference)); + movsd(scratch2, Operand::StaticVariable(canonical_nan_reference)); } else { fld_d(Operand::StaticVariable(canonical_nan_reference)); } @@ -834,8 +852,8 @@ void MacroAssembler::StoreNumberToDoubleElements( SmiUntag(scratch1); if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) { CpuFeatureScope fscope(this, SSE2); - cvtsi2sd(scratch2, scratch1); - movdbl(FieldOperand(elements, key, times_4, + Cvtsi2sd(scratch2, scratch1); + movsd(FieldOperand(elements, key, times_4, FixedDoubleArray::kHeaderSize - elements_offset), scratch2); } else { @@ -996,6 +1014,30 @@ void MacroAssembler::AssertNotSmi(Register object) { } +void MacroAssembler::Prologue(PrologueFrameMode frame_mode) { + if (frame_mode == BUILD_STUB_FRAME) { + push(ebp); // Caller's frame pointer. + mov(ebp, esp); + push(esi); // Callee's context. + push(Immediate(Smi::FromInt(StackFrame::STUB))); + } else { + PredictableCodeSizeScope predictible_code_size_scope(this, + kNoCodeAgeSequenceLength); + if (isolate()->IsCodePreAgingActive()) { + // Pre-age the code. + call(isolate()->builtins()->MarkCodeAsExecutedOnce(), + RelocInfo::CODE_AGE_SEQUENCE); + Nop(kNoCodeAgeSequenceLength - Assembler::kCallInstructionLength); + } else { + push(ebp); // Caller's frame pointer. + mov(ebp, esp); + push(esi); // Callee's context. + push(edi); // Callee's JS function. + } + } +} + + void MacroAssembler::EnterFrame(StackFrame::Type type) { push(ebp); mov(ebp, esp); @@ -1051,7 +1093,7 @@ void MacroAssembler::EnterExitFrameEpilogue(int argc, bool save_doubles) { const int offset = -2 * kPointerSize; for (int i = 0; i < XMMRegister::kNumRegisters; i++) { XMMRegister reg = XMMRegister::from_code(i); - movdbl(Operand(ebp, offset - ((i + 1) * kDoubleSize)), reg); + movsd(Operand(ebp, offset - ((i + 1) * kDoubleSize)), reg); } } else { sub(esp, Immediate(argc * kPointerSize)); @@ -1095,7 +1137,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles) { const int offset = -2 * kPointerSize; for (int i = 0; i < XMMRegister::kNumRegisters; i++) { XMMRegister reg = XMMRegister::from_code(i); - movdbl(reg, Operand(ebp, offset - ((i + 1) * kDoubleSize))); + movsd(reg, Operand(ebp, offset - ((i + 1) * kDoubleSize))); } } @@ -1109,14 +1151,16 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles) { // Push the return address to get ready to return. push(ecx); - LeaveExitFrameEpilogue(); + LeaveExitFrameEpilogue(true); } -void MacroAssembler::LeaveExitFrameEpilogue() { +void MacroAssembler::LeaveExitFrameEpilogue(bool restore_context) { // Restore current context from top and clear it in debug mode. 
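// The save_doubles path above reserves one 8-byte slot per XMM register just
// below the two fixed exit-frame words. This reproduces the slot offsets that
// the save and restore loops compute (ia32 sizes):
#include <cstdio>

int main() {
  const int kPointerSize = 4;
  const int kDoubleSize = 8;
  const int kNumRegisters = 8;  // XMMRegister::kNumRegisters on ia32
  const int offset = -2 * kPointerSize;
  for (int i = 0; i < kNumRegisters; i++) {
    std::printf("xmm%d saved at ebp%+d\n", i, offset - (i + 1) * kDoubleSize);
  }
  return 0;
}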
ExternalReference context_address(Isolate::kContextAddress, isolate()); - mov(esi, Operand::StaticVariable(context_address)); + if (restore_context) { + mov(esi, Operand::StaticVariable(context_address)); + } #ifdef DEBUG mov(Operand::StaticVariable(context_address), Immediate(0)); #endif @@ -1128,11 +1172,11 @@ void MacroAssembler::LeaveExitFrameEpilogue() { } -void MacroAssembler::LeaveApiExitFrame() { +void MacroAssembler::LeaveApiExitFrame(bool restore_context) { mov(esp, ebp); pop(ebp); - LeaveExitFrameEpilogue(); + LeaveExitFrameEpilogue(restore_context); } @@ -2141,23 +2185,9 @@ void MacroAssembler::IndexFromHash(Register hash, Register index) { } -void MacroAssembler::CallRuntime(Runtime::FunctionId id, int num_arguments) { - CallRuntime(Runtime::FunctionForId(id), num_arguments); -} - - -void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) { - const Runtime::Function* function = Runtime::FunctionForId(id); - Set(eax, Immediate(function->nargs)); - mov(ebx, Immediate(ExternalReference(function, isolate()))); - CEntryStub ces(1, CpuFeatures::IsSupported(SSE2) ? kSaveFPRegs - : kDontSaveFPRegs); - CallStub(&ces); -} - - void MacroAssembler::CallRuntime(const Runtime::Function* f, - int num_arguments) { + int num_arguments, + SaveFPRegsMode save_doubles) { // If the expected number of arguments of the runtime function is // constant, we check that the actual number of arguments match the // expectation. @@ -2172,7 +2202,8 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f, // smarter. Set(eax, Immediate(num_arguments)); mov(ebx, Immediate(ExternalReference(f, isolate()))); - CEntryStub ces(1); + CEntryStub ces(1, CpuFeatures::IsSupported(SSE2) ? save_doubles + : kDontSaveFPRegs); CallStub(&ces); } @@ -2221,11 +2252,13 @@ void MacroAssembler::PrepareCallApiFunction(int argc) { } -void MacroAssembler::CallApiFunctionAndReturn(Address function_address, - Address thunk_address, - Operand thunk_last_arg, - int stack_space, - int return_value_offset) { +void MacroAssembler::CallApiFunctionAndReturn( + Address function_address, + Address thunk_address, + Operand thunk_last_arg, + int stack_space, + Operand return_value_operand, + Operand* context_restore_operand) { ExternalReference next_address = ExternalReference::handle_scope_next_address(isolate()); ExternalReference limit_address = @@ -2281,9 +2314,10 @@ void MacroAssembler::CallApiFunctionAndReturn(Address function_address, Label prologue; // Load the value from ReturnValue - mov(eax, Operand(ebp, return_value_offset * kPointerSize)); + mov(eax, return_value_operand); Label promote_scheduled_exception; + Label exception_handled; Label delete_allocated_handles; Label leave_exit_frame; @@ -2303,6 +2337,7 @@ void MacroAssembler::CallApiFunctionAndReturn(Address function_address, cmp(Operand::StaticVariable(scheduled_exception_address), Immediate(isolate()->factory()->the_hole_value())); j(not_equal, &promote_scheduled_exception); + bind(&exception_handled); #if ENABLE_EXTRA_CHECKS // Check if the function returned a valid JavaScript value. 
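// The SaveFPRegsMode plumbing above only honors a save request when SSE2 is
// available; without it there are no XMM registers to save and the stub falls
// back to kDontSaveFPRegs. A one-line model of that dispatch:
#include <cstdio>

enum SaveFPRegsMode { kDontSaveFPRegs, kSaveFPRegs };

SaveFPRegsMode EffectiveSaveMode(bool sse2, SaveFPRegsMode requested) {
  return sse2 ? requested : kDontSaveFPRegs;
}

int main() {
  std::printf("%d\n", EffectiveSaveMode(false, kSaveFPRegs));  // 0
  std::printf("%d\n", EffectiveSaveMode(true, kSaveFPRegs));   // 1
  return 0;
}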
@@ -2339,11 +2374,19 @@ void MacroAssembler::CallApiFunctionAndReturn(Address function_address, bind(&ok); #endif - LeaveApiExitFrame(); + bool restore_context = context_restore_operand != NULL; + if (restore_context) { + mov(esi, *context_restore_operand); + } + LeaveApiExitFrame(!restore_context); ret(stack_space * kPointerSize); bind(&promote_scheduled_exception); - TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1); + { + FrameScope frame(this, StackFrame::INTERNAL); + CallRuntime(Runtime::kPromoteScheduledException, 0); + } + jmp(&exception_handled); // HandleScope limit has changed. Delete allocated extensions. ExternalReference delete_extensions = @@ -3003,6 +3046,88 @@ void MacroAssembler::LoadPowerOf2(XMMRegister dst, } +void MacroAssembler::LookupNumberStringCache(Register object, + Register result, + Register scratch1, + Register scratch2, + Label* not_found) { + // Use of registers. Register result is used as a temporary. + Register number_string_cache = result; + Register mask = scratch1; + Register scratch = scratch2; + + // Load the number string cache. + LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex); + // Make the hash mask from the length of the number string cache. It + // contains two elements (number and string) for each cache entry. + mov(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset)); + shr(mask, kSmiTagSize + 1); // Untag length and divide it by two. + sub(mask, Immediate(1)); // Make mask. + + // Calculate the entry in the number string cache. The hash value in the + // number string cache for smis is just the smi value, and the hash for + // doubles is the xor of the upper and lower words. See + // Heap::GetNumberStringCache. + Label smi_hash_calculated; + Label load_result_from_cache; + Label not_smi; + STATIC_ASSERT(kSmiTag == 0); + JumpIfNotSmi(object, ¬_smi, Label::kNear); + mov(scratch, object); + SmiUntag(scratch); + jmp(&smi_hash_calculated, Label::kNear); + bind(¬_smi); + cmp(FieldOperand(object, HeapObject::kMapOffset), + isolate()->factory()->heap_number_map()); + j(not_equal, not_found); + STATIC_ASSERT(8 == kDoubleSize); + mov(scratch, FieldOperand(object, HeapNumber::kValueOffset)); + xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4)); + // Object is heap number and hash is now in scratch. Calculate cache index. + and_(scratch, mask); + Register index = scratch; + Register probe = mask; + mov(probe, + FieldOperand(number_string_cache, + index, + times_twice_pointer_size, + FixedArray::kHeaderSize)); + JumpIfSmi(probe, not_found); + if (CpuFeatures::IsSupported(SSE2)) { + CpuFeatureScope fscope(this, SSE2); + movsd(xmm0, FieldOperand(object, HeapNumber::kValueOffset)); + ucomisd(xmm0, FieldOperand(probe, HeapNumber::kValueOffset)); + } else { + fld_d(FieldOperand(object, HeapNumber::kValueOffset)); + fld_d(FieldOperand(probe, HeapNumber::kValueOffset)); + FCmp(); + } + j(parity_even, not_found); // Bail out if NaN is involved. + j(not_equal, not_found); // The cache did not contain this value. + jmp(&load_result_from_cache, Label::kNear); + + bind(&smi_hash_calculated); + // Object is smi and hash is now in scratch. Calculate cache index. + and_(scratch, mask); + // Check if the entry is the smi we are looking for. + cmp(object, + FieldOperand(number_string_cache, + index, + times_twice_pointer_size, + FixedArray::kHeaderSize)); + j(not_equal, not_found); + + // Get the result from the cache. 
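// The cache-index computation above, in portable form: smis hash to their own
// value, heap numbers to the xor of the double's two 32-bit halves, and the
// mask is half the cache length minus one because each entry is a
// (number, string) pair. Little-endian layout assumed, as on ia32:
#include <cstdint>
#include <cstdio>
#include <cstring>

uint32_t HeapNumberHash(double number) {
  uint32_t halves[2];
  std::memcpy(halves, &number, sizeof(halves));
  return halves[0] ^ halves[1];  // lower word xor upper word
}

int main() {
  int cache_length = 128;                // 64 (number, string) entries
  uint32_t mask = cache_length / 2 - 1;  // the shr + sub in the assembly
  std::printf("index of 2.5:   %u\n", HeapNumberHash(2.5) & mask);
  std::printf("index of smi 7: %u\n", 7u & mask);
  return 0;
}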
+ bind(&load_result_from_cache); + mov(result, + FieldOperand(number_string_cache, + index, + times_twice_pointer_size, + FixedArray::kHeaderSize + kPointerSize)); + IncrementCounter(isolate()->counters()->number_to_string_native(), 1); +} + + void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii( Register instance_type, Register scratch, @@ -3408,9 +3533,8 @@ void MacroAssembler::CheckEnumCache(Label* call_runtime) { void MacroAssembler::TestJSArrayForAllocationMemento( Register receiver_reg, - Register scratch_reg) { - Label no_memento_available; - + Register scratch_reg, + Label* no_memento_found) { ExternalReference new_space_start = ExternalReference::new_space_start(isolate()); ExternalReference new_space_allocation_top = @@ -3419,12 +3543,11 @@ void MacroAssembler::TestJSArrayForAllocationMemento( lea(scratch_reg, Operand(receiver_reg, JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag)); cmp(scratch_reg, Immediate(new_space_start)); - j(less, &no_memento_available); + j(less, no_memento_found); cmp(scratch_reg, Operand::StaticVariable(new_space_allocation_top)); - j(greater, &no_memento_available); + j(greater, no_memento_found); cmp(MemOperand(scratch_reg, -AllocationMemento::kSize), - Immediate(Handle<Map>(isolate()->heap()->allocation_memento_map()))); - bind(&no_memento_available); + Immediate(isolate()->factory()->allocation_memento_map())); } diff --git a/deps/v8/src/ia32/macro-assembler-ia32.h b/deps/v8/src/ia32/macro-assembler-ia32.h index e4e4533bf5..30f8a8dfbb 100644 --- a/deps/v8/src/ia32/macro-assembler-ia32.h +++ b/deps/v8/src/ia32/macro-assembler-ia32.h @@ -225,6 +225,9 @@ class MacroAssembler: public Assembler { void DebugBreak(); #endif + // Generates function and stub prologue code. + void Prologue(PrologueFrameMode frame_mode); + // Enter specific kind of exit frame. Expects the number of // arguments in register eax and sets up the number of arguments in // register edi and the pointer to the first argument in register @@ -240,7 +243,7 @@ class MacroAssembler: public Assembler { // Leave the current exit frame. Expects the return value in // register eax (untouched). - void LeaveApiExitFrame(); + void LeaveApiExitFrame(bool restore_context); // Find the function context up the context chain. void LoadContext(Register dst, int context_chain_length); @@ -366,6 +369,12 @@ class MacroAssembler: public Assembler { void Set(Register dst, const Immediate& x); void Set(const Operand& dst, const Immediate& x); + // cvtsi2sd instruction only writes to the low 64-bit of dst register, which + // hinders register renaming and makes dependence chains longer. So we use + // xorps to clear the dst register before cvtsi2sd to solve this issue. + void Cvtsi2sd(XMMRegister dst, Register src) { Cvtsi2sd(dst, Operand(src)); } + void Cvtsi2sd(XMMRegister dst, const Operand& src); + // Support for constant splitting. bool IsUnsafeImmediate(const Immediate& x); void SafeSet(Register dst, const Immediate& x); @@ -509,6 +518,7 @@ class MacroAssembler: public Assembler { } void LoadUint32(XMMRegister dst, Register src, XMMRegister scratch); + void LoadUint32NoSSE2(Register src); // Jump the register contains a smi. inline void JumpIfSmi(Register value, @@ -754,11 +764,18 @@ class MacroAssembler: public Assembler { void StubReturn(int argc); // Call a runtime routine. 
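// A pointer-arithmetic model of TestJSArrayForAllocationMemento above: the
// candidate memento sits immediately after the JSArray body, and its map word
// is only inspected when that address lies inside the currently allocated
// part of new space. The real code leaves the result in the flags; this model
// returns a bool, and all sizes are stand-ins for the V8 constants.
#include <cstdint>
#include <cstdio>

bool HasAllocationMemento(uintptr_t receiver, int js_array_size,
                          int memento_size, uintptr_t new_space_start,
                          uintptr_t new_space_top, uintptr_t memento_map) {
  // What scratch_reg holds: one past the would-be memento.
  uintptr_t candidate_end = receiver + js_array_size + memento_size;
  if (candidate_end < new_space_start) return false;  // j(less, ...)
  if (candidate_end > new_space_top) return false;    // j(greater, ...)
  uintptr_t map_word =
      *reinterpret_cast<const uintptr_t*>(candidate_end - memento_size);
  return map_word == memento_map;  // cmp against allocation_memento_map
}

int main() {
  uintptr_t fake_heap[16] = {};
  const int kJSArraySize = 24;  // stands in for JSArray::kSize
  const int kMementoSize = 8;   // stands in for AllocationMemento::kSize
  const uintptr_t map = 0x1234;
  fake_heap[kJSArraySize / sizeof(uintptr_t)] = map;  // the memento's map word
  uintptr_t base = reinterpret_cast<uintptr_t>(fake_heap);
  std::printf("%d\n", HasAllocationMemento(base, kJSArraySize, kMementoSize,
                                           base, base + sizeof(fake_heap),
                                           map));  // 1
  return 0;
}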
- void CallRuntime(const Runtime::Function* f, int num_arguments); - void CallRuntimeSaveDoubles(Runtime::FunctionId id); + void CallRuntime(const Runtime::Function* f, + int num_arguments, + SaveFPRegsMode save_doubles = kDontSaveFPRegs); + void CallRuntimeSaveDoubles(Runtime::FunctionId id) { + const Runtime::Function* function = Runtime::FunctionForId(id); + CallRuntime(function, function->nargs, kSaveFPRegs); + } // Convenience function: Same as above, but takes the fid instead. - void CallRuntime(Runtime::FunctionId id, int num_arguments); + void CallRuntime(Runtime::FunctionId id, int num_arguments) { + CallRuntime(Runtime::FunctionForId(id), num_arguments); + } // Convenience function: call an external reference. void CallExternalReference(ExternalReference ref, int num_arguments); @@ -807,7 +824,8 @@ class MacroAssembler: public Assembler { Address thunk_address, Operand thunk_last_arg, int stack_space, - int return_value_offset_from_ebp); + Operand return_value_operand, + Operand* context_restore_operand); // Jump to a runtime routine. void JumpToExternalReference(const ExternalReference& ext); @@ -890,6 +908,17 @@ class MacroAssembler: public Assembler { // --------------------------------------------------------------------------- // String utilities. + // Generate code to do a lookup in the number string cache. If the number in + // the register object is found in the cache the generated code falls through + // with the result in the result register. The object and the result register + // can be the same. If the number is not found in the cache the code jumps to + // the label not_found with only the content of register object unchanged. + void LookupNumberStringCache(Register object, + Register result, + Register scratch1, + Register scratch2, + Label* not_found); + // Check whether the instance type represents a flat ASCII string. Jump to the // label if not. If the instance type can be scratched specify same register // for both instance type and scratch. @@ -931,9 +960,20 @@ class MacroAssembler: public Assembler { // to another type. // On entry, receiver_reg should point to the array object. // scratch_reg gets clobbered. - // If allocation info is present, conditional code is set to equal + // If allocation info is present, conditional code is set to equal. void TestJSArrayForAllocationMemento(Register receiver_reg, - Register scratch_reg); + Register scratch_reg, + Label* no_memento_found); + + void JumpIfJSArrayHasAllocationMemento(Register receiver_reg, + Register scratch_reg, + Label* memento_found) { + Label no_memento_found; + TestJSArrayForAllocationMemento(receiver_reg, scratch_reg, + &no_memento_found); + j(equal, memento_found); + bind(&no_memento_found); + } private: bool generating_stub_; @@ -957,7 +997,7 @@ class MacroAssembler: public Assembler { void EnterExitFramePrologue(); void EnterExitFrameEpilogue(int argc, bool save_doubles); - void LeaveExitFrameEpilogue(); + void LeaveExitFrameEpilogue(bool restore_context); // Allocation support helpers. 
void LoadAllocationTopHelper(Register result, diff --git a/deps/v8/src/ia32/stub-cache-ia32.cc b/deps/v8/src/ia32/stub-cache-ia32.cc index 354c2fdcb0..9786cffe86 100644 --- a/deps/v8/src/ia32/stub-cache-ia32.cc +++ b/deps/v8/src/ia32/stub-cache-ia32.cc @@ -329,32 +329,28 @@ void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm, Register receiver, Register scratch1, Register scratch2, - Label* miss, - bool support_wrappers) { + Label* miss) { Label check_wrapper; // Check if the object is a string leaving the instance type in the // scratch register. - GenerateStringCheck(masm, receiver, scratch1, miss, - support_wrappers ? &check_wrapper : miss); + GenerateStringCheck(masm, receiver, scratch1, miss, &check_wrapper); // Load length from the string and convert to a smi. __ mov(eax, FieldOperand(receiver, String::kLengthOffset)); __ ret(0); - if (support_wrappers) { - // Check if the object is a JSValue wrapper. - __ bind(&check_wrapper); - __ cmp(scratch1, JS_VALUE_TYPE); - __ j(not_equal, miss); + // Check if the object is a JSValue wrapper. + __ bind(&check_wrapper); + __ cmp(scratch1, JS_VALUE_TYPE); + __ j(not_equal, miss); - // Check if the wrapped value is a string and load the length - // directly if it is. - __ mov(scratch2, FieldOperand(receiver, JSValue::kValueOffset)); - GenerateStringCheck(masm, scratch2, scratch1, miss, miss); - __ mov(eax, FieldOperand(scratch2, String::kLengthOffset)); - __ ret(0); - } + // Check if the wrapped value is a string and load the length + // directly if it is. + __ mov(scratch2, FieldOperand(receiver, JSValue::kValueOffset)); + GenerateStringCheck(masm, scratch2, scratch1, miss, miss); + __ mov(eax, FieldOperand(scratch2, String::kLengthOffset)); + __ ret(0); } @@ -462,50 +458,50 @@ static void FreeSpaceForFastApiCall(MacroAssembler* masm, Register scratch) { // Generates call to API function. static void GenerateFastApiCall(MacroAssembler* masm, const CallOptimization& optimization, - int argc) { + int argc, + bool restore_context) { // ----------- S t a t e ------------- // -- esp[0] : return address - // -- esp[4] : object passing the type check - // (last fast api call extra argument, - // set by CheckPrototypes) - // -- esp[8] : api function - // (first fast api call extra argument) - // -- esp[12] : api call data - // -- esp[16] : isolate - // -- esp[20] : ReturnValue default value - // -- esp[24] : ReturnValue - // -- esp[28] : last argument + // -- esp[4] - esp[28] : FunctionCallbackInfo, incl. + // : object passing the type check + // (set by CheckPrototypes) + // -- esp[32] : last argument // -- ... - // -- esp[(argc + 6) * 4] : first argument - // -- esp[(argc + 7) * 4] : receiver + // -- esp[(argc + 7) * 4] : first argument + // -- esp[(argc + 8) * 4] : receiver // ----------------------------------- + + typedef FunctionCallbackArguments FCA; + // Save calling context. + __ mov(Operand(esp, (1 + FCA::kContextSaveIndex) * kPointerSize), esi); + // Get the function and setup the context. Handle<JSFunction> function = optimization.constant_function(); __ LoadHeapObject(edi, function); __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset)); - // Pass the additional arguments. - __ mov(Operand(esp, 2 * kPointerSize), edi); + // Construct the FunctionCallbackInfo. 
+ __ mov(Operand(esp, (1 + FCA::kCalleeIndex) * kPointerSize), edi); Handle<CallHandlerInfo> api_call_info = optimization.api_call_info(); Handle<Object> call_data(api_call_info->data(), masm->isolate()); if (masm->isolate()->heap()->InNewSpace(*call_data)) { __ mov(ecx, api_call_info); __ mov(ebx, FieldOperand(ecx, CallHandlerInfo::kDataOffset)); - __ mov(Operand(esp, 3 * kPointerSize), ebx); + __ mov(Operand(esp, (1 + FCA::kDataIndex) * kPointerSize), ebx); } else { - __ mov(Operand(esp, 3 * kPointerSize), Immediate(call_data)); + __ mov(Operand(esp, (1 + FCA::kDataIndex) * kPointerSize), + Immediate(call_data)); } - __ mov(Operand(esp, 4 * kPointerSize), + __ mov(Operand(esp, (1 + FCA::kIsolateIndex) * kPointerSize), Immediate(reinterpret_cast<int>(masm->isolate()))); - __ mov(Operand(esp, 5 * kPointerSize), + __ mov(Operand(esp, (1 + FCA::kReturnValueOffset) * kPointerSize), masm->isolate()->factory()->undefined_value()); - __ mov(Operand(esp, 6 * kPointerSize), + __ mov(Operand(esp, (1 + FCA::kReturnValueDefaultValueIndex) * kPointerSize), masm->isolate()->factory()->undefined_value()); // Prepare arguments. - STATIC_ASSERT(kFastApiCallArguments == 6); - __ lea(eax, Operand(esp, kFastApiCallArguments * kPointerSize)); - + STATIC_ASSERT(kFastApiCallArguments == 7); + __ lea(eax, Operand(esp, 1 * kPointerSize)); // API function gets reference to the v8::Arguments. If CPU profiler // is enabled wrapper function will be called and we need to pass @@ -521,14 +517,14 @@ static void GenerateFastApiCall(MacroAssembler* masm, Address function_address = v8::ToCData<Address>(api_call_info->callback()); __ PrepareCallApiFunction(kApiArgc + kApiStackSpace); - // v8::Arguments::implicit_args_. + // FunctionCallbackInfo::implicit_args_. __ mov(ApiParameterOperand(2), eax); - __ add(eax, Immediate(argc * kPointerSize)); - // v8::Arguments::values_. + __ add(eax, Immediate((argc + kFastApiCallArguments - 1) * kPointerSize)); + // FunctionCallbackInfo::values_. __ mov(ApiParameterOperand(3), eax); - // v8::Arguments::length_. + // FunctionCallbackInfo::length_. __ Set(ApiParameterOperand(4), Immediate(argc)); - // v8::Arguments::is_construct_call_. + // FunctionCallbackInfo::is_construct_call_. __ Set(ApiParameterOperand(5), Immediate(0)); // v8::InvocationCallback's argument. @@ -537,11 +533,17 @@ static void GenerateFastApiCall(MacroAssembler* masm, Address thunk_address = FUNCTION_ADDR(&InvokeFunctionCallback); + Operand context_restore_operand(ebp, + (2 + FCA::kContextSaveIndex) * kPointerSize); + Operand return_value_operand(ebp, + (2 + FCA::kReturnValueOffset) * kPointerSize); __ CallApiFunctionAndReturn(function_address, thunk_address, ApiParameterOperand(1), argc + kFastApiCallArguments + 1, - kFastApiCallArguments + 1); + return_value_operand, + restore_context ? + &context_restore_operand : NULL); } @@ -556,6 +558,7 @@ static void GenerateFastApiCall(MacroAssembler* masm, ASSERT(!receiver.is(scratch)); const int stack_space = kFastApiCallArguments + argc + 1; + const int kHolderIndex = FunctionCallbackArguments::kHolderIndex + 1; // Copy return value. __ mov(scratch, Operand(esp, 0)); // Assign stack space for the call arguments. @@ -563,7 +566,7 @@ static void GenerateFastApiCall(MacroAssembler* masm, // Move the return address on top of the stack. __ mov(Operand(esp, 0), scratch); // Write holder to stack frame. - __ mov(Operand(esp, 1 * kPointerSize), receiver); + __ mov(Operand(esp, kHolderIndex * kPointerSize), receiver); // Write receiver to stack frame. 
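// A rough model of the FunctionCallbackInfo view the stub builds above: a
// block of kFastApiCallArguments implicit slots (context save, callee, data,
// isolate, the return-value pair, holder) followed by the JS arguments, with
// values_ pointing at the highest-addressed argument. Slot ordering and types
// here are simplified; the offsets follow the arithmetic in the diff.
#include <cstddef>
#include <cstdio>

struct FunctionCallbackInfoModel {
  void** implicit_args;   // what ApiParameterOperand(2) receives
  void** values;          // ApiParameterOperand(3)
  int length;             // ApiParameterOperand(4): argc
  int is_construct_call;  // ApiParameterOperand(5): 0 for a plain call
};

int main() {
  const int kFastApiCallArguments = 7;  // implicit slots, per the diff
  const int argc = 2;
  void* frame[kFastApiCallArguments + argc] = {};  // implicit slots, then args
  FunctionCallbackInfoModel info;
  info.implicit_args = frame;
  info.values = frame + kFastApiCallArguments + argc - 1;
  info.length = argc;
  info.is_construct_call = 0;
  std::printf("values sits %td slots above implicit_args\n",
              info.values - info.implicit_args);  // 8
  return 0;
}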
int index = stack_space; __ mov(Operand(esp, index-- * kPointerSize), receiver); @@ -574,7 +577,7 @@ static void GenerateFastApiCall(MacroAssembler* masm, __ mov(Operand(esp, index-- * kPointerSize), values[i]); } - GenerateFastApiCall(masm, optimization, argc); + GenerateFastApiCall(masm, optimization, argc, true); } @@ -688,7 +691,7 @@ class CallInterceptorCompiler BASE_EMBEDDED { // Invoke function. if (can_do_fast_api_call) { - GenerateFastApiCall(masm, optimization, arguments_.immediate()); + GenerateFastApiCall(masm, optimization, arguments_.immediate(), false); } else { CallKind call_kind = CallICBase::Contextual::decode(extra_state_) ? CALL_AS_FUNCTION @@ -776,9 +779,9 @@ class CallInterceptorCompiler BASE_EMBEDDED { }; -void BaseStoreStubCompiler::GenerateRestoreName(MacroAssembler* masm, - Label* label, - Handle<Name> name) { +void StoreStubCompiler::GenerateRestoreName(MacroAssembler* masm, + Label* label, + Handle<Name> name) { if (!label->is_unused()) { __ bind(label); __ mov(this->name(), Immediate(name)); @@ -809,7 +812,7 @@ static void GenerateCheckPropertyCell(MacroAssembler* masm, } -void BaseStoreStubCompiler::GenerateNegativeHolderLookup( +void StoreStubCompiler::GenerateNegativeHolderLookup( MacroAssembler* masm, Handle<JSObject> holder, Register holder_reg, @@ -827,19 +830,19 @@ void BaseStoreStubCompiler::GenerateNegativeHolderLookup( // Receiver_reg is preserved on jumps to miss_label, but may be destroyed if // store is successful. -void BaseStoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm, - Handle<JSObject> object, - LookupResult* lookup, - Handle<Map> transition, - Handle<Name> name, - Register receiver_reg, - Register storage_reg, - Register value_reg, - Register scratch1, - Register scratch2, - Register unused, - Label* miss_label, - Label* slow) { +void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm, + Handle<JSObject> object, + LookupResult* lookup, + Handle<Map> transition, + Handle<Name> name, + Register receiver_reg, + Register storage_reg, + Register value_reg, + Register scratch1, + Register scratch2, + Register unused, + Label* miss_label, + Label* slow) { int descriptor = transition->LastAdded(); DescriptorArray* descriptors = transition->instance_descriptors(); PropertyDetails details = descriptors->GetDetails(descriptor); @@ -862,7 +865,7 @@ void BaseStoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm, __ SmiUntag(value_reg); if (CpuFeatures::IsSupported(SSE2)) { CpuFeatureScope use_sse2(masm, SSE2); - __ cvtsi2sd(xmm0, value_reg); + __ Cvtsi2sd(xmm0, value_reg); } else { __ push(value_reg); __ fild_s(Operand(esp, 0)); @@ -876,7 +879,7 @@ void BaseStoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm, miss_label, DONT_DO_SMI_CHECK); if (CpuFeatures::IsSupported(SSE2)) { CpuFeatureScope use_sse2(masm, SSE2); - __ movdbl(xmm0, FieldOperand(value_reg, HeapNumber::kValueOffset)); + __ movsd(xmm0, FieldOperand(value_reg, HeapNumber::kValueOffset)); } else { __ fld_d(FieldOperand(value_reg, HeapNumber::kValueOffset)); } @@ -884,7 +887,7 @@ void BaseStoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm, __ bind(&do_store); if (CpuFeatures::IsSupported(SSE2)) { CpuFeatureScope use_sse2(masm, SSE2); - __ movdbl(FieldOperand(storage_reg, HeapNumber::kValueOffset), xmm0); + __ movsd(FieldOperand(storage_reg, HeapNumber::kValueOffset), xmm0); } else { __ fstp_d(FieldOperand(storage_reg, HeapNumber::kValueOffset)); } @@ -998,15 +1001,15 @@ void 
BaseStoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm, // Both name_reg and receiver_reg are preserved on jumps to miss_label, // but may be destroyed if store is successful. -void BaseStoreStubCompiler::GenerateStoreField(MacroAssembler* masm, - Handle<JSObject> object, - LookupResult* lookup, - Register receiver_reg, - Register name_reg, - Register value_reg, - Register scratch1, - Register scratch2, - Label* miss_label) { +void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm, + Handle<JSObject> object, + LookupResult* lookup, + Register receiver_reg, + Register name_reg, + Register value_reg, + Register scratch1, + Register scratch2, + Label* miss_label) { // Stub never generated for non-global objects that require access // checks. ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded()); @@ -1041,7 +1044,7 @@ void BaseStoreStubCompiler::GenerateStoreField(MacroAssembler* masm, __ SmiUntag(value_reg); if (CpuFeatures::IsSupported(SSE2)) { CpuFeatureScope use_sse2(masm, SSE2); - __ cvtsi2sd(xmm0, value_reg); + __ Cvtsi2sd(xmm0, value_reg); } else { __ push(value_reg); __ fild_s(Operand(esp, 0)); @@ -1054,14 +1057,14 @@ void BaseStoreStubCompiler::GenerateStoreField(MacroAssembler* masm, miss_label, DONT_DO_SMI_CHECK); if (CpuFeatures::IsSupported(SSE2)) { CpuFeatureScope use_sse2(masm, SSE2); - __ movdbl(xmm0, FieldOperand(value_reg, HeapNumber::kValueOffset)); + __ movsd(xmm0, FieldOperand(value_reg, HeapNumber::kValueOffset)); } else { __ fld_d(FieldOperand(value_reg, HeapNumber::kValueOffset)); } __ bind(&do_store); if (CpuFeatures::IsSupported(SSE2)) { CpuFeatureScope use_sse2(masm, SSE2); - __ movdbl(FieldOperand(scratch1, HeapNumber::kValueOffset), xmm0); + __ movsd(FieldOperand(scratch1, HeapNumber::kValueOffset), xmm0); } else { __ fstp_d(FieldOperand(scratch1, HeapNumber::kValueOffset)); } @@ -1160,6 +1163,7 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object, int save_at_depth, Label* miss, PrototypeCheckType check) { + const int kHolderIndex = FunctionCallbackArguments::kHolderIndex + 1; // Make sure that the type feedback oracle harvests the receiver map. // TODO(svenpanne) Remove this hack when all ICs are reworked. __ mov(scratch1, Handle<Map>(object->map())); @@ -1176,7 +1180,7 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object, int depth = 0; if (save_at_depth == depth) { - __ mov(Operand(esp, kPointerSize), reg); + __ mov(Operand(esp, kHolderIndex * kPointerSize), reg); } // Traverse the prototype chain and check the maps in the prototype chain for @@ -1237,7 +1241,7 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object, } if (save_at_depth == depth) { - __ mov(Operand(esp, kPointerSize), reg); + __ mov(Operand(esp, kHolderIndex * kPointerSize), reg); } // Go to the next object in the prototype chain. 
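// Aside on two renames running through the store hunks above (illustrative
// only): movdbl was V8's local alias for the SSE2 movsd instruction, so that
// substitution is purely cosmetic, while the capitalized Cvtsi2sd is a
// macro-assembler wrapper around cvtsi2sd. A plausible body for such a
// wrapper is sketched below; the xorps zeroing is an assumption, not taken
// from this diff. Since cvtsi2sd writes only the low lane of its destination,
// clearing the register first breaks the false dependency on its old value.
static void SketchCvtsi2sd(MacroAssembler* masm,
                           XMMRegister dst, Register src) {
  masm->xorps(dst, dst);     // Cut the dependency on dst's previous contents.
  masm->cvtsi2sd(dst, src);  // Convert the untagged integer to a double.
}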
@@ -1269,9 +1273,9 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object, } -void BaseLoadStubCompiler::HandlerFrontendFooter(Handle<Name> name, - Label* success, - Label* miss) { +void LoadStubCompiler::HandlerFrontendFooter(Handle<Name> name, + Label* success, + Label* miss) { if (!miss->is_unused()) { __ jmp(success); __ bind(miss); @@ -1280,9 +1284,9 @@ void BaseLoadStubCompiler::HandlerFrontendFooter(Handle<Name> name, } -void BaseStoreStubCompiler::HandlerFrontendFooter(Handle<Name> name, - Label* success, - Label* miss) { +void StoreStubCompiler::HandlerFrontendFooter(Handle<Name> name, + Label* success, + Label* miss) { if (!miss->is_unused()) { __ jmp(success); GenerateRestoreName(masm(), miss, name); @@ -1291,7 +1295,7 @@ void BaseStoreStubCompiler::HandlerFrontendFooter(Handle<Name> name, } -Register BaseLoadStubCompiler::CallbackHandlerFrontend( +Register LoadStubCompiler::CallbackHandlerFrontend( Handle<JSObject> object, Register object_reg, Handle<JSObject> holder, @@ -1351,7 +1355,7 @@ Register BaseLoadStubCompiler::CallbackHandlerFrontend( } -void BaseLoadStubCompiler::NonexistentHandlerFrontend( +void LoadStubCompiler::NonexistentHandlerFrontend( Handle<JSObject> object, Handle<JSObject> last, Handle<Name> name, @@ -1371,10 +1375,10 @@ void BaseLoadStubCompiler::NonexistentHandlerFrontend( } -void BaseLoadStubCompiler::GenerateLoadField(Register reg, - Handle<JSObject> holder, - PropertyIndex field, - Representation representation) { +void LoadStubCompiler::GenerateLoadField(Register reg, + Handle<JSObject> holder, + PropertyIndex field, + Representation representation) { if (!reg.is(receiver())) __ mov(receiver(), reg); if (kind() == Code::LOAD_IC) { LoadFieldStub stub(field.is_inobject(holder), @@ -1390,34 +1394,32 @@ void BaseLoadStubCompiler::GenerateLoadField(Register reg, } -void BaseLoadStubCompiler::GenerateLoadCallback( +void LoadStubCompiler::GenerateLoadCallback( const CallOptimization& call_optimization) { GenerateFastApiCall( masm(), call_optimization, receiver(), scratch3(), 0, NULL); } -void BaseLoadStubCompiler::GenerateLoadCallback( +void LoadStubCompiler::GenerateLoadCallback( Register reg, Handle<ExecutableAccessorInfo> callback) { // Insert additional parameters into the stack frame above return address. ASSERT(!scratch3().is(reg)); __ pop(scratch3()); // Get return address to place it below. - STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 0); - STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == -1); - STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == -2); - STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == -3); - STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == -4); - STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == -5); + STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 0); + STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 1); + STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 2); + STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 3); + STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 4); + STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 5); __ push(receiver()); // receiver - __ mov(scratch2(), esp); - ASSERT(!scratch2().is(reg)); // Push data from ExecutableAccessorInfo. if (isolate()->heap()->InNewSpace(callback->data())) { - Register scratch = reg.is(scratch1()) ? 
receiver() : scratch1(); - __ mov(scratch, Immediate(callback)); - __ push(FieldOperand(scratch, ExecutableAccessorInfo::kDataOffset)); + ASSERT(!scratch2().is(reg)); + __ mov(scratch2(), Immediate(callback)); + __ push(FieldOperand(scratch2(), ExecutableAccessorInfo::kDataOffset)); } else { __ push(Immediate(Handle<Object>(callback->data(), isolate()))); } @@ -1427,9 +1429,9 @@ void BaseLoadStubCompiler::GenerateLoadCallback( __ push(Immediate(reinterpret_cast<int>(isolate()))); __ push(reg); // holder - // Save a pointer to where we pushed the arguments pointer. This will be - // passed as the const ExecutableAccessorInfo& to the C++ callback. - __ push(scratch2()); + // Save a pointer to where we pushed the arguments. This will be + // passed as the const PropertyCallbackInfo& to the C++ callback. + __ push(esp); __ push(name()); // name __ mov(ebx, esp); // esp points to reference to name (handler). @@ -1460,18 +1462,19 @@ void BaseLoadStubCompiler::GenerateLoadCallback( thunk_address, ApiParameterOperand(2), kStackSpace, - 7); + Operand(ebp, 7 * kPointerSize), + NULL); } -void BaseLoadStubCompiler::GenerateLoadConstant(Handle<Object> value) { +void LoadStubCompiler::GenerateLoadConstant(Handle<Object> value) { // Return the constant value. __ LoadObject(eax, value); __ ret(0); } -void BaseLoadStubCompiler::GenerateLoadInterceptor( +void LoadStubCompiler::GenerateLoadInterceptor( Register holder_reg, Handle<JSObject> object, Handle<JSObject> interceptor_holder, @@ -2394,7 +2397,7 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall( // Check if the argument is a heap number and load its value into xmm0. Label slow; __ CheckMap(eax, factory()->heap_number_map(), &slow, DONT_DO_SMI_CHECK); - __ movdbl(xmm0, FieldOperand(eax, HeapNumber::kValueOffset)); + __ movsd(xmm0, FieldOperand(eax, HeapNumber::kValueOffset)); // Check if the argument is strictly positive. Note this also // discards NaN. @@ -2444,7 +2447,7 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall( // Return a new heap number. __ AllocateHeapNumber(eax, ebx, edx, &slow); - __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0); + __ movsd(FieldOperand(eax, HeapNumber::kValueOffset), xmm0); __ ret(2 * kPointerSize); // Return the argument (when it's an already round heap number). @@ -2623,7 +2626,7 @@ Handle<Code> CallStubCompiler::CompileFastApiCall( // esp[2 * kPointerSize] is uninitialized, esp[3 * kPointerSize] contains // duplicate of return address and will be overwritten. - GenerateFastApiCall(masm(), optimization, argc); + GenerateFastApiCall(masm(), optimization, argc, false); __ bind(&miss); __ add(esp, Immediate(kFastApiCallArguments * kPointerSize)); @@ -3111,18 +3114,14 @@ void KeyedStoreStubCompiler::GenerateNameCheck(Handle<Name> name, void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm, + Register receiver, Handle<JSFunction> getter) { - // ----------- S t a t e ------------- - // -- ecx : name - // -- edx : receiver - // -- esp[0] : return address - // ----------------------------------- { FrameScope scope(masm, StackFrame::INTERNAL); if (!getter.is_null()) { // Call the JavaScript getter with the receiver on the stack.
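// Aside (illustrative only, a public-API analogue rather than the stub
// machinery): the code below does the moral equivalent of the call sketched
// here, with the receiver as the sole stack input and zero formal arguments;
// taking |receiver| as a parameter instead of hard-coding edx lets the load
// and keyed-load paths share this stub. Assumes v8.h; the name is
// hypothetical.
static v8::Local<v8::Value> SketchCallGetter(v8::Local<v8::Function> getter,
                                             v8::Local<v8::Object> receiver) {
  return getter->Call(receiver, 0, NULL);  // Receiver only, no arguments.
}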
- __ push(edx); + __ push(receiver); ParameterCount actual(0); ParameterCount expected(getter); __ InvokeFunction(getter, expected, actual, diff --git a/deps/v8/src/ic-inl.h b/deps/v8/src/ic-inl.h index e6ff2daa62..06cbf2e112 100644 --- a/deps/v8/src/ic-inl.h +++ b/deps/v8/src/ic-inl.h @@ -102,9 +102,8 @@ void IC::SetTargetAtAddress(Address address, Code* target) { InlineCacheHolderFlag IC::GetCodeCacheForObject(Object* object, JSObject* holder) { - if (object->IsJSObject()) { - return GetCodeCacheForObject(JSObject::cast(object), holder); - } + if (object->IsJSObject()) return OWN_MAP; + // If the object is a value, we use the prototype map for the cache. ASSERT(object->IsString() || object->IsSymbol() || object->IsNumber() || object->IsBoolean()); @@ -112,23 +111,6 @@ InlineCacheHolderFlag IC::GetCodeCacheForObject(Object* object, } -InlineCacheHolderFlag IC::GetCodeCacheForObject(JSObject* object, - JSObject* holder) { - // Fast-properties and global objects store stubs in their own maps. - // Slow properties objects use prototype's map (unless the property is its own - // when holder == object). It works because slow properties objects having - // the same prototype (or a prototype with the same map) and not having - // the property are interchangeable for such a stub. - if (holder != object && - !object->HasFastProperties() && - !object->IsJSGlobalProxy() && - !object->IsJSGlobalObject()) { - return PROTOTYPE_MAP; - } - return OWN_MAP; -} - - JSObject* IC::GetCodeCacheHolder(Isolate* isolate, Object* object, InlineCacheHolderFlag holder) { diff --git a/deps/v8/src/ic.cc b/deps/v8/src/ic.cc index 55187514f9..55d7ba936f 100644 --- a/deps/v8/src/ic.cc +++ b/deps/v8/src/ic.cc @@ -71,19 +71,16 @@ const char* GetTransitionMarkModifier(KeyedAccessStoreMode mode) { void IC::TraceIC(const char* type, - Handle<Object> name, - State old_state, - Code* new_target) { + Handle<Object> name) { if (FLAG_trace_ic) { - Object* undef = new_target->GetHeap()->undefined_value(); - State new_state = StateFrom(new_target, undef, undef); - PrintF("[%s in ", type); - Isolate* isolate = new_target->GetIsolate(); - StackFrameIterator it(isolate); + Code* new_target = raw_target(); + State new_state = new_target->ic_state(); + PrintF("[%s%s in ", new_target->is_keyed_stub() ? 
"Keyed" : "", type); + StackFrameIterator it(isolate()); while (it.frame()->fp() != this->fp()) it.Advance(); StackFrame* raw_frame = it.frame(); if (raw_frame->is_internal()) { - Code* apply_builtin = isolate->builtins()->builtin( + Code* apply_builtin = isolate()->builtins()->builtin( Builtins::kFunctionApply); if (raw_frame->unchecked_code() == apply_builtin) { PrintF("apply from "); @@ -91,12 +88,12 @@ void IC::TraceIC(const char* type, raw_frame = it.frame(); } } - JavaScriptFrame::PrintTop(isolate, stdout, false, true); - Code::ExtraICState state = new_target->extra_ic_state(); + JavaScriptFrame::PrintTop(isolate(), stdout, false, true); + Code::ExtraICState extra_state = new_target->extra_ic_state(); const char* modifier = - GetTransitionMarkModifier(Code::GetKeyedAccessStoreMode(state)); + GetTransitionMarkModifier(Code::GetKeyedAccessStoreMode(extra_state)); PrintF(" (%c->%c%s)", - TransitionMarkFromState(old_state), + TransitionMarkFromState(state()), TransitionMarkFromState(new_state), modifier); name->Print(); @@ -117,10 +114,12 @@ void IC::TraceIC(const char* type, #define TRACE_GENERIC_IC(isolate, type, reason) #endif // DEBUG -#define TRACE_IC(type, name, old_state, new_target) \ - ASSERT((TraceIC(type, name, old_state, new_target), true)) +#define TRACE_IC(type, name) \ + ASSERT((TraceIC(type, name), true)) -IC::IC(FrameDepth depth, Isolate* isolate) : isolate_(isolate) { +IC::IC(FrameDepth depth, Isolate* isolate) + : isolate_(isolate), + target_set_(false) { // To improve the performance of the (much used) IC code, we unfold a few // levels of the stack frame iteration code. This yields a ~35% speedup when // running DeltaBlue and a ~25% speedup of gbemu with the '--nouse-ic' flag. @@ -145,6 +144,8 @@ IC::IC(FrameDepth depth, Isolate* isolate) : isolate_(isolate) { #endif fp_ = fp; pc_address_ = StackFrame::ResolveReturnAddressLocation(pc_address); + target_ = handle(raw_target(), isolate); + state_ = target_->ic_state(); } @@ -179,32 +180,130 @@ Address IC::OriginalCodeAddress() const { #endif -static bool TryRemoveInvalidPrototypeDependentStub(Code* target, - Object* receiver, - Object* name) { - if (target->is_keyed_load_stub() || - target->is_keyed_call_stub() || - target->is_keyed_store_stub()) { +static bool HasInterceptorGetter(JSObject* object) { + return !object->GetNamedInterceptor()->getter()->IsUndefined(); +} + + +static bool HasInterceptorSetter(JSObject* object) { + return !object->GetNamedInterceptor()->setter()->IsUndefined(); +} + + +static void LookupForRead(Handle<Object> object, + Handle<String> name, + LookupResult* lookup) { + // Skip all the objects with named interceptors, but + // without actual getter. + while (true) { + object->Lookup(*name, lookup); + // Besides normal conditions (property not found or it's not + // an interceptor), bail out if lookup is not cacheable: we won't + // be able to IC it anyway and regular lookup should work fine. 
+ if (!lookup->IsInterceptor() || !lookup->IsCacheable()) { + return; + } + + Handle<JSObject> holder(lookup->holder(), lookup->isolate()); + if (HasInterceptorGetter(*holder)) { + return; + } + + holder->LocalLookupRealNamedProperty(*name, lookup); + if (lookup->IsFound()) { + ASSERT(!lookup->IsInterceptor()); + return; + } + + Handle<Object> proto(holder->GetPrototype(), lookup->isolate()); + if (proto->IsNull()) { + ASSERT(!lookup->IsFound()); + return; + } + + object = proto; + } +} + + +bool CallIC::TryUpdateExtraICState(LookupResult* lookup, + Handle<Object> object) { + if (!lookup->IsConstantFunction()) return false; + JSFunction* function = lookup->GetConstantFunction(); + if (!function->shared()->HasBuiltinFunctionId()) return false; + + // Fetch the arguments passed to the called function. + const int argc = target()->arguments_count(); + Address entry = isolate()->c_entry_fp(isolate()->thread_local_top()); + Address fp = Memory::Address_at(entry + ExitFrameConstants::kCallerFPOffset); + Arguments args(argc + 1, + &Memory::Object_at(fp + + StandardFrameConstants::kCallerSPOffset + + argc * kPointerSize)); + switch (function->shared()->builtin_function_id()) { + case kStringCharCodeAt: + case kStringCharAt: + if (object->IsString()) { + String* string = String::cast(*object); + // Check there's the right string value or wrapper in the receiver slot. + ASSERT(string == args[0] || string == JSValue::cast(args[0])->value()); + // If we're in the default (fastest) state and the index is + // out of bounds, update the state to record this fact. + if (StringStubState::decode(extra_ic_state()) == DEFAULT_STRING_STUB && + argc >= 1 && args[1]->IsNumber()) { + double index = DoubleToInteger(args.number_at(1)); + if (index < 0 || index >= string->length()) { + extra_ic_state_ = + StringStubState::update(extra_ic_state(), + STRING_INDEX_OUT_OF_BOUNDS); + return true; + } + } + } + break; + default: + return false; + } + return false; +} + + +bool IC::TryRemoveInvalidPrototypeDependentStub(Handle<Object> receiver, + Handle<String> name) { + DisallowHeapAllocation no_gc; + + if (target()->is_call_stub()) { + LookupResult lookup(isolate()); + LookupForRead(receiver, name, &lookup); + if (static_cast<CallIC*>(this)->TryUpdateExtraICState(&lookup, receiver)) { + return true; + } + } + + if (target()->is_keyed_stub()) { // Determine whether the failure is due to a name failure. if (!name->IsName()) return false; - Name* stub_name = target->FindFirstName(); - if (Name::cast(name) != stub_name) return false; + Name* stub_name = target()->FindFirstName(); + if (*name != stub_name) return false; } InlineCacheHolderFlag cache_holder = - Code::ExtractCacheHolderFromFlags(target->flags()); + Code::ExtractCacheHolderFromFlags(target()->flags()); - Isolate* isolate = target->GetIsolate(); - if (cache_holder == OWN_MAP && !receiver->IsJSObject()) { - // The stub was generated for JSObject but called for non-JSObject. - // IC::GetCodeCacheHolder is not applicable. - return false; - } else if (cache_holder == PROTOTYPE_MAP && - receiver->GetPrototype(isolate)->IsNull()) { - // IC::GetCodeCacheHolder is not applicable. - return false; + switch (cache_holder) { + case OWN_MAP: + // The stub was generated for JSObject but called for non-JSObject. + // IC::GetCodeCacheHolder is not applicable. + if (!receiver->IsJSObject()) return false; + break; + case PROTOTYPE_MAP: + // IC::GetCodeCacheHolder is not applicable. 
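// Aside (illustrative only): with the simplified IC::GetCodeCacheForObject in
// the ic-inl.h hunk above, the cache holder is OWN_MAP for any JSObject and
// PROTOTYPE_MAP only for primitive values, whose stubs live on the
// prototype's map. The early returns around this comment guard the two cases
// where that choice cannot be evaluated. Names below are hypothetical.
enum SketchCacheHolder { kSketchOwnMap, kSketchPrototypeMap };
static inline SketchCacheHolder SketchCacheHolderFor(bool is_js_object) {
  return is_js_object ? kSketchOwnMap : kSketchPrototypeMap;
}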
+ if (receiver->GetPrototype(isolate())->IsNull()) return false; + break; } - Map* map = IC::GetCodeCacheHolder(isolate, receiver, cache_holder)->map(); + + Handle<Map> map( + IC::GetCodeCacheHolder(isolate(), *receiver, cache_holder)->map()); // Decide whether the inline cache failed because of changes to the // receiver itself or changes to one of its prototypes. @@ -214,20 +313,11 @@ static bool TryRemoveInvalidPrototypeDependentStub(Code* target, // the receiver map's code cache. Therefore, if the current target // is in the receiver map's code cache, the inline cache failed due // to prototype check failure. - int index = map->IndexInCodeCache(name, target); + int index = map->IndexInCodeCache(*name, *target()); if (index >= 0) { - map->RemoveFromCodeCache(String::cast(name), target, index); - // For loads and stores, handlers are stored in addition to the ICs on the - // map. Remove those, too. - if ((target->is_load_stub() || target->is_keyed_load_stub() || - target->is_store_stub() || target->is_keyed_store_stub()) && - target->type() != Code::NORMAL) { - Code* handler = target->FindFirstCode(); - index = map->IndexInCodeCache(name, handler); - if (index >= 0) { - map->RemoveFromCodeCache(String::cast(name), handler, index); - } - } + map->RemoveFromCodeCache(*name, *target(), index); + // Handlers are stored in addition to the ICs on the map. Remove those, too. + TryRemoveInvalidHandlers(map, name); return true; } @@ -240,8 +330,8 @@ static bool TryRemoveInvalidPrototypeDependentStub(Code* target, // If the IC is shared between multiple receivers (slow dictionary mode), then // the map cannot be deprecated and the stub invalidated. if (cache_holder == OWN_MAP) { - Map* old_map = target->FindFirstMap(); - if (old_map == map) return true; + Map* old_map = target()->FindFirstMap(); + if (old_map == *map) return true; if (old_map != NULL) { if (old_map->is_deprecated()) return true; if (IsMoreGeneralElementsKindTransition(old_map->elements_kind(), @@ -252,11 +342,9 @@ static bool TryRemoveInvalidPrototypeDependentStub(Code* target, } if (receiver->IsGlobalObject()) { - if (!name->IsName()) return false; - Isolate* isolate = target->GetIsolate(); - LookupResult lookup(isolate); - GlobalObject* global = GlobalObject::cast(receiver); - global->LocalLookupRealNamedProperty(Name::cast(name), &lookup); + LookupResult lookup(isolate()); + GlobalObject* global = GlobalObject::cast(*receiver); + global->LocalLookupRealNamedProperty(*name, &lookup); if (!lookup.IsFound()) return false; PropertyCell* cell = global->GetPropertyCell(&lookup); return cell->type()->IsConstant(); @@ -266,21 +354,38 @@ static bool TryRemoveInvalidPrototypeDependentStub(Code* target, } -IC::State IC::StateFrom(Code* target, Object* receiver, Object* name) { - IC::State state = target->ic_state(); +void IC::TryRemoveInvalidHandlers(Handle<Map> map, Handle<String> name) { + CodeHandleList handlers; + target()->FindHandlers(&handlers); + for (int i = 0; i < handlers.length(); i++) { + Handle<Code> handler = handlers.at(i); + int index = map->IndexInCodeCache(*name, *handler); + if (index >= 0) { + map->RemoveFromCodeCache(*name, *handler, index); + return; + } + } +} + - if (state != MONOMORPHIC || !name->IsString()) return state; - if (receiver->IsUndefined() || receiver->IsNull()) return state; +void IC::UpdateState(Handle<Object> receiver, Handle<Object> name) { + if (!name->IsString()) return; + if (state() != MONOMORPHIC) { + if (state() == POLYMORPHIC && receiver->IsHeapObject()) { + TryRemoveInvalidHandlers( + 
handle(Handle<HeapObject>::cast(receiver)->map()), + Handle<String>::cast(name)); + } + return; + } + if (receiver->IsUndefined() || receiver->IsNull()) return; - Code::Kind kind = target->kind(); // Remove the target from the code cache if it became invalid // because of changes in the prototype chain to avoid hitting it // again. - // Call stubs handle this later to allow extra IC state - // transitions. - if (kind != Code::CALL_IC && kind != Code::KEYED_CALL_IC && - TryRemoveInvalidPrototypeDependentStub(target, receiver, name)) { - return MONOMORPHIC_PROTOTYPE_FAILURE; + if (TryRemoveInvalidPrototypeDependentStub( + receiver, Handle<String>::cast(name))) { + return MarkMonomorphicPrototypeFailure(); } // The builtins object is special. It only changes when JavaScript @@ -289,11 +394,7 @@ IC::State IC::StateFrom(Code* target, Object* receiver, Object* name) { // an inline cache miss for the builtins object after lazily loading // JavaScript builtins, we return uninitialized as the state to // force the inline cache back to monomorphic state. - if (receiver->IsJSBuiltinsObject()) { - return UNINITIALIZED; - } - - return MONOMORPHIC; + if (receiver->IsJSBuiltinsObject()) state_ = UNINITIALIZED; } @@ -403,7 +504,7 @@ void IC::Clear(Isolate* isolate, Address address) { void CallICBase::Clear(Address address, Code* target) { - if (target->ic_state() == UNINITIALIZED) return; + if (IsCleared(target)) return; bool contextual = CallICBase::Contextual::decode(target->extra_ic_state()); Code* code = target->GetIsolate()->stub_cache()->FindCallInitialize( @@ -415,35 +516,33 @@ void CallICBase::Clear(Address address, Code* target) { void KeyedLoadIC::Clear(Isolate* isolate, Address address, Code* target) { - if (target->ic_state() == UNINITIALIZED) return; + if (IsCleared(target)) return; // Make sure to also clear the map used in inline fast cases. If we // do not clear these maps, cached code can keep objects alive // through the embedded maps. - SetTargetAtAddress(address, *initialize_stub(isolate)); + SetTargetAtAddress(address, *pre_monomorphic_stub(isolate)); } void LoadIC::Clear(Isolate* isolate, Address address, Code* target) { - if (target->ic_state() == UNINITIALIZED) return; - SetTargetAtAddress(address, *initialize_stub(isolate)); + if (IsCleared(target)) return; + SetTargetAtAddress(address, *pre_monomorphic_stub(isolate)); } void StoreIC::Clear(Isolate* isolate, Address address, Code* target) { - if (target->ic_state() == UNINITIALIZED) return; + if (IsCleared(target)) return; SetTargetAtAddress(address, - (Code::GetStrictMode(target->extra_ic_state()) == kStrictMode) - ? *initialize_stub_strict(isolate) - : *initialize_stub(isolate)); + *pre_monomorphic_stub( + isolate, Code::GetStrictMode(target->extra_ic_state()))); } void KeyedStoreIC::Clear(Isolate* isolate, Address address, Code* target) { - if (target->ic_state() == UNINITIALIZED) return; + if (IsCleared(target)) return; SetTargetAtAddress(address, - (Code::GetStrictMode(target->extra_ic_state()) == kStrictMode) - ? 
*initialize_stub_strict(isolate) - : *initialize_stub(isolate)); + *pre_monomorphic_stub( + isolate, Code::GetStrictMode(target->extra_ic_state()))); } @@ -460,47 +559,6 @@ void CompareIC::Clear(Isolate* isolate, Address address, Code* target) { } -static bool HasInterceptorGetter(JSObject* object) { - return !object->GetNamedInterceptor()->getter()->IsUndefined(); -} - - -static void LookupForRead(Handle<Object> object, - Handle<String> name, - LookupResult* lookup) { - // Skip all the objects with named interceptors, but - // without actual getter. - while (true) { - object->Lookup(*name, lookup); - // Besides normal conditions (property not found or it's not - // an interceptor), bail out if lookup is not cacheable: we won't - // be able to IC it anyway and regular lookup should work fine. - if (!lookup->IsInterceptor() || !lookup->IsCacheable()) { - return; - } - - Handle<JSObject> holder(lookup->holder(), lookup->isolate()); - if (HasInterceptorGetter(*holder)) { - return; - } - - holder->LocalLookupRealNamedProperty(*name, lookup); - if (lookup->IsFound()) { - ASSERT(!lookup->IsInterceptor()); - return; - } - - Handle<Object> proto(holder->GetPrototype(), lookup->isolate()); - if (proto->IsNull()) { - ASSERT(!lookup->IsFound()); - return; - } - - object = proto; - } -} - - Handle<Object> CallICBase::TryCallAsFunction(Handle<Object> object) { Handle<Object> delegate = Execution::GetFunctionDelegate(isolate(), object); @@ -545,16 +603,18 @@ void CallICBase::ReceiverToObjectIfRequired(Handle<Object> callee, } -MaybeObject* CallICBase::LoadFunction(State state, - Code::ExtraICState extra_ic_state, - Handle<Object> object, +static bool MigrateDeprecated(Handle<Object> object) { + if (!object->IsJSObject()) return false; + Handle<JSObject> receiver = Handle<JSObject>::cast(object); + if (!receiver->map()->is_deprecated()) return false; + JSObject::MigrateInstance(Handle<JSObject>::cast(object)); + return true; +} + + +MaybeObject* CallICBase::LoadFunction(Handle<Object> object, Handle<String> name) { - if (object->IsJSObject()) { - Handle<JSObject> receiver = Handle<JSObject>::cast(object); - if (receiver->map()->is_deprecated()) { - JSObject::MigrateInstance(receiver); - } - } + bool use_ic = MigrateDeprecated(object) ? false : FLAG_use_ic; // If the object is undefined or null it's illegal to try to get any // of its properties; throw a TypeError in that case. @@ -590,9 +650,7 @@ MaybeObject* CallICBase::LoadFunction(State state, } // Lookup is valid: Update inline cache and stub cache. - if (FLAG_use_ic) { - UpdateCaches(&lookup, state, extra_ic_state, object, name); - } + if (use_ic) UpdateCaches(&lookup, object, name); // Get the property. PropertyAttributes attr; @@ -637,53 +695,7 @@ MaybeObject* CallICBase::LoadFunction(State state, } -bool CallICBase::TryUpdateExtraICState(LookupResult* lookup, - Handle<Object> object, - Code::ExtraICState* extra_ic_state) { - ASSERT(kind_ == Code::CALL_IC); - if (!lookup->IsConstantFunction()) return false; - JSFunction* function = lookup->GetConstantFunction(); - if (!function->shared()->HasBuiltinFunctionId()) return false; - - // Fetch the arguments passed to the called function. 
- const int argc = target()->arguments_count(); - Address entry = isolate()->c_entry_fp(isolate()->thread_local_top()); - Address fp = Memory::Address_at(entry + ExitFrameConstants::kCallerFPOffset); - Arguments args(argc + 1, - &Memory::Object_at(fp + - StandardFrameConstants::kCallerSPOffset + - argc * kPointerSize)); - switch (function->shared()->builtin_function_id()) { - case kStringCharCodeAt: - case kStringCharAt: - if (object->IsString()) { - String* string = String::cast(*object); - // Check there's the right string value or wrapper in the receiver slot. - ASSERT(string == args[0] || string == JSValue::cast(args[0])->value()); - // If we're in the default (fastest) state and the index is - // out of bounds, update the state to record this fact. - if (StringStubState::decode(*extra_ic_state) == DEFAULT_STRING_STUB && - argc >= 1 && args[1]->IsNumber()) { - double index = DoubleToInteger(args.number_at(1)); - if (index < 0 || index >= string->length()) { - *extra_ic_state = - StringStubState::update(*extra_ic_state, - STRING_INDEX_OUT_OF_BOUNDS); - return true; - } - } - } - break; - default: - return false; - } - return false; -} - - Handle<Code> CallICBase::ComputeMonomorphicStub(LookupResult* lookup, - State state, - Code::ExtraICState extra_state, Handle<Object> object, Handle<String> name) { int argc = target()->arguments_count(); @@ -692,7 +704,7 @@ Handle<Code> CallICBase::ComputeMonomorphicStub(LookupResult* lookup, case FIELD: { PropertyIndex index = lookup->GetFieldIndex(); return isolate()->stub_cache()->ComputeCallField( - argc, kind_, extra_state, name, object, holder, index); + argc, kind_, extra_ic_state(), name, object, holder, index); } case CONSTANT: { if (!lookup->IsConstantFunction()) return Handle<Code>::null(); @@ -701,7 +713,7 @@ Handle<Code> CallICBase::ComputeMonomorphicStub(LookupResult* lookup, // that the code stub is in the stub cache. Handle<JSFunction> function(lookup->GetConstantFunction(), isolate()); return isolate()->stub_cache()->ComputeCallConstant( - argc, kind_, extra_state, name, object, holder, function); + argc, kind_, extra_ic_state(), name, object, holder, function); } case NORMAL: { // If we return a null handle, the IC will not be patched. @@ -715,7 +727,8 @@ Handle<Code> CallICBase::ComputeMonomorphicStub(LookupResult* lookup, if (!cell->value()->IsJSFunction()) return Handle<Code>::null(); Handle<JSFunction> function(JSFunction::cast(cell->value())); return isolate()->stub_cache()->ComputeCallGlobal( - argc, kind_, extra_state, name, receiver, global, cell, function); + argc, kind_, extra_ic_state(), name, + receiver, global, cell, function); } else { // There is only one shared stub for calling normalized // properties. It does not traverse the prototype chain, so the @@ -723,117 +736,74 @@ Handle<Code> CallICBase::ComputeMonomorphicStub(LookupResult* lookup, // applicable. 
if (!holder.is_identical_to(receiver)) return Handle<Code>::null(); return isolate()->stub_cache()->ComputeCallNormal( - argc, kind_, extra_state); + argc, kind_, extra_ic_state()); } break; } case INTERCEPTOR: ASSERT(HasInterceptorGetter(*holder)); return isolate()->stub_cache()->ComputeCallInterceptor( - argc, kind_, extra_state, name, object, holder); + argc, kind_, extra_ic_state(), name, object, holder); default: return Handle<Code>::null(); } } +Handle<Code> CallICBase::megamorphic_stub() { + return isolate()->stub_cache()->ComputeCallMegamorphic( + target()->arguments_count(), kind_, extra_ic_state()); +} + + +Handle<Code> CallICBase::pre_monomorphic_stub() { + return isolate()->stub_cache()->ComputeCallPreMonomorphic( + target()->arguments_count(), kind_, extra_ic_state()); +} + + void CallICBase::UpdateCaches(LookupResult* lookup, - State state, - Code::ExtraICState extra_ic_state, Handle<Object> object, Handle<String> name) { // Bail out if we didn't find a result. if (!lookup->IsProperty() || !lookup->IsCacheable()) return; // Compute the number of arguments. - int argc = target()->arguments_count(); Handle<Code> code; - if (state == UNINITIALIZED) { - // This is the first time we execute this inline cache. - // Set the target to the pre monomorphic stub to delay - // setting the monomorphic state. - code = isolate()->stub_cache()->ComputeCallPreMonomorphic( - argc, kind_, extra_ic_state); - } else if (state == MONOMORPHIC) { - if (kind_ == Code::CALL_IC && - TryUpdateExtraICState(lookup, object, &extra_ic_state)) { - code = ComputeMonomorphicStub(lookup, state, extra_ic_state, - object, name); - } else if (TryRemoveInvalidPrototypeDependentStub(target(), - *object, - *name)) { - state = MONOMORPHIC_PROTOTYPE_FAILURE; - code = ComputeMonomorphicStub(lookup, state, extra_ic_state, - object, name); - } else { - code = isolate()->stub_cache()->ComputeCallMegamorphic( - argc, kind_, extra_ic_state); - } - } else { - code = ComputeMonomorphicStub(lookup, state, extra_ic_state, - object, name); - } + code = state() == UNINITIALIZED + ? pre_monomorphic_stub() + : ComputeMonomorphicStub(lookup, object, name); // If there's no appropriate stub we simply avoid updating the caches. + // TODO(verwaest): Install a slow fallback in this case to avoid not learning, + // and deopting Crankshaft code. if (code.is_null()) return; - // Patch the call site depending on the state of the cache. - switch (state) { - case UNINITIALIZED: - case MONOMORPHIC_PROTOTYPE_FAILURE: - case PREMONOMORPHIC: - case MONOMORPHIC: - set_target(*code); - break; - case MEGAMORPHIC: { - // Cache code holding map should be consistent with - // GenerateMonomorphicCacheProbe. It is not the map which holds the stub. - Handle<JSObject> cache_object = object->IsJSObject() - ? Handle<JSObject>::cast(object) - : Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate())), - isolate()); - // Update the stub cache. - UpdateMegamorphicCache(cache_object->map(), *name, *code); - break; - } - case DEBUG_STUB: - break; - case POLYMORPHIC: - case GENERIC: - UNREACHABLE(); - break; - } + Handle<JSObject> cache_object = object->IsJSObject() + ? Handle<JSObject>::cast(object) + : Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate())), + isolate()); - TRACE_IC(kind_ == Code::CALL_IC ? 
"CallIC" : "KeyedCallIC", - name, state, target()); + PatchCache(cache_object, name, code); + TRACE_IC("CallIC", name); } -MaybeObject* KeyedCallIC::LoadFunction(State state, - Handle<Object> object, +MaybeObject* KeyedCallIC::LoadFunction(Handle<Object> object, Handle<Object> key) { if (key->IsInternalizedString()) { - return CallICBase::LoadFunction(state, - Code::kNoExtraICState, - object, - Handle<String>::cast(key)); - } - - if (object->IsJSObject()) { - Handle<JSObject> receiver = Handle<JSObject>::cast(object); - if (receiver->map()->is_deprecated()) { - JSObject::MigrateInstance(receiver); - } + return CallICBase::LoadFunction(object, Handle<String>::cast(key)); } if (object->IsUndefined() || object->IsNull()) { return TypeError("non_object_property_call", object, key); } - bool use_ic = FLAG_use_ic && !object->IsAccessCheckNeeded(); - ASSERT(!(use_ic && object->IsJSGlobalProxy())); + bool use_ic = MigrateDeprecated(object) + ? false : FLAG_use_ic && !object->IsAccessCheckNeeded(); - if (use_ic && state != MEGAMORPHIC) { + if (use_ic && state() != MEGAMORPHIC) { + ASSERT(!object->IsJSGlobalProxy()); int argc = target()->arguments_count(); Handle<Code> stub = isolate()->stub_cache()->ComputeCallMegamorphic( argc, Code::KEYED_CALL_IC, Code::kNoExtraICState); @@ -846,7 +816,7 @@ MaybeObject* KeyedCallIC::LoadFunction(State state, } ASSERT(!stub.is_null()); set_target(*stub); - TRACE_IC("KeyedCallIC", key, state, target()); + TRACE_IC("CallIC", key); } Handle<Object> result = GetProperty(isolate(), object, key); @@ -865,8 +835,7 @@ MaybeObject* KeyedCallIC::LoadFunction(State state, } -MaybeObject* LoadIC::Load(State state, - Handle<Object> object, +MaybeObject* LoadIC::Load(Handle<Object> object, Handle<String> name) { // If the object is undefined or null it's illegal to try to get any // of its properties; throw a TypeError in that case. @@ -879,32 +848,27 @@ MaybeObject* LoadIC::Load(State state, // string wrapper objects. The length property of string wrapper // objects is read-only and therefore always returns the length of // the underlying string value. See ECMA-262 15.5.5.1. - if ((object->IsString() || object->IsStringWrapper()) && + if (object->IsStringWrapper() && name->Equals(isolate()->heap()->length_string())) { Handle<Code> stub; - if (state == UNINITIALIZED) { + if (state() == UNINITIALIZED) { stub = pre_monomorphic_stub(); - } else if (state == PREMONOMORPHIC) { - StringLengthStub string_length_stub(kind(), !object->IsString()); + } else if (state() == PREMONOMORPHIC || state() == MONOMORPHIC) { + StringLengthStub string_length_stub(kind()); stub = string_length_stub.GetCode(isolate()); - } else if (state == MONOMORPHIC && object->IsStringWrapper()) { - StringLengthStub string_length_stub(kind(), true); - stub = string_length_stub.GetCode(isolate()); - } else if (state != MEGAMORPHIC) { - ASSERT(state != GENERIC); + } else if (state() != MEGAMORPHIC) { + ASSERT(state() != GENERIC); stub = megamorphic_stub(); } if (!stub.is_null()) { set_target(*stub); #ifdef DEBUG - if (FLAG_trace_ic) PrintF("[LoadIC : +#length /string]\n"); + if (FLAG_trace_ic) PrintF("[LoadIC : +#length /stringwrapper]\n"); #endif } // Get the string if we have a string wrapper object. - Handle<Object> string = object->IsJSValue() - ? 
Handle<Object>(Handle<JSValue>::cast(object)->value(), isolate()) - : object; - return Smi::FromInt(String::cast(*string)->length()); + String* string = String::cast(JSValue::cast(*object)->value()); + return Smi::FromInt(string->length()); } // Use specialized code for getting prototype of functions. @@ -912,13 +876,13 @@ MaybeObject* LoadIC::Load(State state, name->Equals(isolate()->heap()->prototype_string()) && Handle<JSFunction>::cast(object)->should_have_prototype()) { Handle<Code> stub; - if (state == UNINITIALIZED) { + if (state() == UNINITIALIZED) { stub = pre_monomorphic_stub(); - } else if (state == PREMONOMORPHIC) { + } else if (state() == PREMONOMORPHIC) { FunctionPrototypeStub function_prototype_stub(kind()); stub = function_prototype_stub.GetCode(isolate()); - } else if (state != MEGAMORPHIC) { - ASSERT(state != GENERIC); + } else if (state() != MEGAMORPHIC) { + ASSERT(state() != GENERIC); stub = megamorphic_stub(); } if (!stub.is_null()) { @@ -940,12 +904,7 @@ MaybeObject* LoadIC::Load(State state, return Runtime::GetElementOrCharAtOrFail(isolate(), object, index); } - if (object->IsJSObject()) { - Handle<JSObject> receiver = Handle<JSObject>::cast(object); - if (receiver->map()->is_deprecated()) { - JSObject::MigrateInstance(receiver); - } - } + bool use_ic = MigrateDeprecated(object) ? false : FLAG_use_ic; // Named lookup in the object. LookupResult lookup(isolate()); @@ -960,24 +919,20 @@ MaybeObject* LoadIC::Load(State state, } // Update inline cache and stub cache. - if (FLAG_use_ic) UpdateCaches(&lookup, state, object, name); + if (use_ic) UpdateCaches(&lookup, object, name); PropertyAttributes attr; - if (lookup.IsInterceptor() || lookup.IsHandler()) { - // Get the property. - Handle<Object> result = - Object::GetProperty(object, object, &lookup, name, &attr); - RETURN_IF_EMPTY_HANDLE(isolate(), result); - // If the property is not present, check if we need to throw an - // exception. - if (attr == ABSENT && IsUndeclaredGlobal(object)) { - return ReferenceError("not_defined", name); - } - return *result; - } - // Get the property. - return Object::GetPropertyOrFail(object, object, &lookup, name, &attr); + Handle<Object> result = + Object::GetProperty(object, object, &lookup, name, &attr); + RETURN_IF_EMPTY_HANDLE(isolate(), result); + // If the property is not present, check if we need to throw an + // exception. + if ((lookup.IsInterceptor() || lookup.IsHandler()) && + attr == ABSENT && IsUndeclaredGlobal(object)) { + return ReferenceError("not_defined", name); + } + return *result; } @@ -995,16 +950,10 @@ static bool AddOneReceiverMapIfMissing(MapHandleList* receiver_maps, } -bool IC::UpdatePolymorphicIC(State state, - Handle<HeapObject> receiver, +bool IC::UpdatePolymorphicIC(Handle<HeapObject> receiver, Handle<String> name, - Handle<Code> code, - StrictModeFlag strict_mode) { - if (code->type() == Code::NORMAL) return false; - if (target()->ic_state() == MONOMORPHIC && - target()->type() == Code::NORMAL) { - return false; - } + Handle<Code> code) { + if (!code->is_handler()) return false; MapHandleList receiver_maps; CodeHandleList handlers; @@ -1033,13 +982,11 @@ bool IC::UpdatePolymorphicIC(State state, } if (number_of_valid_maps >= 4) return false; + if (number_of_maps == 0) return false; - // Only allow 0 maps in case target() was reset to UNINITIALIZED by the GC. - // In that case, allow the IC to go back monomorphic. 
- if (number_of_maps == 0 && target()->ic_state() != UNINITIALIZED) { + if (!target()->FindHandlers(&handlers, receiver_maps.length())) { return false; } - target()->FindAllCode(&handlers, receiver_maps.length()); } number_of_valid_maps++; @@ -1050,73 +997,19 @@ bool IC::UpdatePolymorphicIC(State state, handlers.Add(code); } - Handle<Code> ic = ComputePolymorphicIC( - &receiver_maps, &handlers, number_of_valid_maps, name, strict_mode); + Handle<Code> ic = isolate()->stub_cache()->ComputePolymorphicIC( + &receiver_maps, &handlers, number_of_valid_maps, name, strict_mode()); set_target(*ic); return true; } -Handle<Code> LoadIC::ComputePolymorphicIC(MapHandleList* receiver_maps, - CodeHandleList* handlers, - int number_of_valid_maps, - Handle<Name> name, - StrictModeFlag strict_mode) { - return isolate()->stub_cache()->ComputePolymorphicLoadIC( - receiver_maps, handlers, number_of_valid_maps, name); -} - - -Handle<Code> StoreIC::ComputePolymorphicIC(MapHandleList* receiver_maps, - CodeHandleList* handlers, - int number_of_valid_maps, - Handle<Name> name, - StrictModeFlag strict_mode) { - return isolate()->stub_cache()->ComputePolymorphicStoreIC( - receiver_maps, handlers, number_of_valid_maps, name, strict_mode); -} - - -void LoadIC::UpdateMonomorphicIC(Handle<HeapObject> receiver, - Handle<Code> handler, - Handle<String> name, - StrictModeFlag strict_mode) { - if (handler->is_load_stub()) return set_target(*handler); - Handle<Code> ic = isolate()->stub_cache()->ComputeMonomorphicLoadIC( - receiver, handler, name); - set_target(*ic); -} - - -void KeyedLoadIC::UpdateMonomorphicIC(Handle<HeapObject> receiver, - Handle<Code> handler, - Handle<String> name, - StrictModeFlag strict_mode) { - if (handler->is_keyed_load_stub()) return set_target(*handler); - Handle<Code> ic = isolate()->stub_cache()->ComputeMonomorphicKeyedLoadIC( - receiver, handler, name); - set_target(*ic); -} - - -void StoreIC::UpdateMonomorphicIC(Handle<HeapObject> receiver, - Handle<Code> handler, - Handle<String> name, - StrictModeFlag strict_mode) { - if (handler->is_store_stub()) return set_target(*handler); - Handle<Code> ic = isolate()->stub_cache()->ComputeMonomorphicStoreIC( - receiver, handler, name, strict_mode); - set_target(*ic); -} - - -void KeyedStoreIC::UpdateMonomorphicIC(Handle<HeapObject> receiver, - Handle<Code> handler, - Handle<String> name, - StrictModeFlag strict_mode) { - if (handler->is_keyed_store_stub()) return set_target(*handler); - Handle<Code> ic = isolate()->stub_cache()->ComputeMonomorphicKeyedStoreIC( - receiver, handler, name, strict_mode); +void IC::UpdateMonomorphicIC(Handle<HeapObject> receiver, + Handle<Code> handler, + Handle<String> name) { + if (!handler->is_handler()) return set_target(*handler); + Handle<Code> ic = isolate()->stub_cache()->ComputeMonomorphicIC( + receiver, handler, name, strict_mode()); set_target(*ic); } @@ -1127,7 +1020,7 @@ void IC::CopyICToMegamorphicCache(Handle<String> name) { { DisallowHeapAllocation no_gc; target()->FindAllMaps(&receiver_maps); - target()->FindAllCode(&handlers, receiver_maps.length()); + if (!target()->FindHandlers(&handlers, receiver_maps.length())) return; } for (int i = 0; i < receiver_maps.length(); i++) { UpdateMegamorphicCache(*receiver_maps.at(i), *name, *handlers.at(i)); @@ -1151,69 +1044,58 @@ bool IC::IsTransitionedMapOfMonomorphicTarget(Map* receiver_map) { } -// Since GC may have been invoked, by the time PatchCache is called, |state| is -// not necessarily equal to target()->state(). 
-void IC::PatchCache(State state, - StrictModeFlag strict_mode, - Handle<HeapObject> receiver, +void IC::PatchCache(Handle<HeapObject> receiver, Handle<String> name, Handle<Code> code) { - switch (state) { + switch (state()) { case UNINITIALIZED: case PREMONOMORPHIC: case MONOMORPHIC_PROTOTYPE_FAILURE: - UpdateMonomorphicIC(receiver, code, name, strict_mode); + UpdateMonomorphicIC(receiver, code, name); break; case MONOMORPHIC: - // Only move to megamorphic if the target changes. - if (target() != *code) { - if (target()->is_load_stub() || target()->is_store_stub()) { - bool is_same_handler = false; - { - DisallowHeapAllocation no_allocation; - Code* old_handler = target()->FindFirstCode(); - is_same_handler = old_handler == *code; - } - if (is_same_handler - && IsTransitionedMapOfMonomorphicTarget(receiver->map())) { - UpdateMonomorphicIC(receiver, code, name, strict_mode); - break; - } - if (UpdatePolymorphicIC(state, receiver, name, code, strict_mode)) { - break; - } - - if (target()->type() != Code::NORMAL) { - CopyICToMegamorphicCache(name); - } + // For now, call stubs are allowed to rewrite to the same stub. This + // happens e.g., when the field does not contain a function. + ASSERT(target()->is_call_stub() || + target()->is_keyed_call_stub() || + !target().is_identical_to(code)); + if (!target()->is_keyed_stub()) { + bool is_same_handler = false; + { + DisallowHeapAllocation no_allocation; + Code* old_handler = target()->FindFirstHandler(); + is_same_handler = old_handler == *code; + } + if (is_same_handler + && IsTransitionedMapOfMonomorphicTarget(receiver->map())) { + UpdateMonomorphicIC(receiver, code, name); + break; + } + if (UpdatePolymorphicIC(receiver, name, code)) { + break; } - UpdateMegamorphicCache(receiver->map(), *name, *code); - set_target((strict_mode == kStrictMode) - ? *megamorphic_stub_strict() - : *megamorphic_stub()); + CopyICToMegamorphicCache(name); } + + UpdateMegamorphicCache(receiver->map(), *name, *code); + set_target(*megamorphic_stub()); break; case MEGAMORPHIC: - // Update the stub cache. UpdateMegamorphicCache(receiver->map(), *name, *code); break; case POLYMORPHIC: - if (target()->is_load_stub() || target()->is_store_stub()) { - if (UpdatePolymorphicIC(state, receiver, name, code, strict_mode)) { + if (target()->is_keyed_stub()) { + // When trying to patch a polymorphic keyed stub with anything other + // than another polymorphic stub, go generic. + set_target(*generic_stub()); + } else { + if (UpdatePolymorphicIC(receiver, name, code)) { break; } CopyICToMegamorphicCache(name); UpdateMegamorphicCache(receiver->map(), *name, *code); - set_target((strict_mode == kStrictMode) - ? *megamorphic_stub_strict() - : *megamorphic_stub()); - } else { - // When trying to patch a polymorphic keyed load/store element stub - // with anything other than another polymorphic stub, go generic. - set_target((strict_mode == kStrictMode) - ? 
*generic_stub_strict() - : *generic_stub()); + set_target(*megamorphic_stub()); } break; case DEBUG_STUB: @@ -1225,52 +1107,29 @@ void IC::PatchCache(State state, } -static void GetReceiverMapsForStub(Handle<Code> stub, - MapHandleList* result) { - ASSERT(stub->is_inline_cache_stub()); - switch (stub->ic_state()) { - case MONOMORPHIC: { - Map* map = stub->FindFirstMap(); - if (map != NULL) { - result->Add(Handle<Map>(map)); - } - break; - } - case POLYMORPHIC: { - DisallowHeapAllocation no_allocation; - int mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT); - for (RelocIterator it(*stub, mask); !it.done(); it.next()) { - RelocInfo* info = it.rinfo(); - Handle<Object> object(info->target_object(), stub->GetIsolate()); - if (object->IsString()) break; - ASSERT(object->IsMap()); - AddOneReceiverMapIfMissing(result, Handle<Map>::cast(object)); - } - break; - } - case MEGAMORPHIC: - break; - case UNINITIALIZED: - case PREMONOMORPHIC: - case MONOMORPHIC_PROTOTYPE_FAILURE: - case GENERIC: - case DEBUG_STUB: - UNREACHABLE(); - break; +Handle<Code> LoadIC::SimpleFieldLoad(int offset, + bool inobject, + Representation representation) { + if (kind() == Code::LOAD_IC) { + LoadFieldStub stub(inobject, offset, representation); + return stub.GetCode(isolate()); + } else { + KeyedLoadFieldStub stub(inobject, offset, representation); + return stub.GetCode(isolate()); } } - void LoadIC::UpdateCaches(LookupResult* lookup, - State state, Handle<Object> object, Handle<String> name) { + // TODO(verwaest): It would be nice to support loading fields from smis as + // well. For now just fail to update the cache. if (!object->IsHeapObject()) return; Handle<HeapObject> receiver = Handle<HeapObject>::cast(object); Handle<Code> code; - if (state == UNINITIALIZED) { + if (state() == UNINITIALIZED) { // This is the first time we execute this inline cache. // Set the target to the pre monomorphic stub to delay // setting the monomorphic state. @@ -1278,17 +1137,25 @@ void LoadIC::UpdateCaches(LookupResult* lookup, } else if (!lookup->IsCacheable()) { // Bail out if the result is not cacheable. code = slow_stub(); + } else if (object->IsString() && + name->Equals(isolate()->heap()->length_string())) { + int length_index = String::kLengthOffset / kPointerSize; + code = SimpleFieldLoad(length_index); } else if (!object->IsJSObject()) { // TODO(jkummerow): It would be nice to support non-JSObjects in // ComputeLoadHandler, then we wouldn't need to go generic here. code = slow_stub(); + } else if (!lookup->IsProperty()) { + code = kind() == Code::LOAD_IC + ? isolate()->stub_cache()->ComputeLoadNonexistent( + name, Handle<JSObject>::cast(receiver)) + : slow_stub(); } else { - code = ComputeLoadHandler(lookup, Handle<JSObject>::cast(receiver), name); - if (code.is_null()) code = slow_stub(); + code = ComputeHandler(lookup, Handle<JSObject>::cast(receiver), name); } - PatchCache(state, kNonStrictMode, receiver, name, code); - TRACE_IC("LoadIC", name, state, target()); + PatchCache(receiver, name, code); + TRACE_IC("LoadIC", name); } @@ -1299,34 +1166,56 @@ void IC::UpdateMegamorphicCache(Map* map, Name* name, Code* code) { } -Handle<Code> LoadIC::ComputeLoadHandler(LookupResult* lookup, - Handle<JSObject> receiver, - Handle<String> name) { - if (!lookup->IsProperty()) { - // Nonexistent property. The result is undefined. 
- return isolate()->stub_cache()->ComputeLoadNonexistent(name, receiver); +Handle<Code> IC::ComputeHandler(LookupResult* lookup, + Handle<JSObject> receiver, + Handle<String> name, + Handle<Object> value) { + Handle<Code> code = isolate()->stub_cache()->FindHandler( + name, receiver, kind()); + if (!code.is_null()) return code; + + code = CompileHandler(lookup, receiver, name, value); + + if (code->is_handler() && code->type() != Code::NORMAL) { + HeapObject::UpdateMapCodeCache(receiver, name, code); } - // Compute monomorphic stub. + return code; +} + + +Handle<Code> LoadIC::CompileHandler(LookupResult* lookup, + Handle<JSObject> receiver, + Handle<String> name, + Handle<Object> unused) { Handle<JSObject> holder(lookup->holder()); + LoadStubCompiler compiler(isolate(), kind()); + switch (lookup->type()) { - case FIELD: - return isolate()->stub_cache()->ComputeLoadField( - name, receiver, holder, - lookup->GetFieldIndex(), lookup->representation()); + case FIELD: { + PropertyIndex field = lookup->GetFieldIndex(); + if (receiver.is_identical_to(holder)) { + return SimpleFieldLoad(field.translate(holder), + field.is_inobject(holder), + lookup->representation()); + } + return compiler.CompileLoadField( + receiver, holder, name, field, lookup->representation()); + } case CONSTANT: { Handle<Object> constant(lookup->GetConstant(), isolate()); // TODO(2803): Don't compute a stub for cons strings because they cannot // be embedded into code. - if (constant->IsConsString()) return Handle<Code>::null(); - return isolate()->stub_cache()->ComputeLoadConstant( - name, receiver, holder, constant); + if (constant->IsConsString()) break; + return compiler.CompileLoadConstant(receiver, holder, name, constant); } case NORMAL: + if (kind() != Code::LOAD_IC) break; if (holder->IsGlobalObject()) { Handle<GlobalObject> global = Handle<GlobalObject>::cast(holder); Handle<PropertyCell> cell( global->GetPropertyCell(lookup), isolate()); + // TODO(verwaest): Turn into a handler. return isolate()->stub_cache()->ComputeLoadGlobal( name, receiver, global, cell, lookup->IsDontDelete()); } @@ -1335,16 +1224,25 @@ Handle<Code> LoadIC::ComputeLoadHandler(LookupResult* lookup, // property must be found in the receiver for the stub to be // applicable. if (!holder.is_identical_to(receiver)) break; - return isolate()->stub_cache()->ComputeLoadNormal(name, receiver); + return isolate()->builtins()->LoadIC_Normal(); case CALLBACKS: { + // Use simple field loads for some well-known callback properties. 
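// Aside (illustrative only): properties such as a string's length live at a
// fixed offset inside the object header, so the handler can be a plain field
// load instead of a callback, exactly what SimpleFieldLoad above builds. The
// conversion mirrored below turns a raw byte offset into the header-relative
// slot index handed to PropertyIndex::NewHeaderIndex just below; 4 bytes per
// slot assumes ia32, and the names are hypothetical.
static inline int SketchHeaderSlotIndex(int object_offset,
                                        int pointer_size = 4) {
  return object_offset / pointer_size;
}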
+ int object_offset; + Handle<Map> map(receiver->map()); + if (Accessors::IsJSObjectFieldAccessor(map, name, &object_offset)) { + PropertyIndex index = + PropertyIndex::NewHeaderIndex(object_offset / kPointerSize); + return compiler.CompileLoadField( + receiver, receiver, name, index, Representation::Tagged()); + } + Handle<Object> callback(lookup->GetCallbackObject(), isolate()); if (callback->IsExecutableAccessorInfo()) { Handle<ExecutableAccessorInfo> info = Handle<ExecutableAccessorInfo>::cast(callback); if (v8::ToCData<Address>(info->getter()) == 0) break; if (!info->IsCompatibleReceiver(*receiver)) break; - return isolate()->stub_cache()->ComputeLoadCallback( - name, receiver, holder, info); + return compiler.CompileLoadCallback(receiver, holder, name, info); } else if (callback->IsAccessorPair()) { Handle<Object> getter(Handle<AccessorPair>::cast(callback)->getter(), isolate()); @@ -1354,19 +1252,11 @@ Handle<Code> LoadIC::ComputeLoadHandler(LookupResult* lookup, Handle<JSFunction> function = Handle<JSFunction>::cast(getter); CallOptimization call_optimization(function); if (call_optimization.is_simple_api_call() && - call_optimization.IsCompatibleReceiver(*receiver) && - FLAG_js_accessor_ics) { - return isolate()->stub_cache()->ComputeLoadCallback( - name, receiver, holder, call_optimization); + call_optimization.IsCompatibleReceiver(*receiver)) { + return compiler.CompileLoadCallback( + receiver, holder, name, call_optimization); } - return isolate()->stub_cache()->ComputeLoadViaGetter( - name, receiver, holder, function); - } else if (receiver->IsJSArray() && - name->Equals(isolate()->heap()->length_string())) { - PropertyIndex lengthIndex = - PropertyIndex::NewHeaderIndex(JSArray::kLengthOffset / kPointerSize); - return isolate()->stub_cache()->ComputeLoadField( - name, receiver, holder, lengthIndex, Representation::Tagged()); + return compiler.CompileLoadViaGetter(receiver, holder, name, function); } // TODO(dcarney): Handle correctly. if (callback->IsDeclaredAccessorInfo()) break; @@ -1376,12 +1266,12 @@ Handle<Code> LoadIC::ComputeLoadHandler(LookupResult* lookup, } case INTERCEPTOR: ASSERT(HasInterceptorGetter(*holder)); - return isolate()->stub_cache()->ComputeLoadInterceptor( - name, receiver, holder); + return compiler.CompileLoadInterceptor(receiver, holder, name); default: break; } - return Handle<Code>::null(); + + return slow_stub(); } @@ -1406,8 +1296,6 @@ static Handle<Object> TryConvertKey(Handle<Object> key, Isolate* isolate) { Handle<Code> KeyedLoadIC::LoadElementStub(Handle<JSObject> receiver) { - State ic_state = target()->ic_state(); - // Don't handle megamorphic property accesses for INTERCEPTORS or CALLBACKS // via megamorphic stubs, since they don't have a map in their relocation info // and so the stubs can't be harvested for the object needed for a map check. @@ -1418,17 +1306,16 @@ Handle<Code> KeyedLoadIC::LoadElementStub(Handle<JSObject> receiver) { Handle<Map> receiver_map(receiver->map(), isolate()); MapHandleList target_receiver_maps; - if (ic_state == UNINITIALIZED || ic_state == PREMONOMORPHIC) { + if (state() == UNINITIALIZED || state() == PREMONOMORPHIC) { // Optimistically assume that ICs that haven't reached the MONOMORPHIC state // yet will do so and stay there. 
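// Aside (illustrative only): the stub-selection policy around this point in
// LoadElementStub, restated with hypothetical names. Warming-up sites get a
// monomorphic element stub for the map just seen; a monomorphic site whose
// receiver merely generalized its elements kind stays monomorphic on the new
// map; anything else accumulates receiver maps and becomes polymorphic.
enum SketchElementStubChoice {
  kSketchMonomorphicOnCurrentMap,  // Warming up, or a compatible
                                   // elements-kind transition.
  kSketchPolymorphicOnSeenMaps     // Everything else short of generic.
};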
return isolate()->stub_cache()->ComputeKeyedLoadElement(receiver_map); } - if (target() == *string_stub()) { + if (target().is_identical_to(string_stub())) { target_receiver_maps.Add(isolate()->factory()->string_map()); } else { - GetReceiverMapsForStub(Handle<Code>(target(), isolate()), - &target_receiver_maps); + target()->FindAllMaps(&target_receiver_maps); if (target_receiver_maps.length() == 0) { return isolate()->stub_cache()->ComputeKeyedLoadElement(receiver_map); } } @@ -1441,14 +1328,14 @@ Handle<Code> KeyedLoadIC::LoadElementStub(Handle<JSObject> receiver) { // monomorphic. If this optimistic assumption is not true, the IC will // miss again and it will become polymorphic and support both the // untransitioned and transitioned maps. - if (ic_state == MONOMORPHIC && + if (state() == MONOMORPHIC && IsMoreGeneralElementsKindTransition( target_receiver_maps.at(0)->elements_kind(), receiver->GetElementsKind())) { return isolate()->stub_cache()->ComputeKeyedLoadElement(receiver_map); } - ASSERT(ic_state != GENERIC); + ASSERT(state() != GENERIC); // Determine the list of receiver maps that this call site has seen, // adding the map that was just encountered. @@ -1471,132 +1358,69 @@ Handle<Code> KeyedLoadIC::LoadElementStub(Handle<JSObject> receiver) { } -MaybeObject* KeyedLoadIC::Load(State state, - Handle<Object> object, +MaybeObject* KeyedLoadIC::Load(Handle<Object> object, Handle<Object> key, ICMissMode miss_mode) { + if (MigrateDeprecated(object)) { + return Runtime::GetObjectPropertyOrFail(isolate(), object, key); + } + + MaybeObject* maybe_object = NULL; + Handle<Code> stub = generic_stub(); + // Check for values that can be converted into an internalized string directly // or are representable as a smi. key = TryConvertKey(key, isolate()); if (key->IsInternalizedString()) { - return LoadIC::Load(state, object, Handle<String>::cast(key)); - } - - bool use_ic = FLAG_use_ic && !object->IsAccessCheckNeeded(); - ASSERT(!(use_ic && object->IsJSGlobalProxy())); - - if (use_ic) { - Handle<Code> stub = generic_stub(); + maybe_object = LoadIC::Load(object, Handle<String>::cast(key)); + if (maybe_object->IsFailure()) return maybe_object; + } else if (FLAG_use_ic && !object->IsAccessCheckNeeded()) { + ASSERT(!object->IsJSGlobalProxy()); if (miss_mode != MISS_FORCE_GENERIC) { if (object->IsString() && key->IsNumber()) { - if (state == UNINITIALIZED) { - stub = string_stub(); - } + if (state() == UNINITIALIZED) stub = string_stub(); } else if (object->IsJSObject()) { Handle<JSObject> receiver = Handle<JSObject>::cast(object); - if (receiver->map()->is_deprecated()) { - JSObject::MigrateInstance(receiver); - } - if (receiver->elements()->map() == isolate()->heap()->non_strict_arguments_elements_map()) { stub = non_strict_arguments_stub(); } else if (receiver->HasIndexedInterceptor()) { stub = indexed_interceptor_stub(); } else if (!key->ToSmi()->IsFailure() && - (target() != *non_strict_arguments_stub())) { + (!target().is_identical_to(non_strict_arguments_stub()))) { stub = LoadElementStub(receiver); } } - } else { - TRACE_GENERIC_IC(isolate(), "KeyedLoadIC", "force generic"); + } + } + + if (!is_target_set()) { + if (*stub == *generic_stub()) { + TRACE_GENERIC_IC(isolate(), "KeyedLoadIC", "set generic"); } ASSERT(!stub.is_null()); set_target(*stub); - TRACE_IC("KeyedLoadIC", key, state, target()); + TRACE_IC("LoadIC", key); } - + if (maybe_object != NULL) return maybe_object; return Runtime::GetObjectPropertyOrFail(isolate(), object, key); } -Handle<Code>
KeyedLoadIC::ComputeLoadHandler(LookupResult* lookup, - Handle<JSObject> receiver, - Handle<String> name) { - // Bail out if we didn't find a result. - if (!lookup->IsProperty()) return Handle<Code>::null(); - - // Compute a monomorphic stub. - Handle<JSObject> holder(lookup->holder(), isolate()); - switch (lookup->type()) { - case FIELD: - return isolate()->stub_cache()->ComputeKeyedLoadField( - name, receiver, holder, - lookup->GetFieldIndex(), lookup->representation()); - case CONSTANT: { - Handle<Object> constant(lookup->GetConstant(), isolate()); - // TODO(2803): Don't compute a stub for cons strings because they cannot - // be embedded into code. - if (constant->IsConsString()) return Handle<Code>::null(); - return isolate()->stub_cache()->ComputeKeyedLoadConstant( - name, receiver, holder, constant); - } - case CALLBACKS: { - Handle<Object> callback_object(lookup->GetCallbackObject(), isolate()); - // TODO(dcarney): Handle DeclaredAccessorInfo correctly. - if (callback_object->IsExecutableAccessorInfo()) { - Handle<ExecutableAccessorInfo> callback = - Handle<ExecutableAccessorInfo>::cast(callback_object); - if (v8::ToCData<Address>(callback->getter()) == 0) break; - if (!callback->IsCompatibleReceiver(*receiver)) break; - return isolate()->stub_cache()->ComputeKeyedLoadCallback( - name, receiver, holder, callback); - } else if (callback_object->IsAccessorPair()) { - Handle<Object> getter( - Handle<AccessorPair>::cast(callback_object)->getter(), - isolate()); - if (!getter->IsJSFunction()) break; - if (holder->IsGlobalObject()) break; - if (!holder->HasFastProperties()) break; - Handle<JSFunction> function = Handle<JSFunction>::cast(getter); - CallOptimization call_optimization(function); - if (call_optimization.is_simple_api_call() && - call_optimization.IsCompatibleReceiver(*receiver) && - FLAG_js_accessor_ics) { - return isolate()->stub_cache()->ComputeKeyedLoadCallback( - name, receiver, holder, call_optimization); - } - } - break; - } - case INTERCEPTOR: - ASSERT(HasInterceptorGetter(lookup->holder())); - return isolate()->stub_cache()->ComputeKeyedLoadInterceptor( - name, receiver, holder); - default: - // Always rewrite to the generic case so that we do not - // repeatedly try to rewrite. - return generic_stub(); - } - return Handle<Code>::null(); -} - - static bool LookupForWrite(Handle<JSObject> receiver, Handle<String> name, Handle<Object> value, LookupResult* lookup, - IC::State* state) { + IC* ic) { Handle<JSObject> holder = receiver; receiver->Lookup(*name, lookup); if (lookup->IsFound()) { if (lookup->IsReadOnly() || !lookup->IsCacheable()) return false; if (lookup->holder() == *receiver) { - if (lookup->IsInterceptor() && - receiver->GetNamedInterceptor()->setter()->IsUndefined()) { + if (lookup->IsInterceptor() && !HasInterceptorSetter(*receiver)) { receiver->LocalLookupRealNamedProperty(*name, lookup); return lookup->IsFound() && !lookup->IsReadOnly() && @@ -1642,22 +1466,21 @@ static bool LookupForWrite(Handle<JSObject> receiver, // entirely by the migration above. receiver->map()->LookupTransition(*holder, *name, lookup); if (!lookup->IsTransition()) return false; - *state = MONOMORPHIC_PROTOTYPE_FAILURE; + ic->MarkMonomorphicPrototypeFailure(); } return true; } -MaybeObject* StoreIC::Store(State state, - StrictModeFlag strict_mode, - Handle<Object> object, +MaybeObject* StoreIC::Store(Handle<Object> object, Handle<String> name, Handle<Object> value, JSReceiver::StoreFromKeyed store_mode) { - // Handle proxies. 
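// LoadElementStub() above walks a feedback lattice: stay monomorphic for a
// single receiver map, collect maps while polymorphic, and go generic once
// the same map misses twice or too many maps accumulate. A self-contained
// sketch of that progression; the cap of 4 mirrors kMaxKeyedPolymorphism but
// is illustrative here.
#include <algorithm>
#include <vector>

enum ICStateSketch { UNINITIALIZED_S, MONOMORPHIC_S, POLYMORPHIC_S, GENERIC_S };

struct KeyedICSketch {
  ICStateSketch state;
  std::vector<int> seen_maps;  // receiver map identities seen at this site
  KeyedICSketch() : state(UNINITIALIZED_S) {}

  void Miss(int receiver_map) {
    if (state == GENERIC_S) return;
    if (std::find(seen_maps.begin(), seen_maps.end(), receiver_map) !=
        seen_maps.end()) {
      state = GENERIC_S;  // same map added twice: a new stub will not help
      return;
    }
    seen_maps.push_back(receiver_map);
    if (seen_maps.size() > 4) {
      state = GENERIC_S;  // max polymorphism exceeded
    } else if (seen_maps.size() == 1) {
      state = MONOMORPHIC_S;
    } else {
      state = POLYMORPHIC_S;
    }
  }
};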
- if (object->IsJSProxy()) { - return JSReceiver::SetPropertyOrFail( - Handle<JSReceiver>::cast(object), name, value, NONE, strict_mode); + if (MigrateDeprecated(object) || object->IsJSProxy()) { + Handle<Object> result = JSReceiver::SetProperty( + Handle<JSReceiver>::cast(object), name, value, NONE, strict_mode()); + RETURN_IF_EMPTY_HANDLE(isolate(), result); + return *result; } // If the object is undefined or null it's illegal to try to set any @@ -1667,7 +1490,7 @@ MaybeObject* StoreIC::Store(State state, } // The length property of string values is read-only. Throw in strict mode. - if (strict_mode == kStrictMode && object->IsString() && + if (strict_mode() == kStrictMode && object->IsString() && name->Equals(isolate()->heap()->length_string())) { return TypeError("strict_read_only_property", object, name); } @@ -1678,23 +1501,21 @@ MaybeObject* StoreIC::Store(State state, Handle<JSObject> receiver = Handle<JSObject>::cast(object); - if (receiver->map()->is_deprecated()) { - JSObject::MigrateInstance(receiver); - } - // Check if the given name is an array index. uint32_t index; if (name->AsArrayIndex(&index)) { Handle<Object> result = - JSObject::SetElement(receiver, index, value, NONE, strict_mode); + JSObject::SetElement(receiver, index, value, NONE, strict_mode()); RETURN_IF_EMPTY_HANDLE(isolate(), result); return *value; } // Observed objects are always modified through the runtime. if (FLAG_harmony_observation && receiver->map()->is_observed()) { - return JSReceiver::SetPropertyOrFail( - receiver, name, value, NONE, strict_mode, store_mode); + Handle<Object> result = JSReceiver::SetProperty( + receiver, name, value, NONE, strict_mode(), store_mode); + RETURN_IF_EMPTY_HANDLE(isolate(), result); + return *result; } // Use specialized code for setting the length of arrays with fast @@ -1708,63 +1529,62 @@ MaybeObject* StoreIC::Store(State state, receiver->HasFastProperties() && !receiver->map()->is_frozen()) { Handle<Code> stub = - StoreArrayLengthStub(kind(), strict_mode).GetCode(isolate()); + StoreArrayLengthStub(kind(), strict_mode()).GetCode(isolate()); set_target(*stub); - TRACE_IC("StoreIC", name, state, *stub); - return JSReceiver::SetPropertyOrFail( - receiver, name, value, NONE, strict_mode, store_mode); + TRACE_IC("StoreIC", name); + Handle<Object> result = JSReceiver::SetProperty( + receiver, name, value, NONE, strict_mode(), store_mode); + RETURN_IF_EMPTY_HANDLE(isolate(), result); + return *result; } if (receiver->IsJSGlobalProxy()) { if (FLAG_use_ic && kind() != Code::KEYED_STORE_IC) { // Generate a generic stub that goes to the runtime when we see a global // proxy as receiver. - Handle<Code> stub = (strict_mode == kStrictMode) - ? 
global_proxy_stub_strict() - : global_proxy_stub(); + Handle<Code> stub = global_proxy_stub(); set_target(*stub); - TRACE_IC("StoreIC", name, state, *stub); + TRACE_IC("StoreIC", name); } - return JSReceiver::SetPropertyOrFail( - receiver, name, value, NONE, strict_mode, store_mode); + Handle<Object> result = JSReceiver::SetProperty( + receiver, name, value, NONE, strict_mode(), store_mode); + RETURN_IF_EMPTY_HANDLE(isolate(), result); + return *result; } LookupResult lookup(isolate()); - bool can_store = LookupForWrite(receiver, name, value, &lookup, &state); + bool can_store = LookupForWrite(receiver, name, value, &lookup, this); if (!can_store && - strict_mode == kStrictMode && + strict_mode() == kStrictMode && !(lookup.IsProperty() && lookup.IsReadOnly()) && IsUndeclaredGlobal(object)) { // Strict mode doesn't allow setting a non-existent global property. return ReferenceError("not_defined", name); } if (FLAG_use_ic) { - if (state == UNINITIALIZED) { - Handle<Code> stub = (strict_mode == kStrictMode) - ? pre_monomorphic_stub_strict() - : pre_monomorphic_stub(); + if (state() == UNINITIALIZED) { + Handle<Code> stub = pre_monomorphic_stub(); set_target(*stub); - TRACE_IC("StoreIC", name, state, *stub); + TRACE_IC("StoreIC", name); } else if (can_store) { - UpdateCaches(&lookup, state, strict_mode, receiver, name, value); + UpdateCaches(&lookup, receiver, name, value); } else if (!name->IsCacheable(isolate()) || lookup.IsNormal() || (lookup.IsField() && lookup.CanHoldValue(value))) { - Handle<Code> stub = (strict_mode == kStrictMode) ? generic_stub_strict() - : generic_stub(); + Handle<Code> stub = generic_stub(); set_target(*stub); } } // Set the property. - return JSReceiver::SetPropertyOrFail( - receiver, name, value, NONE, strict_mode, store_mode); + Handle<Object> result = JSReceiver::SetProperty( + receiver, name, value, NONE, strict_mode(), store_mode); + RETURN_IF_EMPTY_HANDLE(isolate(), result); + return *result; } void StoreIC::UpdateCaches(LookupResult* lookup, - State state, - StrictModeFlag strict_mode, Handle<JSObject> receiver, Handle<String> name, Handle<Object> value) { @@ -1774,31 +1594,39 @@ void StoreIC::UpdateCaches(LookupResult* lookup, // These are not cacheable, so we never see such LookupResults here. ASSERT(!lookup->IsHandler()); - Handle<Code> code = ComputeStoreMonomorphic( - lookup, strict_mode, receiver, name, value); - if (code.is_null()) { - Handle<Code> stub = strict_mode == kStrictMode - ? generic_stub_strict() : generic_stub(); - set_target(*stub); - return; - } + Handle<Code> code = ComputeHandler(lookup, receiver, name, value); - PatchCache(state, strict_mode, receiver, name, code); - TRACE_IC("StoreIC", name, state, target()); + PatchCache(receiver, name, code); + TRACE_IC("StoreIC", name); } -Handle<Code> StoreIC::ComputeStoreMonomorphic(LookupResult* lookup, - StrictModeFlag strict_mode, - Handle<JSObject> receiver, - Handle<String> name, - Handle<Object> value) { +Handle<Code> StoreIC::CompileHandler(LookupResult* lookup, + Handle<JSObject> receiver, + Handle<String> name, + Handle<Object> value) { Handle<JSObject> holder(lookup->holder()); + StoreStubCompiler compiler(isolate(), strict_mode(), kind()); switch (lookup->type()) { case FIELD: - return isolate()->stub_cache()->ComputeStoreField( - name, receiver, lookup, strict_mode); + return compiler.CompileStoreField(receiver, lookup, name); + case TRANSITION: { + // Explicitly pass in the receiver map since LookupForWrite may have + // stored something other than the receiver in the holder.
+ Handle<Map> transition( + lookup->GetTransitionTarget(receiver->map()), isolate()); + int descriptor = transition->LastAdded(); + + DescriptorArray* target_descriptors = transition->instance_descriptors(); + PropertyDetails details = target_descriptors->GetDetails(descriptor); + + if (details.type() == CALLBACKS || details.attributes() != NONE) break; + + return compiler.CompileStoreTransition( + receiver, lookup, transition, name); + } case NORMAL: + if (kind() == Code::KEYED_STORE_IC) break; if (receiver->IsGlobalObject()) { // The stub generated for the global object picks the value directly // from the property cell. So the property must be directly on the @@ -1806,12 +1634,16 @@ Handle<Code> StoreIC::ComputeStoreMonomorphic(LookupResult* lookup, Handle<GlobalObject> global = Handle<GlobalObject>::cast(receiver); Handle<PropertyCell> cell( global->GetPropertyCell(lookup), isolate()); + // TODO(verwaest): Turn into a handler. return isolate()->stub_cache()->ComputeStoreGlobal( - name, global, cell, value, strict_mode); + name, global, cell, value, strict_mode()); } ASSERT(holder.is_identical_to(receiver)); - return isolate()->stub_cache()->ComputeStoreNormal(strict_mode); + return strict_mode() == kStrictMode + ? isolate()->builtins()->StoreIC_Normal_Strict() + : isolate()->builtins()->StoreIC_Normal(); case CALLBACKS: { + if (kind() == Code::KEYED_STORE_IC) break; Handle<Object> callback(lookup->GetCallbackObject(), isolate()); if (callback->IsExecutableAccessorInfo()) { Handle<ExecutableAccessorInfo> info = @@ -1819,8 +1651,7 @@ Handle<Code> StoreIC::ComputeStoreMonomorphic(LookupResult* lookup, if (v8::ToCData<Address>(info->setter()) == 0) break; if (!holder->HasFastProperties()) break; if (!info->IsCompatibleReceiver(*receiver)) break; - return isolate()->stub_cache()->ComputeStoreCallback( - name, receiver, holder, info, strict_mode); + return compiler.CompileStoreCallback(receiver, holder, name, info); } else if (callback->IsAccessorPair()) { Handle<Object> setter( Handle<AccessorPair>::cast(callback)->setter(), isolate()); @@ -1830,14 +1661,12 @@ Handle<Code> StoreIC::ComputeStoreMonomorphic(LookupResult* lookup, Handle<JSFunction> function = Handle<JSFunction>::cast(setter); CallOptimization call_optimization(function); if (call_optimization.is_simple_api_call() && - call_optimization.IsCompatibleReceiver(*receiver) && - FLAG_js_accessor_ics) { - return isolate()->stub_cache()->ComputeStoreCallback( - name, receiver, holder, call_optimization, strict_mode); + call_optimization.IsCompatibleReceiver(*receiver)) { + return compiler.CompileStoreCallback( + receiver, holder, name, call_optimization); } - return isolate()->stub_cache()->ComputeStoreViaSetter( - name, receiver, holder, Handle<JSFunction>::cast(setter), - strict_mode); + return compiler.CompileStoreViaSetter( + receiver, holder, name, Handle<JSFunction>::cast(setter)); } // TODO(dcarney): Handle correctly. 
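// The TRANSITION case above only installs a handler when the freshly added
// descriptor is a plain data property: accessors (CALLBACKS) or any non-NONE
// attribute force the generic path. A sketch of that packed-details test;
// the bit layout below is made up, V8's PropertyDetails has its own encoding.
#include <stdint.h>

enum PropertyTypeSketch { NORMAL_P = 0, FIELD_P = 1, CALLBACKS_P = 3 };
enum { NONE_ATTR = 0 };  // READ_ONLY, DONT_ENUM, DONT_DELETE set other bits

struct PropertyDetailsSketch {
  uint32_t value;  // bits 0-2: type, bits 3-5: attributes
  PropertyTypeSketch type() const {
    return static_cast<PropertyTypeSketch>(value & 7);
  }
  int attributes() const { return (value >> 3) & 7; }
};

inline bool CanUseTransitionHandler(PropertyDetailsSketch details) {
  return details.type() != CALLBACKS_P && details.attributes() == NONE_ATTR;
}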
if (callback->IsDeclaredAccessorInfo()) break; @@ -1846,55 +1675,38 @@ Handle<Code> StoreIC::ComputeStoreMonomorphic(LookupResult* lookup, break; } case INTERCEPTOR: - ASSERT(!receiver->GetNamedInterceptor()->setter()->IsUndefined()); - return isolate()->stub_cache()->ComputeStoreInterceptor( - name, receiver, strict_mode); + if (kind() == Code::KEYED_STORE_IC) break; + ASSERT(HasInterceptorSetter(*receiver)); + return compiler.CompileStoreInterceptor(receiver, name); case CONSTANT: break; - case TRANSITION: { - // Explicitly pass in the receiver map since LookupForWrite may have - // stored something else than the receiver in the holder. - Handle<Map> transition( - lookup->GetTransitionTarget(receiver->map()), isolate()); - int descriptor = transition->LastAdded(); - - DescriptorArray* target_descriptors = transition->instance_descriptors(); - PropertyDetails details = target_descriptors->GetDetails(descriptor); - - if (details.type() == CALLBACKS || details.attributes() != NONE) break; - - return isolate()->stub_cache()->ComputeStoreTransition( - name, receiver, lookup, transition, strict_mode); - } case NONEXISTENT: case HANDLER: UNREACHABLE(); break; } - return Handle<Code>::null(); + return slow_stub(); } Handle<Code> KeyedStoreIC::StoreElementStub(Handle<JSObject> receiver, - KeyedAccessStoreMode store_mode, - StrictModeFlag strict_mode) { + KeyedAccessStoreMode store_mode) { // Don't handle megamorphic property accesses for INTERCEPTORS or CALLBACKS // via megamorphic stubs, since they don't have a map in their relocation info // and so the stubs can't be harvested for the object needed for a map check. if (target()->type() != Code::NORMAL) { TRACE_GENERIC_IC(isolate(), "KeyedIC", "non-NORMAL target type"); - return strict_mode == kStrictMode ? generic_stub_strict() : generic_stub(); + return generic_stub(); } - State ic_state = target()->ic_state(); Handle<Map> receiver_map(receiver->map(), isolate()); - if (ic_state == UNINITIALIZED || ic_state == PREMONOMORPHIC) { + if (state() == UNINITIALIZED || state() == PREMONOMORPHIC) { // Optimistically assume that ICs that haven't reached the MONOMORPHIC state // yet will do so and stay there. Handle<Map> monomorphic_map = ComputeTransitionedMap(receiver, store_mode); store_mode = GetNonTransitioningStoreMode(store_mode); return isolate()->stub_cache()->ComputeKeyedStoreElement( - monomorphic_map, strict_mode, store_mode); + monomorphic_map, strict_mode(), store_mode); } MapHandleList target_receiver_maps; @@ -1903,9 +1715,7 @@ Handle<Code> KeyedStoreIC::StoreElementStub(Handle<JSObject> receiver, // If a non-map-specific IC is installed (e.g. keyed // stores into properties in dictionary mode), there will be no // receiver maps in the target. - return strict_mode == kStrictMode - ? generic_stub_strict() - : generic_stub(); + return generic_stub(); } // There are several special cases where an IC that is MONOMORPHIC can still @@ -1915,7 +1725,7 @@ Handle<Code> KeyedStoreIC::StoreElementStub(Handle<JSObject> receiver, KeyedAccessStoreMode old_store_mode = Code::GetKeyedAccessStoreMode(target()->extra_ic_state()); Handle<Map> previous_receiver_map = target_receiver_maps.at(0); - if (ic_state == MONOMORPHIC) { + if (state() == MONOMORPHIC) { // If the "old" and "new" maps are in the same elements map family, stay // MONOMORPHIC and use the map for the most generic ElementsKind.
Handle<Map> transitioned_receiver_map = receiver_map; @@ -1927,7 +1737,7 @@ Handle<Code> KeyedStoreIC::StoreElementStub(Handle<JSObject> receiver, // Element family is the same, use the "worst" case map. store_mode = GetNonTransitioningStoreMode(store_mode); return isolate()->stub_cache()->ComputeKeyedStoreElement( - transitioned_receiver_map, strict_mode, store_mode); + transitioned_receiver_map, strict_mode(), store_mode); } else if (*previous_receiver_map == receiver->map() && old_store_mode == STANDARD_STORE && (IsGrowStoreMode(store_mode) || @@ -1937,11 +1747,11 @@ Handle<Code> KeyedStoreIC::StoreElementStub(Handle<JSObject> receiver, // grow at the end of the array, handle OOB accesses or copy COW arrays // and still stay MONOMORPHIC. return isolate()->stub_cache()->ComputeKeyedStoreElement( - receiver_map, strict_mode, store_mode); + receiver_map, strict_mode(), store_mode); } } - ASSERT(ic_state != GENERIC); + ASSERT(state() != GENERIC); bool map_added = AddOneReceiverMapIfMissing(&target_receiver_maps, receiver_map); @@ -1957,14 +1767,14 @@ Handle<Code> KeyedStoreIC::StoreElementStub(Handle<JSObject> receiver, // If the miss wasn't due to an unseen map, a polymorphic stub // won't help; use the generic stub. TRACE_GENERIC_IC(isolate(), "KeyedIC", "same map added twice"); - return strict_mode == kStrictMode ? generic_stub_strict() : generic_stub(); + return generic_stub(); } // If the maximum number of receiver maps has been exceeded, use the generic // version of the IC. if (target_receiver_maps.length() > kMaxKeyedPolymorphism) { TRACE_GENERIC_IC(isolate(), "KeyedIC", "max polymorph exceeded"); - return strict_mode == kStrictMode ? generic_stub_strict() : generic_stub(); + return generic_stub(); } // Make sure all polymorphic handlers have the same store mode, otherwise the @@ -1975,9 +1785,7 @@ Handle<Code> KeyedStoreIC::StoreElementStub(Handle<JSObject> receiver, store_mode = old_store_mode; } else if (store_mode != old_store_mode) { TRACE_GENERIC_IC(isolate(), "KeyedIC", "store mode mismatch"); - return strict_mode == kStrictMode - ? generic_stub_strict() - : generic_stub(); + return generic_stub(); } } @@ -1995,14 +1803,12 @@ Handle<Code> KeyedStoreIC::StoreElementStub(Handle<JSObject> receiver, external_arrays != target_receiver_maps.length()) { TRACE_GENERIC_IC(isolate(), "KeyedIC", "unsupported combination of external and normal arrays"); - return strict_mode == kStrictMode - ? generic_stub_strict() - : generic_stub(); + return generic_stub(); } } return isolate()->stub_cache()->ComputeStoreElementPolymorphic( - &target_receiver_maps, store_mode, strict_mode); + &target_receiver_maps, store_mode, strict_mode()); } @@ -2125,117 +1931,73 @@ KeyedAccessStoreMode KeyedStoreIC::GetStoreMode(Handle<JSObject> receiver, } -MaybeObject* KeyedStoreIC::Store(State state, - StrictModeFlag strict_mode, - Handle<Object> object, +MaybeObject* KeyedStoreIC::Store(Handle<Object> object, Handle<Object> key, Handle<Object> value, ICMissMode miss_mode) { + if (MigrateDeprecated(object)) { + return Runtime::SetObjectPropertyOrFail( + isolate(), object, key, value, NONE, strict_mode()); + } + // Check for values that can be converted into an internalized string directly // or are representable as a smi.
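// Polymorphic keyed stores require one store mode shared by all handlers, so
// the code above reconciles modes: a STANDARD_STORE miss may adopt the old
// non-standard mode, but two different non-standard modes force the generic
// stub. A standalone sketch; the enumerators are abbreviated stand-ins for
// V8's KeyedAccessStoreMode values.
enum StoreModeSketch { STANDARD, GROW, IGNORE_OOB };

// Returns false when no single mode fits, i.e. "use the generic stub".
inline bool UnifyStoreMode(StoreModeSketch old_mode, StoreModeSketch* mode) {
  if (*mode == STANDARD) {
    *mode = old_mode;        // keep the more specific mode already installed
    return true;
  }
  return *mode == old_mode;  // mismatched non-standard modes cannot share
}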
key = TryConvertKey(key, isolate()); + MaybeObject* maybe_object = NULL; + Handle<Code> stub = generic_stub(); + if (key->IsInternalizedString()) { - return StoreIC::Store(state, - strict_mode, - object, - Handle<String>::cast(key), - value, - JSReceiver::MAY_BE_STORE_FROM_KEYED); - } - - bool use_ic = FLAG_use_ic && !object->IsAccessCheckNeeded() && - !(FLAG_harmony_observation && object->IsJSObject() && - JSObject::cast(*object)->map()->is_observed()); - if (use_ic && !object->IsSmi()) { - // Don't use ICs for maps of the objects in Array's prototype chain. We - // expect to be able to trap element sets to objects with those maps in the - // runtime to enable optimization of element hole access. - Handle<HeapObject> heap_object = Handle<HeapObject>::cast(object); - if (heap_object->map()->IsMapInArrayPrototypeChain()) use_ic = false; - } - ASSERT(!(use_ic && object->IsJSGlobalProxy())); - - if (use_ic) { - Handle<Code> stub = (strict_mode == kStrictMode) - ? generic_stub_strict() - : generic_stub(); - if (miss_mode != MISS_FORCE_GENERIC) { - if (object->IsJSObject()) { - Handle<JSObject> receiver = Handle<JSObject>::cast(object); - if (receiver->map()->is_deprecated()) { - JSObject::MigrateInstance(receiver); - } - bool key_is_smi_like = key->IsSmi() || !key->ToSmi()->IsFailure(); - if (receiver->elements()->map() == - isolate()->heap()->non_strict_arguments_elements_map()) { - stub = non_strict_arguments_stub(); - } else if (key_is_smi_like && - (target() != *non_strict_arguments_stub())) { - KeyedAccessStoreMode store_mode = GetStoreMode(receiver, key, value); - stub = StoreElementStub(receiver, store_mode, strict_mode); - } else { - TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "key not a number"); + maybe_object = StoreIC::Store(object, + Handle<String>::cast(key), + value, + JSReceiver::MAY_BE_STORE_FROM_KEYED); + if (maybe_object->IsFailure()) return maybe_object; + } else { + bool use_ic = FLAG_use_ic && !object->IsAccessCheckNeeded() && + !(FLAG_harmony_observation && object->IsJSObject() && + JSObject::cast(*object)->map()->is_observed()); + if (use_ic && !object->IsSmi()) { + // Don't use ICs for maps of the objects in Array's prototype chain. We + // expect to be able to trap element sets to objects with those maps in + // the runtime to enable optimization of element hole access. 
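// The "key_is_smi_like" test above (key->IsSmi() || !key->ToSmi()->IsFailure())
// accepts heap numbers whose value round-trips to a small integer. A
// standalone approximation, assuming a 31-bit smi payload as on 32-bit builds:
#include <cmath>
#include <stdint.h>

static const int32_t kSmiMax = (1 << 30) - 1;
static const int32_t kSmiMin = -(1 << 30);

inline bool IsSmiLike(double key) {
  if (key != std::floor(key)) return false;           // rejects NaN, fractions
  if (key == 0.0 && std::signbit(key)) return false;  // -0.0 is not a smi
  return key >= kSmiMin && key <= kSmiMax;            // rejects infinities too
}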
+ Handle<HeapObject> heap_object = Handle<HeapObject>::cast(object); + if (heap_object->map()->IsMapInArrayPrototypeChain()) use_ic = false; + } + + if (use_ic) { + ASSERT(!object->IsJSGlobalProxy()); + + if (miss_mode != MISS_FORCE_GENERIC) { + if (object->IsJSObject()) { + Handle<JSObject> receiver = Handle<JSObject>::cast(object); + bool key_is_smi_like = key->IsSmi() || !key->ToSmi()->IsFailure(); + if (receiver->elements()->map() == + isolate()->heap()->non_strict_arguments_elements_map()) { + stub = non_strict_arguments_stub(); + } else if (key_is_smi_like && + (!target().is_identical_to(non_strict_arguments_stub()))) { + KeyedAccessStoreMode store_mode = + GetStoreMode(receiver, key, value); + stub = StoreElementStub(receiver, store_mode); + } } - } else { - TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "not an object"); } - } else { - TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "force generic"); + } + } + + if (!is_target_set()) { + if (*stub == *generic_stub()) { + TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "set generic"); } ASSERT(!stub.is_null()); set_target(*stub); - TRACE_IC("KeyedStoreIC", key, state, target()); + TRACE_IC("StoreIC", key); } + if (maybe_object) return maybe_object; return Runtime::SetObjectPropertyOrFail( - isolate(), object , key, value, NONE, strict_mode); -} - - -Handle<Code> KeyedStoreIC::ComputeStoreMonomorphic(LookupResult* lookup, - StrictModeFlag strict_mode, - Handle<JSObject> receiver, - Handle<String> name, - Handle<Object> value) { - // If the property has a non-field type allowing map transitions - // where there is extra room in the object, we leave the IC in its - // current state. - switch (lookup->type()) { - case FIELD: - return isolate()->stub_cache()->ComputeKeyedStoreField( - name, receiver, lookup, strict_mode); - case TRANSITION: { - // Explicitly pass in the receiver map since LookupForWrite may have - // stored something else than the receiver in the holder. - Handle<Map> transition( - lookup->GetTransitionTarget(receiver->map()), isolate()); - int descriptor = transition->LastAdded(); - - DescriptorArray* target_descriptors = transition->instance_descriptors(); - PropertyDetails details = target_descriptors->GetDetails(descriptor); - - if (details.type() != CALLBACKS && details.attributes() == NONE) { - return isolate()->stub_cache()->ComputeKeyedStoreTransition( - name, receiver, lookup, transition, strict_mode); - } - // fall through. - } - case NORMAL: - case CONSTANT: - case CALLBACKS: - case INTERCEPTOR: - // Always rewrite to the generic case so that we do not - // repeatedly try to rewrite. - return (strict_mode == kStrictMode) - ? 
generic_stub_strict() - : generic_stub(); - case HANDLER: - case NONEXISTENT: - UNREACHABLE(); - break; - } - return Handle<Code>::null(); + isolate(), object , key, value, NONE, strict_mode()); } @@ -2251,12 +2013,10 @@ RUNTIME_FUNCTION(MaybeObject*, CallIC_Miss) { HandleScope scope(isolate); ASSERT(args.length() == 2); CallIC ic(isolate); - IC::State state = IC::StateFrom(ic.target(), args[0], args[1]); - Code::ExtraICState extra_ic_state = ic.target()->extra_ic_state(); - MaybeObject* maybe_result = ic.LoadFunction(state, - extra_ic_state, - args.at<Object>(0), - args.at<String>(1)); + Handle<Object> receiver = args.at<Object>(0); + Handle<String> key = args.at<String>(1); + ic.UpdateState(receiver, key); + MaybeObject* maybe_result = ic.LoadFunction(receiver, key); JSFunction* raw_function; if (!maybe_result->To(&raw_function)) return maybe_result; @@ -2278,9 +2038,10 @@ RUNTIME_FUNCTION(MaybeObject*, KeyedCallIC_Miss) { HandleScope scope(isolate); ASSERT(args.length() == 2); KeyedCallIC ic(isolate); - IC::State state = IC::StateFrom(ic.target(), args[0], args[1]); - MaybeObject* maybe_result = - ic.LoadFunction(state, args.at<Object>(0), args.at<Object>(1)); + Handle<Object> receiver = args.at<Object>(0); + Handle<Object> key = args.at<Object>(1); + ic.UpdateState(receiver, key); + MaybeObject* maybe_result = ic.LoadFunction(receiver, key); // Result could be a function or a failure. JSFunction* raw_function = NULL; if (!maybe_result->To(&raw_function)) return maybe_result; @@ -2298,8 +2059,10 @@ RUNTIME_FUNCTION(MaybeObject*, LoadIC_Miss) { HandleScope scope(isolate); ASSERT(args.length() == 2); LoadIC ic(IC::NO_EXTRA_FRAME, isolate); - IC::State state = IC::StateFrom(ic.target(), args[0], args[1]); - return ic.Load(state, args.at<Object>(0), args.at<String>(1)); + Handle<Object> receiver = args.at<Object>(0); + Handle<String> key = args.at<String>(1); + ic.UpdateState(receiver, key); + return ic.Load(receiver, key); } @@ -2308,8 +2071,10 @@ RUNTIME_FUNCTION(MaybeObject*, KeyedLoadIC_Miss) { HandleScope scope(isolate); ASSERT(args.length() == 2); KeyedLoadIC ic(IC::NO_EXTRA_FRAME, isolate); - IC::State state = IC::StateFrom(ic.target(), args[0], args[1]); - return ic.Load(state, args.at<Object>(0), args.at<Object>(1), MISS); + Handle<Object> receiver = args.at<Object>(0); + Handle<Object> key = args.at<Object>(1); + ic.UpdateState(receiver, key); + return ic.Load(receiver, key, MISS); } @@ -2317,8 +2082,10 @@ RUNTIME_FUNCTION(MaybeObject*, KeyedLoadIC_MissFromStubFailure) { HandleScope scope(isolate); ASSERT(args.length() == 2); KeyedLoadIC ic(IC::EXTRA_CALL_FRAME, isolate); - IC::State state = IC::StateFrom(ic.target(), args[0], args[1]); - return ic.Load(state, args.at<Object>(0), args.at<Object>(1), MISS); + Handle<Object> receiver = args.at<Object>(0); + Handle<Object> key = args.at<Object>(1); + ic.UpdateState(receiver, key); + return ic.Load(receiver, key, MISS); } @@ -2326,11 +2093,10 @@ RUNTIME_FUNCTION(MaybeObject*, KeyedLoadIC_MissForceGeneric) { HandleScope scope(isolate); ASSERT(args.length() == 2); KeyedLoadIC ic(IC::NO_EXTRA_FRAME, isolate); - IC::State state = IC::StateFrom(ic.target(), args[0], args[1]); - return ic.Load(state, - args.at<Object>(0), - args.at<Object>(1), - MISS_FORCE_GENERIC); + Handle<Object> receiver = args.at<Object>(0); + Handle<Object> key = args.at<Object>(1); + ic.UpdateState(receiver, key); + return ic.Load(receiver, key, MISS_FORCE_GENERIC); } @@ -2339,13 +2105,10 @@ RUNTIME_FUNCTION(MaybeObject*, StoreIC_Miss) { HandleScope scope(isolate); 
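// Every *_Miss runtime entry rewritten in these hunks follows one recipe:
// reconstruct the IC from the call frame, let UpdateState() derive the state
// from the live receiver and key (replacing the old static IC::StateFrom),
// then re-run the access. A schematic, non-V8 rendering of that recipe:
struct ValueSketch { int map_id; };

class MissHandlerSketch {
 public:
  MissHandlerSketch() : state_(0) {}
  void UpdateState(const ValueSketch& receiver) {
    state_ = receiver.map_id;  // state is computed at miss time, not cached
  }
  int Load(const ValueSketch& receiver) {
    // ...select and install a stub for state_, then perform the real load...
    return receiver.map_id;
  }
 private:
  int state_;
};

int LoadIC_MissSketch(const ValueSketch& receiver) {
  MissHandlerSketch ic;  // in V8 this is rebuilt from the stack frame
  ic.UpdateState(receiver);
  return ic.Load(receiver);
}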
ASSERT(args.length() == 3); StoreIC ic(IC::NO_EXTRA_FRAME, isolate); - IC::State state = IC::StateFrom(ic.target(), args[0], args[1]); - Code::ExtraICState extra_ic_state = ic.target()->extra_ic_state(); - return ic.Store(state, - Code::GetStrictMode(extra_ic_state), - args.at<Object>(0), - args.at<String>(1), - args.at<Object>(2)); + Handle<Object> receiver = args.at<Object>(0); + Handle<String> key = args.at<String>(1); + ic.UpdateState(receiver, key); + return ic.Store(receiver, key, args.at<Object>(2)); } @@ -2353,13 +2116,10 @@ RUNTIME_FUNCTION(MaybeObject*, StoreIC_MissFromStubFailure) { HandleScope scope(isolate); ASSERT(args.length() == 3); StoreIC ic(IC::EXTRA_CALL_FRAME, isolate); - IC::State state = IC::StateFrom(ic.target(), args[0], args[1]); - Code::ExtraICState extra_ic_state = ic.target()->extra_ic_state(); - return ic.Store(state, - Code::GetStrictMode(extra_ic_state), - args.at<Object>(0), - args.at<String>(1), - args.at<Object>(2)); + Handle<Object> receiver = args.at<Object>(0); + Handle<String> key = args.at<String>(1); + ic.UpdateState(receiver, key); + return ic.Store(receiver, key, args.at<Object>(2)); } @@ -2442,14 +2202,10 @@ RUNTIME_FUNCTION(MaybeObject*, KeyedStoreIC_Miss) { HandleScope scope(isolate); ASSERT(args.length() == 3); KeyedStoreIC ic(IC::NO_EXTRA_FRAME, isolate); - IC::State state = IC::StateFrom(ic.target(), args[0], args[1]); - Code::ExtraICState extra_ic_state = ic.target()->extra_ic_state(); - return ic.Store(state, - Code::GetStrictMode(extra_ic_state), - args.at<Object>(0), - args.at<Object>(1), - args.at<Object>(2), - MISS); + Handle<Object> receiver = args.at<Object>(0); + Handle<Object> key = args.at<Object>(1); + ic.UpdateState(receiver, key); + return ic.Store(receiver, key, args.at<Object>(2), MISS); } @@ -2457,26 +2213,21 @@ RUNTIME_FUNCTION(MaybeObject*, KeyedStoreIC_MissFromStubFailure) { HandleScope scope(isolate); ASSERT(args.length() == 3); KeyedStoreIC ic(IC::EXTRA_CALL_FRAME, isolate); - IC::State state = IC::StateFrom(ic.target(), args[0], args[1]); - Code::ExtraICState extra_ic_state = ic.target()->extra_ic_state(); - return ic.Store(state, - Code::GetStrictMode(extra_ic_state), - args.at<Object>(0), - args.at<Object>(1), - args.at<Object>(2), - MISS); + Handle<Object> receiver = args.at<Object>(0); + Handle<Object> key = args.at<Object>(1); + ic.UpdateState(receiver, key); + return ic.Store(receiver, key, args.at<Object>(2), MISS); } RUNTIME_FUNCTION(MaybeObject*, StoreIC_Slow) { - SealHandleScope shs(isolate); + HandleScope scope(isolate); ASSERT(args.length() == 3); StoreIC ic(IC::NO_EXTRA_FRAME, isolate); - Code::ExtraICState extra_ic_state = ic.target()->extra_ic_state(); Handle<Object> object = args.at<Object>(0); Handle<Object> key = args.at<Object>(1); Handle<Object> value = args.at<Object>(2); - StrictModeFlag strict_mode = Code::GetStrictMode(extra_ic_state); + StrictModeFlag strict_mode = ic.strict_mode(); return Runtime::SetObjectProperty(isolate, object, key, @@ -2487,14 +2238,13 @@ RUNTIME_FUNCTION(MaybeObject*, StoreIC_Slow) { RUNTIME_FUNCTION(MaybeObject*, KeyedStoreIC_Slow) { - SealHandleScope shs(isolate); + HandleScope scope(isolate); ASSERT(args.length() == 3); KeyedStoreIC ic(IC::NO_EXTRA_FRAME, isolate); - Code::ExtraICState extra_ic_state = ic.target()->extra_ic_state(); Handle<Object> object = args.at<Object>(0); Handle<Object> key = args.at<Object>(1); Handle<Object> value = args.at<Object>(2); - StrictModeFlag strict_mode = Code::GetStrictMode(extra_ic_state); + StrictModeFlag strict_mode = 
ic.strict_mode(); return Runtime::SetObjectProperty(isolate, object, key, @@ -2508,26 +2258,21 @@ RUNTIME_FUNCTION(MaybeObject*, KeyedStoreIC_MissForceGeneric) { HandleScope scope(isolate); ASSERT(args.length() == 3); KeyedStoreIC ic(IC::NO_EXTRA_FRAME, isolate); - IC::State state = IC::StateFrom(ic.target(), args[0], args[1]); - Code::ExtraICState extra_ic_state = ic.target()->extra_ic_state(); - return ic.Store(state, - Code::GetStrictMode(extra_ic_state), - args.at<Object>(0), - args.at<Object>(1), - args.at<Object>(2), - MISS_FORCE_GENERIC); + Handle<Object> receiver = args.at<Object>(0); + Handle<Object> key = args.at<Object>(1); + ic.UpdateState(receiver, key); + return ic.Store(receiver, key, args.at<Object>(2), MISS_FORCE_GENERIC); } RUNTIME_FUNCTION(MaybeObject*, ElementsTransitionAndStoreIC_Miss) { - SealHandleScope scope(isolate); + HandleScope scope(isolate); ASSERT(args.length() == 4); KeyedStoreIC ic(IC::EXTRA_CALL_FRAME, isolate); - Code::ExtraICState extra_ic_state = ic.target()->extra_ic_state(); Handle<Object> value = args.at<Object>(0); Handle<Object> key = args.at<Object>(2); Handle<Object> object = args.at<Object>(3); - StrictModeFlag strict_mode = Code::GetStrictMode(extra_ic_state); + StrictModeFlag strict_mode = ic.strict_mode(); return Runtime::SetObjectProperty(isolate, object, key, @@ -2537,11 +2282,6 @@ RUNTIME_FUNCTION(MaybeObject*, ElementsTransitionAndStoreIC_Miss) { } -void BinaryOpIC::patch(Code* code) { - set_target(code); -} - - const char* BinaryOpIC::GetName(TypeInfo type_info) { switch (type_info) { case UNINITIALIZED: return "Uninitialized"; @@ -2556,256 +2296,68 @@ const char* BinaryOpIC::GetName(TypeInfo type_info) { } -BinaryOpIC::State BinaryOpIC::ToState(TypeInfo type_info) { - switch (type_info) { - case UNINITIALIZED: - return ::v8::internal::UNINITIALIZED; - case SMI: - case INT32: - case NUMBER: - case ODDBALL: - case STRING: - return MONOMORPHIC; - case GENERIC: - return ::v8::internal::GENERIC; - } - UNREACHABLE(); - return ::v8::internal::UNINITIALIZED; -} +MaybeObject* BinaryOpIC::Transition(Handle<Object> left, Handle<Object> right) { + Code::ExtraICState extra_ic_state = target()->extended_extra_ic_state(); + BinaryOpStub stub(extra_ic_state); + Handle<Type> left_type = stub.GetLeftType(isolate()); + Handle<Type> right_type = stub.GetRightType(isolate()); + bool smi_was_enabled = left_type->Maybe(Type::Smi()) && + right_type->Maybe(Type::Smi()); -Handle<Type> BinaryOpIC::TypeInfoToType(BinaryOpIC::TypeInfo binary_type, - Isolate* isolate) { - switch (binary_type) { - case UNINITIALIZED: - return handle(Type::None(), isolate); - case SMI: - return handle(Type::Smi(), isolate); - case INT32: - return handle(Type::Signed32(), isolate); - case NUMBER: - return handle(Type::Number(), isolate); - case ODDBALL: - return handle(Type::Optional( - handle(Type::Union( - handle(Type::Number(), isolate), - handle(Type::String(), isolate)), isolate)), isolate); - case STRING: - return handle(Type::String(), isolate); - case GENERIC: - return handle(Type::Any(), isolate); - } - UNREACHABLE(); - return handle(Type::Any(), isolate); -} - - -void BinaryOpIC::StubInfoToType(int minor_key, - Handle<Type>* left, - Handle<Type>* right, - Handle<Type>* result, - Isolate* isolate) { - TypeInfo left_typeinfo, right_typeinfo, result_typeinfo; - BinaryOpStub::decode_types_from_minor_key( - minor_key, &left_typeinfo, &right_typeinfo, &result_typeinfo); - *left = TypeInfoToType(left_typeinfo, isolate); - *right = TypeInfoToType(right_typeinfo, isolate); - *result = 
TypeInfoToType(result_typeinfo, isolate); -} - - -static BinaryOpIC::TypeInfo TypeInfoFromValue(Handle<Object> value, - Token::Value op) { - v8::internal::TypeInfo type = v8::internal::TypeInfo::FromValue(value); - if (type.IsSmi()) return BinaryOpIC::SMI; - if (type.IsInteger32()) { - if (SmiValuesAre32Bits()) return BinaryOpIC::SMI; - return BinaryOpIC::INT32; - } - if (type.IsNumber()) return BinaryOpIC::NUMBER; - if (type.IsString()) return BinaryOpIC::STRING; - if (value->IsUndefined()) { - if (op == Token::BIT_AND || - op == Token::BIT_OR || - op == Token::BIT_XOR || - op == Token::SAR || - op == Token::SHL || - op == Token::SHR) { - if (SmiValuesAre32Bits()) return BinaryOpIC::SMI; - return BinaryOpIC::INT32; - } - return BinaryOpIC::ODDBALL; + Maybe<Handle<Object> > result = stub.Result(left, right, isolate()); + if (!result.has_value) return Failure::Exception(); + +#ifdef DEBUG + if (FLAG_trace_ic) { + char buffer[100]; + NoAllocationStringAllocator allocator(buffer, + static_cast<unsigned>(sizeof(buffer))); + StringStream stream(&allocator); + stream.Add("["); + stub.PrintName(&stream); + + stub.UpdateStatus(left, right, result); + + stream.Add(" => "); + stub.PrintState(&stream); + stream.Add(" "); + stream.OutputToStdOut(); + PrintF(" @ %p <- ", static_cast<void*>(*stub.GetCode(isolate()))); + JavaScriptFrame::PrintTop(isolate(), stdout, false, true); + PrintF("]\n"); + } else { + stub.UpdateStatus(left, right, result); } - return BinaryOpIC::GENERIC; -} +#else + stub.UpdateStatus(left, right, result); +#endif + Handle<Code> code = stub.GetCode(isolate()); + set_target(*code); -static BinaryOpIC::TypeInfo InputState(BinaryOpIC::TypeInfo old_type, - Handle<Object> value, - Token::Value op) { - BinaryOpIC::TypeInfo new_type = TypeInfoFromValue(value, op); - if (old_type == BinaryOpIC::STRING) { - if (new_type == BinaryOpIC::STRING) return new_type; - return BinaryOpIC::GENERIC; - } - return Max(old_type, new_type); -} + left_type = stub.GetLeftType(isolate()); + right_type = stub.GetRightType(isolate()); + bool enable_smi = left_type->Maybe(Type::Smi()) && + right_type->Maybe(Type::Smi()); + if (!smi_was_enabled && enable_smi) { + PatchInlinedSmiCode(address(), ENABLE_INLINED_SMI_CHECK); + } else if (smi_was_enabled && !enable_smi) { + PatchInlinedSmiCode(address(), DISABLE_INLINED_SMI_CHECK); + } -#ifdef DEBUG -static void TraceBinaryOp(BinaryOpIC::TypeInfo left, - BinaryOpIC::TypeInfo right, - Maybe<int32_t> fixed_right_arg, - BinaryOpIC::TypeInfo result) { - PrintF("%s*%s", BinaryOpIC::GetName(left), BinaryOpIC::GetName(right)); - if (fixed_right_arg.has_value) PrintF("{%d}", fixed_right_arg.value); - PrintF("->%s", BinaryOpIC::GetName(result)); + ASSERT(result.has_value); + return static_cast<MaybeObject*>(*result.value); } -#endif -RUNTIME_FUNCTION(MaybeObject*, BinaryOp_Patch) { - ASSERT(args.length() == 3); - +RUNTIME_FUNCTION(MaybeObject*, BinaryOpIC_Miss) { HandleScope scope(isolate); Handle<Object> left = args.at<Object>(0); Handle<Object> right = args.at<Object>(1); - int key = args.smi_at(2); - Token::Value op = BinaryOpStub::decode_op_from_minor_key(key); - - BinaryOpIC::TypeInfo previous_left, previous_right, previous_result; - BinaryOpStub::decode_types_from_minor_key( - key, &previous_left, &previous_right, &previous_result); - - BinaryOpIC::TypeInfo new_left = InputState(previous_left, left, op); - BinaryOpIC::TypeInfo new_right = InputState(previous_right, right, op); - BinaryOpIC::TypeInfo result_type = BinaryOpIC::UNINITIALIZED; - - // STRING is only used for 
ADD operations. - if ((new_left == BinaryOpIC::STRING || new_right == BinaryOpIC::STRING) && - op != Token::ADD) { - new_left = new_right = BinaryOpIC::GENERIC; - } - - BinaryOpIC::TypeInfo new_overall = Max(new_left, new_right); - BinaryOpIC::TypeInfo previous_overall = Max(previous_left, previous_right); - - Maybe<int> previous_fixed_right_arg = - BinaryOpStub::decode_fixed_right_arg_from_minor_key(key); - - int32_t value; - bool new_has_fixed_right_arg = - op == Token::MOD && - right->ToInt32(&value) && - BinaryOpStub::can_encode_arg_value(value) && - (previous_overall == BinaryOpIC::UNINITIALIZED || - (previous_fixed_right_arg.has_value && - previous_fixed_right_arg.value == value)); - Maybe<int32_t> new_fixed_right_arg( - new_has_fixed_right_arg, new_has_fixed_right_arg ? value : 1); - - if (previous_fixed_right_arg.has_value == new_fixed_right_arg.has_value) { - if (new_overall == BinaryOpIC::SMI && previous_overall == BinaryOpIC::SMI) { - if (op == Token::DIV || - op == Token::MUL || - op == Token::SHR || - SmiValuesAre32Bits()) { - // Arithmetic on two Smi inputs has yielded a heap number. - // That is the only way to get here from the Smi stub. - // With 32-bit Smis, all overflows give heap numbers, but with - // 31-bit Smis, most operations overflow to int32 results. - result_type = BinaryOpIC::NUMBER; - } else { - // Other operations on SMIs that overflow yield int32s. - result_type = BinaryOpIC::INT32; - } - } - if (new_overall == BinaryOpIC::INT32 && - previous_overall == BinaryOpIC::INT32) { - if (new_left == previous_left && new_right == previous_right) { - result_type = BinaryOpIC::NUMBER; - } - } - } - - BinaryOpStub stub(key, new_left, new_right, result_type, new_fixed_right_arg); - Handle<Code> code = stub.GetCode(isolate); - if (!code.is_null()) { -#ifdef DEBUG - if (FLAG_trace_ic) { - PrintF("[BinaryOpIC in "); - JavaScriptFrame::PrintTop(isolate, stdout, false, true); - PrintF(" "); - TraceBinaryOp(previous_left, previous_right, previous_fixed_right_arg, - previous_result); - PrintF(" => "); - TraceBinaryOp(new_left, new_right, new_fixed_right_arg, result_type); - PrintF(" #%s @ %p]\n", Token::Name(op), static_cast<void*>(*code)); - } -#endif - BinaryOpIC ic(isolate); - ic.patch(*code); - - // Activate inlined smi code. - if (previous_overall == BinaryOpIC::UNINITIALIZED) { - PatchInlinedSmiCode(ic.address(), ENABLE_INLINED_SMI_CHECK); - } - } - - Handle<JSBuiltinsObject> builtins(isolate->js_builtins_object()); - Object* builtin = NULL; // Initialization calms down the compiler. 
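// The deleted InputState()/TypeInfoFromValue() machinery treats operand
// feedback as a join-semilattice UNINITIALIZED < SMI < INT32 < NUMBER <
// GENERIC, joining old and new observations with Max(); STRING survives only
// while both sides stay STRING, and (per the check above) only for
// Token::ADD. A compact sketch of that join:
#include <algorithm>

enum FeedbackSketch {
  UNINIT_FB = 0, SMI_FB, INT32_FB, NUMBER_FB, ODDBALL_FB, STRING_FB, GENERIC_FB
};

inline FeedbackSketch InputStateSketch(FeedbackSketch old_type,
                                       FeedbackSketch new_type) {
  if (old_type == STRING_FB) {
    // Strings never mix with other feedback kinds.
    return new_type == STRING_FB ? STRING_FB : GENERIC_FB;
  }
  return std::max(old_type, new_type);  // otherwise climb the lattice
}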
- switch (op) { - case Token::ADD: - builtin = builtins->javascript_builtin(Builtins::ADD); - break; - case Token::SUB: - builtin = builtins->javascript_builtin(Builtins::SUB); - break; - case Token::MUL: - builtin = builtins->javascript_builtin(Builtins::MUL); - break; - case Token::DIV: - builtin = builtins->javascript_builtin(Builtins::DIV); - break; - case Token::MOD: - builtin = builtins->javascript_builtin(Builtins::MOD); - break; - case Token::BIT_AND: - builtin = builtins->javascript_builtin(Builtins::BIT_AND); - break; - case Token::BIT_OR: - builtin = builtins->javascript_builtin(Builtins::BIT_OR); - break; - case Token::BIT_XOR: - builtin = builtins->javascript_builtin(Builtins::BIT_XOR); - break; - case Token::SHR: - builtin = builtins->javascript_builtin(Builtins::SHR); - break; - case Token::SAR: - builtin = builtins->javascript_builtin(Builtins::SAR); - break; - case Token::SHL: - builtin = builtins->javascript_builtin(Builtins::SHL); - break; - default: - UNREACHABLE(); - } - - Handle<JSFunction> builtin_function(JSFunction::cast(builtin), isolate); - - bool caught_exception; - Handle<Object> builtin_args[] = { right }; - Handle<Object> result = Execution::Call(isolate, - builtin_function, - left, - ARRAY_SIZE(builtin_args), - builtin_args, - &caught_exception); - if (caught_exception) { - return Failure::Exception(); - } - return *result; + BinaryOpIC ic(isolate); + return ic.Transition(left, right); } @@ -3032,16 +2584,16 @@ void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) { // Used from ICCompareStub::GenerateMiss in code-stubs-<arch>.cc. RUNTIME_FUNCTION(Code*, CompareIC_Miss) { - SealHandleScope shs(isolate); + HandleScope scope(isolate); ASSERT(args.length() == 3); CompareIC ic(isolate, static_cast<Token::Value>(args.smi_at(2))); ic.UpdateCaches(args.at<Object>(0), args.at<Object>(1)); - return ic.target(); + return ic.raw_target(); } void CompareNilIC::Clear(Address address, Code* target) { - if (target->ic_state() == UNINITIALIZED) return; + if (IsCleared(target)) return; Code::ExtraICState state = target->extended_extra_ic_state(); CompareNilICStub stub(state, HydrogenCodeStub::UNINITIALIZED); @@ -3106,6 +2658,47 @@ RUNTIME_FUNCTION(MaybeObject*, Unreachable) { } +Builtins::JavaScript BinaryOpIC::TokenToJSBuiltin(Token::Value op) { + switch (op) { + default: + UNREACHABLE(); + case Token::ADD: + return Builtins::ADD; + break; + case Token::SUB: + return Builtins::SUB; + break; + case Token::MUL: + return Builtins::MUL; + break; + case Token::DIV: + return Builtins::DIV; + break; + case Token::MOD: + return Builtins::MOD; + break; + case Token::BIT_OR: + return Builtins::BIT_OR; + break; + case Token::BIT_AND: + return Builtins::BIT_AND; + break; + case Token::BIT_XOR: + return Builtins::BIT_XOR; + break; + case Token::SAR: + return Builtins::SAR; + break; + case Token::SHR: + return Builtins::SHR; + break; + case Token::SHL: + return Builtins::SHL; + break; + } +} + + MaybeObject* ToBooleanIC::ToBoolean(Handle<Object> object, Code::ExtraICState extra_ic_state) { ToBooleanStub stub(extra_ic_state); @@ -3121,8 +2714,8 @@ RUNTIME_FUNCTION(MaybeObject*, ToBooleanIC_Miss) { HandleScope scope(isolate); Handle<Object> object = args.at<Object>(0); ToBooleanIC ic(isolate); - Code::ExtraICState ic_state = ic.target()->extended_extra_ic_state(); - return ic.ToBoolean(object, ic_state); + Code::ExtraICState extra_ic_state = ic.target()->extended_extra_ic_state(); + return ic.ToBoolean(object, extra_ic_state); } diff --git a/deps/v8/src/ic.h b/deps/v8/src/ic.h 
index 8f09e1d0a2..fde4bc77a5 100644 --- a/deps/v8/src/ic.h +++ b/deps/v8/src/ic.h @@ -57,8 +57,8 @@ namespace internal { ICU(LoadPropertyWithInterceptorForCall) \ ICU(KeyedLoadPropertyWithInterceptor) \ ICU(StoreInterceptorProperty) \ - ICU(BinaryOp_Patch) \ ICU(CompareIC_Miss) \ + ICU(BinaryOpIC_Miss) \ ICU(CompareNilIC_Miss) \ ICU(Unreachable) \ ICU(ToBooleanIC_Miss) @@ -95,11 +95,17 @@ class IC { virtual ~IC() {} // Get the call-site target; used for determining the state. - Code* target() const { return GetTargetAtAddress(address()); } + Handle<Code> target() const { return target_; } + Code* raw_target() const { return GetTargetAtAddress(address()); } + + State state() const { return state_; } inline Address address() const; // Compute the current IC state based on the target stub, receiver and name. - static State StateFrom(Code* target, Object* receiver, Object* name); + void UpdateState(Handle<Object> receiver, Handle<Object> name); + void MarkMonomorphicPrototypeFailure() { + state_ = MONOMORPHIC_PROTOTYPE_FAILURE; + } // Clear the inline cache to initial state. static void Clear(Isolate* isolate, Address address); @@ -128,12 +134,15 @@ class IC { // These methods should not be called with undefined or null. static inline InlineCacheHolderFlag GetCodeCacheForObject(Object* object, JSObject* holder); - static inline InlineCacheHolderFlag GetCodeCacheForObject(JSObject* object, - JSObject* holder); static inline JSObject* GetCodeCacheHolder(Isolate* isolate, Object* object, InlineCacheHolderFlag holder); + static bool IsCleared(Code* code) { + InlineCacheState state = code->ic_state(); + return state == UNINITIALIZED || state == PREMONOMORPHIC; + } + protected: Address fp() const { return fp_; } Address pc() const { return *pc_address_; } @@ -146,15 +155,17 @@ class IC { #endif // Set the call-site target. - void set_target(Code* code) { SetTargetAtAddress(address(), code); } + void set_target(Code* code) { + SetTargetAtAddress(address(), code); + target_set_ = true; + } + + bool is_target_set() { return target_set_; } #ifdef DEBUG char TransitionMarkFromState(IC::State state); - void TraceIC(const char* type, - Handle<Object> name, - State old_state, - Code* new_target); + void TraceIC(const char* type, Handle<Object> name); #endif Failure* TypeError(const char* type, @@ -167,51 +178,52 @@ class IC { static inline void SetTargetAtAddress(Address address, Code* target); static void PostPatching(Address address, Code* target, Code* old_target); - virtual void UpdateMonomorphicIC(Handle<HeapObject> receiver, - Handle<Code> handler, - Handle<String> name, - StrictModeFlag strict_mode) { - set_target(*handler); - } - bool UpdatePolymorphicIC(State state, - Handle<HeapObject> receiver, - Handle<String> name, - Handle<Code> code, - StrictModeFlag strict_mode); - - virtual Handle<Code> ComputePolymorphicIC(MapHandleList* receiver_maps, - CodeHandleList* handlers, - int number_of_valid_maps, - Handle<Name> name, - StrictModeFlag strict_mode) { + // Compute the handler either by compiling or by retrieving a cached version. 
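// The ComputeHandler()/CompileHandler() pair declared below is a
// template-method design: the base class owns the probe-then-memoize policy
// and defers only compilation to a virtual hook that each IC kind overrides.
// A distilled, non-V8 version (the hook is pure virtual here, where the real
// base implementation defaults to UNREACHABLE()):
#include <string>

class ICBaseSketch {
 public:
  virtual ~ICBaseSketch() {}
  std::string ComputeHandler(const std::string& name) {
    if (name == cached_name_) return cached_handler_;  // shared fast path
    cached_handler_ = CompileHandler(name);            // subclass-specific
    cached_name_ = name;
    return cached_handler_;
  }
 protected:
  virtual std::string CompileHandler(const std::string& name) = 0;
 private:
  std::string cached_name_;
  std::string cached_handler_;
};

class LoadICSketch : public ICBaseSketch {
 protected:
  virtual std::string CompileHandler(const std::string& name) {
    return "load:" + name;  // a real IC would emit a handler stub here
  }
};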
+ Handle<Code> ComputeHandler(LookupResult* lookup, + Handle<JSObject> receiver, + Handle<String> name, + Handle<Object> value = Handle<Code>::null()); + virtual Handle<Code> CompileHandler(LookupResult* lookup, + Handle<JSObject> receiver, + Handle<String> name, + Handle<Object> value) { UNREACHABLE(); return Handle<Code>::null(); - }; + } + void UpdateMonomorphicIC(Handle<HeapObject> receiver, + Handle<Code> handler, + Handle<String> name); + + bool UpdatePolymorphicIC(Handle<HeapObject> receiver, + Handle<String> name, + Handle<Code> code); void CopyICToMegamorphicCache(Handle<String> name); bool IsTransitionedMapOfMonomorphicTarget(Map* receiver_map); - void PatchCache(State state, - StrictModeFlag strict_mode, - Handle<HeapObject> receiver, + void PatchCache(Handle<HeapObject> receiver, Handle<String> name, Handle<Code> code); virtual void UpdateMegamorphicCache(Map* map, Name* name, Code* code); - virtual Handle<Code> megamorphic_stub() { + virtual Code::Kind kind() const { UNREACHABLE(); - return Handle<Code>::null(); + return Code::STUB; } - virtual Handle<Code> megamorphic_stub_strict() { + virtual Handle<Code> slow_stub() const { UNREACHABLE(); return Handle<Code>::null(); } - virtual Handle<Code> generic_stub() const { + virtual Handle<Code> megamorphic_stub() { UNREACHABLE(); return Handle<Code>::null(); } - virtual Handle<Code> generic_stub_strict() const { + virtual Handle<Code> generic_stub() const { UNREACHABLE(); return Handle<Code>::null(); } + virtual StrictModeFlag strict_mode() const { return kNonStrictMode; } + bool TryRemoveInvalidPrototypeDependentStub(Handle<Object> receiver, + Handle<String> name); + void TryRemoveInvalidHandlers(Handle<Map> map, Handle<String> name); private: // Frame pointer for the frame that uses (calls) the IC. @@ -225,6 +237,11 @@ class IC { Isolate* isolate_; + // The original code target that missed. + Handle<Code> target_; + State state_; + bool target_set_; + DISALLOW_IMPLICIT_CONSTRUCTORS(IC); }; @@ -251,31 +268,23 @@ class CallICBase: public IC { class StringStubState: public BitField<StringStubFeedback, 1, 1> {}; // Returns a JSFunction or a Failure. - MUST_USE_RESULT MaybeObject* LoadFunction(State state, - Code::ExtraICState extra_ic_state, - Handle<Object> object, + MUST_USE_RESULT MaybeObject* LoadFunction(Handle<Object> object, Handle<String> name); protected: CallICBase(Code::Kind kind, Isolate* isolate) : IC(EXTRA_CALL_FRAME, isolate), kind_(kind) {} - bool TryUpdateExtraICState(LookupResult* lookup, - Handle<Object> object, - Code::ExtraICState* extra_ic_state); + virtual Code::ExtraICState extra_ic_state() { return Code::kNoExtraICState; } // Compute a monomorphic stub if possible, otherwise return a null handle. Handle<Code> ComputeMonomorphicStub(LookupResult* lookup, - State state, - Code::ExtraICState extra_state, Handle<Object> object, Handle<String> name); // Update the inline cache and the global stub cache based on the lookup // result. 
void UpdateCaches(LookupResult* lookup, - State state, - Code::ExtraICState extra_ic_state, Handle<Object> object, Handle<String> name); @@ -302,6 +311,9 @@ class CallICBase: public IC { Code::Kind kind, Code::ExtraICState extra_state); + virtual Handle<Code> megamorphic_stub(); + virtual Handle<Code> pre_monomorphic_stub(); + Code::Kind kind_; friend class IC; @@ -310,7 +322,9 @@ class CallICBase: public IC { class CallIC: public CallICBase { public: - explicit CallIC(Isolate* isolate) : CallICBase(Code::CALL_IC, isolate) { + explicit CallIC(Isolate* isolate) + : CallICBase(Code::CALL_IC, isolate), + extra_ic_state_(target()->extra_ic_state()) { ASSERT(target()->is_call_stub()); } @@ -335,6 +349,13 @@ class CallIC: public CallICBase { CallICBase::GenerateNormal(masm, argc); GenerateMiss(masm, argc, Code::kNoExtraICState); } + bool TryUpdateExtraICState(LookupResult* lookup, Handle<Object> object); + + protected: + virtual Code::ExtraICState extra_ic_state() { return extra_ic_state_; } + + private: + Code::ExtraICState extra_ic_state_; }; @@ -345,8 +366,7 @@ class KeyedCallIC: public CallICBase { ASSERT(target()->is_keyed_call_stub()); } - MUST_USE_RESULT MaybeObject* LoadFunction(State state, - Handle<Object> object, + MUST_USE_RESULT MaybeObject* LoadFunction(Handle<Object> object, Handle<Object> key); // Code generator routines. @@ -381,8 +401,7 @@ class LoadIC: public IC { static void GenerateNormal(MacroAssembler* masm); static void GenerateRuntimeGetProperty(MacroAssembler* masm); - MUST_USE_RESULT MaybeObject* Load(State state, - Handle<Object> object, + MUST_USE_RESULT MaybeObject* Load(Handle<Object> object, Handle<String> name); protected: @@ -399,34 +418,33 @@ class LoadIC: public IC { // Update the inline cache and the global stub cache based on the // lookup result. void UpdateCaches(LookupResult* lookup, - State state, Handle<Object> object, Handle<String> name); - virtual void UpdateMonomorphicIC(Handle<HeapObject> receiver, - Handle<Code> handler, - Handle<String> name, - StrictModeFlag strict_mode); - - virtual Handle<Code> ComputePolymorphicIC(MapHandleList* receiver_maps, - CodeHandleList* handlers, - int number_of_valid_maps, - Handle<Name> name, - StrictModeFlag strict_mode); - - virtual Handle<Code> ComputeLoadHandler(LookupResult* lookup, - Handle<JSObject> receiver, - Handle<String> name); + virtual Handle<Code> CompileHandler(LookupResult* lookup, + Handle<JSObject> receiver, + Handle<String> name, + Handle<Object> unused); private: // Stub accessors. static Handle<Code> initialize_stub(Isolate* isolate) { return isolate->builtins()->LoadIC_Initialize(); } + + static Handle<Code> pre_monomorphic_stub(Isolate* isolate) { + return isolate->builtins()->LoadIC_PreMonomorphic(); + } + virtual Handle<Code> pre_monomorphic_stub() { - return isolate()->builtins()->LoadIC_PreMonomorphic(); + return pre_monomorphic_stub(isolate()); } + Handle<Code> SimpleFieldLoad(int offset, + bool inobject = true, + Representation representation = + Representation::Tagged()); + static void Clear(Isolate* isolate, Address address, Code* target); friend class IC; @@ -446,8 +464,7 @@ class KeyedLoadIC: public LoadIC { ASSERT(target()->is_keyed_load_stub()); } - MUST_USE_RESULT MaybeObject* Load(State state, - Handle<Object> object, + MUST_USE_RESULT MaybeObject* Load(Handle<Object> object, Handle<Object> key, ICMissMode force_generic); @@ -487,14 +504,6 @@ class KeyedLoadIC: public LoadIC { return isolate()->builtins()->KeyedLoadIC_Slow(); } - // Update the inline cache. 
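// The stub accessors in these hunks pair a static lookup, callable with just
// an Isolate (e.g. from IC::Clear, when no IC object is on the stack), with a
// virtual forwarder that subclasses can override. Schematically:
struct IsolateSketch { int pre_monomorphic_stub_id; };

class StubAccessorSketch {
 public:
  explicit StubAccessorSketch(IsolateSketch* isolate) : isolate_(isolate) {}
  virtual ~StubAccessorSketch() {}
  static int pre_monomorphic_stub(IsolateSketch* isolate) {
    return isolate->pre_monomorphic_stub_id;  // usable without an IC instance
  }
  virtual int pre_monomorphic_stub() {
    return pre_monomorphic_stub(isolate_);    // default: defer to the static
  }
 protected:
  IsolateSketch* isolate_;
};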
- virtual void UpdateMonomorphicIC(Handle<HeapObject> receiver, - Handle<Code> handler, - Handle<String> name, - StrictModeFlag strict_mode); - virtual Handle<Code> ComputeLoadHandler(LookupResult* lookup, - Handle<JSObject> receiver, - Handle<String> name); virtual void UpdateMegamorphicCache(Map* map, Name* name, Code* code) { } private: @@ -502,8 +511,11 @@ class KeyedLoadIC: public LoadIC { static Handle<Code> initialize_stub(Isolate* isolate) { return isolate->builtins()->KeyedLoadIC_Initialize(); } + static Handle<Code> pre_monomorphic_stub(Isolate* isolate) { + return isolate->builtins()->KeyedLoadIC_PreMonomorphic(); + } virtual Handle<Code> pre_monomorphic_stub() { - return isolate()->builtins()->KeyedLoadIC_PreMonomorphic(); + return pre_monomorphic_stub(isolate()); } Handle<Code> indexed_interceptor_stub() { return isolate()->builtins()->KeyedLoadIC_IndexedInterceptor(); @@ -523,10 +535,14 @@ class KeyedLoadIC: public LoadIC { class StoreIC: public IC { public: - StoreIC(FrameDepth depth, Isolate* isolate) : IC(depth, isolate) { + StoreIC(FrameDepth depth, Isolate* isolate) + : IC(depth, isolate), + strict_mode_(Code::GetStrictMode(target()->extra_ic_state())) { ASSERT(target()->is_store_stub() || target()->is_keyed_store_stub()); } + virtual StrictModeFlag strict_mode() const { return strict_mode_; } + // Code generators for stub routines. Only called once at startup. static void GenerateSlow(MacroAssembler* masm); static void GenerateInitialize(MacroAssembler* masm) { GenerateMiss(masm); } @@ -541,8 +557,6 @@ class StoreIC: public IC { StrictModeFlag strict_mode); MUST_USE_RESULT MaybeObject* Store( - State state, - StrictModeFlag strict_mode, Handle<Object> object, Handle<String> name, Handle<Object> value, @@ -552,58 +566,60 @@ class StoreIC: public IC { protected: virtual Code::Kind kind() const { return Code::STORE_IC; } virtual Handle<Code> megamorphic_stub() { - return isolate()->builtins()->StoreIC_Megamorphic(); + if (strict_mode() == kStrictMode) { + return isolate()->builtins()->StoreIC_Megamorphic_Strict(); + } else { + return isolate()->builtins()->StoreIC_Megamorphic(); + } } // Stub accessors. 
- virtual Handle<Code> megamorphic_stub_strict() { - return isolate()->builtins()->StoreIC_Megamorphic_Strict(); - } virtual Handle<Code> generic_stub() const { - return isolate()->builtins()->StoreIC_Generic(); + if (strict_mode() == kStrictMode) { + return isolate()->builtins()->StoreIC_Generic_Strict(); + } else { + return isolate()->builtins()->StoreIC_Generic(); + } } - virtual Handle<Code> generic_stub_strict() const { - return isolate()->builtins()->StoreIC_Generic_Strict(); + + virtual Handle<Code> slow_stub() const { + if (strict_mode() == kStrictMode) { + return isolate()->builtins()->StoreIC_Slow_Strict(); + } else { + return isolate()->builtins()->StoreIC_Slow(); + } } - virtual Handle<Code> pre_monomorphic_stub() const { - return isolate()->builtins()->StoreIC_PreMonomorphic(); + + virtual Handle<Code> pre_monomorphic_stub() { + return pre_monomorphic_stub(isolate(), strict_mode()); } - virtual Handle<Code> pre_monomorphic_stub_strict() const { - return isolate()->builtins()->StoreIC_PreMonomorphic_Strict(); + + static Handle<Code> pre_monomorphic_stub(Isolate* isolate, + StrictModeFlag strict_mode) { + if (strict_mode == kStrictMode) { + return isolate->builtins()->StoreIC_PreMonomorphic_Strict(); + } else { + return isolate->builtins()->StoreIC_PreMonomorphic(); + } } + virtual Handle<Code> global_proxy_stub() { - return isolate()->builtins()->StoreIC_GlobalProxy(); - } - virtual Handle<Code> global_proxy_stub_strict() { - return isolate()->builtins()->StoreIC_GlobalProxy_Strict(); + if (strict_mode() == kStrictMode) { + return isolate()->builtins()->StoreIC_GlobalProxy_Strict(); + } else { + return isolate()->builtins()->StoreIC_GlobalProxy(); + } } - virtual void UpdateMonomorphicIC(Handle<HeapObject> receiver, - Handle<Code> handler, - Handle<String> name, - StrictModeFlag strict_mode); - - virtual Handle<Code> ComputePolymorphicIC(MapHandleList* receiver_maps, - CodeHandleList* handlers, - int number_of_valid_maps, - Handle<Name> name, - StrictModeFlag strict_mode); - // Update the inline cache and the global stub cache based on the // lookup result. void UpdateCaches(LookupResult* lookup, - State state, - StrictModeFlag strict_mode, Handle<JSObject> receiver, Handle<String> name, Handle<Object> value); - // Compute the code stub for this store; used for rewriting to - // monomorphic state and making sure that the code stub is in the - // stub cache. 
- virtual Handle<Code> ComputeStoreMonomorphic(LookupResult* lookup, - StrictModeFlag strict_mode, - Handle<JSObject> receiver, - Handle<String> name, - Handle<Object> value); + virtual Handle<Code> CompileHandler(LookupResult* lookup, + Handle<JSObject> receiver, + Handle<String> name, + Handle<Object> value); private: void set_target(Code* code) { @@ -613,14 +629,19 @@ class StoreIC: public IC { IC::set_target(code); } - static Handle<Code> initialize_stub(Isolate* isolate) { - return isolate->builtins()->StoreIC_Initialize(); - } - static Handle<Code> initialize_stub_strict(Isolate* isolate) { - return isolate->builtins()->StoreIC_Initialize_Strict(); + static Handle<Code> initialize_stub(Isolate* isolate, + StrictModeFlag strict_mode) { + if (strict_mode == kStrictMode) { + return isolate->builtins()->StoreIC_Initialize_Strict(); + } else { + return isolate->builtins()->StoreIC_Initialize(); + } } + static void Clear(Isolate* isolate, Address address, Code* target); + StrictModeFlag strict_mode_; + friend class IC; }; @@ -644,9 +665,7 @@ class KeyedStoreIC: public StoreIC { ASSERT(target()->is_keyed_store_stub()); } - MUST_USE_RESULT MaybeObject* Store(State state, - StrictModeFlag strict_mode, - Handle<Object> object, + MUST_USE_RESULT MaybeObject* Store(Handle<Object> object, Handle<Object> name, Handle<Object> value, ICMissMode force_generic); @@ -668,56 +687,62 @@ class KeyedStoreIC: public StoreIC { protected: virtual Code::Kind kind() const { return Code::KEYED_STORE_IC; } - virtual Handle<Code> ComputeStoreMonomorphic(LookupResult* lookup, - StrictModeFlag strict_mode, - Handle<JSObject> receiver, - Handle<String> name, - Handle<Object> value); virtual void UpdateMegamorphicCache(Map* map, Name* name, Code* code) { } - virtual Handle<Code> pre_monomorphic_stub() const { - return isolate()->builtins()->KeyedStoreIC_PreMonomorphic(); + virtual Handle<Code> pre_monomorphic_stub() { + return pre_monomorphic_stub(isolate(), strict_mode()); } - virtual Handle<Code> pre_monomorphic_stub_strict() const { - return isolate()->builtins()->KeyedStoreIC_PreMonomorphic_Strict(); + static Handle<Code> pre_monomorphic_stub(Isolate* isolate, + StrictModeFlag strict_mode) { + if (strict_mode == kStrictMode) { + return isolate->builtins()->KeyedStoreIC_PreMonomorphic_Strict(); + } else { + return isolate->builtins()->KeyedStoreIC_PreMonomorphic(); + } } - virtual Handle<Code> megamorphic_stub() { - return isolate()->builtins()->KeyedStoreIC_Generic(); + virtual Handle<Code> slow_stub() const { + if (strict_mode() == kStrictMode) { + return isolate()->builtins()->KeyedStoreIC_Slow_Strict(); + } else { + return isolate()->builtins()->KeyedStoreIC_Slow(); + } } - virtual Handle<Code> megamorphic_stub_strict() { - return isolate()->builtins()->KeyedStoreIC_Generic_Strict(); + virtual Handle<Code> megamorphic_stub() { + if (strict_mode() == kStrictMode) { + return isolate()->builtins()->KeyedStoreIC_Generic_Strict(); + } else { + return isolate()->builtins()->KeyedStoreIC_Generic(); + } } Handle<Code> StoreElementStub(Handle<JSObject> receiver, - KeyedAccessStoreMode store_mode, - StrictModeFlag strict_mode); - - virtual void UpdateMonomorphicIC(Handle<HeapObject> receiver, - Handle<Code> handler, - Handle<String> name, - StrictModeFlag strict_mode); + KeyedAccessStoreMode store_mode); private: void set_target(Code* code) { // Strict mode must be preserved across IC patching. 
- ASSERT(Code::GetStrictMode(code->extra_ic_state()) == - Code::GetStrictMode(target()->extra_ic_state())); + ASSERT(Code::GetStrictMode(code->extra_ic_state()) == strict_mode()); IC::set_target(code); } // Stub accessors. - static Handle<Code> initialize_stub(Isolate* isolate) { - return isolate->builtins()->KeyedStoreIC_Initialize(); - } - static Handle<Code> initialize_stub_strict(Isolate* isolate) { - return isolate->builtins()->KeyedStoreIC_Initialize_Strict(); - } - Handle<Code> generic_stub() const { - return isolate()->builtins()->KeyedStoreIC_Generic(); + static Handle<Code> initialize_stub(Isolate* isolate, + StrictModeFlag strict_mode) { + if (strict_mode == kStrictMode) { + return isolate->builtins()->KeyedStoreIC_Initialize_Strict(); + } else { + return isolate->builtins()->KeyedStoreIC_Initialize(); + } } - Handle<Code> generic_stub_strict() const { - return isolate()->builtins()->KeyedStoreIC_Generic_Strict(); + + virtual Handle<Code> generic_stub() const { + if (strict_mode() == kStrictMode) { + return isolate()->builtins()->KeyedStoreIC_Generic_Strict(); + } else { + return isolate()->builtins()->KeyedStoreIC_Generic(); + } } + Handle<Code> non_strict_arguments_stub() { return isolate()->builtins()->KeyedStoreIC_NonStrictArguments(); } @@ -748,22 +773,14 @@ class BinaryOpIC: public IC { GENERIC }; - static void StubInfoToType(int minor_key, - Handle<Type>* left, - Handle<Type>* right, - Handle<Type>* result, - Isolate* isolate); - - explicit BinaryOpIC(Isolate* isolate) : IC(NO_EXTRA_FRAME, isolate) { } + explicit BinaryOpIC(Isolate* isolate) : IC(EXTRA_CALL_FRAME, isolate) { } - void patch(Code* code); + static Builtins::JavaScript TokenToJSBuiltin(Token::Value op); static const char* GetName(TypeInfo type_info); - static State ToState(TypeInfo type_info); - - private: - static Handle<Type> TypeInfoToType(TypeInfo binary_type, Isolate* isolate); + MUST_USE_RESULT MaybeObject* Transition(Handle<Object> left, + Handle<Object> right); }; @@ -870,6 +887,7 @@ DECLARE_RUNTIME_FUNCTION(MaybeObject*, KeyedStoreIC_MissFromStubFailure); DECLARE_RUNTIME_FUNCTION(MaybeObject*, UnaryOpIC_Miss); DECLARE_RUNTIME_FUNCTION(MaybeObject*, StoreIC_MissFromStubFailure); DECLARE_RUNTIME_FUNCTION(MaybeObject*, ElementsTransitionAndStoreIC_Miss); +DECLARE_RUNTIME_FUNCTION(MaybeObject*, BinaryOpIC_Miss); DECLARE_RUNTIME_FUNCTION(MaybeObject*, CompareNilIC_Miss); DECLARE_RUNTIME_FUNCTION(MaybeObject*, ToBooleanIC_Miss); diff --git a/deps/v8/src/incremental-marking.cc b/deps/v8/src/incremental-marking.cc index df0f14a74c..4223dde211 100644 --- a/deps/v8/src/incremental-marking.cc +++ b/deps/v8/src/incremental-marking.cc @@ -648,6 +648,8 @@ void IncrementalMarking::StartMarking(CompactionFlag flag) { IncrementalMarkingRootMarkingVisitor visitor(this); heap_->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG); + heap_->mark_compact_collector()->MarkWeakObjectToCodeTable(); + // Ready to start incremental marking. 
if (FLAG_trace_incremental_marking) { PrintF("[IncrementalMarking] Running\n"); @@ -726,7 +728,7 @@ void IncrementalMarking::VisitObject(Map* map, HeapObject* obj, int size) { IncrementalMarkingMarkingVisitor::IterateBody(map, obj); MarkBit mark_bit = Marking::MarkBitFrom(obj); -#ifdef DEBUG +#if ENABLE_SLOW_ASSERTS MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address()); SLOW_ASSERT(Marking::IsGrey(mark_bit) || (obj->IsFiller() && Marking::IsWhite(mark_bit)) || diff --git a/deps/v8/src/isolate-inl.h b/deps/v8/src/isolate-inl.h index 45076f5657..764bcb8bf3 100644 --- a/deps/v8/src/isolate-inl.h +++ b/deps/v8/src/isolate-inl.h @@ -48,6 +48,11 @@ SaveContext::SaveContext(Isolate* isolate) } +bool Isolate::IsCodePreAgingActive() { + return FLAG_optimize_for_size && FLAG_age_code && !IsDebuggerActive(); +} + + bool Isolate::IsDebuggerActive() { #ifdef ENABLE_DEBUGGER_SUPPORT if (!NoBarrier_Load(&debugger_initialized_)) return false; diff --git a/deps/v8/src/isolate.cc b/deps/v8/src/isolate.cc index 6fa496a902..71cd301581 100644 --- a/deps/v8/src/isolate.cc +++ b/deps/v8/src/isolate.cc @@ -42,7 +42,6 @@ #include "isolate-inl.h" #include "lithium-allocator.h" #include "log.h" -#include "marking-thread.h" #include "messages.h" #include "platform.h" #include "regexp-stack.h" @@ -121,11 +120,7 @@ void ThreadLocalTop::InitializeInternal() { void ThreadLocalTop::Initialize() { InitializeInternal(); #ifdef USE_SIMULATOR -#if V8_TARGET_ARCH_ARM simulator_ = Simulator::current(isolate_); -#elif V8_TARGET_ARCH_MIPS - simulator_ = Simulator::current(isolate_); -#endif #endif thread_id_ = ThreadId::Current(); } @@ -147,8 +142,6 @@ int SystemThreadManager::NumberOfParallelSystemThreads( return number_of_threads; } else if (type == CONCURRENT_SWEEPING) { return number_of_threads - 1; - } else if (type == PARALLEL_MARKING) { - return number_of_threads; } return 1; } @@ -345,6 +338,14 @@ Thread::LocalStorageKey Isolate::per_isolate_thread_data_key_; Thread::LocalStorageKey PerThreadAssertScopeBase::thread_local_key; #endif // DEBUG Mutex Isolate::process_wide_mutex_; +// TODO(dcarney): Remove with default isolate. +enum DefaultIsolateStatus { + kDefaultIsolateUninitialized, + kDefaultIsolateInitialized, + kDefaultIsolateCrashIfInitialized +}; +static DefaultIsolateStatus default_isolate_status_ + = kDefaultIsolateUninitialized; Isolate::ThreadDataTable* Isolate::thread_data_table_ = NULL; Atomic32 Isolate::isolate_counter_ = 0; @@ -382,8 +383,16 @@ Isolate::PerIsolateThreadData* Isolate::FindPerThreadDataForThread( } +void Isolate::SetCrashIfDefaultIsolateInitialized() { + LockGuard<Mutex> lock_guard(&process_wide_mutex_); + CHECK(default_isolate_status_ != kDefaultIsolateInitialized); + default_isolate_status_ = kDefaultIsolateCrashIfInitialized; +} + + void Isolate::EnsureDefaultIsolate() { LockGuard<Mutex> lock_guard(&process_wide_mutex_); + CHECK(default_isolate_status_ != kDefaultIsolateCrashIfInitialized); if (default_isolate_ == NULL) { isolate_key_ = Thread::CreateThreadLocalKey(); thread_id_key_ = Thread::CreateThreadLocalKey(); @@ -1087,7 +1096,7 @@ Failure* Isolate::StackOverflow() { Handle<String> key = factory()->stack_overflow_string(); Handle<JSObject> boilerplate = Handle<JSObject>::cast(GetProperty(this, js_builtins_object(), key)); - Handle<JSObject> exception = Copy(boilerplate); + Handle<JSObject> exception = JSObject::Copy(boilerplate); DoThrow(*exception, NULL); // Get stack trace limit. 
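The default-isolate guard added to isolate.cc above is a one-shot status flag behind the process-wide mutex: an embedder that must never see an implicit default isolate calls SetCrashIfDefaultIsolateInitialized() first, and both that call and EnsureDefaultIsolate() CHECK the flag. A minimal standalone sketch of the same pattern follows; every name in it (ForbidDefault, EnsureDefault, g_status) is invented for illustration and is not V8 API.

// One-shot, mutex-guarded status flag in the spirit of
// DefaultIsolateStatus above. Illustrative names only, not V8 API.
#include <cassert>
#include <mutex>

enum Status { kUninitialized, kInitialized, kCrashIfInitialized };

static std::mutex g_mutex;
static Status g_status = kUninitialized;

// Called by embedders that never want the implicit default to exist.
void ForbidDefault() {
  std::lock_guard<std::mutex> lock(g_mutex);
  assert(g_status != kInitialized);  // too late: the default already exists
  g_status = kCrashIfInitialized;
}

// Called on first use of the implicit default.
void EnsureDefault() {
  std::lock_guard<std::mutex> lock(g_mutex);
  assert(g_status != kCrashIfInitialized);  // embedder opted out
  if (g_status == kUninitialized) {
    // ... allocate the process-wide default instance here ...
    g_status = kInitialized;
  }
}

Calling ForbidDefault() before any EnsureDefault() makes a later implicit initialization fail loudly instead of silently creating the default.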
@@ -1657,11 +1666,7 @@ char* Isolate::RestoreThread(char* from) { // This might be just paranoia, but it seems to be needed in case a // thread_local_top_ is restored on a separate OS thread. #ifdef USE_SIMULATOR -#if V8_TARGET_ARCH_ARM thread_local_top()->simulator_ = Simulator::current(this); -#elif V8_TARGET_ARCH_MIPS - thread_local_top()->simulator_ = Simulator::current(this); -#endif #endif ASSERT(context() == NULL || context()->IsContext()); return from + sizeof(ThreadLocalTop); @@ -1776,7 +1781,6 @@ Isolate::Isolate() // TODO(bmeurer) Initialized lazily because it depends on flags; can // be fixed once the default isolate cleanup is done. random_number_generator_(NULL), - is_memory_constrained_(false), has_fatal_error_(false), use_crankshaft_(true), initialized_from_snapshot_(false), @@ -1784,8 +1788,7 @@ Isolate::Isolate() heap_profiler_(NULL), function_entry_hook_(NULL), deferred_handles_head_(NULL), - optimizing_compiler_thread_(this), - marking_thread_(NULL), + optimizing_compiler_thread_(NULL), sweeper_thread_(NULL), stress_deopt_count_(0) { id_ = NoBarrier_AtomicIncrement(&isolate_counter_, 1); @@ -1879,7 +1882,10 @@ void Isolate::Deinit() { debugger()->UnloadDebugger(); #endif - if (FLAG_concurrent_recompilation) optimizing_compiler_thread_.Stop(); + if (FLAG_concurrent_recompilation) { + optimizing_compiler_thread_->Stop(); + delete optimizing_compiler_thread_; + } if (FLAG_sweeper_threads > 0) { for (int i = 0; i < FLAG_sweeper_threads; i++) { @@ -1889,14 +1895,6 @@ void Isolate::Deinit() { delete[] sweeper_thread_; } - if (FLAG_marking_threads > 0) { - for (int i = 0; i < FLAG_marking_threads; i++) { - marking_thread_[i]->Stop(); - delete marking_thread_[i]; - } - delete[] marking_thread_; - } - if (FLAG_hydrogen_stats) GetHStatistics()->Print(); if (FLAG_print_deopt_stress) { @@ -1911,7 +1909,7 @@ void Isolate::Deinit() { deoptimizer_data_ = NULL; if (FLAG_preemption) { v8::Locker locker(reinterpret_cast<v8::Isolate*>(this)); - v8::Locker::StopPreemption(); + v8::Locker::StopPreemption(reinterpret_cast<v8::Isolate*>(this)); } builtins_.TearDown(); bootstrapper_->TearDown(); @@ -2219,6 +2217,11 @@ bool Isolate::Init(Deserializer* des) { deoptimizer_data_ = new DeoptimizerData(memory_allocator_); + if (FLAG_concurrent_recompilation) { + optimizing_compiler_thread_ = new OptimizingCompilerThread(this); + optimizing_compiler_thread_->Start(); + } + const bool create_heap_objects = (des == NULL); if (create_heap_objects && !heap_.CreateHeapObjects()) { V8::FatalProcessOutOfMemory("heap object creation"); @@ -2248,7 +2251,7 @@ bool Isolate::Init(Deserializer* des) { if (FLAG_preemption) { v8::Locker locker(reinterpret_cast<v8::Isolate*>(this)); - v8::Locker::StartPreemption(100); + v8::Locker::StartPreemption(reinterpret_cast<v8::Isolate*>(this), 100); } #ifdef ENABLE_DEBUGGER_SUPPORT @@ -2318,21 +2321,13 @@ bool Isolate::Init(Deserializer* des) { DONT_TRACK_ALLOCATION_SITE, 0); stub.InitializeInterfaceDescriptor( this, code_stub_interface_descriptor(CodeStub::FastCloneShallowArray)); + BinaryOpStub::InitializeForIsolate(this); CompareNilICStub::InitializeForIsolate(this); ToBooleanStub::InitializeForIsolate(this); ArrayConstructorStubBase::InstallDescriptors(this); InternalArrayConstructorStubBase::InstallDescriptors(this); FastNewClosureStub::InstallDescriptors(this); - } - - if (FLAG_concurrent_recompilation) optimizing_compiler_thread_.Start(); - - if (FLAG_marking_threads > 0) { - marking_thread_ = new MarkingThread*[FLAG_marking_threads]; - for (int i = 0; i < 
FLAG_marking_threads; i++) { - marking_thread_[i] = new MarkingThread(this); - marking_thread_[i]->Start(); - } + NumberToStringStub::InstallDescriptors(this); } if (FLAG_sweeper_threads > 0) { diff --git a/deps/v8/src/isolate.h b/deps/v8/src/isolate.h index b826ec596a..9aa14ee025 100644 --- a/deps/v8/src/isolate.h +++ b/deps/v8/src/isolate.h @@ -75,7 +75,6 @@ class HTracer; class InlineRuntimeFunctionsTable; class NoAllocationStringAllocator; class InnerPointerToCodeCache; -class MarkingThread; class PreallocatedMemoryThread; class RandomNumberGenerator; class RegExpStack; @@ -274,10 +273,8 @@ class ThreadLocalTop BASE_EMBEDDED { Address handler_; // try-blocks are chained through the stack #ifdef USE_SIMULATOR -#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_MIPS Simulator* simulator_; #endif -#endif // USE_SIMULATOR Address js_entry_sp_; // the stack pointer of the bottom JS entry frame // the external callback we're currently in @@ -308,7 +305,6 @@ class SystemThreadManager { enum ParallelSystemComponent { PARALLEL_SWEEPING, CONCURRENT_SWEEPING, - PARALLEL_MARKING, PARALLEL_RECOMPILATION }; @@ -497,6 +493,7 @@ class Isolate { bool IsDefaultIsolate() const { return this == default_isolate_; } + static void SetCrashIfDefaultIsolateInitialized(); // Ensures that process-wide resources and the default isolate have been // allocated. It is only necessary to call this method in rare cases, for // example if you are using V8 from within the body of a static initializer. @@ -753,6 +750,19 @@ class Isolate { // Returns if the top context may access the given global object. If // the result is false, the pending exception is guaranteed to be // set. + + // TODO(yangguo): temporary wrappers + bool MayNamedAccessWrapper(Handle<JSObject> receiver, + Handle<Object> key, + v8::AccessType type) { + return MayNamedAccess(*receiver, *key, type); + } + bool MayIndexedAccessWrapper(Handle<JSObject> receiver, + uint32_t index, + v8::AccessType type) { + return MayIndexedAccess(*receiver, index, type); + } + bool MayNamedAccess(JSObject* receiver, Object* key, v8::AccessType type); @@ -984,6 +994,8 @@ class Isolate { void PreallocatedStorageDelete(void* p); void PreallocatedStorageInit(size_t size); + inline bool IsCodePreAgingActive(); + #ifdef ENABLE_DEBUGGER_SUPPORT Debugger* debugger() { if (!NoBarrier_Load(&debugger_initialized_)) InitializeDebugger(); @@ -1098,7 +1110,7 @@ class Isolate { #endif // DEBUG OptimizingCompilerThread* optimizing_compiler_thread() { - return &optimizing_compiler_thread_; + return optimizing_compiler_thread_; } // PreInits and returns a default isolate. Needed when a new thread tries @@ -1106,10 +1118,6 @@ class Isolate { // TODO(svenpanne) This method is on death row... static v8::Isolate* GetDefaultIsolateForLocking(); - MarkingThread** marking_threads() { - return marking_thread_; - } - SweeperThread** sweeper_threads() { return sweeper_thread_; } @@ -1131,13 +1139,6 @@ class Isolate { // Given an address occupied by a live code object, return that object. 
Object* FindCodeObject(Address a); - bool is_memory_constrained() const { - return is_memory_constrained_; - } - void set_is_memory_constrained(bool value) { - is_memory_constrained_ = value; - } - private: Isolate(); @@ -1310,7 +1311,6 @@ class Isolate { unibrow::Mapping<unibrow::Ecma262Canonicalize> interp_canonicalize_mapping_; CodeStubInterfaceDescriptor* code_stub_interface_descriptors_; RandomNumberGenerator* random_number_generator_; - bool is_memory_constrained_; // True if fatal error has been signaled for this isolate. bool has_fatal_error_; @@ -1368,8 +1368,7 @@ class Isolate { #endif DeferredHandles* deferred_handles_head_; - OptimizingCompilerThread optimizing_compiler_thread_; - MarkingThread** marking_thread_; + OptimizingCompilerThread* optimizing_compiler_thread_; SweeperThread** sweeper_thread_; // Counts deopt points if deopt_every_n_times is enabled. @@ -1378,7 +1377,6 @@ class Isolate { friend class ExecutionAccess; friend class HandleScopeImplementer; friend class IsolateInitializer; - friend class MarkingThread; friend class OptimizingCompilerThread; friend class SweeperThread; friend class ThreadManager; @@ -1426,9 +1424,9 @@ class SaveContext BASE_EMBEDDED { class AssertNoContextChange BASE_EMBEDDED { #ifdef DEBUG public: - AssertNoContextChange() - : isolate_(Isolate::Current()), - context_(isolate_->context()) { } + explicit AssertNoContextChange(Isolate* isolate) + : isolate_(isolate), + context_(isolate->context(), isolate) { } ~AssertNoContextChange() { ASSERT(isolate_->context() == *context_); } @@ -1438,32 +1436,7 @@ class AssertNoContextChange BASE_EMBEDDED { Handle<Context> context_; #else public: - AssertNoContextChange() { } -#endif -}; - - -// TODO(mstarzinger): Depracate as soon as everything is handlified. -class AssertNoContextChangeWithHandleScope BASE_EMBEDDED { -#ifdef DEBUG - public: - AssertNoContextChangeWithHandleScope() : - isolate_(Isolate::Current()), - scope_(isolate_), - context_(isolate_->context(), isolate_) { - } - - ~AssertNoContextChangeWithHandleScope() { - ASSERT(isolate_->context() == *context_); - } - - private: - Isolate* isolate_; - HandleScope scope_; - Handle<Context> context_; -#else - public: - AssertNoContextChangeWithHandleScope() { } + explicit AssertNoContextChange(Isolate* isolate) { } #endif }; diff --git a/deps/v8/src/json.js b/deps/v8/src/json.js index b0e14e1965..c21e6351d4 100644 --- a/deps/v8/src/json.js +++ b/deps/v8/src/json.js @@ -181,7 +181,7 @@ function JSONSerialize(key, holder, replacer, stack, indent, gap) { } } // Undefined or a callable object. - return void 0; + return UNDEFINED; } @@ -236,5 +236,5 @@ function JSONSerializeAdapter(key, object) { var holder = {}; holder[key] = object; // No need to pass the actual holder since there is no replacer function. - return JSONSerialize(key, holder, void 0, new InternalArray(), "", ""); + return JSONSerialize(key, holder, UNDEFINED, new InternalArray(), "", ""); } diff --git a/deps/v8/src/list.h b/deps/v8/src/list.h index 0e4e35bb41..41666deb26 100644 --- a/deps/v8/src/list.h +++ b/deps/v8/src/list.h @@ -84,7 +84,7 @@ class List { // backing store (e.g. Add). 
inline T& operator[](int i) const { ASSERT(0 <= i); - ASSERT(i < length_); + SLOW_ASSERT(i < length_); return data_[i]; } inline T& at(int i) const { return operator[](i); } diff --git a/deps/v8/src/lithium-allocator-inl.h b/deps/v8/src/lithium-allocator-inl.h index 8cca19b2ef..deee98877d 100644 --- a/deps/v8/src/lithium-allocator-inl.h +++ b/deps/v8/src/lithium-allocator-inl.h @@ -145,16 +145,14 @@ void UseIterator::Advance() { } -void LAllocator::SetLiveRangeAssignedRegister( - LiveRange* range, - int reg, - RegisterKind register_kind) { - if (register_kind == DOUBLE_REGISTERS) { +void LAllocator::SetLiveRangeAssignedRegister(LiveRange* range, int reg) { + if (range->Kind() == DOUBLE_REGISTERS) { assigned_double_registers_->Add(reg); } else { + ASSERT(range->Kind() == GENERAL_REGISTERS); assigned_registers_->Add(reg); } - range->set_assigned_register(reg, register_kind, chunk()->zone()); + range->set_assigned_register(reg, chunk()->zone()); } diff --git a/deps/v8/src/lithium-allocator.cc b/deps/v8/src/lithium-allocator.cc index 3c5abd1984..29c31942e4 100644 --- a/deps/v8/src/lithium-allocator.cc +++ b/deps/v8/src/lithium-allocator.cc @@ -131,7 +131,7 @@ bool LiveRange::HasOverlap(UseInterval* target) const { LiveRange::LiveRange(int id, Zone* zone) : id_(id), spilled_(false), - is_double_(false), + kind_(UNALLOCATED_REGISTERS), assigned_register_(kInvalidAssignment), last_interval_(NULL), first_interval_(NULL), @@ -145,12 +145,9 @@ LiveRange::LiveRange(int id, Zone* zone) spill_start_index_(kMaxInt) { } -void LiveRange::set_assigned_register(int reg, - RegisterKind register_kind, - Zone* zone) { +void LiveRange::set_assigned_register(int reg, Zone* zone) { ASSERT(!HasRegisterAssigned() && !IsSpilled()); assigned_register_ = reg; - is_double_ = (register_kind == DOUBLE_REGISTERS); ConvertOperands(zone); } @@ -234,10 +231,15 @@ LOperand* LiveRange::CreateAssignedOperand(Zone* zone) { LOperand* op = NULL; if (HasRegisterAssigned()) { ASSERT(!IsSpilled()); - if (IsDouble()) { - op = LDoubleRegister::Create(assigned_register(), zone); - } else { - op = LRegister::Create(assigned_register(), zone); + switch (Kind()) { + case GENERAL_REGISTERS: + op = LRegister::Create(assigned_register(), zone); + break; + case DOUBLE_REGISTERS: + op = LDoubleRegister::Create(assigned_register(), zone); + break; + default: + UNREACHABLE(); } } else if (IsSpilled()) { ASSERT(!HasRegisterAssigned()); @@ -352,6 +354,7 @@ void LiveRange::SplitAt(LifetimePosition position, // Link the new live range in the chain before any of the other // ranges linked from the range before the split. result->parent_ = (parent_ == NULL) ? 
this : parent_; + result->kind_ = result->parent_->kind_; result->next_ = next_; next_ = result; @@ -553,7 +556,7 @@ LAllocator::LAllocator(int num_values, HGraph* graph) reusable_slots_(8, zone()), next_virtual_register_(num_values), first_artificial_register_(num_values), - mode_(GENERAL_REGISTERS), + mode_(UNALLOCATED_REGISTERS), num_registers_(-1), graph_(graph), has_osr_entry_(false), @@ -653,7 +656,8 @@ LiveRange* LAllocator::FixedLiveRangeFor(int index) { if (result == NULL) { result = new(zone()) LiveRange(FixedLiveRangeID(index), chunk()->zone()); ASSERT(result->IsFixed()); - SetLiveRangeAssignedRegister(result, index, GENERAL_REGISTERS); + result->kind_ = GENERAL_REGISTERS; + SetLiveRangeAssignedRegister(result, index); fixed_live_ranges_[index] = result; } return result; @@ -667,7 +671,8 @@ LiveRange* LAllocator::FixedDoubleLiveRangeFor(int index) { result = new(zone()) LiveRange(FixedDoubleLiveRangeID(index), chunk()->zone()); ASSERT(result->IsFixed()); - SetLiveRangeAssignedRegister(result, index, DOUBLE_REGISTERS); + result->kind_ = DOUBLE_REGISTERS; + SetLiveRangeAssignedRegister(result, index); fixed_double_live_ranges_[index] = result; } return result; @@ -1375,6 +1380,12 @@ void LAllocator::BuildLiveRanges() { } #endif } + + for (int i = 0; i < live_ranges_.length(); ++i) { + if (live_ranges_[i] != NULL) { + live_ranges_[i]->kind_ = RequiredRegisterKind(live_ranges_[i]->id()); + } + } } @@ -1481,6 +1492,7 @@ void LAllocator::PopulatePointerMaps() { void LAllocator::AllocateGeneralRegisters() { LAllocatorPhase phase("L_Allocate general registers", this); num_registers_ = Register::NumAllocatableRegisters(); + mode_ = GENERAL_REGISTERS; AllocateRegisters(); } @@ -1498,7 +1510,7 @@ void LAllocator::AllocateRegisters() { for (int i = 0; i < live_ranges_.length(); ++i) { if (live_ranges_[i] != NULL) { - if (RequiredRegisterKind(live_ranges_[i]->id()) == mode_) { + if (live_ranges_[i]->Kind() == mode_) { AddToUnhandledUnsorted(live_ranges_[i]); } } @@ -1518,6 +1530,7 @@ void LAllocator::AllocateRegisters() { } } } else { + ASSERT(mode_ == GENERAL_REGISTERS); for (int i = 0; i < fixed_live_ranges_.length(); ++i) { LiveRange* current = fixed_live_ranges_.at(i); if (current != NULL) { @@ -1812,7 +1825,7 @@ bool LAllocator::TryAllocateFreeReg(LiveRange* current) { TraceAlloc("Assigning preferred reg %s to live range %d\n", RegisterName(register_index), current->id()); - SetLiveRangeAssignedRegister(current, register_index, mode_); + SetLiveRangeAssignedRegister(current, register_index); return true; } } @@ -1847,7 +1860,7 @@ bool LAllocator::TryAllocateFreeReg(LiveRange* current) { TraceAlloc("Assigning free reg %s to live range %d\n", RegisterName(reg), current->id()); - SetLiveRangeAssignedRegister(current, reg, mode_); + SetLiveRangeAssignedRegister(current, reg); return true; } @@ -1932,7 +1945,7 @@ void LAllocator::AllocateBlockedReg(LiveRange* current) { TraceAlloc("Assigning blocked reg %s to live range %d\n", RegisterName(reg), current->id()); - SetLiveRangeAssignedRegister(current, reg, mode_); + SetLiveRangeAssignedRegister(current, reg); // This register was not free. 
Thus we need to find and spill // parts of active and inactive live regions that use the same register @@ -2149,7 +2162,7 @@ void LAllocator::Spill(LiveRange* range) { if (!first->HasAllocatedSpillOperand()) { LOperand* op = TryReuseSpillSlot(range); - if (op == NULL) op = chunk_->GetNextSpillSlot(mode_ == DOUBLE_REGISTERS); + if (op == NULL) op = chunk_->GetNextSpillSlot(range->Kind()); first->SetSpillOperand(op); } range->MakeSpilled(chunk()->zone()); diff --git a/deps/v8/src/lithium-allocator.h b/deps/v8/src/lithium-allocator.h index e5edd3cf03..9908ea823d 100644 --- a/deps/v8/src/lithium-allocator.h +++ b/deps/v8/src/lithium-allocator.h @@ -146,6 +146,7 @@ class LifetimePosition { enum RegisterKind { + UNALLOCATED_REGISTERS, GENERAL_REGISTERS, DOUBLE_REGISTERS }; @@ -290,9 +291,7 @@ class LiveRange: public ZoneObject { LOperand* CreateAssignedOperand(Zone* zone); int assigned_register() const { return assigned_register_; } int spill_start_index() const { return spill_start_index_; } - void set_assigned_register(int reg, - RegisterKind register_kind, - Zone* zone); + void set_assigned_register(int reg, Zone* zone); void MakeSpilled(Zone* zone); // Returns use position in this live range that follows both start @@ -323,7 +322,7 @@ class LiveRange: public ZoneObject { // live range to the result live range. void SplitAt(LifetimePosition position, LiveRange* result, Zone* zone); - bool IsDouble() const { return is_double_; } + RegisterKind Kind() const { return kind_; } bool HasRegisterAssigned() const { return assigned_register_ != kInvalidAssignment; } @@ -392,7 +391,7 @@ class LiveRange: public ZoneObject { int id_; bool spilled_; - bool is_double_; + RegisterKind kind_; int assigned_register_; UseInterval* last_interval_; UseInterval* first_interval_; @@ -406,6 +405,8 @@ class LiveRange: public ZoneObject { LOperand* current_hint_operand_; LOperand* spill_operand_; int spill_start_index_; + + friend class LAllocator; // Assigns to kind_. }; @@ -568,9 +569,7 @@ class LAllocator BASE_EMBEDDED { HBasicBlock* block, HBasicBlock* pred); - inline void SetLiveRangeAssignedRegister(LiveRange* range, - int reg, - RegisterKind register_kind); + inline void SetLiveRangeAssignedRegister(LiveRange* range, int reg); // Return parallel move that should be used to connect ranges split at the // given position. diff --git a/deps/v8/src/lithium-codegen.cc b/deps/v8/src/lithium-codegen.cc new file mode 100644 index 0000000000..19ebe7e516 --- /dev/null +++ b/deps/v8/src/lithium-codegen.cc @@ -0,0 +1,150 @@ +// Copyright 2013 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#include "v8.h" + +#include "lithium-codegen.h" + +#if V8_TARGET_ARCH_IA32 +#include "ia32/lithium-ia32.h" +#include "ia32/lithium-codegen-ia32.h" +#elif V8_TARGET_ARCH_X64 +#include "x64/lithium-x64.h" +#include "x64/lithium-codegen-x64.h" +#elif V8_TARGET_ARCH_ARM +#include "arm/lithium-arm.h" +#include "arm/lithium-codegen-arm.h" +#elif V8_TARGET_ARCH_MIPS +#include "mips/lithium-mips.h" +#include "mips/lithium-codegen-mips.h" +#else +#error Unsupported target architecture. +#endif + +namespace v8 { +namespace internal { + + +HGraph* LCodeGenBase::graph() const { + return chunk()->graph(); +} + + +LCodeGenBase::LCodeGenBase(LChunk* chunk, + MacroAssembler* assembler, + CompilationInfo* info) + : chunk_(static_cast<LPlatformChunk*>(chunk)), + masm_(assembler), + info_(info), + zone_(info->zone()), + status_(UNUSED), + current_block_(-1), + current_instruction_(-1), + instructions_(chunk->instructions()), + last_lazy_deopt_pc_(0) { +} + + +bool LCodeGenBase::GenerateBody() { + ASSERT(is_generating()); + bool emit_instructions = true; + LCodeGen* codegen = static_cast<LCodeGen*>(this); + for (current_instruction_ = 0; + !is_aborted() && current_instruction_ < instructions_->length(); + current_instruction_++) { + LInstruction* instr = instructions_->at(current_instruction_); + + // Don't emit code for basic blocks with a replacement. + if (instr->IsLabel()) { + emit_instructions = !LLabel::cast(instr)->HasReplacement() && + (!FLAG_unreachable_code_elimination || + instr->hydrogen_value()->block()->IsReachable()); + if (FLAG_code_comments && !emit_instructions) { + Comment( + ";;; <@%d,#%d> -------------------- B%d (unreachable/replaced) " + "--------------------", + current_instruction_, + instr->hydrogen_value()->id(), + instr->hydrogen_value()->block()->block_id()); + } + } + if (!emit_instructions) continue; + + if (FLAG_code_comments && instr->HasInterestingComment(codegen)) { + Comment(";;; <@%d,#%d> %s", + current_instruction_, + instr->hydrogen_value()->id(), + instr->Mnemonic()); + } + + GenerateBodyInstructionPre(instr); + + HValue* value = instr->hydrogen_value(); + if (value->position() != RelocInfo::kNoPosition) { + ASSERT(!graph()->info()->IsOptimizing() || + !FLAG_emit_opt_code_positions || + value->position() != RelocInfo::kNoPosition); + RecordAndWritePosition(value->position()); + } + + instr->CompileToNative(codegen); + + GenerateBodyInstructionPost(instr); + } + EnsureSpaceForLazyDeopt(Deoptimizer::patch_size()); + last_lazy_deopt_pc_ = masm()->pc_offset(); + return !is_aborted(); +} + + +void LCodeGenBase::Comment(const char* format, ...) 
{ + if (!FLAG_code_comments) return; + char buffer[4 * KB]; + StringBuilder builder(buffer, ARRAY_SIZE(buffer)); + va_list arguments; + va_start(arguments, format); + builder.AddFormattedList(format, arguments); + va_end(arguments); + + // Copy the string before recording it in the assembler to avoid + // issues when the stack allocated buffer goes out of scope. + size_t length = builder.position(); + Vector<char> copy = Vector<char>::New(static_cast<int>(length) + 1); + OS::MemCopy(copy.start(), builder.Finalize(), copy.length()); + masm()->RecordComment(copy.start()); +} + + +int LCodeGenBase::GetNextEmittedBlock() const { + for (int i = current_block_ + 1; i < graph()->blocks()->length(); ++i) { + if (!chunk_->GetLabel(i)->HasReplacement()) return i; + } + return -1; +} + + +} } // namespace v8::internal diff --git a/deps/v8/src/lithium-codegen.h b/deps/v8/src/lithium-codegen.h new file mode 100644 index 0000000000..9caab8127d --- /dev/null +++ b/deps/v8/src/lithium-codegen.h @@ -0,0 +1,96 @@ +// Copyright 2013 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef V8_LITHIUM_CODEGEN_H_ +#define V8_LITHIUM_CODEGEN_H_ + +#include "v8.h" + +#include "compiler.h" + +namespace v8 { +namespace internal { + +class LInstruction; +class LPlatformChunk; + +class LCodeGenBase BASE_EMBEDDED { + public: + LCodeGenBase(LChunk* chunk, + MacroAssembler* assembler, + CompilationInfo* info); + virtual ~LCodeGenBase() {} + + // Simple accessors. 
+ MacroAssembler* masm() const { return masm_; } + CompilationInfo* info() const { return info_; } + Isolate* isolate() const { return info_->isolate(); } + Factory* factory() const { return isolate()->factory(); } + Heap* heap() const { return isolate()->heap(); } + Zone* zone() const { return zone_; } + LPlatformChunk* chunk() const { return chunk_; } + HGraph* graph() const; + + void FPRINTF_CHECKING Comment(const char* format, ...); + + bool GenerateBody(); + virtual void GenerateBodyInstructionPre(LInstruction* instr) {} + virtual void GenerateBodyInstructionPost(LInstruction* instr) {} + + virtual void EnsureSpaceForLazyDeopt(int space_needed) = 0; + virtual void RecordAndWritePosition(int position) = 0; + + int GetNextEmittedBlock() const; + + protected: + enum Status { + UNUSED, + GENERATING, + DONE, + ABORTED + }; + + LPlatformChunk* const chunk_; + MacroAssembler* const masm_; + CompilationInfo* const info_; + Zone* zone_; + Status status_; + int current_block_; + int current_instruction_; + const ZoneList<LInstruction*>* instructions_; + int last_lazy_deopt_pc_; + + bool is_unused() const { return status_ == UNUSED; } + bool is_generating() const { return status_ == GENERATING; } + bool is_done() const { return status_ == DONE; } + bool is_aborted() const { return status_ == ABORTED; } +}; + + +} } // namespace v8::internal + +#endif // V8_LITHIUM_CODEGEN_H_ diff --git a/deps/v8/src/lithium.cc b/deps/v8/src/lithium.cc index fa837c7ede..1be4b0654b 100644 --- a/deps/v8/src/lithium.cc +++ b/deps/v8/src/lithium.cc @@ -229,7 +229,7 @@ void LPointerMap::PrintTo(StringStream* stream) { if (i != 0) stream->Add(";"); pointer_operands_[i]->PrintTo(stream); } - stream->Add("} @%d", position()); + stream->Add("}"); } @@ -490,6 +490,14 @@ void LChunk::set_allocated_double_registers(BitVector* allocated_registers) { } +LInstruction* LChunkBuilder::CheckElideControlInstruction( + HControlInstruction* instr) { + HBasicBlock* successor; + if (!instr->KnownSuccessorBlock(&successor)) return NULL; + return new(zone()) LGoto(successor); +} + + LPhase::~LPhase() { if (ShouldProduceTraceOutput()) { isolate()->GetHTracer()->TraceLithium(name(), chunk_); diff --git a/deps/v8/src/lithium.h b/deps/v8/src/lithium.h index fd50ee8f8b..4f84087835 100644 --- a/deps/v8/src/lithium.h +++ b/deps/v8/src/lithium.h @@ -476,10 +476,9 @@ class LParallelMove V8_FINAL : public ZoneObject { class LPointerMap V8_FINAL : public ZoneObject { public: - explicit LPointerMap(int position, Zone* zone) + explicit LPointerMap(Zone* zone) : pointer_operands_(8, zone), untagged_operands_(0, zone), - position_(position), lithium_position_(-1) { } const ZoneList<LOperand*>* GetNormalizedOperands() { @@ -489,7 +488,6 @@ class LPointerMap V8_FINAL : public ZoneObject { untagged_operands_.Clear(); return &pointer_operands_; } - int position() const { return position_; } int lithium_position() const { return lithium_position_; } void set_lithium_position(int pos) { @@ -505,7 +503,6 @@ class LPointerMap V8_FINAL : public ZoneObject { private: ZoneList<LOperand*> pointer_operands_; ZoneList<LOperand*> untagged_operands_; - int position_; int lithium_position_; }; diff --git a/deps/v8/src/liveedit-debugger.js b/deps/v8/src/liveedit-debugger.js index 451b146bde..4618eda366 100644 --- a/deps/v8/src/liveedit-debugger.js +++ b/deps/v8/src/liveedit-debugger.js @@ -186,7 +186,7 @@ Debug.LiveEdit = new function() { // to old version. 
if (link_to_old_script_list.length == 0) { %LiveEditReplaceScript(script, new_source, null); - old_script = void 0; + old_script = UNDEFINED; } else { var old_script_name = CreateNameForOldScript(script); @@ -221,7 +221,7 @@ Debug.LiveEdit = new function() { change_log.push( {position_patched: position_patch_report} ); for (var i = 0; i < update_positions_list.length; i++) { - // TODO(LiveEdit): take into account wether it's source_changed or + // TODO(LiveEdit): take into account whether it's source_changed or // unchanged and whether positions changed at all. PatchPositions(update_positions_list[i], diff_array, position_patch_report); @@ -266,7 +266,7 @@ Debug.LiveEdit = new function() { // LiveEdit itself believe that any function in heap that points to a // particular script is a regular function. // For some functions we will restore this link later. - %LiveEditFunctionSetScript(info.shared_function_info, void 0); + %LiveEditFunctionSetScript(info.shared_function_info, UNDEFINED); compile_info.push(info); old_index_map.push(i); } @@ -288,7 +288,7 @@ Debug.LiveEdit = new function() { } } - // After sorting update outer_inder field using old_index_map. Also + // After sorting update outer_index field using old_index_map. Also // set next_sibling_index field. var current_index = 0; @@ -542,16 +542,16 @@ Debug.LiveEdit = new function() { this.children = children; // an index in array of compile_info this.array_index = array_index; - this.parent = void 0; + this.parent = UNDEFINED; this.status = FunctionStatus.UNCHANGED; // Status explanation is used for debugging purposes and will be shown // in user UI if some explanations are needed. - this.status_explanation = void 0; - this.new_start_pos = void 0; - this.new_end_pos = void 0; - this.corresponding_node = void 0; - this.unmatched_new_nodes = void 0; + this.status_explanation = UNDEFINED; + this.new_start_pos = UNDEFINED; + this.new_end_pos = UNDEFINED; + this.corresponding_node = UNDEFINED; + this.unmatched_new_nodes = UNDEFINED; // 'Textual' correspondence/matching is weaker than 'pure' // correspondence/matching. We need 'textual' level for visual presentation @@ -559,10 +559,10 @@ Debug.LiveEdit = new function() { // Sometimes only function body is changed (functions in old and new script // textually correspond), but we cannot patch the code, so we see them // as an old function deleted and new function created. - this.textual_corresponding_node = void 0; - this.textually_unmatched_new_nodes = void 0; + this.textual_corresponding_node = UNDEFINED; + this.textually_unmatched_new_nodes = UNDEFINED; - this.live_shared_function_infos = void 0; + this.live_shared_function_infos = UNDEFINED; } // From array of function infos that is implicitly a tree creates @@ -692,10 +692,10 @@ Debug.LiveEdit = new function() { ProcessInternals(code_info_tree); } - // For ecah old function (if it is not damaged) tries to find a corresponding + // For each old function (if it is not damaged) tries to find a corresponding // function in new script. Typically it should succeed (non-damaged functions // by definition may only have changes inside their bodies). However there are - // reasons for corresponence not to be found; function with unmodified text + // reasons for correspondence not to be found; function with unmodified text // in new script may become enclosed into other function; the innocent change // inside function body may in fact be something like "} function B() {" that // splits a function into 2 functions. 
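The comments above explain why old/new function correspondence can fail; the ProcessNode pass in the next hunk establishes it by walking both function trees in source-position order, advancing whichever side is behind and recursing on a positional match. A simplified standalone C++ sketch of that positional matching is below; Node, MatchChildren, and start_pos are illustrative stand-ins, not the LiveEdit structures, and the real JavaScript pass additionally propagates DAMAGED status and tracks textual correspondence.

// Match children of an old and a new function tree by (remapped)
// start position, in the spirit of the LiveEdit pass above.
#include <cstddef>
#include <vector>

struct Node {
  int start_pos;                   // old positions assumed already remapped
  Node* corresponding = nullptr;
  std::vector<Node*> children;
};

void MatchChildren(Node* old_node, Node* new_node) {
  std::size_t o = 0, n = 0;
  while (o < old_node->children.size() && n < new_node->children.size()) {
    Node* oc = old_node->children[o];
    Node* nc = new_node->children[n];
    if (oc->start_pos < nc->start_pos) {
      o++;                         // old function vanished or moved
    } else if (oc->start_pos > nc->start_pos) {
      n++;                         // brand-new function in the new script
    } else {
      oc->corresponding = nc;      // same start: treat as the same function
      MatchChildren(oc, nc);       // recurse into inner functions
      o++;
      n++;
    }
  }
}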
@@ -703,7 +703,13 @@ Debug.LiveEdit = new function() { // A recursive function that tries to find a correspondence for all // child functions and for their inner functions. - function ProcessChildren(old_node, new_node) { + function ProcessNode(old_node, new_node) { + var scope_change_description = + IsFunctionContextLocalsChanged(old_node.info, new_node.info); + if (scope_change_description) { + old_node.status = FunctionStatus.CHANGED; + } + var old_children = old_node.children; var new_children = new_node.children; @@ -729,13 +735,20 @@ Debug.LiveEdit = new function() { new_children[new_index]; old_children[old_index].textual_corresponding_node = new_children[new_index]; - if (old_children[old_index].status != FunctionStatus.UNCHANGED) { - ProcessChildren(old_children[old_index], + if (scope_change_description) { + old_children[old_index].status = FunctionStatus.DAMAGED; + old_children[old_index].status_explanation = + "Enclosing function is now incompatible. " + + scope_change_description; + old_children[old_index].corresponding_node = UNDEFINED; + } else if (old_children[old_index].status != + FunctionStatus.UNCHANGED) { + ProcessNode(old_children[old_index], new_children[new_index]); if (old_children[old_index].status == FunctionStatus.DAMAGED) { unmatched_new_nodes_list.push( old_children[old_index].corresponding_node); - old_children[old_index].corresponding_node = void 0; + old_children[old_index].corresponding_node = UNDEFINED; old_node.status = FunctionStatus.CHANGED; } } @@ -772,11 +785,10 @@ Debug.LiveEdit = new function() { } if (old_node.status == FunctionStatus.CHANGED) { - var why_wrong_expectations = - WhyFunctionExpectationsDiffer(old_node.info, new_node.info); - if (why_wrong_expectations) { + if (old_node.info.param_num != new_node.info.param_num) { old_node.status = FunctionStatus.DAMAGED; - old_node.status_explanation = why_wrong_expectations; + old_node.status_explanation = "Changed parameter number: " + + old_node.info.param_num + " and " + new_node.info.param_num; } } old_node.unmatched_new_nodes = unmatched_new_nodes_list; @@ -784,7 +796,7 @@ Debug.LiveEdit = new function() { textually_unmatched_new_nodes_list; } - ProcessChildren(old_code_tree, new_code_tree); + ProcessNode(old_code_tree, new_code_tree); old_code_tree.corresponding_node = new_code_tree; old_code_tree.textual_corresponding_node = new_code_tree; @@ -856,7 +868,7 @@ Debug.LiveEdit = new function() { this.raw_array = raw_array; } - // Changes positions (including all statments) in function. + // Changes positions (including all statements) in function. function PatchPositions(old_info_node, diff_array, report_array) { if (old_info_node.live_shared_function_infos) { old_info_node.live_shared_function_infos.forEach(function (info) { @@ -878,15 +890,9 @@ Debug.LiveEdit = new function() { return script.name + " (old)"; } - // Compares a function interface old and new version, whether it + // Compares a function scope heap structure, old and new version, whether it // changed or not. Returns explanation if they differ. - function WhyFunctionExpectationsDiffer(function_info1, function_info2) { - // Check that function has the same number of parameters (there may exist - // an adapter, that won't survive function parameter number change). 
- if (function_info1.param_num != function_info2.param_num) { - return "Changed parameter number: " + function_info1.param_num + - " and " + function_info2.param_num; - } + function IsFunctionContextLocalsChanged(function_info1, function_info2) { var scope_info1 = function_info1.scope_info; var scope_info2 = function_info2.scope_info; @@ -905,8 +911,8 @@ Debug.LiveEdit = new function() { } if (scope_info1_text != scope_info2_text) { - return "Incompatible variable maps: [" + scope_info1_text + - "] and [" + scope_info2_text + "]"; + return "Variable map changed: [" + scope_info1_text + + "] => [" + scope_info2_text + "]"; } // No differences. Return undefined. return; diff --git a/deps/v8/src/liveedit.cc b/deps/v8/src/liveedit.cc index feaafd471e..3d459d4ffb 100644 --- a/deps/v8/src/liveedit.cc +++ b/deps/v8/src/liveedit.cc @@ -731,8 +731,8 @@ class FunctionInfoWrapper : public JSArrayBasedStruct<FunctionInfoWrapper> { Handle<JSValue> scope_wrapper = WrapInJSValue(code_scope_info); this->SetField(kCodeScopeInfoOffset_, scope_wrapper); } - void SetOuterScopeInfo(Handle<Object> scope_info_array) { - this->SetField(kOuterScopeInfoOffset_, scope_info_array); + void SetFunctionScopeInfo(Handle<Object> scope_info_array) { + this->SetField(kFunctionScopeInfoOffset_, scope_info_array); } void SetSharedFunctionInfo(Handle<SharedFunctionInfo> info) { Handle<JSValue> info_holder = WrapInJSValue(info); @@ -771,7 +771,7 @@ class FunctionInfoWrapper : public JSArrayBasedStruct<FunctionInfoWrapper> { static const int kParamNumOffset_ = 3; static const int kCodeOffset_ = 4; static const int kCodeScopeInfoOffset_ = 5; - static const int kOuterScopeInfoOffset_ = 6; + static const int kFunctionScopeInfoOffset_ = 6; static const int kParentIndexOffset_ = 7; static const int kSharedFunctionInfoOffset_ = 8; static const int kLiteralNumOffset_ = 9; @@ -880,7 +880,7 @@ class FunctionInfoListener { Handle<Object> scope_info_list(SerializeFunctionScope(scope, zone), isolate()); - info.SetOuterScopeInfo(scope_info_list); + info.SetFunctionScopeInfo(scope_info_list); } Handle<JSArray> GetResult() { return result_; } @@ -897,14 +897,12 @@ class FunctionInfoListener { // Saves some description of scope. It stores name and indexes of // variables in the whole scope chain. Null-named slots delimit // scopes of this chain. 
- Scope* outer_scope = scope->outer_scope(); - if (outer_scope == NULL) { - return isolate()->heap()->undefined_value(); - } - do { - ZoneList<Variable*> stack_list(outer_scope->StackLocalCount(), zone); - ZoneList<Variable*> context_list(outer_scope->ContextLocalCount(), zone); - outer_scope->CollectStackAndContextLocals(&stack_list, &context_list); + Scope* current_scope = scope; + while (current_scope != NULL) { + ZoneList<Variable*> stack_list(current_scope->StackLocalCount(), zone); + ZoneList<Variable*> context_list( + current_scope->ContextLocalCount(), zone); + current_scope->CollectStackAndContextLocals(&stack_list, &context_list); context_list.Sort(&Variable::CompareIndex); for (int i = 0; i < context_list.length(); i++) { @@ -924,8 +922,8 @@ class FunctionInfoListener { isolate())); scope_info_length++; - outer_scope = outer_scope->outer_scope(); - } while (outer_scope != NULL); + current_scope = current_scope->outer_scope(); + } return *scope_info_list; } diff --git a/deps/v8/src/log.cc b/deps/v8/src/log.cc index 0f0ad40398..b353f548fb 100644 --- a/deps/v8/src/log.cc +++ b/deps/v8/src/log.cc @@ -212,7 +212,7 @@ void CodeEventLogger::CodeCreateEvent(Logger::LogEventsAndTags tag, Code* code, SharedFunctionInfo* shared, CompilationInfo* info, - Name* source, int line) { + Name* source, int line, int column) { name_buffer_->Init(tag); name_buffer_->AppendBytes(ComputeMarker(code)); name_buffer_->AppendString(shared->DebugName()); @@ -1232,10 +1232,11 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag, SharedFunctionInfo* shared, CompilationInfo* info, Name* source, int line, int column) { - PROFILER_LOG(CodeCreateEvent(tag, code, shared, info, source, line)); + PROFILER_LOG(CodeCreateEvent(tag, code, shared, info, source, line, column)); if (!is_logging_code_events()) return; - CALL_LISTENERS(CodeCreateEvent(tag, code, shared, info, source, line)); + CALL_LISTENERS(CodeCreateEvent(tag, code, shared, info, source, line, + column)); if (!FLAG_log_code || !log_->IsEnabled()) return; Log::MessageBuilder msg(log_); @@ -1610,7 +1611,12 @@ void Logger::LogCodeObject(Object* object) { case Code::FUNCTION: case Code::OPTIMIZED_FUNCTION: return; // We log this later using LogCompiledFunctions. - case Code::BINARY_OP_IC: // fall through + case Code::BINARY_OP_IC: { + BinaryOpStub stub(code_object->extended_extra_ic_state()); + description = stub.GetName().Detach(); + tag = Logger::STUB_TAG; + break; + } case Code::COMPARE_IC: // fall through case Code::COMPARE_NIL_IC: // fall through case Code::TO_BOOLEAN_IC: // fall through @@ -1629,6 +1635,10 @@ void Logger::LogCodeObject(Object* object) { description = "A builtin from the snapshot"; tag = Logger::BUILTIN_TAG; break; + case Code::HANDLER: + description = "An IC handler from the snapshot"; + tag = Logger::HANDLER_TAG; + break; case Code::KEYED_LOAD_IC: description = "A keyed load IC from the snapshot"; tag = Logger::KEYED_LOAD_IC_TAG; @@ -1765,15 +1775,14 @@ void Logger::LogAccessorCallbacks() { static void AddIsolateIdIfNeeded(Isolate* isolate, StringStream* stream) { - if (isolate->IsDefaultIsolate()) return; + if (isolate->IsDefaultIsolate() || !FLAG_logfile_per_isolate) return; stream->Add("isolate-%p-", isolate); } static SmartArrayPointer<const char> PrepareLogFileName( Isolate* isolate, const char* file_name) { - if (strchr(file_name, '%') != NULL || - !isolate->IsDefaultIsolate()) { + if (strchr(file_name, '%') != NULL || !isolate->IsDefaultIsolate()) { // If there's a '%' in the log file name we have to expand // placeholders. 
HeapStringAllocator allocator; diff --git a/deps/v8/src/log.h b/deps/v8/src/log.h index 81d45e507b..c0efd6504d 100644 --- a/deps/v8/src/log.h +++ b/deps/v8/src/log.h @@ -131,6 +131,7 @@ struct TickSample; V(CALLBACK_TAG, "Callback") \ V(EVAL_TAG, "Eval") \ V(FUNCTION_TAG, "Function") \ + V(HANDLER_TAG, "Handler") \ V(KEYED_LOAD_IC_TAG, "KeyedLoadIC") \ V(KEYED_LOAD_POLYMORPHIC_IC_TAG, "KeyedLoadPolymorphicIC") \ V(KEYED_EXTERNAL_ARRAY_LOAD_IC_TAG, "KeyedExternalArrayLoadIC") \ @@ -470,7 +471,7 @@ class CodeEventListener { SharedFunctionInfo* shared, CompilationInfo* info, Name* source, - int line) = 0; + int line, int column) = 0; virtual void CodeCreateEvent(Logger::LogEventsAndTags tag, Code* code, int args_count) = 0; @@ -509,7 +510,7 @@ class CodeEventLogger : public CodeEventListener { SharedFunctionInfo* shared, CompilationInfo* info, Name* source, - int line); + int line, int column); virtual void RegExpCodeCreateEvent(Code* code, String* source); virtual void CallbackEvent(Name* name, Address entry_point) { } diff --git a/deps/v8/src/macros.py b/deps/v8/src/macros.py index d699c14621..1785d44a8c 100644 --- a/deps/v8/src/macros.py +++ b/deps/v8/src/macros.py @@ -157,6 +157,11 @@ macro TO_NUMBER_INLINE(arg) = (IS_NUMBER(%IS_VAR(arg)) ? arg : NonNumberToNumber macro TO_OBJECT_INLINE(arg) = (IS_SPEC_OBJECT(%IS_VAR(arg)) ? arg : ToObject(arg)); macro JSON_NUMBER_TO_STRING(arg) = ((%_IsSmi(%IS_VAR(arg)) || arg - arg == 0) ? %_NumberToString(arg) : "null"); +# Constants. The compiler constant folds them. +const NAN = $NaN; +const INFINITY = (1/0); +const UNDEFINED = (void 0); + # Macros implemented in Python. python macro CHAR_CODE(str) = ord(str[1]); diff --git a/deps/v8/src/mark-compact.cc b/deps/v8/src/mark-compact.cc index 263de4878f..b75ddb382b 100644 --- a/deps/v8/src/mark-compact.cc +++ b/deps/v8/src/mark-compact.cc @@ -38,7 +38,6 @@ #include "ic-inl.h" #include "incremental-marking.h" #include "mark-compact.h" -#include "marking-thread.h" #include "objects-visiting.h" #include "objects-visiting-inl.h" #include "stub-cache.h" @@ -92,10 +91,8 @@ class VerifyMarkingVisitor: public ObjectVisitor { void VisitEmbeddedPointer(RelocInfo* rinfo) { ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT); - if (!FLAG_weak_embedded_maps_in_optimized_code || !FLAG_collect_maps || - rinfo->host()->kind() != Code::OPTIMIZED_FUNCTION || - !rinfo->target_object()->IsMap() || - !Map::cast(rinfo->target_object())->CanTransition()) { + if (!Code::IsWeakEmbeddedObject(rinfo->host()->kind(), + rinfo->target_object())) { VisitPointer(rinfo->target_object_address()); } } @@ -408,6 +405,8 @@ void MarkCompactCollector::CollectGarbage() { ASSERT(state_ == PREPARE_GC); ASSERT(encountered_weak_collections_ == Smi::FromInt(0)); + heap()->allocation_mementos_found_ = 0; + MarkLiveObjects(); ASSERT(heap_->incremental_marking()->IsStopped()); @@ -432,9 +431,8 @@ void MarkCompactCollector::CollectGarbage() { #endif #ifdef VERIFY_HEAP - if (FLAG_collect_maps && FLAG_weak_embedded_maps_in_optimized_code && - heap()->weak_embedded_maps_verification_enabled()) { - VerifyWeakEmbeddedMapsInOptimizedCode(); + if (heap()->weak_embedded_objects_verification_enabled()) { + VerifyWeakEmbeddedObjectsInOptimizedCode(); } if (FLAG_collect_maps && FLAG_omit_map_checks_for_leaf_maps) { VerifyOmittedMapChecks(); @@ -450,6 +448,11 @@ void MarkCompactCollector::CollectGarbage() { marking_parity_ = EVEN_MARKING_PARITY; } + if (FLAG_trace_track_allocation_sites && + heap()->allocation_mementos_found_ > 0) { + PrintF("AllocationMementos 
found during mark-sweep = %d\n", + heap()->allocation_mementos_found_); + } tracer_ = NULL; } @@ -495,7 +498,7 @@ void MarkCompactCollector::VerifyMarkbitsAreClean() { } -void MarkCompactCollector::VerifyWeakEmbeddedMapsInOptimizedCode() { +void MarkCompactCollector::VerifyWeakEmbeddedObjectsInOptimizedCode() { HeapObjectIterator code_iterator(heap()->code_space()); for (HeapObject* obj = code_iterator.Next(); obj != NULL; @@ -503,7 +506,7 @@ void MarkCompactCollector::VerifyWeakEmbeddedMapsInOptimizedCode() { Code* code = Code::cast(obj); if (code->kind() != Code::OPTIMIZED_FUNCTION) continue; if (WillBeDeoptimized(code)) continue; - code->VerifyEmbeddedMapsDependency(); + code->VerifyEmbeddedObjectsDependency(); } } @@ -601,20 +604,6 @@ bool MarkCompactCollector::IsConcurrentSweepingInProgress() { } -void MarkCompactCollector::MarkInParallel() { - for (int i = 0; i < FLAG_marking_threads; i++) { - isolate()->marking_threads()[i]->StartMarking(); - } -} - - -void MarkCompactCollector::WaitUntilMarkingCompleted() { - for (int i = 0; i < FLAG_marking_threads; i++) { - isolate()->marking_threads()[i]->WaitForMarkingThread(); - } -} - - bool Marking::TransferMark(Address old_start, Address new_start) { // This is only used when resizing an object. ASSERT(MemoryChunk::FromAddress(old_start) == @@ -1481,7 +1470,7 @@ class MarkCompactMarkingVisitor // Mark the backing hash table without pushing it on the marking stack. Object* table_object = weak_collection->table(); if (!table_object->IsHashTable()) return; - ObjectHashTable* table = ObjectHashTable::cast(table_object); + WeakHashTable* table = WeakHashTable::cast(table_object); Object** table_slot = HeapObject::RawField(weak_collection, JSWeakCollection::kTableOffset); MarkBit table_mark = Marking::MarkBitFrom(table); @@ -1581,13 +1570,11 @@ void MarkCompactMarkingVisitor::ObjectStatsCountFixedArray( fixed_array->map() != heap->fixed_double_array_map() && fixed_array != heap->empty_fixed_array()) { if (fixed_array->IsDictionary()) { - heap->RecordObjectStats(FIXED_ARRAY_TYPE, - dictionary_type, - fixed_array->Size()); + heap->RecordFixedArraySubTypeStats(dictionary_type, + fixed_array->Size()); } else { - heap->RecordObjectStats(FIXED_ARRAY_TYPE, - fast_type, - fixed_array->Size()); + heap->RecordFixedArraySubTypeStats(fast_type, + fixed_array->Size()); } } } @@ -1597,7 +1584,7 @@ void MarkCompactMarkingVisitor::ObjectStatsVisitBase( MarkCompactMarkingVisitor::VisitorId id, Map* map, HeapObject* obj) { Heap* heap = map->GetHeap(); int object_size = obj->Size(); - heap->RecordObjectStats(map->instance_type(), -1, object_size); + heap->RecordObjectStats(map->instance_type(), object_size); non_count_table_.GetVisitorById(id)(map, obj); if (obj->IsJSObject()) { JSObject* object = JSObject::cast(obj); @@ -1630,25 +1617,20 @@ class MarkCompactMarkingVisitor::ObjectStatsTracker< if (map_obj->owns_descriptors() && array != heap->empty_descriptor_array()) { int fixed_array_size = array->Size(); - heap->RecordObjectStats(FIXED_ARRAY_TYPE, - DESCRIPTOR_ARRAY_SUB_TYPE, - fixed_array_size); + heap->RecordFixedArraySubTypeStats(DESCRIPTOR_ARRAY_SUB_TYPE, + fixed_array_size); } if (map_obj->HasTransitionArray()) { int fixed_array_size = map_obj->transitions()->Size(); - heap->RecordObjectStats(FIXED_ARRAY_TYPE, - TRANSITION_ARRAY_SUB_TYPE, - fixed_array_size); + heap->RecordFixedArraySubTypeStats(TRANSITION_ARRAY_SUB_TYPE, + fixed_array_size); } if (map_obj->has_code_cache()) { CodeCache* cache = CodeCache::cast(map_obj->code_cache()); - 
heap->RecordObjectStats( - FIXED_ARRAY_TYPE, - MAP_CODE_CACHE_SUB_TYPE, - cache->default_cache()->Size()); + heap->RecordFixedArraySubTypeStats(MAP_CODE_CACHE_SUB_TYPE, + cache->default_cache()->Size()); if (!cache->normal_type_cache()->IsUndefined()) { - heap->RecordObjectStats( - FIXED_ARRAY_TYPE, + heap->RecordFixedArraySubTypeStats( MAP_CODE_CACHE_SUB_TYPE, FixedArray::cast(cache->normal_type_cache())->Size()); } @@ -1666,7 +1648,9 @@ class MarkCompactMarkingVisitor::ObjectStatsTracker< Heap* heap = map->GetHeap(); int object_size = obj->Size(); ASSERT(map->instance_type() == CODE_TYPE); - heap->RecordObjectStats(CODE_TYPE, Code::cast(obj)->kind(), object_size); + Code* code_obj = Code::cast(obj); + heap->RecordCodeSubTypeStats(code_obj->kind(), code_obj->GetAge(), + object_size); ObjectStatsVisitBase(kVisitCode, map, obj); } }; @@ -1680,8 +1664,7 @@ class MarkCompactMarkingVisitor::ObjectStatsTracker< Heap* heap = map->GetHeap(); SharedFunctionInfo* sfi = SharedFunctionInfo::cast(obj); if (sfi->scope_info() != heap->empty_fixed_array()) { - heap->RecordObjectStats( - FIXED_ARRAY_TYPE, + heap->RecordFixedArraySubTypeStats( SCOPE_INFO_SUB_TYPE, FixedArray::cast(sfi->scope_info())->Size()); } @@ -1698,8 +1681,7 @@ class MarkCompactMarkingVisitor::ObjectStatsTracker< Heap* heap = map->GetHeap(); FixedArray* fixed_array = FixedArray::cast(obj); if (fixed_array == heap->string_table()) { - heap->RecordObjectStats( - FIXED_ARRAY_TYPE, + heap->RecordFixedArraySubTypeStats( STRING_TABLE_SUB_TYPE, fixed_array->Size()); } @@ -2017,6 +1999,13 @@ int MarkCompactCollector::DiscoverAndPromoteBlackObjectsOnPage( int size = object->Size(); survivors_size += size; + if (FLAG_trace_track_allocation_sites && object->IsJSObject()) { + if (AllocationMemento::FindForJSObject(JSObject::cast(object), true) + != NULL) { + heap()->allocation_mementos_found_++; + } + } + offset++; current_cell >>= 1; // Aggressively promote young survivors to the old space. @@ -2116,6 +2105,8 @@ void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) { // Handle the string table specially. MarkStringTable(visitor); + MarkWeakObjectToCodeTable(); + // There may be overflowed objects in the heap. Visit them now. while (marking_deque_.overflowed()) { RefillMarkingDeque(); @@ -2156,6 +2147,16 @@ void MarkCompactCollector::MarkImplicitRefGroups() { } +void MarkCompactCollector::MarkWeakObjectToCodeTable() { + HeapObject* weak_object_to_code_table = + HeapObject::cast(heap()->weak_object_to_code_table()); + if (!IsMarked(weak_object_to_code_table)) { + MarkBit mark = Marking::MarkBitFrom(weak_object_to_code_table); + SetMark(weak_object_to_code_table, mark); + } +} + + // Mark all objects reachable from the objects on the marking stack. // Before: the marking stack contains zero or more heap object pointers. 
// After: the marking stack is empty, and all objects reachable from the @@ -2523,7 +2524,8 @@ void MarkCompactCollector::ClearNonLiveReferences() { if (map_mark.Get()) { ClearNonLiveDependentCode(map->dependent_code()); } else { - ClearAndDeoptimizeDependentCode(map); + ClearAndDeoptimizeDependentCode(map->dependent_code()); + map->set_dependent_code(DependentCode::cast(heap()->empty_fixed_array())); } } @@ -2537,6 +2539,31 @@ void MarkCompactCollector::ClearNonLiveReferences() { ClearNonLiveDependentCode(PropertyCell::cast(cell)->dependent_code()); } } + + if (heap_->weak_object_to_code_table()->IsHashTable()) { + WeakHashTable* table = + WeakHashTable::cast(heap_->weak_object_to_code_table()); + uint32_t capacity = table->Capacity(); + for (uint32_t i = 0; i < capacity; i++) { + uint32_t key_index = table->EntryToIndex(i); + Object* key = table->get(key_index); + if (!table->IsKey(key)) continue; + uint32_t value_index = table->EntryToValueIndex(i); + Object* value = table->get(value_index); + if (IsMarked(key)) { + if (!IsMarked(value)) { + HeapObject* obj = HeapObject::cast(value); + MarkBit mark = Marking::MarkBitFrom(obj); + SetMark(obj, mark); + } + ClearNonLiveDependentCode(DependentCode::cast(value)); + } else { + ClearAndDeoptimizeDependentCode(DependentCode::cast(value)); + table->set(key_index, heap_->the_hole_value()); + table->set(value_index, heap_->the_hole_value()); + } + } + } } @@ -2602,9 +2629,9 @@ void MarkCompactCollector::ClearNonLiveMapTransitions(Map* map, } -void MarkCompactCollector::ClearAndDeoptimizeDependentCode(Map* map) { +void MarkCompactCollector::ClearAndDeoptimizeDependentCode( + DependentCode* entries) { DisallowHeapAllocation no_allocation; - DependentCode* entries = map->dependent_code(); DependentCode::GroupStartIndexes starts(entries); int number_of_entries = starts.number_of_entries(); if (number_of_entries == 0) return; @@ -2620,7 +2647,6 @@ void MarkCompactCollector::ClearAndDeoptimizeDependentCode(Map* map) { } entries->clear_at(i); } - map->set_dependent_code(DependentCode::cast(heap()->empty_fixed_array())); } @@ -2726,10 +2752,12 @@ void MarkCompactCollector::MigrateObject(Address dst, Address src, int size, AllocationSpace dest) { - HEAP_PROFILE(heap(), ObjectMoveEvent(src, dst)); - // TODO(hpayer): Replace these checks with asserts. 
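// The MigrateObject hunk below downgrades release-mode CHECKs to debug-only
// ASSERTs. A minimal sketch of that distinction, using simplified stand-in
// macros rather than V8's actual checks.h definitions:
#include <cassert>
#include <cstdio>
#include <cstdlib>
#define SKETCH_CHECK(cond)                                \
  do {                                                    \
    if (!(cond)) {                                        \
      std::fprintf(stderr, "Check failed: %s\n", #cond);  \
      std::abort();                                       \
    }                                                     \
  } while (false)
#ifdef DEBUG
#define SKETCH_ASSERT(cond) assert(cond)  // enforced in debug builds only
#else
#define SKETCH_ASSERT(cond) ((void)0)     // compiles away in release builds
#endif
int main() {
  int object_size = 16;
  SKETCH_ASSERT(object_size > 0);     // free in release builds
  SKETCH_CHECK(object_size <= 4096);  // always enforced, at a runtime cost
  return 0;
}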
- CHECK(heap()->AllowedToBeMigrated(HeapObject::FromAddress(src), dest)); - CHECK(dest != LO_SPACE && size <= Page::kMaxNonCodeHeapObjectSize); + HeapProfiler* heap_profiler = heap()->isolate()->heap_profiler(); + if (heap_profiler->is_profiling()) { + heap_profiler->ObjectMoveEvent(src, dst, size); + } + ASSERT(heap()->AllowedToBeMigrated(HeapObject::FromAddress(src), dest)); + ASSERT(dest != LO_SPACE && size <= Page::kMaxNonCodeHeapObjectSize); if (dest == OLD_POINTER_SPACE) { Address src_slot = src; Address dst_slot = dst; @@ -2910,7 +2938,9 @@ bool MarkCompactCollector::TryPromoteObject(HeapObject* object, ASSERT(target_space == heap()->old_pointer_space() || target_space == heap()->old_data_space()); Object* result; - MaybeObject* maybe_result = target_space->AllocateRaw(object_size); + MaybeObject* maybe_result = target_space->AllocateRaw( + object_size, + PagedSpace::MOVE_OBJECT); if (maybe_result->ToObject(&result)) { HeapObject* target = HeapObject::cast(result); MigrateObject(target->address(), @@ -2983,7 +3013,7 @@ void MarkCompactCollector::EvacuateLiveObjectsFromPage(Page* p) { int size = object->Size(); - MaybeObject* target = space->AllocateRaw(size); + MaybeObject* target = space->AllocateRaw(size, PagedSpace::MOVE_OBJECT); if (target->IsFailure()) { // OS refused to give us memory. V8::FatalProcessOutOfMemory("Evacuation"); @@ -3459,6 +3489,13 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() { updating_visitor.VisitPointer(heap_->native_contexts_list_address()); heap_->string_table()->Iterate(&updating_visitor); + updating_visitor.VisitPointer(heap_->weak_object_to_code_table_address()); + if (heap_->weak_object_to_code_table()->IsHashTable()) { + WeakHashTable* table = + WeakHashTable::cast(heap_->weak_object_to_code_table()); + table->Iterate(&updating_visitor); + table->Rehash(heap_->undefined_value()); + } // Update pointers from external string table. heap_->UpdateReferencesInExternalStringTable( diff --git a/deps/v8/src/mark-compact.h b/deps/v8/src/mark-compact.h index df2f782113..aea5e1cf66 100644 --- a/deps/v8/src/mark-compact.h +++ b/deps/v8/src/mark-compact.h @@ -637,7 +637,7 @@ class MarkCompactCollector { void VerifyMarkbitsAreClean(); static void VerifyMarkbitsAreClean(PagedSpace* space); static void VerifyMarkbitsAreClean(NewSpace* space); - void VerifyWeakEmbeddedMapsInOptimizedCode(); + void VerifyWeakEmbeddedObjectsInOptimizedCode(); void VerifyOmittedMapChecks(); #endif @@ -735,10 +735,9 @@ class MarkCompactCollector { return sequential_sweeping_; } - // Parallel marking support. - void MarkInParallel(); - - void WaitUntilMarkingCompleted(); + // Mark the global table which maps weak objects to dependent code without + // marking its contents. + void MarkWeakObjectToCodeTable(); private: MarkCompactCollector(); @@ -889,7 +888,7 @@ class MarkCompactCollector { void ClearNonLivePrototypeTransitions(Map* map); void ClearNonLiveMapTransitions(Map* map, MarkBit map_mark); - void ClearAndDeoptimizeDependentCode(Map* map); + void ClearAndDeoptimizeDependentCode(DependentCode* dependent_code); void ClearNonLiveDependentCode(DependentCode* dependent_code); // Marking detaches initial maps from SharedFunctionInfo objects diff --git a/deps/v8/src/math.js b/deps/v8/src/math.js index 9ba1934b85..efab63a186 100644 --- a/deps/v8/src/math.js +++ b/deps/v8/src/math.js @@ -45,59 +45,51 @@ var $Math = new MathConstructor(); // ECMA 262 - 15.8.2.1 function MathAbs(x) { if (%_IsSmi(x)) return x >= 0 ? 
x : -x; - if (!IS_NUMBER(x)) x = NonNumberToNumber(x); + x = TO_NUMBER_INLINE(x); if (x === 0) return 0; // To handle -0. return x > 0 ? x : -x; } // ECMA 262 - 15.8.2.2 function MathAcos(x) { - if (!IS_NUMBER(x)) x = NonNumberToNumber(x); - return %Math_acos(x); + return %Math_acos(TO_NUMBER_INLINE(x)); } // ECMA 262 - 15.8.2.3 function MathAsin(x) { - if (!IS_NUMBER(x)) x = NonNumberToNumber(x); - return %Math_asin(x); + return %Math_asin(TO_NUMBER_INLINE(x)); } // ECMA 262 - 15.8.2.4 function MathAtan(x) { - if (!IS_NUMBER(x)) x = NonNumberToNumber(x); - return %Math_atan(x); + return %Math_atan(TO_NUMBER_INLINE(x)); } // ECMA 262 - 15.8.2.5 // The naming of y and x matches the spec, as does the order in which // ToNumber (valueOf) is called. function MathAtan2(y, x) { - if (!IS_NUMBER(y)) y = NonNumberToNumber(y); - if (!IS_NUMBER(x)) x = NonNumberToNumber(x); - return %Math_atan2(y, x); + return %Math_atan2(TO_NUMBER_INLINE(y), TO_NUMBER_INLINE(x)); } // ECMA 262 - 15.8.2.6 function MathCeil(x) { - if (!IS_NUMBER(x)) x = NonNumberToNumber(x); - return %Math_ceil(x); + return %Math_ceil(TO_NUMBER_INLINE(x)); } // ECMA 262 - 15.8.2.7 function MathCos(x) { - if (!IS_NUMBER(x)) x = NonNumberToNumber(x); - return %_MathCos(x); + return %_MathCos(TO_NUMBER_INLINE(x)); } // ECMA 262 - 15.8.2.8 function MathExp(x) { - if (!IS_NUMBER(x)) x = NonNumberToNumber(x); - return %Math_exp(x); + return %Math_exp(TO_NUMBER_INLINE(x)); } // ECMA 262 - 15.8.2.9 function MathFloor(x) { - if (!IS_NUMBER(x)) x = NonNumberToNumber(x); + x = TO_NUMBER_INLINE(x); // It's more common to call this with a positive number that's out // of range than negative numbers; check the upper bound first. if (x < 0x80000000 && x > 0) { @@ -113,16 +105,15 @@ function MathFloor(x) { // ECMA 262 - 15.8.2.10 function MathLog(x) { - if (!IS_NUMBER(x)) x = NonNumberToNumber(x); - return %_MathLog(x); + return %_MathLog(TO_NUMBER_INLINE(x)); } // ECMA 262 - 15.8.2.11 function MathMax(arg1, arg2) { // length == 2 var length = %_ArgumentsLength(); if (length == 2) { - if (!IS_NUMBER(arg1)) arg1 = NonNumberToNumber(arg1); - if (!IS_NUMBER(arg2)) arg2 = NonNumberToNumber(arg2); + arg1 = TO_NUMBER_INLINE(arg1); + arg2 = TO_NUMBER_INLINE(arg2); if (arg2 > arg1) return arg2; if (arg1 > arg2) return arg1; if (arg1 == arg2) { @@ -131,9 +122,9 @@ function MathMax(arg1, arg2) { // length == 2 return (arg1 == 0 && !%_IsSmi(arg1) && 1 / arg1 < 0) ? arg2 : arg1; } // All comparisons failed, one of the arguments must be NaN. - return 0/0; // Compiler constant-folds this to NaN. + return NAN; } - var r = -1/0; // Compiler constant-folds this to -Infinity. + var r = -INFINITY; for (var i = 0; i < length; i++) { var n = %_Arguments(i); if (!IS_NUMBER(n)) n = NonNumberToNumber(n); @@ -151,8 +142,8 @@ function MathMax(arg1, arg2) { // length == 2 function MathMin(arg1, arg2) { // length == 2 var length = %_ArgumentsLength(); if (length == 2) { - if (!IS_NUMBER(arg1)) arg1 = NonNumberToNumber(arg1); - if (!IS_NUMBER(arg2)) arg2 = NonNumberToNumber(arg2); + arg1 = TO_NUMBER_INLINE(arg1); + arg2 = TO_NUMBER_INLINE(arg2); if (arg2 > arg1) return arg1; if (arg1 > arg2) return arg2; if (arg1 == arg2) { @@ -161,9 +152,9 @@ function MathMin(arg1, arg2) { // length == 2 return (arg1 == 0 && !%_IsSmi(arg1) && 1 / arg1 < 0) ? arg1 : arg2; } // All comparisons failed, one of the arguments must be NaN. - return 0/0; // Compiler constant-folds this to NaN. + return NAN; } - var r = 1/0; // Compiler constant-folds this to Infinity. 
+ var r = INFINITY; for (var i = 0; i < length; i++) { var n = %_Arguments(i); if (!IS_NUMBER(n)) n = NonNumberToNumber(n); @@ -179,9 +170,7 @@ function MathMin(arg1, arg2) { // length == 2 // ECMA 262 - 15.8.2.13 function MathPow(x, y) { - if (!IS_NUMBER(x)) x = NonNumberToNumber(x); - if (!IS_NUMBER(y)) y = NonNumberToNumber(y); - return %_MathPow(x, y); + return %_MathPow(TO_NUMBER_INLINE(x), TO_NUMBER_INLINE(y)); } // ECMA 262 - 15.8.2.14 @@ -191,33 +180,27 @@ function MathRandom() { // ECMA 262 - 15.8.2.15 function MathRound(x) { - if (!IS_NUMBER(x)) x = NonNumberToNumber(x); - return %RoundNumber(x); + return %RoundNumber(TO_NUMBER_INLINE(x)); } // ECMA 262 - 15.8.2.16 function MathSin(x) { - if (!IS_NUMBER(x)) x = NonNumberToNumber(x); - return %_MathSin(x); + return %_MathSin(TO_NUMBER_INLINE(x)); } // ECMA 262 - 15.8.2.17 function MathSqrt(x) { - if (!IS_NUMBER(x)) x = NonNumberToNumber(x); - return %_MathSqrt(x); + return %_MathSqrt(TO_NUMBER_INLINE(x)); } // ECMA 262 - 15.8.2.18 function MathTan(x) { - if (!IS_NUMBER(x)) x = NonNumberToNumber(x); - return %_MathTan(x); + return %_MathTan(TO_NUMBER_INLINE(x)); } // Non-standard extension. function MathImul(x, y) { - if (!IS_NUMBER(x)) x = NonNumberToNumber(x); - if (!IS_NUMBER(y)) y = NonNumberToNumber(y); - return %NumberImul(x, y); + return %NumberImul(TO_NUMBER_INLINE(x), TO_NUMBER_INLINE(y)); } diff --git a/deps/v8/src/messages.js b/deps/v8/src/messages.js index 2debbf8654..0a301228d7 100644 --- a/deps/v8/src/messages.js +++ b/deps/v8/src/messages.js @@ -796,7 +796,7 @@ function CallSite(receiver, fun, pos, strict_mode) { } function CallSiteGetThis() { - return this[CallSiteStrictModeKey] ? void 0 : this[CallSiteReceiverKey]; + return this[CallSiteStrictModeKey] ? UNDEFINED : this[CallSiteReceiverKey]; } function CallSiteGetTypeName() { @@ -826,7 +826,7 @@ function CallSiteGetScriptNameOrSourceURL() { } function CallSiteGetFunction() { - return this[CallSiteStrictModeKey] ? void 0 : this[CallSiteFunctionKey]; + return this[CallSiteStrictModeKey] ? UNDEFINED : this[CallSiteFunctionKey]; } function CallSiteGetFunctionName() { @@ -1092,7 +1092,7 @@ function FormatStackTrace(obj, error_string, frames) { var array = []; %MoveArrayContents(frames, array); formatting_custom_stack_trace = true; - var stack_trace = void 0; + var stack_trace = UNDEFINED; try { stack_trace = $Error.prepareStackTrace(obj, array); } catch (e) { @@ -1160,7 +1160,7 @@ function captureStackTrace(obj, cons_opt) { // Turn this accessor into a data property. %DefineOrRedefineDataProperty(obj, 'stack', result, NONE); // Release context values. - stack = error_string = void 0; + stack = error_string = UNDEFINED; return result; }; @@ -1171,7 +1171,7 @@ function captureStackTrace(obj, cons_opt) { %DefineOrRedefineDataProperty(this, 'stack', v, NONE); if (this === obj) { // Release context values if holder is the same as the receiver. - stack = error_string = void 0; + stack = error_string = UNDEFINED; } }; @@ -1213,7 +1213,7 @@ function SetUpError() { // Define all the expected properties directly on the error // object. This avoids going through getters and setters defined // on prototype objects. 
- %IgnoreAttributesAndSetProperty(this, 'stack', void 0, DONT_ENUM); + %IgnoreAttributesAndSetProperty(this, 'stack', UNDEFINED, DONT_ENUM); if (!IS_UNDEFINED(m)) { %IgnoreAttributesAndSetProperty( this, 'message', ToString(m), DONT_ENUM); @@ -1251,7 +1251,7 @@ function GetPropertyWithoutInvokingMonkeyGetters(error, name) { while (error && !%HasLocalProperty(error, name)) { error = %GetPrototype(error); } - if (error === null) return void 0; + if (IS_NULL(error)) return UNDEFINED; if (!IS_OBJECT(error)) return error[name]; // If the property is an accessor on one of the predefined errors that can be // generated statically by the compiler, don't touch it. This is to address @@ -1260,11 +1260,11 @@ function GetPropertyWithoutInvokingMonkeyGetters(error, name) { if (desc && desc[IS_ACCESSOR_INDEX]) { var isName = name === "name"; if (error === $ReferenceError.prototype) - return isName ? "ReferenceError" : void 0; + return isName ? "ReferenceError" : UNDEFINED; if (error === $SyntaxError.prototype) - return isName ? "SyntaxError" : void 0; + return isName ? "SyntaxError" : UNDEFINED; if (error === $TypeError.prototype) - return isName ? "TypeError" : void 0; + return isName ? "TypeError" : UNDEFINED; } // Otherwise, read normally. return error[name]; diff --git a/deps/v8/src/mips/assembler-mips-inl.h b/deps/v8/src/mips/assembler-mips-inl.h index 2fa6804d19..de91051ed0 100644 --- a/deps/v8/src/mips/assembler-mips-inl.h +++ b/deps/v8/src/mips/assembler-mips-inl.h @@ -261,6 +261,13 @@ void RelocInfo::set_target_cell(Cell* cell, WriteBarrierMode mode) { static const int kNoCodeAgeSequenceLength = 7; + +Handle<Object> RelocInfo::code_age_stub_handle(Assembler* origin) { + UNREACHABLE(); // This should never be reached on MIPS. + return Handle<Object>(); +} + + Code* RelocInfo::code_age_stub() { ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE); return Code::GetCodeFromTargetAddress( diff --git a/deps/v8/src/mips/assembler-mips.cc b/deps/v8/src/mips/assembler-mips.cc index 345b642454..0972a8295c 100644 --- a/deps/v8/src/mips/assembler-mips.cc +++ b/deps/v8/src/mips/assembler-mips.cc @@ -48,6 +48,7 @@ bool CpuFeatures::initialized_ = false; #endif unsigned CpuFeatures::supported_ = 0; unsigned CpuFeatures::found_by_runtime_probing_only_ = 0; +unsigned CpuFeatures::cross_compile_ = 0; ExternalReference ExternalReference::cpu_features() { diff --git a/deps/v8/src/mips/assembler-mips.h b/deps/v8/src/mips/assembler-mips.h index cb0896a8de..2468c3c340 100644 --- a/deps/v8/src/mips/assembler-mips.h +++ b/deps/v8/src/mips/assembler-mips.h @@ -72,18 +72,25 @@ namespace internal { // Core register. struct Register { static const int kNumRegisters = v8::internal::kNumRegisters; - static const int kMaxNumAllocatableRegisters = 14; // v0 through t7. + static const int kMaxNumAllocatableRegisters = 14; // v0 through t6 and cp. static const int kSizeInBytes = 4; + static const int kCpRegister = 23; // cp (s7) is the 23rd register. inline static int NumAllocatableRegisters(); static int ToAllocationIndex(Register reg) { - return reg.code() - 2; // zero_reg and 'at' are skipped. + ASSERT((reg.code() - 2) < (kMaxNumAllocatableRegisters - 1) || + reg.is(from_code(kCpRegister))); + return reg.is(from_code(kCpRegister)) ? + kMaxNumAllocatableRegisters - 1 : // Return last index for 'cp'. + reg.code() - 2; // zero_reg and 'at' are skipped. } static Register FromAllocationIndex(int index) { ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters); - return from_code(index + 2); // zero_reg and 'at' are skipped.
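// A stand-alone sketch of the allocation-index mapping this Register hunk
// introduces (continued just below; plain ints stand in for Register values,
// and the constants mirror the diff): codes 2..14 map to indices 0..12, and
// the context register cp (s7, code 23) is folded in as the last
// allocatable index.
#include <cassert>
namespace sketch {
const int kCpRegister = 23;                 // cp (s7) is the 23rd register.
const int kMaxNumAllocatableRegisters = 14;
int ToAllocationIndex(int code) {
  if (code == kCpRegister) return kMaxNumAllocatableRegisters - 1;
  assert(code >= 2 && code - 2 < kMaxNumAllocatableRegisters - 1);
  return code - 2;                          // zero_reg and 'at' are skipped.
}
int FromAllocationIndex(int index) {
  assert(index >= 0 && index < kMaxNumAllocatableRegisters);
  return index == kMaxNumAllocatableRegisters - 1 ? kCpRegister : index + 2;
}
}  // namespace sketch
int main() {
  // Every allocation index must round-trip through the register code.
  for (int i = 0; i < sketch::kMaxNumAllocatableRegisters; ++i)
    assert(sketch::ToAllocationIndex(sketch::FromAllocationIndex(i)) == i);
  return 0;
}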
+ return index == kMaxNumAllocatableRegisters - 1 ? + from_code(kCpRegister) : // Last index is always the 'cp' register. + from_code(index + 2); // zero_reg and 'at' are skipped. } static const char* AllocationIndexToString(int index) { @@ -102,7 +109,7 @@ struct Register { "t4", "t5", "t6", - "t7", + "s7", }; return names[index]; } @@ -404,28 +411,49 @@ class CpuFeatures : public AllStatic { // Check whether a feature is supported by the target CPU. static bool IsSupported(CpuFeature f) { ASSERT(initialized_); - return (supported_ & (1u << f)) != 0; + return Check(f, supported_); } static bool IsFoundByRuntimeProbingOnly(CpuFeature f) { ASSERT(initialized_); - return (found_by_runtime_probing_only_ & - (static_cast<uint64_t>(1) << f)) != 0; + return Check(f, found_by_runtime_probing_only_); } static bool IsSafeForSnapshot(CpuFeature f) { - return (IsSupported(f) && + return Check(f, cross_compile_) || + (IsSupported(f) && (!Serializer::enabled() || !IsFoundByRuntimeProbingOnly(f))); } + static bool VerifyCrossCompiling() { + return cross_compile_ == 0; + } + + static bool VerifyCrossCompiling(CpuFeature f) { + unsigned mask = flag2set(f); + return cross_compile_ == 0 || + (cross_compile_ & mask) == mask; + } + private: + static bool Check(CpuFeature f, unsigned set) { + return (set & flag2set(f)) != 0; + } + + static unsigned flag2set(CpuFeature f) { + return 1u << f; + } + #ifdef DEBUG static bool initialized_; #endif static unsigned supported_; static unsigned found_by_runtime_probing_only_; + static unsigned cross_compile_; + friend class ExternalReference; + friend class PlatformFeatureScope; DISALLOW_COPY_AND_ASSIGN(CpuFeatures); }; diff --git a/deps/v8/src/mips/builtins-mips.cc b/deps/v8/src/mips/builtins-mips.cc index 3aabd97b97..0b495831b9 100644 --- a/deps/v8/src/mips/builtins-mips.cc +++ b/deps/v8/src/mips/builtins-mips.cc @@ -201,14 +201,12 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) { Register argument = a2; Label not_cached, argument_is_string; - NumberToStringStub::GenerateLookupNumberStringCache( - masm, - a0, // Input. - argument, // Result. - a3, // Scratch. - t0, // Scratch. - t1, // Scratch. - &not_cached); + __ LookupNumberStringCache(a0, // Input. + argument, // Result. + a3, // Scratch. + t0, // Scratch. + t1, // Scratch.
+ &not_cached); __ IncrementCounter(counters->string_ctor_cached_number(), 1, a3, t0); __ bind(&argument_is_string); @@ -833,14 +831,15 @@ static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) { // The following registers must be saved and restored when calling through to // the runtime: // a0 - contains return address (beginning of patch sequence) - // a1 - function object + // a1 - isolate RegList saved_regs = (a0.bit() | a1.bit() | ra.bit() | fp.bit()) & ~sp.bit(); FrameScope scope(masm, StackFrame::MANUAL); __ MultiPush(saved_regs); - __ PrepareCallCFunction(1, 0, a1); + __ PrepareCallCFunction(1, 0, a2); + __ li(a1, Operand(ExternalReference::isolate_address(masm->isolate()))); __ CallCFunction( - ExternalReference::get_make_code_young_function(masm->isolate()), 1); + ExternalReference::get_make_code_young_function(masm->isolate()), 2); __ MultiPop(saved_regs); __ Jump(a0); } @@ -858,6 +857,49 @@ CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR) #undef DEFINE_CODE_AGE_BUILTIN_GENERATOR +void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) { + // For now, as in GenerateMakeCodeYoungAgainCommon, we are relying on the fact + // that make_code_young doesn't do any garbage collection which allows us to + // save/restore the registers without worrying about which of them contain + // pointers. + + __ mov(a0, ra); + // Adjust a0 to point to the head of the PlatformCodeAge sequence + __ Subu(a0, a0, + Operand((kNoCodeAgeSequenceLength - 1) * Assembler::kInstrSize)); + // Restore the original return address of the function + __ mov(ra, at); + + // The following registers must be saved and restored when calling through to + // the runtime: + // a0 - contains return address (beginning of patch sequence) + // a1 - isolate + RegList saved_regs = + (a0.bit() | a1.bit() | ra.bit() | fp.bit()) & ~sp.bit(); + FrameScope scope(masm, StackFrame::MANUAL); + __ MultiPush(saved_regs); + __ PrepareCallCFunction(1, 0, a2); + __ li(a1, Operand(ExternalReference::isolate_address(masm->isolate()))); + __ CallCFunction( + ExternalReference::get_mark_code_as_executed_function(masm->isolate()), + 2); + __ MultiPop(saved_regs); + + // Perform prologue operations usually performed by the young code stub. + __ Push(ra, fp, cp, a1); + __ Addu(fp, sp, Operand(2 * kPointerSize)); + + // Jump to point after the code-age stub. + __ Addu(a0, a0, Operand((kNoCodeAgeSequenceLength) * Assembler::kInstrSize)); + __ Jump(a0); +} + + +void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) { + GenerateMakeCodeYoungAgainCommon(masm); +} + + void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) { { FrameScope scope(masm, StackFrame::INTERNAL); @@ -925,23 +967,6 @@ void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) { } -void Builtins::Generate_NotifyOSR(MacroAssembler* masm) { - // For now, we are relying on the fact that Runtime::NotifyOSR - // doesn't do any garbage collection which allows us to save/restore - // the registers without worrying about which of them contain - // pointers. This seems a bit fragile. - RegList saved_regs = - (kJSCallerSaved | kCalleeSaved | ra.bit() | fp.bit()) & ~sp.bit(); - __ MultiPush(saved_regs); - { - FrameScope scope(masm, StackFrame::INTERNAL); - __ CallRuntime(Runtime::kNotifyOSR, 0); - } - __ MultiPop(saved_regs); - __ Ret(); -} - - void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) { // Lookup the function in the JavaScript frame.
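// A simplified model of the code-aging scheme these builtins implement
// (hypothetical type; the real mechanism patches the generated prologue and
// calls back into the runtime, as in the hunks above): each GC ages a code
// object one step, executing it makes it young again, and code that stays
// old long enough becomes a candidate for flushing.
#include <cassert>
struct CodeAgeModel {
  static const int kMaxAge = 5;   // assumed flushing threshold
  int age = 0;                    // 0 == young
  void OnGarbageCollection() { if (age < kMaxAge) ++age; }
  void OnExecuted() { age = 0; }  // "mark code as executed" / make young again
  bool IsFlushingCandidate() const { return age == kMaxAge; }
};
int main() {
  CodeAgeModel code;
  for (int i = 0; i < CodeAgeModel::kMaxAge; ++i) code.OnGarbageCollection();
  assert(code.IsFlushingCandidate());
  code.OnExecuted();              // the patched prologue reports the execution
  assert(!code.IsFlushingCandidate());
  return 0;
}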
__ lw(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); @@ -984,6 +1009,23 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) { } +void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) { + // We check the stack limit as indicator that recompilation might be done. + Label ok; + __ LoadRoot(at, Heap::kStackLimitRootIndex); + __ Branch(&ok, hs, sp, Operand(at)); + { + FrameScope scope(masm, StackFrame::INTERNAL); + __ CallRuntime(Runtime::kStackGuard, 0); + } + __ Jump(masm->isolate()->builtins()->OnStackReplacement(), + RelocInfo::CODE_TARGET); + + __ bind(&ok); + __ Ret(); +} + + void Builtins::Generate_FunctionCall(MacroAssembler* masm) { // 1. Make sure we have at least one argument. // a0: actual number of arguments diff --git a/deps/v8/src/mips/code-stubs-mips.cc b/deps/v8/src/mips/code-stubs-mips.cc index 0589bf0162..e334b2896e 100644 --- a/deps/v8/src/mips/code-stubs-mips.cc +++ b/deps/v8/src/mips/code-stubs-mips.cc @@ -60,6 +60,17 @@ void ToNumberStub::InitializeInterfaceDescriptor( } +void NumberToStringStub::InitializeInterfaceDescriptor( + Isolate* isolate, + CodeStubInterfaceDescriptor* descriptor) { + static Register registers[] = { a0 }; + descriptor->register_param_count_ = 1; + descriptor->register_params_ = registers; + descriptor->deoptimization_handler_ = + Runtime::FunctionForId(Runtime::kNumberToString)->entry; +} + + void FastCloneShallowArrayStub::InitializeInterfaceDescriptor( Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { @@ -78,7 +89,7 @@ void FastCloneShallowObjectStub::InitializeInterfaceDescriptor( descriptor->register_param_count_ = 4; descriptor->register_params_ = registers; descriptor->deoptimization_handler_ = - Runtime::FunctionForId(Runtime::kCreateObjectLiteralShallow)->entry; + Runtime::FunctionForId(Runtime::kCreateObjectLiteral)->entry; } @@ -171,7 +182,7 @@ static void InitializeArrayConstructorDescriptor( descriptor->register_param_count_ = 2; if (constant_stack_parameter_count != 0) { // stack param count needs (constructor pointer, and single argument) - descriptor->stack_parameter_count_ = &a0; + descriptor->stack_parameter_count_ = a0; } descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count; descriptor->register_params_ = registers; @@ -193,7 +204,7 @@ static void InitializeInternalArrayConstructorDescriptor( if (constant_stack_parameter_count != 0) { // Stack param count needs (constructor pointer, and single argument). - descriptor->stack_parameter_count_ = &a0; + descriptor->stack_parameter_count_ = a0; } descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count; descriptor->register_params_ = registers; @@ -536,23 +547,27 @@ void DoubleToIStub::Generate(MacroAssembler* masm) { GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch); Register scratch3 = GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch, scratch2); - DoubleRegister double_scratch = kLithiumScratchDouble.low(); - DoubleRegister double_input = f12; + DoubleRegister double_scratch = kLithiumScratchDouble; __ Push(scratch, scratch2, scratch3); - __ ldc1(double_input, MemOperand(input_reg, double_offset)); - if (!skip_fastpath()) { + // Load double input. + __ ldc1(double_scratch, MemOperand(input_reg, double_offset)); + // Clear cumulative exception flags and save the FCSR. __ cfc1(scratch2, FCSR); __ ctc1(zero_reg, FCSR); + // Try a conversion to a signed integer. 
- __ trunc_w_d(double_scratch, double_input); + __ Trunc_w_d(double_scratch, double_scratch); + // Move the converted value into the result register. __ mfc1(result_reg, double_scratch); + // Retrieve and restore the FCSR. __ cfc1(scratch, FCSR); __ ctc1(scratch2, FCSR); + // Check for overflow and NaNs. __ And( scratch, scratch, @@ -565,7 +580,9 @@ void DoubleToIStub::Generate(MacroAssembler* masm) { // Load the double value and perform a manual truncation. Register input_high = scratch2; Register input_low = scratch3; - __ Move(input_low, input_high, double_input); + + __ lw(input_low, MemOperand(input_reg, double_offset)); + __ lw(input_high, MemOperand(input_reg, double_offset + kIntSize)); Label normal_exponent, restore_sign; // Extract the biased exponent in result. @@ -994,105 +1011,6 @@ static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm, } -void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm, - Register object, - Register result, - Register scratch1, - Register scratch2, - Register scratch3, - Label* not_found) { - // Use of registers. Register result is used as a temporary. - Register number_string_cache = result; - Register mask = scratch3; - - // Load the number string cache. - __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex); - - // Make the hash mask from the length of the number string cache. It - // contains two elements (number and string) for each cache entry. - __ lw(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset)); - // Divide length by two (length is a smi). - __ sra(mask, mask, kSmiTagSize + 1); - __ Addu(mask, mask, -1); // Make mask. - - // Calculate the entry in the number string cache. The hash value in the - // number string cache for smis is just the smi value, and the hash for - // doubles is the xor of the upper and lower words. See - // Heap::GetNumberStringCache. - Isolate* isolate = masm->isolate(); - Label is_smi; - Label load_result_from_cache; - __ JumpIfSmi(object, &is_smi); - __ CheckMap(object, - scratch1, - Heap::kHeapNumberMapRootIndex, - not_found, - DONT_DO_SMI_CHECK); - - STATIC_ASSERT(8 == kDoubleSize); - __ Addu(scratch1, - object, - Operand(HeapNumber::kValueOffset - kHeapObjectTag)); - __ lw(scratch2, MemOperand(scratch1, kPointerSize)); - __ lw(scratch1, MemOperand(scratch1, 0)); - __ Xor(scratch1, scratch1, Operand(scratch2)); - __ And(scratch1, scratch1, Operand(mask)); - - // Calculate address of entry in string cache: each entry consists - // of two pointer sized fields. - __ sll(scratch1, scratch1, kPointerSizeLog2 + 1); - __ Addu(scratch1, number_string_cache, scratch1); - - Register probe = mask; - __ lw(probe, - FieldMemOperand(scratch1, FixedArray::kHeaderSize)); - __ JumpIfSmi(probe, not_found); - __ ldc1(f12, FieldMemOperand(object, HeapNumber::kValueOffset)); - __ ldc1(f14, FieldMemOperand(probe, HeapNumber::kValueOffset)); - __ BranchF(&load_result_from_cache, NULL, eq, f12, f14); - __ Branch(not_found); - - __ bind(&is_smi); - Register scratch = scratch1; - __ sra(scratch, object, 1); // Shift away the tag. - __ And(scratch, mask, Operand(scratch)); - - // Calculate address of entry in string cache: each entry consists - // of two pointer sized fields. - __ sll(scratch, scratch, kPointerSizeLog2 + 1); - __ Addu(scratch, number_string_cache, scratch); - - // Check if the entry is the smi we are looking for. 
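// The removed lookup code above hashes a number to a cache index: smis use
// their untagged value, doubles the xor of their two 32-bit halves, both
// masked by the cache size. The double case as stand-alone C++ (the mask
// value below is an assumption for illustration):
#include <cstdint>
#include <cstring>
uint32_t NumberStringCacheHash(double value, uint32_t mask) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof bits);  // well-defined type-pun
  uint32_t lower = static_cast<uint32_t>(bits);
  uint32_t upper = static_cast<uint32_t>(bits >> 32);
  return (lower ^ upper) & mask;            // index of the cache entry
}
int main() {
  // A 128-entry cache uses mask 127; each entry holds a (number, string)
  // pair, matching the "divide length by two" step in the code above.
  return NumberStringCacheHash(3.14, 127) > 127;  // always exits with 0
}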
- __ lw(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize)); - __ Branch(not_found, ne, object, Operand(probe)); - - // Get the result from the cache. - __ bind(&load_result_from_cache); - __ lw(result, - FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize)); - - __ IncrementCounter(isolate->counters()->number_to_string_native(), - 1, - scratch1, - scratch2); -} - - -void NumberToStringStub::Generate(MacroAssembler* masm) { - Label runtime; - - __ lw(a1, MemOperand(sp, 0)); - - // Generate code to lookup number in the number string cache. - GenerateLookupNumberStringCache(masm, a1, v0, a2, a3, t0, &runtime); - __ DropAndRet(1); - - __ bind(&runtime); - // Handle number to string in the runtime system if not found in the cache. - __ TailCallRuntime(Runtime::kNumberToString, 1, 1); -} - - static void ICCompareStub_CheckInputType(MacroAssembler* masm, Register input, Register scratch, @@ -1316,958 +1234,18 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) { } -// Generates code to call a C function to do a double operation. -// This code never falls through, but returns with a heap number containing -// the result in v0. -// Register heap_number_result must be a heap number in which the -// result of the operation will be stored. -// Requires the following layout on entry: -// a0: Left value (least significant part of mantissa). -// a1: Left value (sign, exponent, top of mantissa). -// a2: Right value (least significant part of mantissa). -// a3: Right value (sign, exponent, top of mantissa). -static void CallCCodeForDoubleOperation(MacroAssembler* masm, - Token::Value op, - Register heap_number_result, - Register scratch) { - // Assert that heap_number_result is saved. - // We currently always use s0 to pass it. - ASSERT(heap_number_result.is(s0)); - - // Push the current return address before the C call. - __ push(ra); - __ PrepareCallCFunction(4, scratch); // Two doubles are 4 arguments. - { - AllowExternalCallThatCantCauseGC scope(masm); - __ CallCFunction( - ExternalReference::double_fp_operation(op, masm->isolate()), 0, 2); - } - // Store answer in the overwritable heap number. - // Double returned in register f0. - __ sdc1(f0, FieldMemOperand(heap_number_result, HeapNumber::kValueOffset)); - // Place heap_number_result in v0 and return to the pushed return address. - __ pop(ra); - __ Ret(USE_DELAY_SLOT); - __ mov(v0, heap_number_result); -} - - -void BinaryOpStub::Initialize() { - platform_specific_bit_ = true; // FPU is a base requirement for V8. -} - - -void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) { - Label get_result; - - __ Push(a1, a0); - - __ li(a2, Operand(Smi::FromInt(MinorKey()))); - __ push(a2); - - __ TailCallExternalReference( - ExternalReference(IC_Utility(IC::kBinaryOp_Patch), - masm->isolate()), - 3, - 1); -} - - -void BinaryOpStub::GenerateTypeTransitionWithSavedArgs( - MacroAssembler* masm) { - UNIMPLEMENTED(); -} - - -void BinaryOpStub_GenerateSmiSmiOperation(MacroAssembler* masm, - Token::Value op) { - Register left = a1; - Register right = a0; - - Register scratch1 = t0; - Register scratch2 = t1; - - ASSERT(right.is(a0)); - STATIC_ASSERT(kSmiTag == 0); - - Label not_smi_result; - switch (op) { - case Token::ADD: - __ AdduAndCheckForOverflow(v0, left, right, scratch1); - __ RetOnNoOverflow(scratch1); - // No need to revert anything - right and left are intact. 
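// The ADD case above (and the SUB case that follows) lean on
// AdduAndCheckForOverflow and SubuAndCheckForOverflow. A portable C++ sketch
// of the same test, standing in for the MIPS scratch-register trick: signed
// overflow occurred iff both operands have the same sign and the sum's sign
// differs from it.
#include <cassert>
#include <cstdint>
bool AddAndCheckForOverflow(int32_t left, int32_t right, int32_t* result) {
  uint32_t sum = static_cast<uint32_t>(left) + static_cast<uint32_t>(right);
  *result = static_cast<int32_t>(sum);
  // The top bit of the AND is set only if both xors flipped the sign.
  return ((left ^ *result) & (right ^ *result)) < 0;
}
int main() {
  int32_t r;
  assert(!AddAndCheckForOverflow(1, 2, &r) && r == 3);
  assert(AddAndCheckForOverflow(INT32_MAX, 1, &r));  // wraps: overflow
  return 0;
}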
- break; - case Token::SUB: - __ SubuAndCheckForOverflow(v0, left, right, scratch1); - __ RetOnNoOverflow(scratch1); - // No need to revert anything - right and left are intact. - break; - case Token::MUL: { - // Remove tag from one of the operands. This way the multiplication result - // will be a smi if it fits the smi range. - __ SmiUntag(scratch1, right); - // Do multiplication. - // lo = lower 32 bits of scratch1 * left. - // hi = higher 32 bits of scratch1 * left. - __ Mult(left, scratch1); - // Check for overflowing the smi range - no overflow if higher 33 bits of - // the result are identical. - __ mflo(scratch1); - __ mfhi(scratch2); - __ sra(scratch1, scratch1, 31); - __ Branch(&not_smi_result, ne, scratch1, Operand(scratch2)); - // Go slow on zero result to handle -0. - __ mflo(v0); - __ Ret(ne, v0, Operand(zero_reg)); - // We need -0 if we were multiplying a negative number with 0 to get 0. - // We know one of them was zero. - __ Addu(scratch2, right, left); - Label skip; - // ARM uses the 'pl' condition, which is 'ge'. - // Negating it results in 'lt'. - __ Branch(&skip, lt, scratch2, Operand(zero_reg)); - ASSERT(Smi::FromInt(0) == 0); - __ Ret(USE_DELAY_SLOT); - __ mov(v0, zero_reg); // Return smi 0 if the non-zero one was positive. - __ bind(&skip); - // We fall through here if we multiplied a negative number with 0, because - // that would mean we should produce -0. - } - break; - case Token::DIV: { - Label done; - __ SmiUntag(scratch2, right); - __ SmiUntag(scratch1, left); - __ Div(scratch1, scratch2); - // A minor optimization: div may be calculated asynchronously, so we check - // for division by zero before getting the result. - __ Branch(&not_smi_result, eq, scratch2, Operand(zero_reg)); - // If the result is 0, we need to make sure the divisor (right) is - // positive, otherwise it is a -0 case. - // Quotient is in 'lo', remainder is in 'hi'. - // Check for no remainder first. - __ mfhi(scratch1); - __ Branch(&not_smi_result, ne, scratch1, Operand(zero_reg)); - __ mflo(scratch1); - __ Branch(&done, ne, scratch1, Operand(zero_reg)); - __ Branch(&not_smi_result, lt, scratch2, Operand(zero_reg)); - __ bind(&done); - // Check that the signed result fits in a Smi. - __ Addu(scratch2, scratch1, Operand(0x40000000)); - __ Branch(&not_smi_result, lt, scratch2, Operand(zero_reg)); - __ Ret(USE_DELAY_SLOT); // SmiTag emits one instruction in delay slot. - __ SmiTag(v0, scratch1); - } - break; - case Token::MOD: { - Label done; - __ SmiUntag(scratch2, right); - __ SmiUntag(scratch1, left); - __ Div(scratch1, scratch2); - // A minor optimization: div may be calculated asynchronously, so we check - // for division by 0 before calling mfhi. - // Check for zero on the right hand side. - __ Branch(&not_smi_result, eq, scratch2, Operand(zero_reg)); - // If the result is 0, we need to make sure the dividend (left) is - // positive (or 0), otherwise it is a -0 case. - // Remainder is in 'hi'. - __ mfhi(scratch2); - __ Branch(&done, ne, scratch2, Operand(zero_reg)); - __ Branch(&not_smi_result, lt, scratch1, Operand(zero_reg)); - __ bind(&done); - // Check that the signed result fits in a Smi. - __ Addu(scratch1, scratch2, Operand(0x40000000)); - __ Branch(&not_smi_result, lt, scratch1, Operand(zero_reg)); - __ Ret(USE_DELAY_SLOT); // SmiTag emits one instruction in delay slot.
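// The DIV and MOD fast paths above bail out whenever a zero result would
// really be JavaScript's -0, which a tagged smi integer cannot represent.
// A small stand-alone illustration of that corner case:
#include <cassert>
#include <cmath>
int main() {
  double quotient = 0.0 / -5.0;    // JavaScript: 0 / -5 evaluates to -0
  assert(quotient == 0.0);         // -0 compares equal to 0...
  assert(std::signbit(quotient));  // ...but carries a sign bit
  // A smi result would lose that sign, so the stub leaves the fast path
  // whenever the quotient is 0 and the divisor (or dividend, for MOD) is
  // negative, producing a -0 heap number instead.
  return 0;
}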
- __ SmiTag(v0, scratch2); - } - break; - case Token::BIT_OR: - __ Ret(USE_DELAY_SLOT); - __ or_(v0, left, right); - break; - case Token::BIT_AND: - __ Ret(USE_DELAY_SLOT); - __ and_(v0, left, right); - break; - case Token::BIT_XOR: - __ Ret(USE_DELAY_SLOT); - __ xor_(v0, left, right); - break; - case Token::SAR: - // Remove tags from right operand. - __ GetLeastBitsFromSmi(scratch1, right, 5); - __ srav(scratch1, left, scratch1); - // Smi tag result. - __ And(v0, scratch1, ~kSmiTagMask); - __ Ret(); - break; - case Token::SHR: - // Remove tags from operands. We can't do this on a 31 bit number - // because then the 0s get shifted into bit 30 instead of bit 31. - __ SmiUntag(scratch1, left); - __ GetLeastBitsFromSmi(scratch2, right, 5); - __ srlv(v0, scratch1, scratch2); - // Unsigned shift is not allowed to produce a negative number, so - // check the sign bit and the sign bit after Smi tagging. - __ And(scratch1, v0, Operand(0xc0000000)); - __ Branch(&not_smi_result, ne, scratch1, Operand(zero_reg)); - // Smi tag result. - __ Ret(USE_DELAY_SLOT); // SmiTag emits one instruction in delay slot. - __ SmiTag(v0); - break; - case Token::SHL: - // Remove tags from operands. - __ SmiUntag(scratch1, left); - __ GetLeastBitsFromSmi(scratch2, right, 5); - __ sllv(scratch1, scratch1, scratch2); - // Check that the signed result fits in a Smi. - __ Addu(scratch2, scratch1, Operand(0x40000000)); - __ Branch(&not_smi_result, lt, scratch2, Operand(zero_reg)); - __ Ret(USE_DELAY_SLOT); - __ SmiTag(v0, scratch1); // SmiTag emits one instruction in delay slot. - break; - default: - UNREACHABLE(); - } - __ bind(&not_smi_result); -} - - -void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm, - Register result, - Register heap_number_map, - Register scratch1, - Register scratch2, - Label* gc_required, - OverwriteMode mode); - - -void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm, - BinaryOpIC::TypeInfo left_type, - BinaryOpIC::TypeInfo right_type, - bool smi_operands, - Label* not_numbers, - Label* gc_required, - Label* miss, - Token::Value op, - OverwriteMode mode) { - Register left = a1; - Register right = a0; - Register scratch1 = t3; - Register scratch2 = t5; - - ASSERT(smi_operands || (not_numbers != NULL)); - if (smi_operands) { - __ AssertSmi(left); - __ AssertSmi(right); - } - if (left_type == BinaryOpIC::SMI) { - __ JumpIfNotSmi(left, miss); - } - if (right_type == BinaryOpIC::SMI) { - __ JumpIfNotSmi(right, miss); - } - - Register heap_number_map = t2; - __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); - - switch (op) { - case Token::ADD: - case Token::SUB: - case Token::MUL: - case Token::DIV: - case Token::MOD: { - // Allocate new heap number for result. - Register result = s0; - BinaryOpStub_GenerateHeapResultAllocation( - masm, result, heap_number_map, scratch1, scratch2, gc_required, mode); - - // Load left and right operands into f12 and f14. - if (smi_operands) { - __ SmiUntag(scratch1, a0); - __ mtc1(scratch1, f14); - __ cvt_d_w(f14, f14); - __ SmiUntag(scratch1, a1); - __ mtc1(scratch1, f12); - __ cvt_d_w(f12, f12); - } else { - // Load right operand to f14. - if (right_type == BinaryOpIC::INT32) { - __ LoadNumberAsInt32Double( - right, f14, heap_number_map, scratch1, scratch2, f2, miss); - } else { - Label* fail = (right_type == BinaryOpIC::NUMBER) ? miss : not_numbers; - __ LoadNumber(right, f14, heap_number_map, scratch1, fail); - } - // Load left operand to f12 or a0/a1. This keeps a0/a1 intact if it - // jumps to |miss|.
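// The SHR case above is the only shift that can produce a value outside the
// smi range: a logical shift of a value with the top bit set (for example by
// zero positions) stays above the largest non-negative smi. A sketch of the
// range check behind the 0xc0000000 mask:
#include <cassert>
#include <cstdint>
int main() {
  uint32_t operand = 0x80000000u;   // untagged input with the top bit set
  uint32_t shifted = operand >> 0;  // JavaScript: operand >>> 0
  // With a one-bit tag, 32-bit smis hold signed values in
  // [-2^30, 2^30 - 1], so a non-negative result must clear the top two bits.
  bool fits_smi = (shifted & 0xc0000000u) == 0;
  assert(!fits_smi);                // must fall back to a heap number
  return 0;
}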
- if (left_type == BinaryOpIC::INT32) { - __ LoadNumberAsInt32Double( - left, f12, heap_number_map, scratch1, scratch2, f2, miss); - } else { - Label* fail = (left_type == BinaryOpIC::NUMBER) ? miss : not_numbers; - __ LoadNumber(left, f12, heap_number_map, scratch1, fail); - } - } - - // Calculate the result. - if (op != Token::MOD) { - // Using FPU registers: - // f12: Left value. - // f14: Right value. - switch (op) { - case Token::ADD: - __ add_d(f10, f12, f14); - break; - case Token::SUB: - __ sub_d(f10, f12, f14); - break; - case Token::MUL: - __ mul_d(f10, f12, f14); - break; - case Token::DIV: - __ div_d(f10, f12, f14); - break; - default: - UNREACHABLE(); - } - - // ARM uses a workaround here because of the unaligned HeapNumber - // kValueOffset. On MIPS this workaround is built into sdc1 so - // there's no point in generating even more instructions. - __ sdc1(f10, FieldMemOperand(result, HeapNumber::kValueOffset)); - __ Ret(USE_DELAY_SLOT); - __ mov(v0, result); - } else { - // Call the C function to handle the double operation. - CallCCodeForDoubleOperation(masm, op, result, scratch1); - if (FLAG_debug_code) { - __ stop("Unreachable code."); - } - } - break; - } - case Token::BIT_OR: - case Token::BIT_XOR: - case Token::BIT_AND: - case Token::SAR: - case Token::SHR: - case Token::SHL: { - if (smi_operands) { - __ SmiUntag(a3, left); - __ SmiUntag(a2, right); - } else { - // Convert operands to 32-bit integers. Right in a2 and left in a3. - __ TruncateNumberToI(left, a3, heap_number_map, scratch1, not_numbers); - __ TruncateNumberToI(right, a2, heap_number_map, scratch1, not_numbers); - } - Label result_not_a_smi; - switch (op) { - case Token::BIT_OR: - __ Or(a2, a3, Operand(a2)); - break; - case Token::BIT_XOR: - __ Xor(a2, a3, Operand(a2)); - break; - case Token::BIT_AND: - __ And(a2, a3, Operand(a2)); - break; - case Token::SAR: - // Use only the 5 least significant bits of the shift count. - __ GetLeastBitsFromInt32(a2, a2, 5); - __ srav(a2, a3, a2); - break; - case Token::SHR: - // Use only the 5 least significant bits of the shift count. - __ GetLeastBitsFromInt32(a2, a2, 5); - __ srlv(a2, a3, a2); - // SHR is special because it is required to produce a positive answer. - // The code below for writing into heap numbers isn't capable of - // writing the register as an unsigned int so we go to slow case if we - // hit this case. - __ Branch(&result_not_a_smi, lt, a2, Operand(zero_reg)); - break; - case Token::SHL: - // Use only the 5 least significant bits of the shift count. - __ GetLeastBitsFromInt32(a2, a2, 5); - __ sllv(a2, a3, a2); - break; - default: - UNREACHABLE(); - } - // Check that the *signed* result fits in a smi. - __ Addu(a3, a2, Operand(0x40000000)); - __ Branch(&result_not_a_smi, lt, a3, Operand(zero_reg)); - __ Ret(USE_DELAY_SLOT); // SmiTag emits one instruction in delay slot. - __ SmiTag(v0, a2); - - // Allocate new heap number for result. - __ bind(&result_not_a_smi); - Register result = t1; - if (smi_operands) { - __ AllocateHeapNumber( - result, scratch1, scratch2, heap_number_map, gc_required); - } else { - BinaryOpStub_GenerateHeapResultAllocation( - masm, result, heap_number_map, scratch1, scratch2, gc_required, - mode); - } - - // a2: Answer as signed int32. - // t1: Heap number to write answer into. - - // Nothing can go wrong now, so move the heap number to v0, which is the - // result. - __ mov(v0, t1); - // Convert the int32 in a2 to the heap number in a0. As - // mentioned above SHR needs to always produce a positive result. 
- __ mtc1(a2, f0); - if (op == Token::SHR) { - __ Cvt_d_uw(f0, f0, f22); - } else { - __ cvt_d_w(f0, f0); - } - // ARM uses a workaround here because of the unaligned HeapNumber - // kValueOffset. On MIPS this workaround is built into sdc1 so - // there's no point in generating even more instructions. - __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset)); - __ Ret(); - break; - } - default: - UNREACHABLE(); - } -} - - -// Generate the smi code. If the operation on smis is successful, this return is -// generated. If the result is not a smi and heap number allocation is not -// requested the code falls through. If number allocation is requested but a -// heap number cannot be allocated the code jumps to the label gc_required. -void BinaryOpStub_GenerateSmiCode( - MacroAssembler* masm, - Label* use_runtime, - Label* gc_required, - Token::Value op, - BinaryOpStub::SmiCodeGenerateHeapNumberResults allow_heapnumber_results, - OverwriteMode mode) { - Label not_smis; - - Register left = a1; - Register right = a0; - Register scratch1 = t3; - - // Perform combined smi check on both operands. - __ Or(scratch1, left, Operand(right)); - STATIC_ASSERT(kSmiTag == 0); - __ JumpIfNotSmi(scratch1, &not_smis); - - // If the smi-smi operation results in a smi, a return is generated. - BinaryOpStub_GenerateSmiSmiOperation(masm, op); - - // If heap number results are possible, generate the result in an allocated - // heap number. - if (allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS) { - BinaryOpStub_GenerateFPOperation( - masm, BinaryOpIC::UNINITIALIZED, BinaryOpIC::UNINITIALIZED, true, - use_runtime, gc_required, &not_smis, op, mode); - } - __ bind(&not_smis); -} - - -void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) { - Label right_arg_changed, call_runtime; - - if (op_ == Token::MOD && encoded_right_arg_.has_value) { - // It is guaranteed that the value will fit into a Smi, because if it - // didn't, we wouldn't be here, see BinaryOp_Patch. - __ Branch(&right_arg_changed, - ne, - a0, - Operand(Smi::FromInt(fixed_right_arg_value()))); - } - - if (result_type_ == BinaryOpIC::UNINITIALIZED || - result_type_ == BinaryOpIC::SMI) { - // Only allow smi results. - BinaryOpStub_GenerateSmiCode( - masm, &call_runtime, NULL, op_, NO_HEAPNUMBER_RESULTS, mode_); - } else { - // Allow heap number result and don't make a transition if a heap number - // cannot be allocated. - BinaryOpStub_GenerateSmiCode( - masm, &call_runtime, &call_runtime, op_, ALLOW_HEAPNUMBER_RESULTS, - mode_); - } - - // Code falls through if the result is not returned as either a smi or heap - // number. - __ bind(&right_arg_changed); - GenerateTypeTransition(masm); - - __ bind(&call_runtime); - { - FrameScope scope(masm, StackFrame::INTERNAL); - GenerateRegisterArgsPush(masm); - GenerateCallRuntime(masm); - } - __ Ret(); -} - - -void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) { - Label call_runtime; - ASSERT(left_type_ == BinaryOpIC::STRING && right_type_ == BinaryOpIC::STRING); - ASSERT(op_ == Token::ADD); - // If both arguments are strings, call the string add stub. - // Otherwise, do a transition. - - // Registers containing left and right operands respectively. - Register left = a1; - Register right = a0; - - // Test if left operand is a string. - __ JumpIfSmi(left, &call_runtime); - __ GetObjectType(left, a2, a2); - __ Branch(&call_runtime, ge, a2, Operand(FIRST_NONSTRING_TYPE)); - - // Test if right operand is a string.
- __ JumpIfSmi(right, &call_runtime); - __ GetObjectType(right, a2, a2); - __ Branch(&call_runtime, ge, a2, Operand(FIRST_NONSTRING_TYPE)); - - StringAddStub string_add_stub( - (StringAddFlags)(STRING_ADD_CHECK_NONE | STRING_ADD_ERECT_FRAME)); - GenerateRegisterArgsPush(masm); - __ TailCallStub(&string_add_stub); - - __ bind(&call_runtime); - GenerateTypeTransition(masm); -} - - -void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { - ASSERT(Max(left_type_, right_type_) == BinaryOpIC::INT32); - - Register left = a1; - Register right = a0; - Register scratch1 = t3; - Register scratch2 = t5; - FPURegister double_scratch = f0; - FPURegister single_scratch = f6; - - Register heap_number_result = no_reg; - Register heap_number_map = t2; - __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); - - Label call_runtime; - // Labels for type transition, used for wrong input or output types. - // Both label are currently actually bound to the same position. We use two - // different label to differentiate the cause leading to type transition. - Label transition; - - // Smi-smi fast case. - Label skip; - __ Or(scratch1, left, right); - __ JumpIfNotSmi(scratch1, &skip); - BinaryOpStub_GenerateSmiSmiOperation(masm, op_); - // Fall through if the result is not a smi. - __ bind(&skip); - - switch (op_) { - case Token::ADD: - case Token::SUB: - case Token::MUL: - case Token::DIV: - case Token::MOD: { - // It could be that only SMIs have been seen at either the left - // or the right operand. For precise type feedback, patch the IC - // again if this changes. - if (left_type_ == BinaryOpIC::SMI) { - __ JumpIfNotSmi(left, &transition); - } - if (right_type_ == BinaryOpIC::SMI) { - __ JumpIfNotSmi(right, &transition); - } - // Load both operands and check that they are 32-bit integer. - // Jump to type transition if they are not. The registers a0 and a1 (right - // and left) are preserved for the runtime call. - - __ LoadNumberAsInt32Double( - right, f14, heap_number_map, scratch1, scratch2, f2, &transition); - __ LoadNumberAsInt32Double( - left, f12, heap_number_map, scratch1, scratch2, f2, &transition); - - if (op_ != Token::MOD) { - Label return_heap_number; - switch (op_) { - case Token::ADD: - __ add_d(f10, f12, f14); - break; - case Token::SUB: - __ sub_d(f10, f12, f14); - break; - case Token::MUL: - __ mul_d(f10, f12, f14); - break; - case Token::DIV: - __ div_d(f10, f12, f14); - break; - default: - UNREACHABLE(); - } - - if (result_type_ <= BinaryOpIC::INT32) { - Register except_flag = scratch2; - const FPURoundingMode kRoundingMode = op_ == Token::DIV ? - kRoundToMinusInf : kRoundToZero; - const CheckForInexactConversion kConversion = op_ == Token::DIV ? - kCheckForInexactConversion : kDontCheckForInexactConversion; - __ EmitFPUTruncate(kRoundingMode, - scratch1, - f10, - at, - f16, - except_flag, - kConversion); - // If except_flag != 0, result does not fit in a 32-bit integer. - __ Branch(&transition, ne, except_flag, Operand(zero_reg)); - // Try to tag the result as a Smi, return heap number on overflow. - __ SmiTagCheckOverflow(scratch1, scratch1, scratch2); - __ Branch(&return_heap_number, lt, scratch2, Operand(zero_reg)); - // Check for minus zero, transition in that case (because we need - // to return a heap number). 
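// The minus-zero check noted above, carried out just below via the sign bit
// of the double's upper word, has a direct portable equivalent (the bit
// pattern of -0.0 is the sign bit alone):
#include <cassert>
#include <cstdint>
#include <cstring>
bool IsMinusZero(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof bits);
  return bits == 0x8000000000000000ull;  // sign bit set, all other bits zero
}
int main() {
  assert(IsMinusZero(-0.0));
  assert(!IsMinusZero(0.0));
  assert(!IsMinusZero(-1.0));
  return 0;
}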
- Label not_zero; - ASSERT(kSmiTag == 0); - __ Branch(&not_zero, ne, scratch1, Operand(zero_reg)); - __ mfc1(scratch2, f11); - __ And(scratch2, scratch2, HeapNumber::kSignMask); - __ Branch(&transition, ne, scratch2, Operand(zero_reg)); - __ bind(&not_zero); - - __ Ret(USE_DELAY_SLOT); - __ mov(v0, scratch1); - } - - __ bind(&return_heap_number); - // Return a heap number, or fall through to type transition or runtime - // call if we can't. - // We are using FPU registers so s0 is available. - heap_number_result = s0; - BinaryOpStub_GenerateHeapResultAllocation(masm, - heap_number_result, - heap_number_map, - scratch1, - scratch2, - &call_runtime, - mode_); - __ sdc1(f10, - FieldMemOperand(heap_number_result, HeapNumber::kValueOffset)); - __ Ret(USE_DELAY_SLOT); - __ mov(v0, heap_number_result); - - // A DIV operation expecting an integer result falls through - // to type transition. - - } else { - if (encoded_right_arg_.has_value) { - __ Move(f16, fixed_right_arg_value()); - __ BranchF(&transition, NULL, ne, f14, f16); - } - - Label pop_and_call_runtime; - - // Allocate a heap number to store the result. - heap_number_result = s0; - BinaryOpStub_GenerateHeapResultAllocation(masm, - heap_number_result, - heap_number_map, - scratch1, - scratch2, - &pop_and_call_runtime, - mode_); - - // Call the C function to handle the double operation. - CallCCodeForDoubleOperation(masm, op_, heap_number_result, scratch1); - if (FLAG_debug_code) { - __ stop("Unreachable code."); - } - - __ bind(&pop_and_call_runtime); - __ Drop(2); - __ Branch(&call_runtime); - } - - break; - } - - case Token::BIT_OR: - case Token::BIT_XOR: - case Token::BIT_AND: - case Token::SAR: - case Token::SHR: - case Token::SHL: { - Label return_heap_number; - // Convert operands to 32-bit integers. Right in a2 and left in a3. The - // registers a0 and a1 (right and left) are preserved for the runtime - // call. - __ LoadNumberAsInt32( - left, a3, heap_number_map, scratch1, scratch2, f0, f2, &transition); - __ LoadNumberAsInt32( - right, a2, heap_number_map, scratch1, scratch2, f0, f2, &transition); - - // The ECMA-262 standard specifies that, for shift operations, only the - // 5 least significant bits of the shift value should be used. - switch (op_) { - case Token::BIT_OR: - __ Or(a2, a3, Operand(a2)); - break; - case Token::BIT_XOR: - __ Xor(a2, a3, Operand(a2)); - break; - case Token::BIT_AND: - __ And(a2, a3, Operand(a2)); - break; - case Token::SAR: - __ And(a2, a2, Operand(0x1f)); - __ srav(a2, a3, a2); - break; - case Token::SHR: - __ And(a2, a2, Operand(0x1f)); - __ srlv(a2, a3, a2); - // SHR is special because it is required to produce a positive answer. - // We only get a negative result if the shift value (a2) is 0. - // This result cannot be represented as a signed 32-bit integer, try - // to return a heap number if we can. - __ Branch((result_type_ <= BinaryOpIC::INT32) - ? &transition - : &return_heap_number, - lt, - a2, - Operand(zero_reg)); - break; - case Token::SHL: - __ And(a2, a2, Operand(0x1f)); - __ sllv(a2, a3, a2); - break; - default: - UNREACHABLE(); - } - - // Check if the result fits in a smi. - __ Addu(scratch1, a2, Operand(0x40000000)); - // If not try to return a heap number. (We know the result is an int32.) - __ Branch(&return_heap_number, lt, scratch1, Operand(zero_reg)); - // Tag the result and return. - __ Ret(USE_DELAY_SLOT); // SmiTag emits one instruction in delay slot.
- __ SmiTag(v0, a2); - - __ bind(&return_heap_number); - heap_number_result = t1; - BinaryOpStub_GenerateHeapResultAllocation(masm, - heap_number_result, - heap_number_map, - scratch1, - scratch2, - &call_runtime, - mode_); - - if (op_ != Token::SHR) { - // Convert the result to a floating point value. - __ mtc1(a2, double_scratch); - __ cvt_d_w(double_scratch, double_scratch); - } else { - // The result must be interpreted as an unsigned 32-bit integer. - __ mtc1(a2, double_scratch); - __ Cvt_d_uw(double_scratch, double_scratch, single_scratch); - } - - // Store the result. - __ sdc1(double_scratch, - FieldMemOperand(heap_number_result, HeapNumber::kValueOffset)); - __ Ret(USE_DELAY_SLOT); - __ mov(v0, heap_number_result); - - break; - } - - default: - UNREACHABLE(); - } - - // We never expect DIV to yield an integer result, so we always generate - // type transition code for DIV operations expecting an integer result: the - // code will fall through to this type transition. - if (transition.is_linked() || - ((op_ == Token::DIV) && (result_type_ <= BinaryOpIC::INT32))) { - __ bind(&transition); - GenerateTypeTransition(masm); - } - - __ bind(&call_runtime); - { - FrameScope scope(masm, StackFrame::INTERNAL); - GenerateRegisterArgsPush(masm); - GenerateCallRuntime(masm); - } - __ Ret(); -} - - -void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) { - Label call_runtime; - - if (op_ == Token::ADD) { - // Handle string addition here, because it is the only operation - // that does not do a ToNumber conversion on the operands. - GenerateAddStrings(masm); - } - - // Convert oddball arguments to numbers. - Label check, done; - __ LoadRoot(t0, Heap::kUndefinedValueRootIndex); - __ Branch(&check, ne, a1, Operand(t0)); - if (Token::IsBitOp(op_)) { - __ li(a1, Operand(Smi::FromInt(0))); - } else { - __ LoadRoot(a1, Heap::kNanValueRootIndex); - } - __ jmp(&done); - __ bind(&check); - __ LoadRoot(t0, Heap::kUndefinedValueRootIndex); - __ Branch(&done, ne, a0, Operand(t0)); - if (Token::IsBitOp(op_)) { - __ li(a0, Operand(Smi::FromInt(0))); - } else { - __ LoadRoot(a0, Heap::kNanValueRootIndex); - } - __ bind(&done); - - GenerateNumberStub(masm); -} - - -void BinaryOpStub::GenerateNumberStub(MacroAssembler* masm) { - Label call_runtime, transition; - BinaryOpStub_GenerateFPOperation( - masm, left_type_, right_type_, false, - &transition, &call_runtime, &transition, op_, mode_); - - __ bind(&transition); - GenerateTypeTransition(masm); - - __ bind(&call_runtime); - { - FrameScope scope(masm, StackFrame::INTERNAL); - GenerateRegisterArgsPush(masm); - GenerateCallRuntime(masm); - } - __ Ret(); -} - - -void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) { - Label call_runtime, call_string_add_or_runtime, transition; - - BinaryOpStub_GenerateSmiCode( - masm, &call_runtime, &call_runtime, op_, ALLOW_HEAPNUMBER_RESULTS, mode_); - - BinaryOpStub_GenerateFPOperation( - masm, left_type_, right_type_, false, - &call_string_add_or_runtime, &call_runtime, &transition, op_, mode_); - - __ bind(&transition); - GenerateTypeTransition(masm); - - __ bind(&call_string_add_or_runtime); - if (op_ == Token::ADD) { - GenerateAddStrings(masm); - } - - __ bind(&call_runtime); - { - FrameScope scope(masm, StackFrame::INTERNAL); - GenerateRegisterArgsPush(masm); - GenerateCallRuntime(masm); - } - __ Ret(); -} - - -void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) { - ASSERT(op_ == Token::ADD); - Label left_not_string, call_runtime; - - Register left = a1; - Register right = a0; - - // Check if left 
argument is a string. - __ JumpIfSmi(left, &left_not_string); - __ GetObjectType(left, a2, a2); - __ Branch(&left_not_string, ge, a2, Operand(FIRST_NONSTRING_TYPE)); - - StringAddStub string_add_left_stub( - (StringAddFlags)(STRING_ADD_CHECK_RIGHT | STRING_ADD_ERECT_FRAME)); - GenerateRegisterArgsPush(masm); - __ TailCallStub(&string_add_left_stub); - - // Left operand is not a string, test right. - __ bind(&left_not_string); - __ JumpIfSmi(right, &call_runtime); - __ GetObjectType(right, a2, a2); - __ Branch(&call_runtime, ge, a2, Operand(FIRST_NONSTRING_TYPE)); - - StringAddStub string_add_right_stub( - (StringAddFlags)(STRING_ADD_CHECK_LEFT | STRING_ADD_ERECT_FRAME)); - GenerateRegisterArgsPush(masm); - __ TailCallStub(&string_add_right_stub); - - // At least one argument is not a string. - __ bind(&call_runtime); -} - - -void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm, - Register result, - Register heap_number_map, - Register scratch1, - Register scratch2, - Label* gc_required, - OverwriteMode mode) { - // Code below will scratch result if allocation fails. To keep both arguments - // intact for the runtime call result cannot be one of these. - ASSERT(!result.is(a0) && !result.is(a1)); - - if (mode == OVERWRITE_LEFT || mode == OVERWRITE_RIGHT) { - Label skip_allocation, allocated; - Register overwritable_operand = mode == OVERWRITE_LEFT ? a1 : a0; - // If the overwritable operand is already an object, we skip the - // allocation of a heap number. - __ JumpIfNotSmi(overwritable_operand, &skip_allocation); - // Allocate a heap number for the result. - __ AllocateHeapNumber( - result, scratch1, scratch2, heap_number_map, gc_required); - __ Branch(&allocated); - __ bind(&skip_allocation); - // Use object holding the overwritable operand for result. - __ mov(result, overwritable_operand); - __ bind(&allocated); - } else { - ASSERT(mode == NO_OVERWRITE); - __ AllocateHeapNumber( - result, scratch1, scratch2, heap_number_map, gc_required); - } -} - - -void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) { - __ Push(a1, a0); +void BinaryOpStub::InitializeInterfaceDescriptor( + Isolate* isolate, + CodeStubInterfaceDescriptor* descriptor) { + static Register registers[] = { a1, a0 }; + descriptor->register_param_count_ = 2; + descriptor->register_params_ = registers; + descriptor->deoptimization_handler_ = FUNCTION_ADDR(BinaryOpIC_Miss); + descriptor->SetMissHandler( + ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate)); } - void TranscendentalCacheStub::Generate(MacroAssembler* masm) { // Untagged case: double input in f4, double result goes // into f4. @@ -2737,6 +1715,7 @@ void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) { RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate); ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate); CreateAllocationSiteStub::GenerateAheadOfTime(isolate); + BinaryOpStub::GenerateAheadOfTime(isolate); } @@ -2795,8 +1774,9 @@ void CEntryStub::GenerateCore(MacroAssembler* masm, if (do_gc) { // Move result passed in v0 into a0 to call PerformGC. 
__ mov(a0, v0); - __ PrepareCallCFunction(1, 0, a1); - __ CallCFunction(ExternalReference::perform_gc_function(isolate), 1, 0); + __ PrepareCallCFunction(2, 0, a1); + __ li(a1, Operand(ExternalReference::isolate_address(masm->isolate()))); + __ CallCFunction(ExternalReference::perform_gc_function(isolate), 2, 0); } ExternalReference scope_depth = @@ -2875,7 +1855,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm, // v0:v1: result // sp: stack pointer // fp: frame pointer - __ LeaveExitFrame(save_doubles_, s0, true); + __ LeaveExitFrame(save_doubles_, s0, true, EMIT_RETURN); // Check if we should retry or throw exception. Label retry; @@ -3408,8 +2388,7 @@ void StringLengthStub::Generate(MacroAssembler* masm) { receiver = a0; } - StubCompiler::GenerateLoadStringLength(masm, receiver, a3, t0, &miss, - support_wrapper_); + StubCompiler::GenerateLoadStringLength(masm, receiver, a3, t0, &miss); __ bind(&miss); StubCompiler::TailCallBuiltin( @@ -4156,7 +3135,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { DirectCEntryStub stub; stub.GenerateCall(masm, t9); - __ LeaveExitFrame(false, no_reg); + __ LeaveExitFrame(false, no_reg, true); // v0: result // subject: subject string (callee saved) @@ -4424,6 +3403,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) { // Cache the called function in a global property cell. Cache states // are uninitialized, monomorphic (indicated by a JSFunction), and // megamorphic. + // a0 : number of arguments to the construct function // a1 : the function to call // a2 : cache cell for call target Label initialize, done, miss, megamorphic, not_array_function; @@ -4444,9 +3424,6 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) { // If we didn't have a matching function, and we didn't find the megamorph // sentinel, then we have in the cell either some other function or an // AllocationSite. Do a map check on the object in a3. - Handle<Map> allocation_site_map( - masm->isolate()->heap()->allocation_site_map(), - masm->isolate()); __ lw(t1, FieldMemOperand(a3, 0)); __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex); __ Branch(&miss, ne, t1, Operand(at)); @@ -4485,6 +3462,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) { 1 << 5 | // a1 1 << 6; // a2 + // Arguments register must be smi-tagged to call out. __ SmiTag(a0); __ MultiPush(kSavedRegs); @@ -5803,33 +4781,11 @@ void StringAddStub::Generate(MacroAssembler* masm) { // Just jump to runtime to add the two strings. __ bind(&call_runtime); - if ((flags_ & STRING_ADD_ERECT_FRAME) != 0) { - GenerateRegisterArgsPop(masm); - // Build a frame. - { - FrameScope scope(masm, StackFrame::INTERNAL); - GenerateRegisterArgsPush(masm); - __ CallRuntime(Runtime::kStringAdd, 2); - } - __ Ret(); - } else { - __ TailCallRuntime(Runtime::kStringAdd, 2, 1); - } + __ TailCallRuntime(Runtime::kStringAdd, 2, 1); if (call_builtin.is_linked()) { __ bind(&call_builtin); - if ((flags_ & STRING_ADD_ERECT_FRAME) != 0) { - GenerateRegisterArgsPop(masm); - // Build a frame. - { - FrameScope scope(masm, StackFrame::INTERNAL); - GenerateRegisterArgsPush(masm); - __ InvokeBuiltin(builtin_id, CALL_FUNCTION); - } - __ Ret(); - } else { - __ InvokeBuiltin(builtin_id, JUMP_FUNCTION); - } + __ InvokeBuiltin(builtin_id, JUMP_FUNCTION); } } @@ -5863,13 +4819,7 @@ void StringAddStub::GenerateConvertArgument(MacroAssembler* masm, // Check the number to string cache. __ bind(¬_string); // Puts the cached result into scratch1. 
-  NumberToStringStub::GenerateLookupNumberStringCache(masm,
-                                                      arg,
-                                                      scratch1,
-                                                      scratch2,
-                                                      scratch3,
-                                                      scratch4,
-                                                      slow);
+  __ LookupNumberStringCache(arg, scratch1, scratch2, scratch3, scratch4, slow);
   __ mov(arg, scratch1);
   __ sw(arg, MemOperand(sp, stack_offset));
   __ bind(&done);
@@ -6222,9 +5172,16 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) {


 void DirectCEntryStub::Generate(MacroAssembler* masm) {
-  // No need to pop or drop anything, LeaveExitFrame will restore the old
-  // stack, thus dropping the allocated space for the return value.
-  // The saved ra is after the reserved stack space for the 4 args.
+  // Make room for the arguments to fit the C calling convention. Most callers
+  // of DirectCEntryStub::GenerateCall use EnterExitFrame/LeaveExitFrame, so
+  // they handle restoring the stack and we don't have to do that here.
+  // Any caller of DirectCEntryStub::GenerateCall must take care of dropping
+  // kCArgsSlotsSize stack space after the call.
+  __ Subu(sp, sp, Operand(kCArgsSlotsSize));
+  // Place the return address on the stack, making the call
+  // GC safe. The RegExp backend also relies on this.
+  __ sw(ra, MemOperand(sp, kCArgsSlotsSize));
+  __ Call(t9);  // Call the C++ function.
   __ lw(t9, MemOperand(sp, kCArgsSlotsSize));
   if (FLAG_debug_code && FLAG_enable_slow_asserts) {
@@ -6241,33 +5198,11 @@ void DirectCEntryStub::Generate(MacroAssembler* masm) {

 void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
                                     Register target) {
-  __ Move(t9, target);
-  __ AssertStackIsAligned();
-  // Allocate space for arg slots.
-  __ Subu(sp, sp, kCArgsSlotsSize);
-
-  // Block the trampoline pool through the whole function to make sure the
-  // number of generated instructions is constant.
-  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
-
-  // We need to get the current 'pc' value, which is not available on MIPS.
-  Label find_ra;
-  masm->bal(&find_ra);  // ra = pc + 8.
-  masm->nop();  // Branch delay slot nop.
-  masm->bind(&find_ra);
-
-  const int kNumInstructionsToJump = 6;
-  masm->addiu(ra, ra, kNumInstructionsToJump * kPointerSize);
-  // Push return address (accessible to GC through exit frame pc).
-  // This spot for ra was reserved in EnterExitFrame.
-  masm->sw(ra, MemOperand(sp, kCArgsSlotsSize));
   intptr_t loc =
       reinterpret_cast<intptr_t>(GetCode(masm->isolate()).location());
-  masm->li(ra, Operand(loc, RelocInfo::CODE_TARGET), CONSTANT_SIZE);
-  // Call the function.
-  masm->Jump(t9);
-  // Make sure the stored 'ra' points to this position.
-  ASSERT_EQ(kNumInstructionsToJump, masm->InstructionsGeneratedSince(&find_ra));
+  __ Move(t9, target);
+  __ li(ra, Operand(loc, RelocInfo::CODE_TARGET), CONSTANT_SIZE);
+  __ Call(ra);
 }
diff --git a/deps/v8/src/mips/code-stubs-mips.h b/deps/v8/src/mips/code-stubs-mips.h
index 8c9d22ae5d..10531a8002 100644
--- a/deps/v8/src/mips/code-stubs-mips.h
+++ b/deps/v8/src/mips/code-stubs-mips.h
@@ -268,31 +268,6 @@ class WriteInt32ToHeapNumberStub : public PlatformCodeStub {
 };


-class NumberToStringStub: public PlatformCodeStub {
- public:
-  NumberToStringStub() { }
-
-  // Generate code to do a lookup in the number string cache. If the number in
-  // the register object is found in the cache the generated code falls through
-  // with the result in the result register. The object and the result register
-  // can be the same. If the number is not found in the cache the code jumps to
-  // the label not_found with only the content of register object unchanged.
- static void GenerateLookupNumberStringCache(MacroAssembler* masm, - Register object, - Register result, - Register scratch1, - Register scratch2, - Register scratch3, - Label* not_found); - - private: - Major MajorKey() { return NumberToString; } - int MinorKey() { return 0; } - - void Generate(MacroAssembler* masm); -}; - - class RecordWriteStub: public PlatformCodeStub { public: RecordWriteStub(Register object, @@ -480,22 +455,6 @@ class RecordWriteStub: public PlatformCodeStub { }; -// Enter C code from generated RegExp code in a way that allows -// the C code to fix the return address in case of a GC. -// Currently only needed on ARM and MIPS. -class RegExpCEntryStub: public PlatformCodeStub { - public: - RegExpCEntryStub() {} - virtual ~RegExpCEntryStub() {} - void Generate(MacroAssembler* masm); - - private: - Major MajorKey() { return RegExpCEntry; } - int MinorKey() { return 0; } - - bool NeedsImmovableCode() { return true; } -}; - // Trampoline stub to call into native code. To call safely into native code // in the presence of compacting GC (which can move code objects) we need to // keep the code which called into native pinned in the memory. Currently the diff --git a/deps/v8/src/mips/codegen-mips.cc b/deps/v8/src/mips/codegen-mips.cc index 5c847fc8f6..ec6649533f 100644 --- a/deps/v8/src/mips/codegen-mips.cc +++ b/deps/v8/src/mips/codegen-mips.cc @@ -156,8 +156,7 @@ void ElementsTransitionGenerator::GenerateMapChangeElementsTransition( // ----------------------------------- if (mode == TRACK_ALLOCATION_SITE) { ASSERT(allocation_memento_found != NULL); - masm->TestJSArrayForAllocationMemento(a2, t0, eq, - allocation_memento_found); + __ JumpIfJSArrayHasAllocationMemento(a2, t0, allocation_memento_found); } // Set transitioned map. @@ -188,7 +187,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble( Register scratch = t6; if (mode == TRACK_ALLOCATION_SITE) { - masm->TestJSArrayForAllocationMemento(a2, t0, eq, fail); + __ JumpIfJSArrayHasAllocationMemento(a2, t0, fail); } // Check for empty arrays, which only require a map transition and no changes @@ -316,7 +315,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject( Label entry, loop, convert_hole, gc_required, only_change_map; if (mode == TRACK_ALLOCATION_SITE) { - masm->TestJSArrayForAllocationMemento(a2, t0, eq, fail); + __ JumpIfJSArrayHasAllocationMemento(a2, t0, fail); } // Check for empty arrays, which only require a map transition and no changes @@ -540,52 +539,67 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm, ASSERT(!temp2.is(temp3)); ASSERT(ExternalReference::math_exp_constants(0).address() != NULL); - Label done; + Label zero, infinity, done; __ li(temp3, Operand(ExternalReference::math_exp_constants(0))); __ ldc1(double_scratch1, ExpConstant(0, temp3)); - __ Move(result, kDoubleRegZero); - __ BranchF(&done, NULL, ge, double_scratch1, input); + __ BranchF(&zero, NULL, ge, double_scratch1, input); + __ ldc1(double_scratch2, ExpConstant(1, temp3)); - __ ldc1(result, ExpConstant(2, temp3)); - __ BranchF(&done, NULL, ge, input, double_scratch2); + __ BranchF(&infinity, NULL, ge, input, double_scratch2); + __ ldc1(double_scratch1, ExpConstant(3, temp3)); __ ldc1(result, ExpConstant(4, temp3)); __ mul_d(double_scratch1, double_scratch1, input); __ add_d(double_scratch1, double_scratch1, result); - __ Move(temp2, temp1, double_scratch1); + __ FmoveLow(temp2, double_scratch1); __ sub_d(double_scratch1, double_scratch1, result); __ ldc1(result, ExpConstant(6, temp3)); __ ldc1(double_scratch2, 
ExpConstant(5, temp3));
   __ mul_d(double_scratch1, double_scratch1, double_scratch2);
   __ sub_d(double_scratch1, double_scratch1, input);
   __ sub_d(result, result, double_scratch1);
-  __ mul_d(input, double_scratch1, double_scratch1);
-  __ mul_d(result, result, input);
-  __ srl(temp1, temp2, 11);
+  __ mul_d(double_scratch2, double_scratch1, double_scratch1);
+  __ mul_d(result, result, double_scratch2);
   __ ldc1(double_scratch2, ExpConstant(7, temp3));
   __ mul_d(result, result, double_scratch2);
   __ sub_d(result, result, double_scratch1);
-  __ ldc1(double_scratch2, ExpConstant(8, temp3));
+  // Move 1 into double_scratch2, as math_exp_constants_array[8] == 1.
+  ASSERT(*reinterpret_cast<double*>
+         (ExternalReference::math_exp_constants(8).address()) == 1);
+  __ Move(double_scratch2, 1);
   __ add_d(result, result, double_scratch2);
-  __ li(at, 0x7ff);
-  __ And(temp2, temp2, at);
+  __ srl(temp1, temp2, 11);
+  __ Ext(temp2, temp2, 0, 11);
   __ Addu(temp1, temp1, Operand(0x3ff));
-  __ sll(temp1, temp1, 20);
   // Must not call ExpConstant() after overwriting temp3!
   __ li(temp3, Operand(ExternalReference::math_exp_log_table()));
   __ sll(at, temp2, 3);
-  __ addu(at, at, temp3);
-  __ lw(at, MemOperand(at));
-  __ Addu(temp3, temp3, Operand(kPointerSize));
-  __ sll(temp2, temp2, 3);
-  __ addu(temp2, temp2, temp3);
-  __ lw(temp2, MemOperand(temp2));
-  __ Or(temp1, temp1, temp2);
-  __ Move(input, at, temp1);
-  __ mul_d(result, result, input);
+  __ Addu(temp3, temp3, Operand(at));
+  __ lw(temp2, MemOperand(temp3, 0));
+  __ lw(temp3, MemOperand(temp3, kPointerSize));
+  // The first word loaded goes into the lower-numbered register.
+  if (temp2.code() < temp3.code()) {
+    __ sll(at, temp1, 20);
+    __ Or(temp1, temp3, at);
+    __ Move(double_scratch1, temp2, temp1);
+  } else {
+    __ sll(at, temp1, 20);
+    __ Or(temp1, temp2, at);
+    __ Move(double_scratch1, temp3, temp1);
+  }
+  __ mul_d(result, result, double_scratch1);
+  __ Branch(&done);
+
+  __ bind(&zero);
+  __ Move(result, kDoubleRegZero);
+  __ Branch(&done);
+
+  __ bind(&infinity);
+  __ ldc1(result, ExpConstant(2, temp3));
+
   __ bind(&done);
 }
@@ -624,7 +638,7 @@ bool Code::IsYoungSequence(byte* sequence) {
 void Code::GetCodeAgeAndParity(byte* sequence, Age* age,
                                MarkingParity* parity) {
   if (IsYoungSequence(sequence)) {
-    *age = kNoAge;
+    *age = kNoAgeCodeAge;
     *parity = NO_MARKING_PARITY;
   } else {
     Address target_address = Memory::Address_at(
@@ -635,16 +649,17 @@ void Code::GetCodeAgeAndParity(byte* sequence, Age* age,
 }


-void Code::PatchPlatformCodeAge(byte* sequence,
+void Code::PatchPlatformCodeAge(Isolate* isolate,
+                                byte* sequence,
                                 Code::Age age,
                                 MarkingParity parity) {
   uint32_t young_length;
   byte* young_sequence = GetNoCodeAgeSequence(&young_length);
-  if (age == kNoAge) {
+  if (age == kNoAgeCodeAge) {
     CopyBytes(sequence, young_sequence, young_length);
     CPU::FlushICache(sequence, young_length);
   } else {
-    Code* stub = GetCodeAgeStub(age, parity);
+    Code* stub = GetCodeAgeStub(isolate, age, parity);
     CodePatcher patcher(sequence, young_length / Assembler::kInstrSize);
     // Mark this code sequence for FindPlatformCodeAgeSequence()
     patcher.masm()->nop(Assembler::CODE_AGE_MARKER_NOP);
diff --git a/deps/v8/src/mips/codegen-mips.h b/deps/v8/src/mips/codegen-mips.h
index 32d7d0d65c..822b94ad79 100644
--- a/deps/v8/src/mips/codegen-mips.h
+++ b/deps/v8/src/mips/codegen-mips.h
@@ -99,6 +99,7 @@ class StringCharLoadGenerator : public AllStatic {

 class MathExpGenerator : public AllStatic {
  public:
+  // Register input isn't modified. All other registers are clobbered.
static void EmitMathExp(MacroAssembler* masm, DoubleRegister input, DoubleRegister result, diff --git a/deps/v8/src/mips/deoptimizer-mips.cc b/deps/v8/src/mips/deoptimizer-mips.cc index 16f75b8632..d31990be5c 100644 --- a/deps/v8/src/mips/deoptimizer-mips.cc +++ b/deps/v8/src/mips/deoptimizer-mips.cc @@ -78,88 +78,6 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) { } -// This structure comes from FullCodeGenerator::EmitBackEdgeBookkeeping. -// The back edge bookkeeping code matches the pattern: -// -// sltu at, sp, t0 / slt at, a3, zero_reg (in case of count based interrupts) -// beq at, zero_reg, ok -// lui t9, <interrupt stub address> upper -// ori t9, <interrupt stub address> lower -// jalr t9 -// nop -// ok-label ----- pc_after points here -// -// We patch the code to the following form: -// -// addiu at, zero_reg, 1 -// beq at, zero_reg, ok ;; Not changed -// lui t9, <on-stack replacement address> upper -// ori t9, <on-stack replacement address> lower -// jalr t9 ;; Not changed -// nop ;; Not changed -// ok-label ----- pc_after points here - -void Deoptimizer::PatchInterruptCodeAt(Code* unoptimized_code, - Address pc_after, - Code* replacement_code) { - static const int kInstrSize = Assembler::kInstrSize; - // Replace the sltu instruction with load-imm 1 to at, so beq is not taken. - CodePatcher patcher(pc_after - 6 * kInstrSize, 1); - patcher.masm()->addiu(at, zero_reg, 1); - // Replace the stack check address in the load-immediate (lui/ori pair) - // with the entry address of the replacement code. - Assembler::set_target_address_at(pc_after - 4 * kInstrSize, - replacement_code->entry()); - - unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch( - unoptimized_code, pc_after - 4 * kInstrSize, replacement_code); -} - - -void Deoptimizer::RevertInterruptCodeAt(Code* unoptimized_code, - Address pc_after, - Code* interrupt_code) { - static const int kInstrSize = Assembler::kInstrSize; - // Restore the sltu instruction so beq can be taken again. - CodePatcher patcher(pc_after - 6 * kInstrSize, 1); - patcher.masm()->slt(at, a3, zero_reg); - // Restore the original call address. - Assembler::set_target_address_at(pc_after - 4 * kInstrSize, - interrupt_code->entry()); - - interrupt_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch( - unoptimized_code, pc_after - 4 * kInstrSize, interrupt_code); -} - - -#ifdef DEBUG -Deoptimizer::InterruptPatchState Deoptimizer::GetInterruptPatchState( - Isolate* isolate, - Code* unoptimized_code, - Address pc_after) { - static const int kInstrSize = Assembler::kInstrSize; - ASSERT(Assembler::IsBeq(Assembler::instr_at(pc_after - 5 * kInstrSize))); - if (Assembler::IsAddImmediate( - Assembler::instr_at(pc_after - 6 * kInstrSize))) { - Code* osr_builtin = - isolate->builtins()->builtin(Builtins::kOnStackReplacement); - ASSERT(reinterpret_cast<uint32_t>( - Assembler::target_address_at(pc_after - 4 * kInstrSize)) == - reinterpret_cast<uint32_t>(osr_builtin->entry())); - return PATCHED_FOR_OSR; - } else { - // Get the interrupt stub code object to match against from cache. - Code* interrupt_builtin = - isolate->builtins()->builtin(Builtins::kInterruptCheck); - ASSERT(reinterpret_cast<uint32_t>( - Assembler::target_address_at(pc_after - 4 * kInstrSize)) == - reinterpret_cast<uint32_t>(interrupt_builtin->entry())); - return NOT_PATCHED; - } -} -#endif // DEBUG - - void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) { // Set the register values. 
The values are not important as there are no // callee saved registers in JavaScript frames, so all registers are @@ -186,10 +104,7 @@ void Deoptimizer::SetPlatformCompiledStubRegisters( ApiFunction function(descriptor->deoptimization_handler_); ExternalReference xref(&function, ExternalReference::BUILTIN_CALL, isolate_); intptr_t handler = reinterpret_cast<intptr_t>(xref.address()); - int params = descriptor->register_param_count_; - if (descriptor->stack_parameter_count_ != NULL) { - params++; - } + int params = descriptor->environment_length(); output_frame->SetRegister(s0.code(), params); output_frame->SetRegister(s1.code(), (params - 1) * kPointerSize); output_frame->SetRegister(s2.code(), handler); diff --git a/deps/v8/src/mips/full-codegen-mips.cc b/deps/v8/src/mips/full-codegen-mips.cc index df3f4170b1..cbd0788121 100644 --- a/deps/v8/src/mips/full-codegen-mips.cc +++ b/deps/v8/src/mips/full-codegen-mips.cc @@ -171,12 +171,7 @@ void FullCodeGenerator::Generate() { FrameScope frame_scope(masm_, StackFrame::MANUAL); info->set_prologue_offset(masm_->pc_offset()); - // The following three instructions must remain together and unmodified for - // code aging to work properly. - __ Push(ra, fp, cp, a1); - __ nop(Assembler::CODE_AGE_SEQUENCE_NOP); - // Adjust fp to point to caller's fp. - __ Addu(fp, sp, Operand(2 * kPointerSize)); + __ Prologue(BUILD_FUNCTION_FRAME); info->AddNoFrameRange(0, masm_->pc_offset()); { Comment cmnt(masm_, "[ Allocate locals"); @@ -1653,13 +1648,11 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) { __ li(a0, Operand(Smi::FromInt(flags))); int properties_count = constant_properties->length() / 2; if ((FLAG_track_double_fields && expr->may_store_doubles()) || - expr->depth() > 1) { - __ Push(a3, a2, a1, a0); - __ CallRuntime(Runtime::kCreateObjectLiteral, 4); - } else if (Serializer::enabled() || flags != ObjectLiteral::kFastElements || + expr->depth() > 1 || Serializer::enabled() || + flags != ObjectLiteral::kFastElements || properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) { __ Push(a3, a2, a1, a0); - __ CallRuntime(Runtime::kCreateObjectLiteralShallow, 4); + __ CallRuntime(Runtime::kCreateObjectLiteral, 4); } else { FastCloneShallowObjectStub stub(properties_count); __ CallStub(&stub); @@ -3613,8 +3606,9 @@ void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); ASSERT_EQ(args->length(), 1); - // Load the argument on the stack and call the stub. - VisitForStackValue(args->at(0)); + // Load the argument into a0 and call the stub. 
+ VisitForAccumulatorValue(args->at(0)); + __ mov(a0, result_register()); NumberToStringStub stub; __ CallStub(&stub); @@ -4926,6 +4920,83 @@ FullCodeGenerator::NestedStatement* FullCodeGenerator::TryFinally::Exit( #undef __ + +void BackEdgeTable::PatchAt(Code* unoptimized_code, + Address pc, + BackEdgeState target_state, + Code* replacement_code) { + static const int kInstrSize = Assembler::kInstrSize; + Address branch_address = pc - 6 * kInstrSize; + CodePatcher patcher(branch_address, 1); + + switch (target_state) { + case INTERRUPT: + // slt at, a3, zero_reg (in case of count based interrupts) + // beq at, zero_reg, ok + // lui t9, <interrupt stub address> upper + // ori t9, <interrupt stub address> lower + // jalr t9 + // nop + // ok-label ----- pc_after points here + patcher.masm()->slt(at, a3, zero_reg); + break; + case ON_STACK_REPLACEMENT: + case OSR_AFTER_STACK_CHECK: + // addiu at, zero_reg, 1 + // beq at, zero_reg, ok ;; Not changed + // lui t9, <on-stack replacement address> upper + // ori t9, <on-stack replacement address> lower + // jalr t9 ;; Not changed + // nop ;; Not changed + // ok-label ----- pc_after points here + patcher.masm()->addiu(at, zero_reg, 1); + break; + } + Address pc_immediate_load_address = pc - 4 * kInstrSize; + // Replace the stack check address in the load-immediate (lui/ori pair) + // with the entry address of the replacement code. + Assembler::set_target_address_at(pc_immediate_load_address, + replacement_code->entry()); + + unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch( + unoptimized_code, pc_immediate_load_address, replacement_code); +} + + +BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState( + Isolate* isolate, + Code* unoptimized_code, + Address pc) { + static const int kInstrSize = Assembler::kInstrSize; + Address branch_address = pc - 6 * kInstrSize; + Address pc_immediate_load_address = pc - 4 * kInstrSize; + + ASSERT(Assembler::IsBeq(Assembler::instr_at(pc - 5 * kInstrSize))); + if (!Assembler::IsAddImmediate(Assembler::instr_at(branch_address))) { + ASSERT(reinterpret_cast<uint32_t>( + Assembler::target_address_at(pc_immediate_load_address)) == + reinterpret_cast<uint32_t>( + isolate->builtins()->InterruptCheck()->entry())); + return INTERRUPT; + } + + ASSERT(Assembler::IsAddImmediate(Assembler::instr_at(branch_address))); + + if (reinterpret_cast<uint32_t>( + Assembler::target_address_at(pc_immediate_load_address)) == + reinterpret_cast<uint32_t>( + isolate->builtins()->OnStackReplacement()->entry())) { + return ON_STACK_REPLACEMENT; + } + + ASSERT(reinterpret_cast<uint32_t>( + Assembler::target_address_at(pc_immediate_load_address)) == + reinterpret_cast<uint32_t>( + isolate->builtins()->OsrAfterStackCheck()->entry())); + return OSR_AFTER_STACK_CHECK; +} + + } } // namespace v8::internal #endif // V8_TARGET_ARCH_MIPS diff --git a/deps/v8/src/mips/ic-mips.cc b/deps/v8/src/mips/ic-mips.cc index e250e0ee4a..aa2773462c 100644 --- a/deps/v8/src/mips/ic-mips.cc +++ b/deps/v8/src/mips/ic-mips.cc @@ -656,7 +656,7 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm) { // Probe the stub cache. Code::Flags flags = Code::ComputeFlags( - Code::STUB, MONOMORPHIC, Code::kNoExtraICState, + Code::HANDLER, MONOMORPHIC, Code::kNoExtraICState, Code::NORMAL, Code::LOAD_IC); masm->isolate()->stub_cache()->GenerateProbe( masm, flags, a0, a2, a3, t0, t1, t2); @@ -1496,7 +1496,7 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm, // Get the receiver from the stack and probe the stub cache. 
Code::Flags flags = Code::ComputeFlags( - Code::STUB, MONOMORPHIC, strict_mode, + Code::HANDLER, MONOMORPHIC, strict_mode, Code::NORMAL, Code::STORE_IC); masm->isolate()->stub_cache()->GenerateProbe( masm, flags, a1, a2, a3, t0, t1, t2); diff --git a/deps/v8/src/mips/lithium-codegen-mips.cc b/deps/v8/src/mips/lithium-codegen-mips.cc index b37c7e0419..f54d4a5b0c 100644 --- a/deps/v8/src/mips/lithium-codegen-mips.cc +++ b/deps/v8/src/mips/lithium-codegen-mips.cc @@ -98,24 +98,6 @@ void LChunkBuilder::Abort(BailoutReason reason) { } -void LCodeGen::Comment(const char* format, ...) { - if (!FLAG_code_comments) return; - char buffer[4 * KB]; - StringBuilder builder(buffer, ARRAY_SIZE(buffer)); - va_list arguments; - va_start(arguments, format); - builder.AddFormattedList(format, arguments); - va_end(arguments); - - // Copy the string before recording it in the assembler to avoid - // issues when the stack allocated buffer goes out of scope. - size_t length = builder.position(); - Vector<char> copy = Vector<char>::New(length + 1); - OS::MemCopy(copy.start(), builder.Finalize(), copy.length()); - masm()->RecordComment(copy.start()); -} - - bool LCodeGen::GeneratePrologue() { ASSERT(is_generating()); @@ -151,21 +133,7 @@ bool LCodeGen::GeneratePrologue() { info()->set_prologue_offset(masm_->pc_offset()); if (NeedsEagerFrame()) { - if (info()->IsStub()) { - __ Push(ra, fp, cp); - __ Push(Smi::FromInt(StackFrame::STUB)); - // Adjust FP to point to saved FP. - __ Addu(fp, sp, Operand(2 * kPointerSize)); - } else { - // The following three instructions must remain together and unmodified - // for code aging to work properly. - __ Push(ra, fp, cp, a1); - // Add unused nop to ensure prologue sequence is identical for - // full-codegen and lithium-codegen. - __ nop(Assembler::CODE_AGE_SEQUENCE_NOP); - // Adj. FP to point to saved FP. - __ Addu(fp, sp, Operand(2 * kPointerSize)); - } + __ Prologue(info()->IsStub() ? BUILD_STUB_FRAME : BUILD_FUNCTION_FRAME); frame_is_built_ = true; info_->AddNoFrameRange(0, masm_->pc_offset()); } @@ -242,6 +210,8 @@ bool LCodeGen::GeneratePrologue() { // Trace the call. if (FLAG_trace && info()->IsOptimizing()) { + // We have not executed any compiled code yet, so cp still holds the + // incoming context. __ CallRuntime(Runtime::kTraceEnter, 0); } return !is_aborted(); @@ -263,45 +233,15 @@ void LCodeGen::GenerateOsrPrologue() { } -bool LCodeGen::GenerateBody() { - ASSERT(is_generating()); - bool emit_instructions = true; - for (current_instruction_ = 0; - !is_aborted() && current_instruction_ < instructions_->length(); - current_instruction_++) { - LInstruction* instr = instructions_->at(current_instruction_); - - // Don't emit code for basic blocks with a replacement. 
- if (instr->IsLabel()) { - emit_instructions = !LLabel::cast(instr)->HasReplacement(); - } - if (!emit_instructions) continue; - - if (FLAG_code_comments && instr->HasInterestingComment(this)) { - Comment(";;; <@%d,#%d> %s", - current_instruction_, - instr->hydrogen_value()->id(), - instr->Mnemonic()); - } - - RecordAndUpdatePosition(instr->position()); - - instr->CompileToNative(this); - } - EnsureSpaceForLazyDeopt(); - last_lazy_deopt_pc_ = masm()->pc_offset(); - return !is_aborted(); -} - - bool LCodeGen::GenerateDeferredCode() { ASSERT(is_generating()); if (deferred_.length() > 0) { for (int i = 0; !is_aborted() && i < deferred_.length(); i++) { LDeferredCode* code = deferred_[i]; - int pos = instructions_->at(code->instruction_index())->position(); - RecordAndUpdatePosition(pos); + HValue* value = + instructions_->at(code->instruction_index())->hydrogen_value(); + RecordAndWritePosition(value->position()); Comment(";;; <@%d,#%d> " "-------------------- Deferred %s --------------------", @@ -701,10 +641,8 @@ void LCodeGen::CallCodeGeneric(Handle<Code> code, RelocInfo::Mode mode, LInstruction* instr, SafepointMode safepoint_mode) { - EnsureSpaceForLazyDeopt(); + EnsureSpaceForLazyDeopt(Deoptimizer::patch_size()); ASSERT(instr != NULL); - LPointerMap* pointers = instr->pointer_map(); - RecordPosition(pointers->position()); __ Call(code, mode); RecordSafepointWithLazyDeopt(instr, safepoint_mode); } @@ -712,20 +650,36 @@ void LCodeGen::CallCodeGeneric(Handle<Code> code, void LCodeGen::CallRuntime(const Runtime::Function* function, int num_arguments, - LInstruction* instr) { + LInstruction* instr, + SaveFPRegsMode save_doubles) { ASSERT(instr != NULL); - LPointerMap* pointers = instr->pointer_map(); - ASSERT(pointers != NULL); - RecordPosition(pointers->position()); - __ CallRuntime(function, num_arguments); + __ CallRuntime(function, num_arguments, save_doubles); + RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT); } +void LCodeGen::LoadContextFromDeferred(LOperand* context) { + if (context->IsRegister()) { + __ Move(cp, ToRegister(context)); + } else if (context->IsStackSlot()) { + __ lw(cp, ToMemOperand(context)); + } else if (context->IsConstantOperand()) { + HConstant* constant = + chunk_->LookupConstant(LConstantOperand::cast(context)); + __ LoadObject(cp, Handle<Object>::cast(constant->handle(isolate()))); + } else { + UNREACHABLE(); + } +} + + void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id, int argc, - LInstruction* instr) { + LInstruction* instr, + LOperand* context) { + LoadContextFromDeferred(context); __ CallRuntimeSaveDoubles(id); RecordSafepointWithRegisters( instr->pointer_map(), argc, Safepoint::kNoLazyDeopt); @@ -835,26 +789,31 @@ void LCodeGen::DeoptimizeIf(Condition condition, void LCodeGen::RegisterDependentCodeForEmbeddedMaps(Handle<Code> code) { ZoneList<Handle<Map> > maps(1, zone()); + ZoneList<Handle<JSObject> > objects(1, zone()); int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT); for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) { - RelocInfo::Mode mode = it.rinfo()->rmode(); - if (mode == RelocInfo::EMBEDDED_OBJECT && - it.rinfo()->target_object()->IsMap()) { - Handle<Map> map(Map::cast(it.rinfo()->target_object())); - if (map->CanTransition()) { + if (Code::IsWeakEmbeddedObject(code->kind(), it.rinfo()->target_object())) { + if (it.rinfo()->target_object()->IsMap()) { + Handle<Map> map(Map::cast(it.rinfo()->target_object())); maps.Add(map, zone()); + } else if (it.rinfo()->target_object()->IsJSObject()) { + 
Handle<JSObject> object(JSObject::cast(it.rinfo()->target_object())); + objects.Add(object, zone()); } } } #ifdef VERIFY_HEAP - // This disables verification of weak embedded maps after full GC. + // This disables verification of weak embedded objects after full GC. // AddDependentCode can cause a GC, which would observe the state where // this code is not yet in the depended code lists of the embedded maps. - NoWeakEmbeddedMapsVerificationScope disable_verification_of_embedded_maps; + NoWeakObjectVerificationScope disable_verification_of_embedded_objects; #endif for (int i = 0; i < maps.length(); i++) { maps.at(i)->AddDependentCode(DependentCode::kWeaklyEmbeddedGroup, code); } + for (int i = 0; i < objects.length(); i++) { + AddWeakObjectToCodeDependency(isolate()->heap(), objects.at(i), code); + } } @@ -950,10 +909,6 @@ void LCodeGen::RecordSafepoint( safepoint.DefinePointerRegister(ToRegister(pointer), zone()); } } - if (kind & Safepoint::kWithRegisters) { - // Register cp always contains a pointer to the context. - safepoint.DefinePointerRegister(cp, zone()); - } } @@ -964,7 +919,7 @@ void LCodeGen::RecordSafepoint(LPointerMap* pointers, void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) { - LPointerMap empty_pointers(RelocInfo::kNoPosition, zone()); + LPointerMap empty_pointers(zone()); RecordSafepoint(&empty_pointers, deopt_mode); } @@ -986,17 +941,10 @@ void LCodeGen::RecordSafepointWithRegistersAndDoubles( } -void LCodeGen::RecordPosition(int position) { +void LCodeGen::RecordAndWritePosition(int position) { if (position == RelocInfo::kNoPosition) return; masm()->positions_recorder()->RecordPosition(position); -} - - -void LCodeGen::RecordAndUpdatePosition(int position) { - if (position >= 0 && position != old_position_) { - masm()->positions_recorder()->RecordPosition(position); - old_position_ = position; - } + masm()->positions_recorder()->WriteRecordedPositions(); } @@ -1046,6 +994,7 @@ void LCodeGen::DoParameter(LParameter* instr) { void LCodeGen::DoCallStub(LCallStub* instr) { + ASSERT(ToRegister(instr->context()).is(cp)); ASSERT(ToRegister(instr->result()).is(v0)); switch (instr->hydrogen()->major_key()) { case CodeStub::RegExpConstructResult: { @@ -1063,11 +1012,6 @@ void LCodeGen::DoCallStub(LCallStub* instr) { CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); break; } - case CodeStub::NumberToString: { - NumberToStringStub stub; - CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); - break; - } case CodeStub::StringCompare: { StringCompareStub stub; CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); @@ -1408,11 +1352,11 @@ void LCodeGen::DoMulI(LMulI* instr) { Register left = ToRegister(instr->left()); LOperand* right_op = instr->right(); - bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); bool bailout_on_minus_zero = instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero); + bool overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); - if (right_op->IsConstantOperand() && !can_overflow) { + if (right_op->IsConstantOperand()) { int32_t constant = ToInteger32(LConstantOperand::cast(right_op)); if (bailout_on_minus_zero && (constant < 0)) { @@ -1423,7 +1367,12 @@ void LCodeGen::DoMulI(LMulI* instr) { switch (constant) { case -1: - __ Subu(result, zero_reg, left); + if (overflow) { + __ SubuAndCheckForOverflow(result, zero_reg, left, scratch); + DeoptimizeIf(lt, instr->environment(), scratch, Operand(zero_reg)); + } else { + __ Subu(result, zero_reg, left); + } break; case 0: if 
(bailout_on_minus_zero) { @@ -1444,27 +1393,23 @@ void LCodeGen::DoMulI(LMulI* instr) { int32_t mask = constant >> 31; uint32_t constant_abs = (constant + mask) ^ mask; - if (IsPowerOf2(constant_abs) || - IsPowerOf2(constant_abs - 1) || - IsPowerOf2(constant_abs + 1)) { - if (IsPowerOf2(constant_abs)) { - int32_t shift = WhichPowerOf2(constant_abs); - __ sll(result, left, shift); - } else if (IsPowerOf2(constant_abs - 1)) { - int32_t shift = WhichPowerOf2(constant_abs - 1); - __ sll(scratch, left, shift); - __ Addu(result, scratch, left); - } else if (IsPowerOf2(constant_abs + 1)) { - int32_t shift = WhichPowerOf2(constant_abs + 1); - __ sll(scratch, left, shift); - __ Subu(result, scratch, left); - } - - // Correct the sign of the result is the constant is negative. - if (constant < 0) { - __ Subu(result, zero_reg, result); - } - + if (IsPowerOf2(constant_abs)) { + int32_t shift = WhichPowerOf2(constant_abs); + __ sll(result, left, shift); + // Correct the sign of the result if the constant is negative. + if (constant < 0) __ Subu(result, zero_reg, result); + } else if (IsPowerOf2(constant_abs - 1)) { + int32_t shift = WhichPowerOf2(constant_abs - 1); + __ sll(scratch, left, shift); + __ Addu(result, scratch, left); + // Correct the sign of the result if the constant is negative. + if (constant < 0) __ Subu(result, zero_reg, result); + } else if (IsPowerOf2(constant_abs + 1)) { + int32_t shift = WhichPowerOf2(constant_abs + 1); + __ sll(scratch, left, shift); + __ Subu(result, scratch, left); + // Correct the sign of the result if the constant is negative. + if (constant < 0) __ Subu(result, zero_reg, result); } else { // Generate standard code. __ li(at, constant); @@ -1473,12 +1418,10 @@ void LCodeGen::DoMulI(LMulI* instr) { } } else { - Register right = EmitLoadRegister(right_op, scratch); - if (bailout_on_minus_zero) { - __ Or(ToRegister(instr->temp()), left, right); - } + ASSERT(right_op->IsRegister()); + Register right = ToRegister(right_op); - if (can_overflow) { + if (overflow) { // hi:lo = left * right. if (instr->hydrogen()->representation().IsSmi()) { __ SmiUntag(result, left); @@ -1502,12 +1445,13 @@ void LCodeGen::DoMulI(LMulI* instr) { } if (bailout_on_minus_zero) { - // Bail out if the result is supposed to be negative zero. Label done; - __ Branch(&done, ne, result, Operand(zero_reg)); - DeoptimizeIf(lt, + __ Xor(at, left, right); + __ Branch(&done, ge, at, Operand(zero_reg)); + // Bail out if the result is minus zero. 
+ DeoptimizeIf(eq, instr->environment(), - ToRegister(instr->temp()), + result, Operand(zero_reg)); __ bind(&done); } @@ -1789,33 +1733,43 @@ void LCodeGen::DoDateField(LDateField* instr) { void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) { Register string = ToRegister(instr->string()); - Register index = ToRegister(instr->index()); + LOperand* index_op = instr->index(); Register value = ToRegister(instr->value()); Register scratch = scratch0(); String::Encoding encoding = instr->encoding(); if (FLAG_debug_code) { - __ lw(at, FieldMemOperand(string, HeapObject::kMapOffset)); - __ lbu(at, FieldMemOperand(at, Map::kInstanceTypeOffset)); + __ lw(scratch, FieldMemOperand(string, HeapObject::kMapOffset)); + __ lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); - __ And(at, at, Operand(kStringRepresentationMask | kStringEncodingMask)); + __ And(scratch, scratch, + Operand(kStringRepresentationMask | kStringEncodingMask)); static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag; static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag; - __ Subu(at, at, Operand(encoding == String::ONE_BYTE_ENCODING + __ Subu(at, scratch, Operand(encoding == String::ONE_BYTE_ENCODING ? one_byte_seq_type : two_byte_seq_type)); __ Check(eq, kUnexpectedStringType, at, Operand(zero_reg)); } - __ Addu(scratch, - string, - Operand(SeqString::kHeaderSize - kHeapObjectTag)); - if (encoding == String::ONE_BYTE_ENCODING) { - __ Addu(at, scratch, index); - __ sb(value, MemOperand(at)); + if (index_op->IsConstantOperand()) { + int constant_index = ToInteger32(LConstantOperand::cast(index_op)); + if (encoding == String::ONE_BYTE_ENCODING) { + __ sb(value, + FieldMemOperand(string, SeqString::kHeaderSize + constant_index)); + } else { + __ sh(value, + FieldMemOperand(string, SeqString::kHeaderSize + constant_index * 2)); + } } else { - __ sll(at, index, 1); - __ Addu(at, scratch, at); - __ sh(value, MemOperand(at)); + Register index = ToRegister(index_op); + if (encoding == String::ONE_BYTE_ENCODING) { + __ Addu(scratch, string, Operand(index)); + __ sb(value, FieldMemOperand(scratch, SeqString::kHeaderSize)); + } else { + __ sll(scratch, index, 1); + __ Addu(scratch, string, scratch); + __ sh(value, FieldMemOperand(scratch, SeqString::kHeaderSize)); + } } } @@ -1823,6 +1777,7 @@ void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) { void LCodeGen::DoThrow(LThrow* instr) { Register input_reg = EmitLoadRegister(instr->value(), at); __ push(input_reg); + ASSERT(ToRegister(instr->context()).is(cp)); CallRuntime(Runtime::kThrow, 1, instr); if (FLAG_debug_code) { @@ -1974,6 +1929,7 @@ void LCodeGen::DoArithmeticD(LArithmeticD* instr) { void LCodeGen::DoArithmeticT(LArithmeticT* instr) { + ASSERT(ToRegister(instr->context()).is(cp)); ASSERT(ToRegister(instr->left()).is(a1)); ASSERT(ToRegister(instr->right()).is(a0)); ASSERT(ToRegister(instr->result()).is(v0)); @@ -1986,13 +1942,6 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) { } -int LCodeGen::GetNextEmittedBlock() const { - for (int i = current_block_ + 1; i < graph()->blocks()->length(); ++i) { - if (!chunk_->GetLabel(i)->HasReplacement()) return i; - } - return -1; -} - template<class InstrType> void LCodeGen::EmitBranch(InstrType instr, Condition condition, @@ -2057,25 +2006,6 @@ void LCodeGen::DoDebugBreak(LDebugBreak* instr) { } -void LCodeGen::DoIsNumberAndBranch(LIsNumberAndBranch* instr) { - Representation r = instr->hydrogen()->value()->representation(); - if (r.IsSmiOrInteger32() || 
r.IsDouble()) {
-    EmitBranch(instr, al, zero_reg, Operand(zero_reg));
-  } else {
-    ASSERT(r.IsTagged());
-    Register reg = ToRegister(instr->value());
-    HType type = instr->hydrogen()->value()->type();
-    if (type.IsTaggedNumber()) {
-      EmitBranch(instr, al, zero_reg, Operand(zero_reg));
-    }
-    __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
-    __ lw(scratch0(), FieldMemOperand(reg, HeapObject::kMapOffset));
-    __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
-    EmitBranch(instr, eq, scratch0(), Operand(at));
-  }
-}
-
-
 void LCodeGen::DoBranch(LBranch* instr) {
   Representation r = instr->hydrogen()->value()->representation();
   if (r.IsInteger32() || r.IsSmi()) {
@@ -2223,6 +2153,10 @@ Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
     case Token::EQ_STRICT:
       cond = eq;
       break;
+    case Token::NE:
+    case Token::NE_STRICT:
+      cond = ne;
+      break;
     case Token::LT:
       cond = is_unsigned ? lo : lt;
       break;
@@ -2439,6 +2373,7 @@ static Condition ComputeCompareCondition(Token::Value op) {

 void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
+  ASSERT(ToRegister(instr->context()).is(cp));
   Token::Value op = instr->op();

   Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
@@ -2598,6 +2533,7 @@ void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {

 void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
+  ASSERT(ToRegister(instr->context()).is(cp));
   Label true_label, done;
   ASSERT(ToRegister(instr->left()).is(a0));  // Object is in a0.
   ASSERT(ToRegister(instr->right()).is(a1));  // Function is in a1.
@@ -2708,6 +2644,7 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
   InstanceofStub stub(flags);

   PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+  LoadContextFromDeferred(instr->context());

   // Get the temp register reserved by the instruction. This needs to be t0 as
   // its slot of the pushing of safepoint registers is used to communicate the
@@ -2736,15 +2673,8 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
 }


-void LCodeGen::DoInstanceSize(LInstanceSize* instr) {
-  Register object = ToRegister(instr->object());
-  Register result = ToRegister(instr->result());
-  __ lw(result, FieldMemOperand(object, HeapObject::kMapOffset));
-  __ lbu(result, FieldMemOperand(result, Map::kInstanceSizeOffset));
-}
-
-
 void LCodeGen::DoCmpT(LCmpT* instr) {
+  ASSERT(ToRegister(instr->context()).is(cp));
   Token::Value op = instr->op();

   Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
@@ -2768,8 +2698,11 @@ void LCodeGen::DoCmpT(LCmpT* instr) {
 void LCodeGen::DoReturn(LReturn* instr) {
   if (FLAG_trace && info()->IsOptimizing()) {
     // Push the return value on the stack as the parameter.
-    // Runtime::TraceExit returns its parameter in v0.
+    // Runtime::TraceExit returns its parameter in v0. We're leaving the code
+    // managed by the register allocator and tearing down the frame, so it's
+    // safe to write to the context register.
__ push(v0); + __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); __ CallRuntime(Runtime::kTraceExit, 1); } if (info()->saves_caller_doubles()) { @@ -2814,7 +2747,7 @@ void LCodeGen::DoReturn(LReturn* instr) { void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) { Register result = ToRegister(instr->result()); - __ li(at, Operand(Handle<Object>(instr->hydrogen()->cell()))); + __ li(at, Operand(Handle<Object>(instr->hydrogen()->cell().handle()))); __ lw(result, FieldMemOperand(at, Cell::kValueOffset)); if (instr->hydrogen()->RequiresHoleCheck()) { __ LoadRoot(at, Heap::kTheHoleValueRootIndex); @@ -2824,6 +2757,7 @@ void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) { void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) { + ASSERT(ToRegister(instr->context()).is(cp)); ASSERT(ToRegister(instr->global_object()).is(a0)); ASSERT(ToRegister(instr->result()).is(v0)); @@ -2840,7 +2774,7 @@ void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) { Register cell = scratch0(); // Load the cell. - __ li(cell, Operand(instr->hydrogen()->cell())); + __ li(cell, Operand(instr->hydrogen()->cell().handle())); // If the cell we are storing to contains the hole it could have // been deleted from the property dictionary. In that case, we need @@ -2861,6 +2795,7 @@ void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) { void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) { + ASSERT(ToRegister(instr->context()).is(cp)); ASSERT(ToRegister(instr->global_object()).is(a1)); ASSERT(ToRegister(instr->value()).is(a0)); @@ -2937,7 +2872,12 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) { if (access.IsExternalMemory()) { Register result = ToRegister(instr->result()); - __ lw(result, MemOperand(object, offset)); + MemOperand operand = MemOperand(object, offset); + if (access.representation().IsByte()) { + __ lb(result, operand); + } else { + __ lw(result, operand); + } return; } @@ -2948,16 +2888,21 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) { } Register result = ToRegister(instr->result()); - if (access.IsInobject()) { - __ lw(result, FieldMemOperand(object, offset)); - } else { + if (!access.IsInobject()) { __ lw(result, FieldMemOperand(object, JSObject::kPropertiesOffset)); - __ lw(result, FieldMemOperand(result, offset)); + object = result; + } + MemOperand operand = FieldMemOperand(object, offset); + if (access.representation().IsByte()) { + __ lb(result, operand); + } else { + __ lw(result, operand); } } void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) { + ASSERT(ToRegister(instr->context()).is(cp)); ASSERT(ToRegister(instr->object()).is(a0)); ASSERT(ToRegister(instr->result()).is(v0)); @@ -3011,6 +2956,12 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) { } +void LCodeGen::DoLoadRoot(LLoadRoot* instr) { + Register result = ToRegister(instr->result()); + __ LoadRoot(result, instr->index()); +} + + void LCodeGen::DoLoadExternalArrayPointer( LLoadExternalArrayPointer* instr) { Register to_reg = ToRegister(instr->result()); @@ -3132,28 +3083,31 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) { Register scratch = scratch0(); int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS); - int shift_size = (instr->hydrogen()->key()->representation().IsSmi()) - ? 
(element_size_shift - kSmiTagSize) : element_size_shift; - int constant_key = 0; + + int base_offset = + FixedDoubleArray::kHeaderSize - kHeapObjectTag + + (instr->additional_index() << element_size_shift); if (key_is_constant) { - constant_key = ToInteger32(LConstantOperand::cast(instr->key())); + int constant_key = ToInteger32(LConstantOperand::cast(instr->key())); if (constant_key & 0xF0000000) { Abort(kArrayIndexConstantValueTooBig); } - } else { - key = ToRegister(instr->key()); + base_offset += constant_key << element_size_shift; } + __ Addu(scratch, elements, Operand(base_offset)); - int base_offset = (FixedDoubleArray::kHeaderSize - kHeapObjectTag) + - ((constant_key + instr->additional_index()) << element_size_shift); if (!key_is_constant) { - __ sll(scratch, key, shift_size); - __ Addu(elements, elements, scratch); + key = ToRegister(instr->key()); + int shift_size = (instr->hydrogen()->key()->representation().IsSmi()) + ? (element_size_shift - kSmiTagSize) : element_size_shift; + __ sll(at, key, shift_size); + __ Addu(scratch, scratch, at); } - __ Addu(elements, elements, Operand(base_offset)); - __ ldc1(result, MemOperand(elements)); + + __ ldc1(result, MemOperand(scratch)); + if (instr->hydrogen()->RequiresHoleCheck()) { - __ lw(scratch, MemOperand(elements, sizeof(kHoleNanLower32))); + __ lw(scratch, MemOperand(scratch, sizeof(kHoleNanLower32))); DeoptimizeIf(eq, instr->environment(), scratch, Operand(kHoleNanUpper32)); } } @@ -3172,7 +3126,7 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) { instr->additional_index()); store_base = elements; } else { - Register key = EmitLoadRegister(instr->key(), scratch0()); + Register key = ToRegister(instr->key()); // Even though the HLoadKeyed instruction forces the input // representation for the key to be an integer, the input gets replaced // during bound check elimination with the index argument to the bounds @@ -3257,6 +3211,7 @@ MemOperand LCodeGen::PrepareKeyedOperand(Register key, void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) { + ASSERT(ToRegister(instr->context()).is(cp)); ASSERT(ToRegister(instr->object()).is(a1)); ASSERT(ToRegister(instr->key()).is(a0)); @@ -3394,7 +3349,6 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) { __ bind(&invoke); ASSERT(instr->HasPointerMap()); LPointerMap* pointers = instr->pointer_map(); - RecordPosition(pointers->position()); SafepointGenerator safepoint_generator( this, pointers, Safepoint::kLazyDeopt); // The number of arguments is stored in receiver which is a0, as expected @@ -3402,7 +3356,6 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) { ParameterCount actual(receiver); __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator, CALL_AS_METHOD); - __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); } @@ -3431,11 +3384,11 @@ void LCodeGen::DoThisFunction(LThisFunction* instr) { void LCodeGen::DoContext(LContext* instr) { // If there is a non-return use, the context must be moved to a register. Register result = ToRegister(instr->result()); - for (HUseIterator it(instr->hydrogen()->uses()); !it.Done(); it.Advance()) { - if (!it.value()->IsReturn()) { - __ mov(result, cp); - return; - } + if (info()->IsOptimizing()) { + __ lw(result, MemOperand(fp, StandardFrameConstants::kContextOffset)); + } else { + // If there is no frame, the context must be in cp. 
@@ -3172,7 +3126,7 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
                                            instr->additional_index());
     store_base = elements;
   } else {
-    Register key = EmitLoadRegister(instr->key(), scratch0());
+    Register key = ToRegister(instr->key());
     // Even though the HLoadKeyed instruction forces the input
     // representation for the key to be an integer, the input gets replaced
     // during bound check elimination with the index argument to the bounds
@@ -3257,6 +3211,7 @@ MemOperand LCodeGen::PrepareKeyedOperand(Register key,
 
 
 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
+  ASSERT(ToRegister(instr->context()).is(cp));
   ASSERT(ToRegister(instr->object()).is(a1));
   ASSERT(ToRegister(instr->key()).is(a0));
 
@@ -3394,7 +3349,6 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
   __ bind(&invoke);
   ASSERT(instr->HasPointerMap());
   LPointerMap* pointers = instr->pointer_map();
-  RecordPosition(pointers->position());
   SafepointGenerator safepoint_generator(
       this, pointers, Safepoint::kLazyDeopt);
   // The number of arguments is stored in receiver which is a0, as expected
@@ -3402,7 +3356,6 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
   ParameterCount actual(receiver);
   __ InvokeFunction(function, actual, CALL_FUNCTION,
                     safepoint_generator, CALL_AS_METHOD);
-  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
 }
 
 
@@ -3431,11 +3384,11 @@ void LCodeGen::DoThisFunction(LThisFunction* instr) {
 void LCodeGen::DoContext(LContext* instr) {
   // If there is a non-return use, the context must be moved to a register.
   Register result = ToRegister(instr->result());
-  for (HUseIterator it(instr->hydrogen()->uses()); !it.Done(); it.Advance()) {
-    if (!it.value()->IsReturn()) {
-      __ mov(result, cp);
-      return;
-    }
+  if (info()->IsOptimizing()) {
+    __ lw(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  } else {
+    // If there is no frame, the context must be in cp.
+    ASSERT(result.is(cp));
   }
 }
 
 
@@ -3449,6 +3402,7 @@ void LCodeGen::DoOuterContext(LOuterContext* instr) {
 
 
 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
+  ASSERT(ToRegister(instr->context()).is(cp));
   __ LoadHeapObject(scratch0(), instr->hydrogen()->pairs());
   __ li(scratch1(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
   // The context is the first argument.
@@ -3458,8 +3412,9 @@ void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
 
 
 void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
+  Register context = ToRegister(instr->context());
   Register result = ToRegister(instr->result());
-  __ lw(result, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
+  __ lw(result, ContextOperand(context, Context::GLOBAL_OBJECT_INDEX));
 }
 
 
@@ -3482,7 +3437,6 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
       dont_adapt_arguments || formal_parameter_count == arity;
 
   LPointerMap* pointers = instr->pointer_map();
-  RecordPosition(pointers->position());
 
   if (can_invoke_directly) {
     if (a1_state == A1_UNINITIALIZED) {
@@ -3512,9 +3466,6 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
     __ InvokeFunction(
         function, expected, count, CALL_FUNCTION, generator, call_kind);
   }
-
-  // Restore context.
-  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
 }
 
 
@@ -3531,6 +3482,8 @@ void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
 
 
 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
+  ASSERT(instr->context() != NULL);
+  ASSERT(ToRegister(instr->context()).is(cp));
   Register input = ToRegister(instr->value());
   Register result = ToRegister(instr->result());
   Register scratch = scratch0();
@@ -3572,7 +3525,8 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
 
   // Slow case: Call the runtime system to do the number allocation.
   __ bind(&slow);
-  CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
+  CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr,
+                          instr->context());
 
   // Set the pointer to the new heap number in tmp.
   if (!tmp1.is(v0)) __ mov(tmp1, v0);
@@ -3890,6 +3844,9 @@ void LCodeGen::DoMathExp(LMathExp* instr) {
 
 void LCodeGen::DoMathLog(LMathLog* instr) {
   ASSERT(ToDoubleRegister(instr->result()).is(f4));
+  // Set the context register to a GC-safe fake value. Clobbering it is
+  // OK because this instruction is marked as a call.
+  __ mov(cp, zero_reg);
   TranscendentalCacheStub stub(TranscendentalCache::LOG,
                                TranscendentalCacheStub::UNTAGGED);
   CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
@@ -3898,6 +3855,9 @@ void LCodeGen::DoMathLog(LMathLog* instr) {
 
 void LCodeGen::DoMathTan(LMathTan* instr) {
   ASSERT(ToDoubleRegister(instr->result()).is(f4));
+  // Set the context register to a GC-safe fake value. Clobbering it is
+  // OK because this instruction is marked as a call.
+  __ mov(cp, zero_reg);
   TranscendentalCacheStub stub(TranscendentalCache::TAN,
                                TranscendentalCacheStub::UNTAGGED);
   CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
@@ -3906,6 +3866,9 @@ void LCodeGen::DoMathTan(LMathTan* instr) {
 
 void LCodeGen::DoMathCos(LMathCos* instr) {
   ASSERT(ToDoubleRegister(instr->result()).is(f4));
+  // Set the context register to a GC-safe fake value. Clobbering it is
+  // OK because this instruction is marked as a call.
+  __ mov(cp, zero_reg);
   TranscendentalCacheStub stub(TranscendentalCache::COS,
                                TranscendentalCacheStub::UNTAGGED);
   CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
@@ -3914,6 +3877,9 @@ void LCodeGen::DoMathCos(LMathCos* instr) {
 
 void LCodeGen::DoMathSin(LMathSin* instr) {
   ASSERT(ToDoubleRegister(instr->result()).is(f4));
+  // Set the context register to a GC-safe fake value. Clobbering it is
+  // OK because this instruction is marked as a call.
+  __ mov(cp, zero_reg);
   TranscendentalCacheStub stub(TranscendentalCache::SIN,
                                TranscendentalCacheStub::UNTAGGED);
   CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
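The repeated `__ mov(cp, zero_reg)` works because of V8's pointer tagging: zero has a clear low tag bit, so a GC that scans the context register mid-stub sees the smi 0 rather than a stale pointer. A sketch of the invariant being relied on (tag constants are assumptions):

    // Sketch: smis are tagged 0 in the low bit, heap pointers tagged 1.
    const intptr_t kSmiTagMask = 1;  // assumed
    const intptr_t kSmiTag = 0;      // assumed
    bool LooksLikeSmi(intptr_t word) { return (word & kSmiTagMask) == kSmiTag; }
    // LooksLikeSmi(0) is true, so a zeroed cp is treated as the smi 0 by a
    // scanning GC instead of being chased as an object reference.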
@@ -3921,17 +3887,16 @@ void LCodeGen::DoMathSin(LMathSin* instr) {
 
 
 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
+  ASSERT(ToRegister(instr->context()).is(cp));
   ASSERT(ToRegister(instr->function()).is(a1));
   ASSERT(instr->HasPointerMap());
 
   Handle<JSFunction> known_function = instr->hydrogen()->known_function();
   if (known_function.is_null()) {
     LPointerMap* pointers = instr->pointer_map();
-    RecordPosition(pointers->position());
     SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
     ParameterCount count(instr->arity());
     __ InvokeFunction(a1, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
-    __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
   } else {
     CallKnownFunction(known_function,
                       instr->hydrogen()->formal_parameter_count(),
@@ -3944,17 +3909,18 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
 
 
 void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
+  ASSERT(ToRegister(instr->context()).is(cp));
   ASSERT(ToRegister(instr->result()).is(v0));
 
   int arity = instr->arity();
   Handle<Code> ic =
       isolate()->stub_cache()->ComputeKeyedCallInitialize(arity);
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
-  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
 }
 
 
 void LCodeGen::DoCallNamed(LCallNamed* instr) {
+  ASSERT(ToRegister(instr->context()).is(cp));
   ASSERT(ToRegister(instr->result()).is(v0));
 
   int arity = instr->arity();
@@ -3963,23 +3929,22 @@ void LCodeGen::DoCallNamed(LCallNamed* instr) {
       isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
   __ li(a2, Operand(instr->name()));
   CallCode(ic, mode, instr);
-  // Restore context register.
-  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
 }
 
 
 void LCodeGen::DoCallFunction(LCallFunction* instr) {
+  ASSERT(ToRegister(instr->context()).is(cp));
   ASSERT(ToRegister(instr->function()).is(a1));
   ASSERT(ToRegister(instr->result()).is(v0));
 
   int arity = instr->arity();
   CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
   CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
-  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
 }
 
 
 void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
+  ASSERT(ToRegister(instr->context()).is(cp));
   ASSERT(ToRegister(instr->result()).is(v0));
 
   int arity = instr->arity();
@@ -3988,7 +3953,6 @@ void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
       isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
   __ li(a2, Operand(instr->name()));
   CallCode(ic, mode, instr);
-  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
 }
 
 
@@ -4004,6 +3968,7 @@ void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
 
 
 void LCodeGen::DoCallNew(LCallNew* instr) {
+  ASSERT(ToRegister(instr->context()).is(cp));
   ASSERT(ToRegister(instr->constructor()).is(a1));
   ASSERT(ToRegister(instr->result()).is(v0));
 
@@ -4017,6 +3982,7 @@ void LCodeGen::DoCallNew(LCallNew* instr) {
 
 
 void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
+  ASSERT(ToRegister(instr->context()).is(cp));
   ASSERT(ToRegister(instr->constructor()).is(a1));
   ASSERT(ToRegister(instr->result()).is(v0));
 
@@ -4091,7 +4057,12 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
 
   if (access.IsExternalMemory()) {
     Register value = ToRegister(instr->value());
-    __ sw(value, MemOperand(object, offset));
+    MemOperand operand = MemOperand(object, offset);
+    if (representation.IsByte()) {
+      __ sb(value, operand);
+    } else {
+      __ sw(value, operand);
+    }
     return;
   }
 
@@ -4136,7 +4107,12 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
       instr->hydrogen()->value()->IsHeapObject()
           ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
   if (access.IsInobject()) {
-    __ sw(value, FieldMemOperand(object, offset));
+    MemOperand operand = FieldMemOperand(object, offset);
+    if (representation.IsByte()) {
+      __ sb(value, operand);
+    } else {
+      __ sw(value, operand);
+    }
     if (instr->hydrogen()->NeedsWriteBarrier()) {
       // Update the write barrier for the object for in-object properties.
       __ RecordWriteField(object,
@@ -4150,7 +4126,12 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
     }
   } else {
     __ lw(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
-    __ sw(value, FieldMemOperand(scratch, offset));
+    MemOperand operand = FieldMemOperand(scratch, offset);
+    if (representation.IsByte()) {
+      __ sb(value, operand);
+    } else {
+      __ sw(value, operand);
+    }
     if (instr->hydrogen()->NeedsWriteBarrier()) {
       // Update the write barrier for the properties array.
       // object is used as a scratch register.
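Each of the three stores just added picks its width the same way: byte representations get a one-byte `sb`, everything else a word-sized `sw`. The pattern in plain C++ (stand-in Representation type, illustration only):

    // Sketch: representation-driven store width.
    #include <cstdint>
    #include <cstring>
    enum class Representation { kByte, kWord };
    void StoreField(uint8_t* base, int offset, uint32_t value, Representation r) {
      if (r == Representation::kByte) {
        uint8_t b = static_cast<uint8_t>(value);  // like __ sb(value, operand)
        std::memcpy(base + offset, &b, 1);
      } else {
        std::memcpy(base + offset, &value, 4);    // like __ sw(value, operand)
      }
    }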
@@ -4168,6 +4149,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
 
 
 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
+  ASSERT(ToRegister(instr->context()).is(cp));
   ASSERT(ToRegister(instr->object()).is(a1));
   ASSERT(ToRegister(instr->value()).is(a0));
 
@@ -4241,20 +4223,25 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
 
   if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
       elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
+    Register address = scratch0();
     FPURegister value(ToDoubleRegister(instr->value()));
     if (key_is_constant) {
-      __ Addu(scratch0(), external_pointer, constant_key <<
-          element_size_shift);
+      if (constant_key != 0) {
+        __ Addu(address, external_pointer,
+                Operand(constant_key << element_size_shift));
+      } else {
+        address = external_pointer;
+      }
     } else {
-      __ sll(scratch0(), key, shift_size);
-      __ Addu(scratch0(), scratch0(), external_pointer);
+      __ sll(address, key, shift_size);
+      __ Addu(address, external_pointer, address);
     }
 
     if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
       __ cvt_s_d(double_scratch0(), value);
-      __ swc1(double_scratch0(), MemOperand(scratch0(), additional_offset));
+      __ swc1(double_scratch0(), MemOperand(address, additional_offset));
     } else {  // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
-      __ sdc1(value, MemOperand(scratch0(), additional_offset));
+      __ sdc1(value, MemOperand(address, additional_offset));
     }
   } else {
     Register value(ToRegister(instr->value()));
@@ -4296,33 +4283,29 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
 void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
   DoubleRegister value = ToDoubleRegister(instr->value());
   Register elements = ToRegister(instr->elements());
-  Register key = no_reg;
   Register scratch = scratch0();
+  DoubleRegister double_scratch = double_scratch0();
   bool key_is_constant = instr->key()->IsConstantOperand();
-  int constant_key = 0;
-  Label not_nan;
+  Label not_nan, done;
 
   // Calculate the effective address of the slot in the array to store the
   // double value.
+  int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
   if (key_is_constant) {
-    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+    int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
     if (constant_key & 0xF0000000) {
       Abort(kArrayIndexConstantValueTooBig);
     }
+    __ Addu(scratch, elements,
+            Operand((constant_key << element_size_shift) +
+                    FixedDoubleArray::kHeaderSize - kHeapObjectTag));
   } else {
-    key = ToRegister(instr->key());
-  }
-  int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
-  int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
-      ? (element_size_shift - kSmiTagSize) : element_size_shift;
-  if (key_is_constant) {
-    __ Addu(scratch, elements, Operand((constant_key << element_size_shift) +
-            FixedDoubleArray::kHeaderSize - kHeapObjectTag));
-  } else {
-    __ sll(scratch, key, shift_size);
-    __ Addu(scratch, elements, Operand(scratch));
-    __ Addu(scratch, scratch,
+    int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
+        ? (element_size_shift - kSmiTagSize) : element_size_shift;
+    __ Addu(scratch, elements,
            Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
+    __ sll(at, ToRegister(instr->key()), shift_size);
+    __ Addu(scratch, scratch, at);
   }
 
   if (instr->NeedsCanonicalization()) {
@@ -4333,12 +4316,17 @@ void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
     // Only load canonical NaN if the comparison above set the overflow.
     __ bind(&is_nan);
-    __ Move(value, FixedDoubleArray::canonical_not_the_hole_nan_as_double());
+    __ Move(double_scratch,
+            FixedDoubleArray::canonical_not_the_hole_nan_as_double());
+    __ sdc1(double_scratch, MemOperand(scratch, instr->additional_index() <<
+        element_size_shift));
+    __ Branch(&done);
   }
 
   __ bind(&not_nan);
   __ sdc1(value, MemOperand(scratch, instr->additional_index() <<
       element_size_shift));
+  __ bind(&done);
 }
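Canonicalization exists because fast double arrays reserve one specific NaN bit pattern (the "hole NaN") to mark missing elements, so any other NaN being stored must first be rewritten to a single canonical NaN. The idea at the C++ level (the exact bit pattern is V8-internal; the one below is an assumption):

    // Sketch: canonicalize NaNs before storing into a holey double array.
    #include <cmath>
    #include <cstdint>
    #include <cstring>
    double CanonicalizeForStore(double v) {
      if (std::isnan(v)) {  // mirrors the self-comparison overflow check above
        const uint64_t kCanonicalNan = 0x7FF8000000000000ULL;  // assumed
        double canonical;
        std::memcpy(&canonical, &kCanonicalNan, sizeof canonical);
        return canonical;
      }
      return v;  // ordinary doubles are stored unchanged
    }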
@@ -4404,6 +4392,7 @@ void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
 
 
 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
+  ASSERT(ToRegister(instr->context()).is(cp));
   ASSERT(ToRegister(instr->object()).is(a2));
   ASSERT(ToRegister(instr->key()).is(a1));
   ASSERT(ToRegister(instr->value()).is(a0));
 
@@ -4436,6 +4425,7 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
     __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
                         scratch, GetRAState(), kDontSaveFPRegs);
   } else {
+    ASSERT(ToRegister(instr->context()).is(cp));
     PushSafepointRegistersScope scope(
         this, Safepoint::kWithRegistersAndDoubles);
     __ mov(a0, object_reg);
@@ -4452,14 +4442,16 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
   Register object = ToRegister(instr->object());
   Register temp = ToRegister(instr->temp());
-  Label fail;
-  __ TestJSArrayForAllocationMemento(object, temp, ne, &fail);
+  Label no_memento_found;
+  __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found,
+                                     ne, &no_memento_found);
   DeoptimizeIf(al, instr->environment());
-  __ bind(&fail);
+  __ bind(&no_memento_found);
 }
 
 
 void LCodeGen::DoStringAdd(LStringAdd* instr) {
+  ASSERT(ToRegister(instr->context()).is(cp));
   __ push(ToRegister(instr->left()));
   __ push(ToRegister(instr->right()));
   StringAddStub stub(instr->hydrogen()->flags());
@@ -4514,7 +4506,8 @@ void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
     __ SmiTag(index);
     __ push(index);
   }
-  CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr);
+  CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr,
+                          instr->context());
   __ AssertSmi(v0);
   __ SmiUntag(v0);
   __ StoreToSafepointRegisterSlot(v0, result);
@@ -4567,7 +4560,7 @@ void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
   PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
   __ SmiTag(char_code);
   __ push(char_code);
-  CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr);
+  CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
   __ StoreToSafepointRegisterSlot(v0, result);
 }
 
@@ -4707,7 +4700,15 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
   // register is stored, as this register is in the pointer map, but contains
   // an integer value.
   __ StoreToSafepointRegisterSlot(zero_reg, dst);
-  CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
+  // NumberTagI and NumberTagD use the context from the frame, rather than
+  // the environment's HContext or HInlinedContext value.
+  // They only call Runtime::kAllocateHeapNumber.
+  // The corresponding HChange instructions are added in a phase that does
+  // not have easy access to the local context.
+  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+  RecordSafepointWithRegisters(
+      instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
   __ Move(dst, v0);
   __ Subu(dst, dst, kHeapObjectTag);
 
@@ -4763,7 +4764,15 @@ void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
   __ mov(reg, zero_reg);
 
   PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
-  CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
+  // NumberTagI and NumberTagD use the context from the frame, rather than
+  // the environment's HContext or HInlinedContext value.
+  // They only call Runtime::kAllocateHeapNumber.
+  // The corresponding HChange instructions are added in a phase that does
+  // not have easy access to the local context.
+  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+  RecordSafepointWithRegisters(
+      instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
   __ Subu(v0, v0, kHeapObjectTag);
   __ StoreToSafepointRegisterSlot(v0, reg);
 }
@@ -4798,34 +4807,19 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
                                 LEnvironment* env,
                                 NumberUntagDMode mode) {
   Register scratch = scratch0();
-
-  Label load_smi, heap_number, done;
-
+  Label convert, load_smi, done;
   if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
     // Smi check.
     __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
-
     // Heap number map check.
     __ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
     __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
-    if (!can_convert_undefined_to_nan) {
-      DeoptimizeIf(ne, env, scratch, Operand(at));
+    if (can_convert_undefined_to_nan) {
+      __ Branch(&convert, ne, scratch, Operand(at));
     } else {
-      Label heap_number, convert;
-      __ Branch(&heap_number, eq, scratch, Operand(at));
-
-      // Convert undefined (and hole) to NaN.
-      __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
-      DeoptimizeIf(ne, env, input_reg, Operand(at));
-
-      __ bind(&convert);
-      __ LoadRoot(at, Heap::kNanValueRootIndex);
-      __ ldc1(result_reg, FieldMemOperand(at, HeapNumber::kValueOffset));
-      __ Branch(&done);
-
-      __ bind(&heap_number);
+      DeoptimizeIf(ne, env, scratch, Operand(at));
     }
-    // Heap number to double register conversion.
+    // Load heap number.
     __ ldc1(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
     if (deoptimize_on_minus_zero) {
       __ mfc1(at, result_reg.low());
@@ -4834,11 +4828,19 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
       DeoptimizeIf(eq, env, scratch, Operand(HeapNumber::kSignMask));
     }
     __ Branch(&done);
+    if (can_convert_undefined_to_nan) {
+      __ bind(&convert);
+      // Convert undefined (and hole) to NaN.
+      __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+      DeoptimizeIf(ne, env, input_reg, Operand(at));
+      __ LoadRoot(scratch, Heap::kNanValueRootIndex);
+      __ ldc1(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset));
+      __ Branch(&done);
+    }
   } else {
     __ SmiUntag(scratch, input_reg);
     ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
   }
-
   // Smi to double register conversion
   __ bind(&load_smi);
   // scratch: untagged value of input_reg
@@ -4870,19 +4872,32 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
   if (instr->truncating()) {
     // Performs a truncating conversion of a floating point number as used by
     // the JS bitwise operations.
-    Label heap_number;
-    __ Branch(&heap_number, eq, scratch1, Operand(at));  // HeapNumber map?
 
-    // Check for undefined. Undefined is converted to zero for truncating
-    // conversions.
+    Label no_heap_number, check_bools, check_false;
+    __ Branch(&no_heap_number, ne, scratch1, Operand(at));  // HeapNumber map?
+    __ mov(scratch2, input_reg);
+    __ TruncateHeapNumberToI(input_reg, scratch2);
+    __ Branch(&done);
+
+    // Check for Oddballs. Undefined/False is converted to zero and True to
+    // one for truncating conversions.
+    __ bind(&no_heap_number);
     __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
-    DeoptimizeIf(ne, instr->environment(), input_reg, Operand(at));
+    __ Branch(&check_bools, ne, input_reg, Operand(at));
     ASSERT(ToRegister(instr->result()).is(input_reg));
-    __ mov(input_reg, zero_reg);
-    __ Branch(&done);
+    __ Branch(USE_DELAY_SLOT, &done);
+    __ mov(input_reg, zero_reg);  // In delay slot.
 
-    __ bind(&heap_number);
-    __ mov(scratch2, input_reg);
-    __ TruncateHeapNumberToI(input_reg, scratch2);
+    __ bind(&check_bools);
+    __ LoadRoot(at, Heap::kTrueValueRootIndex);
+    __ Branch(&check_false, ne, scratch2, Operand(at));
+    __ Branch(USE_DELAY_SLOT, &done);
+    __ li(input_reg, Operand(1));  // In delay slot.
+
+    __ bind(&check_false);
+    __ LoadRoot(at, Heap::kFalseValueRootIndex);
+    DeoptimizeIf(ne, instr->environment(), scratch2, Operand(at));
+    __ Branch(USE_DELAY_SLOT, &done);
+    __ mov(input_reg, zero_reg);  // In delay slot.
   } else {
     // Deoptimize if we don't have a heap number.
    DeoptimizeIf(ne, instr->environment(), scratch1, Operand(at));
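The new oddball paths implement JavaScript's truncation rules directly: `undefined | 0` and `false | 0` are 0, `true | 0` is 1, and any other non-heap-number must deoptimize. The mapping, spelled out (stand-in types, not V8 API):

    // Sketch: the value mapping behind the delay-slot branches above.
    #include <stdexcept>
    enum class Oddball { kUndefined, kTrue, kFalse, kOther };
    int TruncateOddballToI(Oddball v) {
      switch (v) {
        case Oddball::kUndefined: return 0;  // undefined | 0 === 0
        case Oddball::kFalse:     return 0;  // false | 0 === 0
        case Oddball::kTrue:      return 1;  // true | 0 === 1
        default: throw std::runtime_error("deopt");  // stands in for DeoptimizeIf
      }
    }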
@@ -4934,14 +4949,18 @@ void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
 
   Register input_reg = ToRegister(input);
 
-  DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
+  if (instr->hydrogen()->value()->representation().IsSmi()) {
+    __ SmiUntag(input_reg);
+  } else {
+    DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
 
-  // Let the deferred code handle the HeapObject case.
-  __ JumpIfNotSmi(input_reg, deferred->entry());
+    // Let the deferred code handle the HeapObject case.
+    __ JumpIfNotSmi(input_reg, deferred->entry());
 
-  // Smi to int32 conversion.
-  __ SmiUntag(input_reg);
-  __ bind(deferred->exit());
+    // Smi to int32 conversion.
+    __ SmiUntag(input_reg);
+    __ bind(deferred->exit());
+  }
 }
 
@@ -5091,7 +5110,7 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
 
 void LCodeGen::DoCheckValue(LCheckValue* instr) {
   Register reg = ToRegister(instr->value());
-  Handle<HeapObject> object = instr->hydrogen()->object();
+  Handle<HeapObject> object = instr->hydrogen()->object().handle();
   AllowDeferredHandleDereference smi_check;
   if (isolate()->heap()->InNewSpace(*object)) {
     Register reg = ToRegister(instr->value());
@@ -5111,7 +5130,10 @@ void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
   {
     PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
     __ push(object);
-    CallRuntimeFromDeferred(Runtime::kMigrateInstance, 1, instr);
+    __ mov(cp, zero_reg);
+    __ CallRuntimeSaveDoubles(Runtime::kMigrateInstance);
+    RecordSafepointWithRegisters(
+        instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
     __ StoreToSafepointRegisterSlot(v0, scratch0());
   }
   __ And(at, scratch0(), Operand(kSmiTagMask));
@@ -5142,7 +5164,6 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
   LOperand* input = instr->value();
   ASSERT(input->IsRegister());
   Register reg = ToRegister(input);
-  SmallMapList* map_set = instr->hydrogen()->map_set();
   __ lw(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
 
   DeferredCheckMaps* deferred = NULL;
@@ -5151,12 +5172,13 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
     __ bind(deferred->check_maps());
   }
 
+  UniqueSet<Map> map_set = instr->hydrogen()->map_set();
   Label success;
-  for (int i = 0; i < map_set->length() - 1; i++) {
-    Handle<Map> map = map_set->at(i);
+  for (int i = 0; i < map_set.size() - 1; i++) {
+    Handle<Map> map = map_set.at(i).handle();
     __ CompareMapAndBranch(map_reg, map, &success, eq, &success);
   }
-  Handle<Map> map = map_set->last();
+  Handle<Map> map = map_set.at(map_set.size() - 1).handle();
   // Do the CompareMap() directly within the Branch() and DeoptimizeIf().
   if (instr->hydrogen()->has_migration_target()) {
     __ Branch(deferred->entry(), ne, map_reg, Operand(map));
@@ -5309,12 +5331,15 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
   if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
     ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
     ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
-    CallRuntimeFromDeferred(Runtime::kAllocateInOldPointerSpace, 1, instr);
+    CallRuntimeFromDeferred(Runtime::kAllocateInOldPointerSpace, 1, instr,
+                            instr->context());
   } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
     ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
-    CallRuntimeFromDeferred(Runtime::kAllocateInOldDataSpace, 1, instr);
+    CallRuntimeFromDeferred(Runtime::kAllocateInOldDataSpace, 1, instr,
+                            instr->context());
   } else {
-    CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr);
+    CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr,
+                            instr->context());
   }
   __ StoreToSafepointRegisterSlot(v0, result);
 }
@@ -5329,6 +5354,7 @@ void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
 
 
 void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
+  ASSERT(ToRegister(instr->context()).is(cp));
   Label materialized;
   // Registers will be used as follows:
   // t3 = literals array.
@@ -5381,6 +5407,7 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
 
 
 void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
+  ASSERT(ToRegister(instr->context()).is(cp));
   // Use the fast case closure allocation code that allocates in new
   // space for nested functions that don't need literals cloning.
   bool pretenure = instr->hydrogen()->pretenure();
@@ -5563,14 +5590,13 @@ void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
 }
 
 
-void LCodeGen::EnsureSpaceForLazyDeopt() {
+void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
   if (info()->IsStub()) return;
   // Ensure that we have enough space after the previous lazy-bailout
   // instruction for patching the code here.
   int current_pc = masm()->pc_offset();
-  int patch_size = Deoptimizer::patch_size();
-  if (current_pc < last_lazy_deopt_pc_ + patch_size) {
-    int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc;
+  if (current_pc < last_lazy_deopt_pc_ + space_needed) {
+    int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
     ASSERT_EQ(0, padding_size % Assembler::kInstrSize);
     while (padding_size > 0) {
       __ nop();
@@ -5581,7 +5607,7 @@ void LCodeGen::EnsureSpaceForLazyDeopt() {
 
 
 void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
-  EnsureSpaceForLazyDeopt();
+  EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
   last_lazy_deopt_pc_ = masm()->pc_offset();
   ASSERT(instr->HasEnvironment());
   LEnvironment* env = instr->environment();
@@ -5612,6 +5638,7 @@ void LCodeGen::DoDummyUse(LDummyUse* instr) {
 
 void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
   PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+  LoadContextFromDeferred(instr->context());
   __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
   RecordSafepointWithLazyDeopt(
       instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
@@ -5643,10 +5670,12 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
     Label done;
     __ LoadRoot(at, Heap::kStackLimitRootIndex);
     __ Branch(&done, hs, sp, Operand(at));
+    ASSERT(instr->context()->IsRegister());
+    ASSERT(ToRegister(instr->context()).is(cp));
     CallCode(isolate()->builtins()->StackCheck(),
             RelocInfo::CODE_TARGET,
             instr);
-    EnsureSpaceForLazyDeopt();
+    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
    last_lazy_deopt_pc_ = masm()->pc_offset();
    __ bind(&done);
    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
@@ -5658,7 +5687,7 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
        new(zone()) DeferredStackCheck(this, instr);
    __ LoadRoot(at, Heap::kStackLimitRootIndex);
    __ Branch(deferred_stack_check->entry(), lo, sp, Operand(at));
-    EnsureSpaceForLazyDeopt();
+    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
    last_lazy_deopt_pc_ = masm()->pc_offset();
    __ bind(instr->done_label());
    deferred_stack_check->SetExit(instr->done_label());
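EnsureSpaceForLazyDeopt, now parameterized on the space needed, pads with `nop`s so the distance from the previous lazy-bailout site is always at least the size of the code the deoptimizer later patches in. The arithmetic is worth stating plainly (a sketch; 4-byte fixed-width MIPS instructions assumed):

    // Sketch: nops needed to keep `space_needed` bytes after the last
    // lazy-deopt site.
    const int kInstrSize = 4;  // MIPS fixed-width instructions
    int PaddingInstructions(int current_pc, int last_lazy_deopt_pc,
                            int space_needed) {
      if (current_pc >= last_lazy_deopt_pc + space_needed) return 0;
      int padding_size = last_lazy_deopt_pc + space_needed - current_pc;
      // Matches ASSERT_EQ(0, padding_size % Assembler::kInstrSize) above.
      return padding_size / kInstrSize;
    }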
diff --git a/deps/v8/src/mips/lithium-codegen-mips.h b/deps/v8/src/mips/lithium-codegen-mips.h
index 84105cae35..f643d02191 100644
--- a/deps/v8/src/mips/lithium-codegen-mips.h
+++ b/deps/v8/src/mips/lithium-codegen-mips.h
@@ -31,6 +31,7 @@
 #include "deoptimizer.h"
 #include "mips/lithium-gap-resolver-mips.h"
 #include "mips/lithium-mips.h"
+#include "lithium-codegen.h"
 #include "safepoint-table.h"
 #include "scopes.h"
 #include "v8utils.h"
@@ -42,43 +43,26 @@ namespace internal {
 class LDeferredCode;
 class SafepointGenerator;
 
-class LCodeGen V8_FINAL BASE_EMBEDDED {
+class LCodeGen: public LCodeGenBase {
  public:
   LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
-      : zone_(info->zone()),
-        chunk_(static_cast<LPlatformChunk*>(chunk)),
-        masm_(assembler),
-        info_(info),
-        current_block_(-1),
-        current_instruction_(-1),
-        instructions_(chunk->instructions()),
+      : LCodeGenBase(chunk, assembler, info),
         deoptimizations_(4, info->zone()),
         deopt_jump_table_(4, info->zone()),
         deoptimization_literals_(8, info->zone()),
         inlined_function_count_(0),
         scope_(info->scope()),
-        status_(UNUSED),
         translations_(info->zone()),
         deferred_(8, info->zone()),
         osr_pc_offset_(-1),
-        last_lazy_deopt_pc_(0),
         frame_is_built_(false),
         safepoints_(info->zone()),
         resolver_(this),
-        expected_safepoint_kind_(Safepoint::kSimple),
-        old_position_(RelocInfo::kNoPosition) {
+        expected_safepoint_kind_(Safepoint::kSimple) {
     PopulateDeoptimizationLiteralsWithInlinedFunctions();
   }
 
-  // Simple accessors.
-  MacroAssembler* masm() const { return masm_; }
-  CompilationInfo* info() const { return info_; }
-  Isolate* isolate() const { return info_->isolate(); }
-  Factory* factory() const { return isolate()->factory(); }
-  Heap* heap() const { return isolate()->heap(); }
-  Zone* zone() const { return zone_; }
-
   int LookupDestination(int block_id) const {
     return chunk()->LookupDestination(block_id);
   }
@@ -177,31 +161,16 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
 #undef DECLARE_DO
 
  private:
-  enum Status {
-    UNUSED,
-    GENERATING,
-    DONE,
-    ABORTED
-  };
-
-  bool is_unused() const { return status_ == UNUSED; }
-  bool is_generating() const { return status_ == GENERATING; }
-  bool is_done() const { return status_ == DONE; }
-  bool is_aborted() const { return status_ == ABORTED; }
-
   StrictModeFlag strict_mode_flag() const {
     return info()->is_classic_mode() ? kNonStrictMode : kStrictMode;
   }
 
-  LPlatformChunk* chunk() const { return chunk_; }
   Scope* scope() const { return scope_; }
-  HGraph* graph() const { return chunk()->graph(); }
 
   Register scratch0() { return kLithiumScratchReg; }
   Register scratch1() { return kLithiumScratchReg2; }
   DoubleRegister double_scratch0() { return kLithiumScratchDouble; }
 
-  int GetNextEmittedBlock() const;
   LInstruction* GetNextInstruction();
 
   void EmitClassOfTest(Label* if_true,
@@ -214,14 +183,12 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
   int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
 
   void Abort(BailoutReason reason);
-  void FPRINTF_CHECKING Comment(const char* format, ...);
 
   void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
 
   // Code generation passes.  Returns true if code generation should
   // continue.
   bool GeneratePrologue();
-  bool GenerateBody();
   bool GenerateDeferredCode();
   bool GenerateDeoptJumpTable();
   bool GenerateSafepointTable();
@@ -245,7 +212,8 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
 
   void CallRuntime(const Runtime::Function* function,
                    int num_arguments,
-                   LInstruction* instr);
+                   LInstruction* instr,
+                   SaveFPRegsMode save_doubles = kDontSaveFPRegs);
 
   void CallRuntime(Runtime::FunctionId id,
                    int num_arguments,
@@ -254,9 +222,11 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
     CallRuntime(function, num_arguments, instr);
   }
 
+  void LoadContextFromDeferred(LOperand* context);
   void CallRuntimeFromDeferred(Runtime::FunctionId id,
                                int argc,
-                               LInstruction* instr);
+                               LInstruction* instr,
+                               LOperand* context);
 
   enum A1State {
     A1_UNINITIALIZED,
@@ -324,8 +294,8 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
   void RecordSafepointWithRegistersAndDoubles(LPointerMap* pointers,
                                               int arguments,
                                               Safepoint::DeoptMode mode);
-  void RecordPosition(int position);
-  void RecordAndUpdatePosition(int position);
+
+  void RecordAndWritePosition(int position) V8_OVERRIDE;
 
   static Condition TokenToCondition(Token::Value op, bool is_unsigned);
   void EmitGoto(int block);
@@ -404,7 +374,7 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
                     LEnvironment* environment);
 
-  void EnsureSpaceForLazyDeopt();
+  void EnsureSpaceForLazyDeopt(int space_needed) V8_OVERRIDE;
   void DoLoadKeyedExternalArray(LLoadKeyed* instr);
   void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
   void DoLoadKeyedFixedArray(LLoadKeyed* instr);
@@ -412,24 +382,14 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
   void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr);
   void DoStoreKeyedFixedArray(LStoreKeyed* instr);
 
-  Zone* zone_;
-  LPlatformChunk* const chunk_;
-  MacroAssembler* const masm_;
-  CompilationInfo* const info_;
-
-  int current_block_;
-  int current_instruction_;
-  const ZoneList<LInstruction*>* instructions_;
   ZoneList<LEnvironment*> deoptimizations_;
   ZoneList<Deoptimizer::JumpTableEntry> deopt_jump_table_;
   ZoneList<Handle<Object> > deoptimization_literals_;
   int inlined_function_count_;
   Scope* const scope_;
-  Status status_;
   TranslationBuffer translations_;
   ZoneList<LDeferredCode*> deferred_;
   int osr_pc_offset_;
-  int last_lazy_deopt_pc_;
   bool frame_is_built_;
 
   // Builder that keeps track of safepoints in the code. The table
@@ -441,8 +401,6 @@ class LCodeGen V8_FINAL BASE_EMBEDDED {
 
   Safepoint::Kind expected_safepoint_kind_;
 
-  int old_position_;
-
   class PushSafepointRegistersScope V8_FINAL BASE_EMBEDDED {
    public:
     PushSafepointRegistersScope(LCodeGen* codegen,
diff --git a/deps/v8/src/mips/lithium-mips.cc b/deps/v8/src/mips/lithium-mips.cc
index 4dc80226f2..fb94bc3bdf 100644
--- a/deps/v8/src/mips/lithium-mips.cc
+++ b/deps/v8/src/mips/lithium-mips.cc
@@ -417,18 +417,19 @@ void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
 }
 
 
-int LPlatformChunk::GetNextSpillIndex(bool is_double) {
+int LPlatformChunk::GetNextSpillIndex(RegisterKind kind) {
   // Skip a slot if for a double-width slot.
-  if (is_double) spill_slot_count_++;
+  if (kind == DOUBLE_REGISTERS) spill_slot_count_++;
   return spill_slot_count_++;
 }
 
 
-LOperand* LPlatformChunk::GetNextSpillSlot(bool is_double) {
-  int index = GetNextSpillIndex(is_double);
-  if (is_double) {
+LOperand* LPlatformChunk::GetNextSpillSlot(RegisterKind kind) {
+  int index = GetNextSpillIndex(kind);
+  if (kind == DOUBLE_REGISTERS) {
     return LDoubleStackSlot::Create(index, zone());
   } else {
+    ASSERT(kind == GENERAL_REGISTERS);
     return LStackSlot::Create(index, zone());
   }
 }
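GetNextSpillIndex keeps its old behavior under the new RegisterKind interface: a double needs two adjacent word-sized stack slots, so the allocator burns one index before handing out the slot. In isolation (illustrative only):

    // Sketch: spill-slot indexing where doubles take two word slots.
    enum RegisterKind { GENERAL_REGISTERS, DOUBLE_REGISTERS };
    struct SpillSlots {
      int spill_slot_count = 0;
      int GetNextSpillIndex(RegisterKind kind) {
        if (kind == DOUBLE_REGISTERS) spill_slot_count++;  // skip one slot
        return spill_slot_count++;
      }
    };
    // Requesting DOUBLE, GENERAL, DOUBLE yields indices 1, 2, 4: each double
    // leaves the preceding word free so the 8-byte value spans a slot pair.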
@@ -444,7 +445,7 @@ LPlatformChunk* LChunkBuilder::Build() {
   // which will be subsumed into this frame.
   if (graph()->has_osr()) {
     for (int i = graph()->osr()->UnoptimizedFrameSlots(); i > 0; i--) {
-      chunk_->GetNextSpillIndex(false);
+      chunk_->GetNextSpillIndex(GENERAL_REGISTERS);
     }
   }
 
@@ -660,7 +661,7 @@ LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
 
 LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
   ASSERT(!instr->HasPointerMap());
-  instr->set_pointer_map(new(zone()) LPointerMap(position_, zone()));
+  instr->set_pointer_map(new(zone()) LPointerMap(zone()));
   return instr;
 }
 
@@ -715,51 +716,44 @@ LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
 
 LInstruction* LChunkBuilder::DoShift(Token::Value op,
                                      HBitwiseBinaryOperation* instr) {
-  if (instr->representation().IsTagged()) {
-    ASSERT(instr->left()->representation().IsTagged());
-    ASSERT(instr->right()->representation().IsTagged());
-
-    LOperand* left = UseFixed(instr->left(), a1);
-    LOperand* right = UseFixed(instr->right(), a0);
-    LArithmeticT* result = new(zone()) LArithmeticT(op, left, right);
-    return MarkAsCall(DefineFixed(result, v0), instr);
-  }
-
-  ASSERT(instr->representation().IsSmiOrInteger32());
-  ASSERT(instr->left()->representation().Equals(instr->representation()));
-  ASSERT(instr->right()->representation().Equals(instr->representation()));
-  LOperand* left = UseRegisterAtStart(instr->left());
+  if (instr->representation().IsSmiOrInteger32()) {
+    ASSERT(instr->left()->representation().Equals(instr->representation()));
+    ASSERT(instr->right()->representation().Equals(instr->representation()));
+    LOperand* left = UseRegisterAtStart(instr->left());
 
-  HValue* right_value = instr->right();
-  LOperand* right = NULL;
-  int constant_value = 0;
-  bool does_deopt = false;
-  if (right_value->IsConstant()) {
-    HConstant* constant = HConstant::cast(right_value);
-    right = chunk_->DefineConstantOperand(constant);
-    constant_value = constant->Integer32Value() & 0x1f;
-    // Left shifts can deoptimize if we shift by > 0 and the result cannot be
-    // truncated to smi.
-    if (instr->representation().IsSmi() && constant_value > 0) {
-      does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToSmi);
+    HValue* right_value = instr->right();
+    LOperand* right = NULL;
+    int constant_value = 0;
+    bool does_deopt = false;
+    if (right_value->IsConstant()) {
+      HConstant* constant = HConstant::cast(right_value);
+      right = chunk_->DefineConstantOperand(constant);
+      constant_value = constant->Integer32Value() & 0x1f;
+      // Left shifts can deoptimize if we shift by > 0 and the result cannot be
+      // truncated to smi.
+      if (instr->representation().IsSmi() && constant_value > 0) {
+        does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToSmi);
+      }
+    } else {
+      right = UseRegisterAtStart(right_value);
     }
-  } else {
-    right = UseRegisterAtStart(right_value);
-  }
 
-  // Shift operations can deoptimize if we do a logical shift
-  // by 0 and the result cannot be truncated to int32.
-  if (op == Token::SHR && constant_value == 0) {
-    if (FLAG_opt_safe_uint32_operations) {
-      does_deopt = !instr->CheckFlag(HInstruction::kUint32);
-    } else {
-      does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToInt32);
+    // Shift operations can only deoptimize if we do a logical shift
+    // by 0 and the result cannot be truncated to int32.
+    if (op == Token::SHR && constant_value == 0) {
+      if (FLAG_opt_safe_uint32_operations) {
+        does_deopt = !instr->CheckFlag(HInstruction::kUint32);
+      } else {
+        does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToInt32);
+      }
     }
-  }
 
-  LInstruction* result =
-      DefineAsRegister(new(zone()) LShiftI(op, left, right, does_deopt));
-  return does_deopt ? AssignEnvironment(result) : result;
+    LInstruction* result =
+        DefineAsRegister(new(zone()) LShiftI(op, left, right, does_deopt));
+    return does_deopt ? AssignEnvironment(result) : result;
+  } else {
+    return DoArithmeticT(op, instr);
+  }
 }
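The SHR special case encodes a JavaScript corner: `x >>> 0` reinterprets an int32 as uint32, so a negative input yields a value above INT32_MAX that only stays representable if every use truncates it back. A worked example in C++ (mirroring the JS semantics):

    // Sketch: why x >>> 0 can force a deopt for negative int32 inputs.
    #include <cstdint>
    uint32_t ShrZero(int32_t x) {
      return static_cast<uint32_t>(x);  // JS: x >>> 0
    }
    // ShrZero(-1) == 4294967295u > INT32_MAX, so the result fits int32 only
    // when its uses truncate it, e.g. (x >>> 0) | 0 == -1 again.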
@@ -768,29 +762,34 @@
   ASSERT(instr->representation().IsDouble());
   ASSERT(instr->left()->representation().IsDouble());
   ASSERT(instr->right()->representation().IsDouble());
-  ASSERT(op != Token::MOD);
-  LOperand* left = UseRegisterAtStart(instr->left());
-  LOperand* right = UseRegisterAtStart(instr->right());
-  LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
-  return DefineAsRegister(result);
+  if (op == Token::MOD) {
+    LOperand* left = UseFixedDouble(instr->left(), f2);
+    LOperand* right = UseFixedDouble(instr->right(), f4);
+    LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
+    // We call a C function for double modulo. It can't trigger a GC. We need
+    // to use fixed result register for the call.
+    // TODO(fschneider): Allow any register as input registers.
+    return MarkAsCall(DefineFixedDouble(result, f2), instr);
+  } else {
+    LOperand* left = UseRegisterAtStart(instr->left());
+    LOperand* right = UseRegisterAtStart(instr->right());
+    LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
+    return DefineAsRegister(result);
+  }
 }
 
 
 LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
-                                           HArithmeticBinaryOperation* instr) {
-  ASSERT(op == Token::ADD ||
-         op == Token::DIV ||
-         op == Token::MOD ||
-         op == Token::MUL ||
-         op == Token::SUB);
+                                           HBinaryOperation* instr) {
   HValue* left = instr->left();
   HValue* right = instr->right();
   ASSERT(left->representation().IsTagged());
   ASSERT(right->representation().IsTagged());
+  LOperand* context = UseFixed(instr->context(), cp);
   LOperand* left_operand = UseFixed(left, a1);
   LOperand* right_operand = UseFixed(right, a0);
   LArithmeticT* result =
-      new(zone()) LArithmeticT(op, left_operand, right_operand);
+      new(zone()) LArithmeticT(op, context, left_operand, right_operand);
   return MarkAsCall(DefineFixed(result, v0), instr);
 }
 
@@ -866,9 +865,31 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
   HInstruction* old_current = current_instruction_;
   current_instruction_ = current;
   if (current->has_position()) position_ = current->position();
-  LInstruction* instr = current->CompileToLithium(this);
+
+  LInstruction* instr = NULL;
+  if (current->CanReplaceWithDummyUses()) {
+    HValue* first_operand = current->OperandCount() == 0
+        ? graph()->GetConstant1()
+        : current->OperandAt(0);
+    instr = DefineAsRegister(new(zone()) LDummyUse(UseAny(first_operand)));
+    for (int i = 1; i < current->OperandCount(); ++i) {
+      LInstruction* dummy =
+          new(zone()) LDummyUse(UseAny(current->OperandAt(i)));
+      dummy->set_hydrogen_value(current);
+      chunk_->AddInstruction(dummy, current_block_);
+    }
+  } else {
+    instr = current->CompileToLithium(this);
+  }
+
+  argument_count_ += current->argument_delta();
+  ASSERT(argument_count_ >= 0);
 
   if (instr != NULL) {
+    // Associate the hydrogen instruction first, since we may need it for
+    // the ClobbersRegisters() or ClobbersDoubleRegisters() calls below.
+    instr->set_hydrogen_value(current);
+
 #if DEBUG
     // Make sure that the lithium instruction has either no fixed register
     // constraints in temps or the result OR no uses that are only used at
@@ -898,14 +919,12 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) {
     }
 #endif
 
-    instr->set_position(position_);
     if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) {
       instr = AssignPointerMap(instr);
     }
     if (FLAG_stress_environments && !instr->HasEnvironment()) {
       instr = AssignEnvironment(instr);
     }
-    instr->set_hydrogen_value(current);
     chunk_->AddInstruction(instr, current_block_);
   }
   current_instruction_ = old_current;
 }
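CanReplaceWithDummyUses lets the builder drop an instruction whose value is not actually needed while keeping its operands alive for the register allocator: the first operand is wrapped in a result-defining LDummyUse and each further operand gets a use-only dummy. Schematically (stand-in types; a sketch of the shape only, not V8's API):

    // Sketch: dead instruction -> dummy uses that preserve live ranges.
    #include <vector>
    struct HValue;
    struct LInstr { const HValue* use; bool defines_result; };
    std::vector<LInstr> ReplaceWithDummyUses(
        const std::vector<const HValue*>& operands, const HValue* constant_one) {
      std::vector<LInstr> out;
      // The first dummy also defines the (never-read) result.
      out.push_back({operands.empty() ? constant_one : operands[0], true});
      for (size_t i = 1; i < operands.size(); ++i) {
        out.push_back({operands[i], false});  // use-only dummies
      }
      return out;
    }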
@@ -997,19 +1016,15 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
 
 
 LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
-  return new(zone()) LGoto(instr->FirstSuccessor()->block_id());
+  return new(zone()) LGoto(instr->FirstSuccessor());
 }
 
 
 LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
-  HValue* value = instr->value();
-  if (value->EmitAtUses()) {
-    HBasicBlock* successor = HConstant::cast(value)->BooleanValue()
-        ? instr->FirstSuccessor()
-        : instr->SecondSuccessor();
-    return new(zone()) LGoto(successor->block_id());
-  }
+  LInstruction* goto_instr = CheckElideControlInstruction(instr);
+  if (goto_instr != NULL) return goto_instr;
 
+  HValue* value = instr->value();
   LBranch* result = new(zone()) LBranch(UseRegister(value));
   // Tagged values that are not known smis or booleans require a
   // deoptimization environment. If the instruction is generic no
@@ -1047,8 +1062,9 @@ LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
 
 
 LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
   LInstanceOf* result =
-      new(zone()) LInstanceOf(UseFixed(instr->left(), a0),
+      new(zone()) LInstanceOf(context, UseFixed(instr->left(), a0),
                               UseFixed(instr->right(), a1));
   return MarkAsCall(DefineFixed(result, v0), instr);
 }
 
@@ -1057,18 +1073,14 @@ LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
 LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
     HInstanceOfKnownGlobal* instr) {
   LInstanceOfKnownGlobal* result =
-      new(zone()) LInstanceOfKnownGlobal(UseFixed(instr->left(), a0),
-                                         FixedTemp(t0));
+      new(zone()) LInstanceOfKnownGlobal(
+          UseFixed(instr->context(), cp),
+          UseFixed(instr->left(), a0),
+          FixedTemp(t0));
   return MarkAsCall(DefineFixed(result, v0), instr);
 }
 
 
-LInstruction* LChunkBuilder::DoInstanceSize(HInstanceSize* instr) {
-  LOperand* object = UseRegisterAtStart(instr->object());
-  return DefineAsRegister(new(zone()) LInstanceSize(object));
-}
-
-
 LInstruction* LChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) {
   LOperand* receiver = UseRegisterAtStart(instr->receiver());
   LOperand* function = UseRegisterAtStart(instr->function());
@@ -1091,7 +1103,6 @@ LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
 
 
 LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) {
-  ++argument_count_;
   LOperand* argument = Use(instr->argument());
   return new(zone()) LPushArgument(argument);
 }
 
@@ -1122,14 +1133,13 @@ LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) {
 
 
 LInstruction* LChunkBuilder::DoContext(HContext* instr) {
-  // If there is a non-return use, the context must be allocated in a register.
-  for (HUseIterator it(instr->uses()); !it.Done(); it.Advance()) {
-    if (!it.value()->IsReturn()) {
-      return DefineAsRegister(new(zone()) LContext);
-    }
+  if (instr->HasNoUses()) return NULL;
+
+  if (info()->IsStub()) {
+    return DefineFixed(new(zone()) LContext, cp);
   }
 
-  return NULL;
+  return DefineAsRegister(new(zone()) LContext);
 }
 
 
@@ -1140,7 +1150,8 @@ LInstruction* LChunkBuilder::DoOuterContext(HOuterContext* instr) {
 
 
 LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
-  return MarkAsCall(new(zone()) LDeclareGlobals, instr);
+  LOperand* context = UseFixed(instr->context(), cp);
+  return MarkAsCall(new(zone()) LDeclareGlobals(context), instr);
 }
 
 
@@ -1158,15 +1169,14 @@ LInstruction* LChunkBuilder::DoGlobalReceiver(HGlobalReceiver* instr) {
 
 LInstruction* LChunkBuilder::DoCallConstantFunction(
     HCallConstantFunction* instr) {
-  argument_count_ -= instr->argument_count();
   return MarkAsCall(DefineFixed(new(zone()) LCallConstantFunction, v0), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
   LOperand* function = UseFixed(instr->function(), a1);
-  argument_count_ -= instr->argument_count();
-  LInvokeFunction* result = new(zone()) LInvokeFunction(function);
+  LInvokeFunction* result = new(zone()) LInvokeFunction(context, function);
   return MarkAsCall(DefineFixed(result, v0), instr, CANNOT_DEOPTIMIZE_EAGERLY);
 }
 
@@ -1221,7 +1231,7 @@ LInstruction* LChunkBuilder::DoMathTan(HUnaryMathOperation* instr) {
 LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) {
   ASSERT(instr->representation().IsDouble());
   ASSERT(instr->value()->representation().IsDouble());
-  LOperand* input = UseTempRegister(instr->value());
+  LOperand* input = UseRegister(instr->value());
   LOperand* temp1 = TempRegister();
   LOperand* temp2 = TempRegister();
   LOperand* double_temp = FixedTemp(f6);  // Chosen by fair dice roll.
@@ -1240,8 +1250,12 @@ LInstruction* LChunkBuilder::DoMathPowHalf(HUnaryMathOperation* instr) {
 
 
 LInstruction* LChunkBuilder::DoMathAbs(HUnaryMathOperation* instr) {
+  Representation r = instr->value()->representation();
+  LOperand* context = (r.IsDouble() || r.IsSmiOrInteger32())
+      ? NULL
+      : UseFixed(instr->context(), cp);
   LOperand* input = UseRegister(instr->value());
-  LMathAbs* result = new(zone()) LMathAbs(input);
+  LMathAbs* result = new(zone()) LMathAbs(context, input);
   return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
 }
 
@@ -1271,57 +1285,57 @@ LInstruction* LChunkBuilder::DoMathRound(HUnaryMathOperation* instr) {
 
 LInstruction* LChunkBuilder::DoCallKeyed(HCallKeyed* instr) {
   ASSERT(instr->key()->representation().IsTagged());
-  argument_count_ -= instr->argument_count();
+  LOperand* context = UseFixed(instr->context(), cp);
   LOperand* key = UseFixed(instr->key(), a2);
-  return MarkAsCall(DefineFixed(new(zone()) LCallKeyed(key), v0), instr);
+  return MarkAsCall(
+      DefineFixed(new(zone()) LCallKeyed(context, key), v0), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoCallNamed(HCallNamed* instr) {
-  argument_count_ -= instr->argument_count();
-  return MarkAsCall(DefineFixed(new(zone()) LCallNamed, v0), instr);
+  LOperand* context = UseFixed(instr->context(), cp);
+  return MarkAsCall(DefineFixed(new(zone()) LCallNamed(context), v0), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoCallGlobal(HCallGlobal* instr) {
-  argument_count_ -= instr->argument_count();
-  return MarkAsCall(DefineFixed(new(zone()) LCallGlobal, v0), instr);
+  LOperand* context = UseFixed(instr->context(), cp);
+  return MarkAsCall(DefineFixed(new(zone()) LCallGlobal(context), v0), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoCallKnownGlobal(HCallKnownGlobal* instr) {
-  argument_count_ -= instr->argument_count();
   return MarkAsCall(DefineFixed(new(zone()) LCallKnownGlobal, v0), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
   LOperand* constructor = UseFixed(instr->constructor(), a1);
-  argument_count_ -= instr->argument_count();
-  LCallNew* result = new(zone()) LCallNew(constructor);
+  LCallNew* result = new(zone()) LCallNew(context, constructor);
   return MarkAsCall(DefineFixed(result, v0), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
   LOperand* constructor = UseFixed(instr->constructor(), a1);
-  argument_count_ -= instr->argument_count();
-  LCallNewArray* result = new(zone()) LCallNewArray(constructor);
+  LCallNewArray* result = new(zone()) LCallNewArray(context, constructor);
   return MarkAsCall(DefineFixed(result, v0), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
   LOperand* function = UseFixed(instr->function(), a1);
-  argument_count_ -= instr->argument_count();
-  return MarkAsCall(DefineFixed(new(zone()) LCallFunction(function), v0),
-                    instr);
+  return MarkAsCall(
+      DefineFixed(new(zone()) LCallFunction(context, function), v0), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
-  argument_count_ -= instr->argument_count();
-  return MarkAsCall(DefineFixed(new(zone()) LCallRuntime, v0), instr);
+  LOperand* context = UseFixed(instr->context(), cp);
+  return MarkAsCall(DefineFixed(new(zone()) LCallRuntime(context), v0), instr);
 }
 
 
@@ -1349,33 +1363,27 @@ LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
   if (instr->representation().IsSmiOrInteger32()) {
     ASSERT(instr->left()->representation().Equals(instr->representation()));
     ASSERT(instr->right()->representation().Equals(instr->representation()));
+    ASSERT(instr->CheckFlag(HValue::kTruncatingToInt32));
 
     LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
     LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
     return DefineAsRegister(new(zone()) LBitI(left, right));
   } else {
-    ASSERT(instr->representation().IsTagged());
-    ASSERT(instr->left()->representation().IsTagged());
-    ASSERT(instr->right()->representation().IsTagged());
-
-    LOperand* left = UseFixed(instr->left(), a1);
-    LOperand* right = UseFixed(instr->right(), a0);
-    LArithmeticT* result = new(zone()) LArithmeticT(instr->op(), left, right);
-    return MarkAsCall(DefineFixed(result, v0), instr);
+    return DoArithmeticT(instr->op(), instr);
  }
 }
 
 
 LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
-  if (instr->representation().IsDouble()) {
-    return DoArithmeticD(Token::DIV, instr);
-  } else if (instr->representation().IsSmiOrInteger32()) {
+  if (instr->representation().IsSmiOrInteger32()) {
     ASSERT(instr->left()->representation().Equals(instr->representation()));
     ASSERT(instr->right()->representation().Equals(instr->representation()));
     LOperand* dividend = UseRegister(instr->left());
     LOperand* divisor = UseRegister(instr->right());
     LDivI* div = new(zone()) LDivI(dividend, divisor);
     return AssignEnvironment(DefineAsRegister(div));
+  } else if (instr->representation().IsDouble()) {
+    return DoArithmeticD(Token::DIV, instr);
   } else {
     return DoArithmeticT(Token::DIV, instr);
   }
@@ -1466,17 +1474,10 @@ LInstruction* LChunkBuilder::DoMod(HMod* instr) {
           ? AssignEnvironment(result)
           : result;
     }
-  } else if (instr->representation().IsTagged()) {
-    return DoArithmeticT(Token::MOD, instr);
+  } else if (instr->representation().IsDouble()) {
+    return DoArithmeticD(Token::MOD, instr);
   } else {
-    ASSERT(instr->representation().IsDouble());
-    // We call a C function for double modulo. It can't trigger a GC. We need
-    // to use fixed result register for the call.
-    // TODO(fschneider): Allow any register as input registers.
-    LArithmeticD* mod = new(zone()) LArithmeticD(Token::MOD,
-                                                 UseFixedDouble(left, f2),
-                                                 UseFixedDouble(right, f4));
-    return MarkAsCall(DefineFixedDouble(mod, f2), instr);
+    return DoArithmeticT(Token::MOD, instr);
   }
 }
 
 
@@ -1485,20 +1486,39 @@ LInstruction* LChunkBuilder::DoMul(HMul* instr) {
   if (instr->representation().IsSmiOrInteger32()) {
     ASSERT(instr->left()->representation().Equals(instr->representation()));
     ASSERT(instr->right()->representation().Equals(instr->representation()));
-    LOperand* left;
-    LOperand* right = UseOrConstant(instr->BetterRightOperand());
-    LOperand* temp = NULL;
-    if (instr->CheckFlag(HValue::kBailoutOnMinusZero) &&
-        (instr->CheckFlag(HValue::kCanOverflow) ||
-        !right->IsConstantOperand())) {
-      left = UseRegister(instr->BetterLeftOperand());
-      temp = TempRegister();
+    HValue* left = instr->BetterLeftOperand();
+    HValue* right = instr->BetterRightOperand();
+    LOperand* left_op;
+    LOperand* right_op;
+    bool can_overflow = instr->CheckFlag(HValue::kCanOverflow);
+    bool bailout_on_minus_zero = instr->CheckFlag(HValue::kBailoutOnMinusZero);
+
+    if (right->IsConstant()) {
+      HConstant* constant = HConstant::cast(right);
+      int32_t constant_value = constant->Integer32Value();
+      // Constants -1, 0 and 1 can be optimized if the result can overflow.
+      // For other constants, it can be optimized only without overflow.
+      if (!can_overflow || ((constant_value >= -1) && (constant_value <= 1))) {
+        left_op = UseRegisterAtStart(left);
+        right_op = UseConstant(right);
+      } else {
+        if (bailout_on_minus_zero) {
+          left_op = UseRegister(left);
+        } else {
+          left_op = UseRegisterAtStart(left);
+        }
+        right_op = UseRegister(right);
+      }
     } else {
-      left = UseRegisterAtStart(instr->BetterLeftOperand());
+      if (bailout_on_minus_zero) {
+        left_op = UseRegister(left);
+      } else {
+        left_op = UseRegisterAtStart(left);
+      }
+      right_op = UseRegister(right);
    }
-    LMulI* mul = new(zone()) LMulI(left, right, temp);
-    if (instr->CheckFlag(HValue::kCanOverflow) ||
-        instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    LMulI* mul = new(zone()) LMulI(left_op, right_op);
+    if (can_overflow || bailout_on_minus_zero) {
      AssignEnvironment(mul);
    }
    return DefineAsRegister(mul);
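The -1/0/1 carve-out in DoMul works because those multipliers have exact, overflow-aware replacements: x*0 is 0, x*1 is x, and x*-1 is 0-x, whose single overflow case (INT32_MIN) a subtraction overflow check catches; other constants are only safe to strength-reduce when overflow cannot occur. A sketch using a checked-arithmetic builtin that GCC and Clang provide:

    // Sketch: overflow-aware multiply by -1, 0 or 1 as int32 operations.
    #include <cstdint>
    bool MulBySmallConstant(int32_t x, int c, int32_t* out) {
      switch (c) {
        case 0:  *out = 0; return true;                      // x * 0
        case 1:  *out = x; return true;                      // x * 1
        case -1: return !__builtin_sub_overflow(0, x, out);  // 0 - x
        default: return false;  // general LMulI path with its own checks
      }
    }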
@@ -1579,7 +1599,6 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
     }
     return DoArithmeticD(Token::ADD, instr);
   } else {
-    ASSERT(instr->representation().IsTagged());
     return DoArithmeticT(Token::ADD, instr);
   }
 }
 
@@ -1637,9 +1656,10 @@ LInstruction* LChunkBuilder::DoRandom(HRandom* instr) {
 LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
   ASSERT(instr->left()->representation().IsTagged());
   ASSERT(instr->right()->representation().IsTagged());
+  LOperand* context = UseFixed(instr->context(), cp);
   LOperand* left = UseFixed(instr->left(), a1);
   LOperand* right = UseFixed(instr->right(), a0);
-  LCmpT* result = new(zone()) LCmpT(left, right);
+  LCmpT* result = new(zone()) LCmpT(context, left, right);
   return MarkAsCall(DefineFixed(result, v0), instr);
 }
 
@@ -1666,6 +1686,8 @@ LInstruction* LChunkBuilder::DoCompareNumericAndBranch(
 
 LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
     HCompareObjectEqAndBranch* instr) {
+  LInstruction* goto_instr = CheckElideControlInstruction(instr);
+  if (goto_instr != NULL) return goto_instr;
   LOperand* left = UseRegisterAtStart(instr->left());
   LOperand* right = UseRegisterAtStart(instr->right());
   return new(zone()) LCmpObjectEqAndBranch(left, right);
@@ -1674,8 +1696,8 @@ LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
 
 LInstruction* LChunkBuilder::DoCompareHoleAndBranch(
     HCompareHoleAndBranch* instr) {
-  LOperand* object = UseRegisterAtStart(instr->object());
-  return new(zone()) LCmpHoleAndBranch(object);
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return new(zone()) LCmpHoleAndBranch(value);
 }
 
 
@@ -1713,10 +1735,11 @@ LInstruction* LChunkBuilder::DoStringCompareAndBranch(
     HStringCompareAndBranch* instr) {
   ASSERT(instr->left()->representation().IsTagged());
   ASSERT(instr->right()->representation().IsTagged());
+  LOperand* context = UseFixed(instr->context(), cp);
   LOperand* left = UseFixed(instr->left(), a1);
   LOperand* right = UseFixed(instr->right(), a0);
   LStringCompareAndBranch* result =
-      new(zone()) LStringCompareAndBranch(left, right);
+      new(zone()) LStringCompareAndBranch(context, left, right);
   return MarkAsCall(result, instr);
 }
 
@@ -1783,11 +1806,9 @@ LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
 
 LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) {
   LOperand* string = UseRegister(instr->string());
-  LOperand* index = UseRegister(instr->index());
-  LOperand* value = UseTempRegister(instr->value());
-  LSeqStringSetChar* result =
-      new(zone()) LSeqStringSetChar(instr->encoding(), string, index, value);
-  return DefineAsRegister(result);
+  LOperand* index = UseRegisterOrConstant(instr->index());
+  LOperand* value = UseRegister(instr->value());
+  return new(zone()) LSeqStringSetChar(instr->encoding(), string, index, value);
 }
 
 
@@ -1805,9 +1826,17 @@ LInstruction* LChunkBuilder::DoBoundsCheckBaseIndexInformation(
 }
 
 
+LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
+  // The control instruction marking the end of a block that completed
+  // abruptly (e.g., threw an exception). There is nothing specific to do.
+  return NULL;
+}
+
+
 LInstruction* LChunkBuilder::DoThrow(HThrow* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
   LOperand* value = UseFixed(instr->value(), a0);
-  return MarkAsCall(new(zone()) LThrow(value), instr);
+  return MarkAsCall(new(zone()) LThrow(context, value), instr);
 }
 
 
@@ -1836,7 +1865,6 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
     }
     if (from.IsTagged()) {
       if (to.IsDouble()) {
-        info()->MarkAsDeferredCalling();
         LOperand* value = UseRegister(instr->value());
         LNumberUntagD* res = new(zone()) LNumberUntagD(value);
         return AssignEnvironment(DefineAsRegister(res));
@@ -1940,12 +1968,6 @@ LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
 }
 
 
-LInstruction* LChunkBuilder::DoIsNumberAndBranch(HIsNumberAndBranch* instr) {
-  return new(zone())
-      LIsNumberAndBranch(UseRegisterOrConstantAtStart(instr->value()));
-}
-
-
 LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
   LOperand* value = UseRegisterAtStart(instr->value());
   LInstruction* result = new(zone()) LCheckInstanceType(value);
@@ -1994,8 +2016,11 @@ LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
 
 
 LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
+  LOperand* context = info()->IsStub()
+      ? UseFixed(instr->context(), cp)
+      : NULL;
   LOperand* parameter_count = UseRegisterOrConstant(instr->parameter_count());
-  return new(zone()) LReturn(UseFixed(instr->value(), v0),
+  return new(zone()) LReturn(UseFixed(instr->value(), v0), context,
                              parameter_count);
 }
 
@@ -2028,8 +2053,10 @@ LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
 
 
 LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
   LOperand* global_object = UseFixed(instr->global_object(), a0);
-  LLoadGlobalGeneric* result = new(zone()) LLoadGlobalGeneric(global_object);
+  LLoadGlobalGeneric* result =
+      new(zone()) LLoadGlobalGeneric(context, global_object);
   return MarkAsCall(DefineFixed(result, v0), instr);
 }
 
@@ -2045,10 +2072,11 @@ LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
 
 
 LInstruction* LChunkBuilder::DoStoreGlobalGeneric(HStoreGlobalGeneric* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
   LOperand* global_object = UseFixed(instr->global_object(), a1);
   LOperand* value = UseFixed(instr->value(), a0);
   LStoreGlobalGeneric* result =
-      new(zone()) LStoreGlobalGeneric(global_object, value);
+      new(zone()) LStoreGlobalGeneric(context, global_object, value);
   return MarkAsCall(result, instr);
 }
 
@@ -2083,8 +2111,10 @@ LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
 
 
 LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
   LOperand* object = UseFixed(instr->object(), a0);
-  LInstruction* result = DefineFixed(new(zone()) LLoadNamedGeneric(object), v0);
+  LInstruction* result =
+      DefineFixed(new(zone()) LLoadNamedGeneric(context, object), v0);
   return MarkAsCall(result, instr);
 }
 
@@ -2096,6 +2126,11 @@ LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
 }
 
 
+LInstruction* LChunkBuilder::DoLoadRoot(HLoadRoot* instr) {
+  return DefineAsRegister(new(zone()) LLoadRoot);
+}
+
+
 LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
     HLoadExternalArrayPointer* instr) {
   LOperand* input = UseRegisterAtStart(instr->value());
@@ -2112,7 +2147,7 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
   if (!instr->is_external()) {
     LOperand* obj = NULL;
     if (instr->representation().IsDouble()) {
-      obj = UseTempRegister(instr->elements());
+      obj = UseRegister(instr->elements());
     } else {
       ASSERT(instr->representation().IsSmiOrTagged());
       obj = UseRegisterAtStart(instr->elements());
@@ -2140,18 +2175,17 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
 
 
 LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
   LOperand* object = UseFixed(instr->object(), a1);
   LOperand* key = UseFixed(instr->key(), a0);
 
   LInstruction* result =
-      DefineFixed(new(zone()) LLoadKeyedGeneric(object, key), v0);
+      DefineFixed(new(zone()) LLoadKeyedGeneric(context, object, key), v0);
   return MarkAsCall(result, instr);
 }
 
 
 LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
-  ElementsKind elements_kind = instr->elements_kind();
-
   if (!instr->is_external()) {
     ASSERT(instr->elements()->representation().IsTagged());
     bool needs_write_barrier = instr->NeedsWriteBarrier();
@@ -2162,14 +2196,18 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
     if (instr->value()->representation().IsDouble()) {
       object = UseRegisterAtStart(instr->elements());
       key = UseRegisterOrConstantAtStart(instr->key());
-      val = UseTempRegister(instr->value());
+      val = UseRegister(instr->value());
     } else {
       ASSERT(instr->value()->representation().IsSmiOrTagged());
-      object = UseTempRegister(instr->elements());
-      val = needs_write_barrier ? UseTempRegister(instr->value())
-          : UseRegisterAtStart(instr->value());
-      key = needs_write_barrier ? UseTempRegister(instr->key())
-          : UseRegisterOrConstantAtStart(instr->key());
+      if (needs_write_barrier) {
+        object = UseTempRegister(instr->elements());
+        val = UseTempRegister(instr->value());
+        key = UseTempRegister(instr->key());
+      } else {
+        object = UseRegisterAtStart(instr->elements());
+        val = UseRegisterAtStart(instr->value());
+        key = UseRegisterOrConstantAtStart(instr->key());
+      }
     }
 
     return new(zone()) LStoreKeyed(object, key, val);
@@ -2177,17 +2215,13 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
 
   ASSERT(
       (instr->value()->representation().IsInteger32() &&
-       (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
-       (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
+       (instr->elements_kind() != EXTERNAL_FLOAT_ELEMENTS) &&
+       (instr->elements_kind() != EXTERNAL_DOUBLE_ELEMENTS)) ||
       (instr->value()->representation().IsDouble() &&
-       ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
-        (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
+       ((instr->elements_kind() == EXTERNAL_FLOAT_ELEMENTS) ||
+        (instr->elements_kind() == EXTERNAL_DOUBLE_ELEMENTS))));
   ASSERT(instr->elements()->representation().IsExternal());
-  bool val_is_temp_register =
-      elements_kind == EXTERNAL_PIXEL_ELEMENTS ||
-      elements_kind == EXTERNAL_FLOAT_ELEMENTS;
-  LOperand* val = val_is_temp_register ? UseTempRegister(instr->value())
-      : UseRegister(instr->value());
+  LOperand* val = UseRegister(instr->value());
   LOperand* key = UseRegisterOrConstantAtStart(instr->key());
   LOperand* external_pointer = UseRegister(instr->elements());
 
@@ -2196,6 +2230,7 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
 
 
 LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
   LOperand* obj = UseFixed(instr->object(), a2);
   LOperand* key = UseFixed(instr->key(), a1);
   LOperand* val = UseFixed(instr->value(), a0);
@@ -2204,7 +2239,8 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
   ASSERT(instr->key()->representation().IsTagged());
   ASSERT(instr->value()->representation().IsTagged());
 
-  return MarkAsCall(new(zone()) LStoreKeyedGeneric(obj, key, val), instr);
+  return MarkAsCall(
+      new(zone()) LStoreKeyedGeneric(context, obj, key, val), instr);
 }
 
 
@@ -2214,11 +2250,12 @@ LInstruction* LChunkBuilder::DoTransitionElementsKind(
   if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) {
     LOperand* new_map_reg = TempRegister();
     LTransitionElementsKind* result =
-        new(zone()) LTransitionElementsKind(object, new_map_reg);
+        new(zone()) LTransitionElementsKind(object, NULL, new_map_reg);
     return result;
   } else {
+    LOperand* context = UseFixed(instr->context(), cp);
    LTransitionElementsKind* result =
-        new(zone()) LTransitionElementsKind(object, NULL);
+        new(zone()) LTransitionElementsKind(object, context, NULL);
    return AssignPointerMap(result);
  }
}
 
@@ -2277,56 +2314,68 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
 
 
 LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
   LOperand* obj = UseFixed(instr->object(), a1);
   LOperand* val = UseFixed(instr->value(), a0);
 
-  LInstruction* result = new(zone()) LStoreNamedGeneric(obj, val);
+  LInstruction* result = new(zone()) LStoreNamedGeneric(context, obj, val);
   return MarkAsCall(result, instr);
 }
 
 
 LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
   LOperand* left = UseRegisterAtStart(instr->left());
   LOperand* right = UseRegisterAtStart(instr->right());
-  return MarkAsCall(DefineFixed(new(zone()) LStringAdd(left, right), v0),
-                    instr);
+  return MarkAsCall(
+      DefineFixed(new(zone()) LStringAdd(context, left, right), v0),
+      instr);
 }
 
 
 LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
   LOperand* string = UseTempRegister(instr->string());
   LOperand* index = UseTempRegister(instr->index());
-  LStringCharCodeAt* result = new(zone()) LStringCharCodeAt(string, index);
+  LOperand* context = UseAny(instr->context());
+  LStringCharCodeAt* result =
+      new(zone()) LStringCharCodeAt(context, string, index);
   return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
 }
 
 
 LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) {
   LOperand* char_code = UseRegister(instr->value());
-  LStringCharFromCode* result = new(zone()) LStringCharFromCode(char_code);
+  LOperand* context = UseAny(instr->context());
+  LStringCharFromCode* result =
+      new(zone()) LStringCharFromCode(context, char_code);
   return AssignPointerMap(DefineAsRegister(result));
 }
 
 
 LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
   info()->MarkAsDeferredCalling();
+  LOperand* context = UseAny(instr->context());
   LOperand* size = instr->size()->IsConstant()
      ? UseConstant(instr->size())
      : UseTempRegister(instr->size());
   LOperand* temp1 = TempRegister();
   LOperand* temp2 = TempRegister();
-  LAllocate* result = new(zone()) LAllocate(size, temp1, temp2);
+  LAllocate* result = new(zone()) LAllocate(context, size, temp1, temp2);
   return AssignPointerMap(DefineAsRegister(result));
 }
 
 
 LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) {
-  return MarkAsCall(DefineFixed(new(zone()) LRegExpLiteral, v0), instr);
+  LOperand* context = UseFixed(instr->context(), cp);
+  return MarkAsCall(
+      DefineFixed(new(zone()) LRegExpLiteral(context), v0), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) {
-  return MarkAsCall(DefineFixed(new(zone()) LFunctionLiteral, v0), instr);
+  LOperand* context = UseFixed(instr->context(), cp);
+  return MarkAsCall(
+      DefineFixed(new(zone()) LFunctionLiteral(context), v0), instr);
 }
 
 
@@ -2373,8 +2422,8 @@ LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
 
 
 LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) {
-  argument_count_ -= instr->argument_count();
-  return MarkAsCall(DefineFixed(new(zone()) LCallStub, v0), instr);
+  LOperand* context = UseFixed(instr->context(), cp);
+  return MarkAsCall(DefineFixed(new(zone()) LCallStub(context), v0), instr);
 }
 
 
@@ -2419,7 +2468,8 @@ LInstruction* LChunkBuilder::DoToFastProperties(HToFastProperties* instr) {
 
 
 LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
-  LTypeof* result = new(zone()) LTypeof(UseFixed(instr->value(), a0));
+  LOperand* context = UseFixed(instr->context(), cp);
+  LTypeof* result = new(zone()) LTypeof(context, UseFixed(instr->value(), a0));
   return MarkAsCall(DefineFixed(result, v0), instr);
 }
 
@@ -2458,10 +2508,13 @@ LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
 
 LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
   if (instr->is_function_entry()) {
-    return MarkAsCall(new(zone()) LStackCheck, instr);
+    LOperand* context = UseFixed(instr->context(), cp);
+    return MarkAsCall(new(zone()) LStackCheck(context), instr);
   } else {
     ASSERT(instr->is_backwards_branch());
-    return AssignEnvironment(AssignPointerMap(new(zone()) LStackCheck));
+    LOperand* context = UseAny(instr->context());
+    return AssignEnvironment(
+        AssignPointerMap(new(zone()) LStackCheck(context)));
  }
}
 
@@ -2494,7 +2547,7 @@ LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
   if (env->entry()->arguments_pushed()) {
     int argument_count = env->arguments_environment()->parameter_count();
     pop = new(zone()) LDrop(argument_count);
-    argument_count_ -= argument_count;
+    ASSERT(instr->argument_delta() == -argument_count);
   }
 
   HEnvironment* outer = current_block_->last_environment()->
@@ -2506,8 +2559,9 @@ LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
 
 
 LInstruction* LChunkBuilder::DoForInPrepareMap(HForInPrepareMap* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
   LOperand* object = UseFixed(instr->enumerable(), a0);
-  LForInPrepareMap* result = new(zone()) LForInPrepareMap(object);
+  LForInPrepareMap* result = new(zone()) LForInPrepareMap(context, object);
   return MarkAsCall(DefineFixed(result, v0), instr, CAN_DEOPTIMIZE_EAGERLY);
 }
 
diff --git a/deps/v8/src/mips/lithium-mips.h b/deps/v8/src/mips/lithium-mips.h
index 29a8eac63f..301be8fdf2 100644
--- a/deps/v8/src/mips/lithium-mips.h
+++ b/deps/v8/src/mips/lithium-mips.h
@@ -105,7 +105,6 @@ class LCodeGen;
   V(InnerAllocatedObject)                       \
   V(InstanceOf)                                 \
   V(InstanceOfKnownGlobal)                      \
-  V(InstanceSize)                               \
   V(InstructionGap)                             \
V(Integer32ToDouble) \ V(Integer32ToSmi) \ @@ -113,13 +112,13 @@ class LCodeGen; V(IsConstructCallAndBranch) \ V(IsObjectAndBranch) \ V(IsStringAndBranch) \ - V(IsNumberAndBranch) \ V(IsSmiAndBranch) \ V(IsUndetectableAndBranch) \ V(Label) \ V(LazyBailout) \ V(LoadContextSlot) \ V(LoadExternalArrayPointer) \ + V(LoadRoot) \ V(LoadFieldByIndex) \ V(LoadFunctionPrototype) \ V(LoadGlobalCell) \ @@ -213,7 +212,6 @@ class LInstruction : public ZoneObject { : environment_(NULL), hydrogen_value_(NULL), bit_field_(IsCallBits::encode(false)) { - set_position(RelocInfo::kNoPosition); } virtual ~LInstruction() {} @@ -254,15 +252,6 @@ class LInstruction : public ZoneObject { LPointerMap* pointer_map() const { return pointer_map_.get(); } bool HasPointerMap() const { return pointer_map_.is_set(); } - // The 31 bits PositionBits is used to store the int position value. And the - // position value may be RelocInfo::kNoPosition (-1). The accessor always - // +1/-1 so that the encoded value of position in bit_field_ is always >= 0 - // and can fit into the 31 bits PositionBits. - void set_position(int pos) { - bit_field_ = PositionBits::update(bit_field_, pos + 1); - } - int position() { return PositionBits::decode(bit_field_) - 1; } - void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; } HValue* hydrogen_value() const { return hydrogen_value_; } @@ -274,7 +263,7 @@ class LInstruction : public ZoneObject { // Interface to the register allocator and iterators. bool ClobbersTemps() const { return IsCall(); } bool ClobbersRegisters() const { return IsCall(); } - bool ClobbersDoubleRegisters() const { return IsCall(); } + virtual bool ClobbersDoubleRegisters() const { return IsCall(); } // Interface to the register allocator and iterators. bool IsMarkedAsCall() const { return IsCall(); } @@ -302,7 +291,6 @@ class LInstruction : public ZoneObject { virtual LOperand* TempAt(int i) = 0; class IsCallBits: public BitField<bool, 0, 1> {}; - class PositionBits: public BitField<int, 1, 31> {}; LEnvironment* environment_; SetOncePointer<LPointerMap> pointer_map_; @@ -401,17 +389,17 @@ class LInstructionGap V8_FINAL : public LGap { class LGoto V8_FINAL : public LTemplateInstruction<0, 0, 0> { public: - explicit LGoto(int block_id) : block_id_(block_id) { } + explicit LGoto(HBasicBlock* block) : block_(block) { } virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE; DECLARE_CONCRETE_INSTRUCTION(Goto, "goto") virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; virtual bool IsControl() const V8_OVERRIDE { return true; } - int block_id() const { return block_id_; } + int block_id() const { return block_->block_id(); } private: - int block_id_; + HBasicBlock* block_; }; @@ -482,8 +470,14 @@ class LParameter V8_FINAL : public LTemplateInstruction<1, 0, 0> { }; -class LCallStub V8_FINAL : public LTemplateInstruction<1, 0, 0> { +class LCallStub V8_FINAL : public LTemplateInstruction<1, 1, 0> { public: + explicit LCallStub(LOperand* context) { + inputs_[0] = context; + } + + LOperand* context() { return inputs_[0]; } + DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub") DECLARE_HYDROGEN_ACCESSOR(CallStub) @@ -688,17 +682,15 @@ class LMathFloorOfDiv V8_FINAL : public LTemplateInstruction<1, 2, 1> { }; -class LMulI V8_FINAL : public LTemplateInstruction<1, 2, 1> { +class LMulI V8_FINAL : public LTemplateInstruction<1, 2, 0> { public: - LMulI(LOperand* left, LOperand* right, LOperand* temp) { + LMulI(LOperand* left, LOperand* right) { inputs_[0] = left; inputs_[1] = right; - temps_[0] = temp; } 
LOperand* left() { return inputs_[0]; } LOperand* right() { return inputs_[1]; } - LOperand* temp() { return temps_[0]; } DECLARE_CONCRETE_INSTRUCTION(MulI, "mul-i") DECLARE_HYDROGEN_ACCESSOR(Mul) @@ -782,12 +774,14 @@ class LMathRound V8_FINAL : public LTemplateInstruction<1, 1, 1> { }; -class LMathAbs V8_FINAL : public LTemplateInstruction<1, 1, 0> { +class LMathAbs V8_FINAL : public LTemplateInstruction<1, 2, 0> { public: - explicit LMathAbs(LOperand* value) { + LMathAbs(LOperand* context, LOperand* value) { + inputs_[1] = context; inputs_[0] = value; } + LOperand* context() { return inputs_[1]; } LOperand* value() { return inputs_[0]; } DECLARE_CONCRETE_INSTRUCTION(MathAbs, "math-abs") @@ -936,19 +930,6 @@ class LIsObjectAndBranch V8_FINAL : public LControlInstruction<1, 1> { }; -class LIsNumberAndBranch V8_FINAL : public LControlInstruction<1, 0> { - public: - explicit LIsNumberAndBranch(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(IsNumberAndBranch, "is-number-and-branch") - DECLARE_HYDROGEN_ACCESSOR(IsNumberAndBranch) -}; - - class LIsStringAndBranch V8_FINAL : public LControlInstruction<1, 1> { public: LIsStringAndBranch(LOperand* value, LOperand* temp) { @@ -999,15 +980,17 @@ class LIsUndetectableAndBranch V8_FINAL : public LControlInstruction<1, 1> { }; -class LStringCompareAndBranch V8_FINAL : public LControlInstruction<2, 0> { +class LStringCompareAndBranch V8_FINAL : public LControlInstruction<3, 0> { public: - LStringCompareAndBranch(LOperand* left, LOperand* right) { - inputs_[0] = left; - inputs_[1] = right; + LStringCompareAndBranch(LOperand* context, LOperand* left, LOperand* right) { + inputs_[0] = context; + inputs_[1] = left; + inputs_[2] = right; } - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } + LOperand* context() { return inputs_[0]; } + LOperand* left() { return inputs_[1]; } + LOperand* right() { return inputs_[2]; } DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch, "string-compare-and-branch") @@ -1083,15 +1066,17 @@ class LClassOfTestAndBranch V8_FINAL : public LControlInstruction<1, 1> { }; -class LCmpT V8_FINAL : public LTemplateInstruction<1, 2, 0> { +class LCmpT V8_FINAL : public LTemplateInstruction<1, 3, 0> { public: - LCmpT(LOperand* left, LOperand* right) { - inputs_[0] = left; - inputs_[1] = right; + LCmpT(LOperand* context, LOperand* left, LOperand* right) { + inputs_[0] = context; + inputs_[1] = left; + inputs_[2] = right; } - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } + LOperand* context() { return inputs_[0]; } + LOperand* left() { return inputs_[1]; } + LOperand* right() { return inputs_[2]; } DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t") DECLARE_HYDROGEN_ACCESSOR(CompareGeneric) @@ -1100,28 +1085,32 @@ class LCmpT V8_FINAL : public LTemplateInstruction<1, 2, 0> { }; -class LInstanceOf V8_FINAL : public LTemplateInstruction<1, 2, 0> { +class LInstanceOf V8_FINAL : public LTemplateInstruction<1, 3, 0> { public: - LInstanceOf(LOperand* left, LOperand* right) { - inputs_[0] = left; - inputs_[1] = right; + LInstanceOf(LOperand* context, LOperand* left, LOperand* right) { + inputs_[0] = context; + inputs_[1] = left; + inputs_[2] = right; } - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } + LOperand* context() { return inputs_[0]; } + LOperand* left() { return inputs_[1]; } + LOperand* right() { return inputs_[2]; } DECLARE_CONCRETE_INSTRUCTION(InstanceOf, 
"instance-of") }; -class LInstanceOfKnownGlobal V8_FINAL : public LTemplateInstruction<1, 1, 1> { +class LInstanceOfKnownGlobal V8_FINAL : public LTemplateInstruction<1, 2, 1> { public: - LInstanceOfKnownGlobal(LOperand* value, LOperand* temp) { - inputs_[0] = value; + LInstanceOfKnownGlobal(LOperand* context, LOperand* value, LOperand* temp) { + inputs_[0] = context; + inputs_[1] = value; temps_[0] = temp; } - LOperand* value() { return inputs_[0]; } + LOperand* context() { return inputs_[0]; } + LOperand* value() { return inputs_[1]; } LOperand* temp() { return temps_[0]; } DECLARE_CONCRETE_INSTRUCTION(InstanceOfKnownGlobal, @@ -1142,19 +1131,6 @@ class LInstanceOfKnownGlobal V8_FINAL : public LTemplateInstruction<1, 1, 1> { }; -class LInstanceSize V8_FINAL : public LTemplateInstruction<1, 1, 0> { - public: - explicit LInstanceSize(LOperand* object) { - inputs_[0] = object; - } - - LOperand* object() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(InstanceSize, "instance-size") - DECLARE_HYDROGEN_ACCESSOR(InstanceSize) -}; - - class LBoundsCheck V8_FINAL : public LTemplateInstruction<0, 2, 0> { public: LBoundsCheck(LOperand* index, LOperand* length) { @@ -1300,7 +1276,7 @@ class LCmpMapAndBranch V8_FINAL : public LControlInstruction<1, 1> { DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch") DECLARE_HYDROGEN_ACCESSOR(CompareMap) - Handle<Map> map() const { return hydrogen()->map(); } + Handle<Map> map() const { return hydrogen()->map().handle(); } }; @@ -1355,8 +1331,8 @@ class LDateField V8_FINAL : public LTemplateInstruction<1, 1, 1> { LOperand* temp() { return temps_[0]; } Smi* index() const { return index_; } - DECLARE_CONCRETE_INSTRUCTION(ValueOf, "date-field") - DECLARE_HYDROGEN_ACCESSOR(ValueOf) + DECLARE_CONCRETE_INSTRUCTION(DateField, "date-field") + DECLARE_HYDROGEN_ACCESSOR(DateField) private: Smi* index_; @@ -1387,13 +1363,15 @@ class LSeqStringSetChar V8_FINAL : public LTemplateInstruction<1, 3, 0> { }; -class LThrow V8_FINAL : public LTemplateInstruction<0, 1, 0> { +class LThrow V8_FINAL : public LTemplateInstruction<0, 2, 0> { public: - explicit LThrow(LOperand* value) { - inputs_[0] = value; + LThrow(LOperand* context, LOperand* value) { + inputs_[0] = context; + inputs_[1] = value; } - LOperand* value() { return inputs_[0]; } + LOperand* context() { return inputs_[0]; } + LOperand* value() { return inputs_[1]; } DECLARE_CONCRETE_INSTRUCTION(Throw, "throw") }; @@ -1489,16 +1467,21 @@ class LArithmeticD V8_FINAL : public LTemplateInstruction<1, 2, 0> { }; -class LArithmeticT V8_FINAL : public LTemplateInstruction<1, 2, 0> { +class LArithmeticT V8_FINAL : public LTemplateInstruction<1, 3, 0> { public: - LArithmeticT(Token::Value op, LOperand* left, LOperand* right) + LArithmeticT(Token::Value op, + LOperand* context, + LOperand* left, + LOperand* right) : op_(op) { - inputs_[0] = left; - inputs_[1] = right; + inputs_[0] = context; + inputs_[1] = left; + inputs_[2] = right; } - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } + LOperand* context() { return inputs_[0]; } + LOperand* left() { return inputs_[1]; } + LOperand* right() { return inputs_[2]; } Token::Value op() const { return op_; } virtual Opcode opcode() const V8_FINAL { return LInstruction::kArithmeticT; } @@ -1510,11 +1493,12 @@ class LArithmeticT V8_FINAL : public LTemplateInstruction<1, 2, 0> { }; -class LReturn V8_FINAL : public LTemplateInstruction<0, 2, 0> { +class LReturn V8_FINAL : public LTemplateInstruction<0, 3, 0> { public: - explicit 
LReturn(LOperand* value, LOperand* parameter_count) { + LReturn(LOperand* value, LOperand* context, LOperand* parameter_count) { inputs_[0] = value; - inputs_[1] = parameter_count; + inputs_[1] = context; + inputs_[2] = parameter_count; } LOperand* value() { return inputs_[0]; } @@ -1526,7 +1510,7 @@ class LReturn V8_FINAL : public LTemplateInstruction<0, 2, 0> { ASSERT(has_constant_parameter_count()); return LConstantOperand::cast(parameter_count()); } - LOperand* parameter_count() { return inputs_[1]; } + LOperand* parameter_count() { return inputs_[2]; } DECLARE_CONCRETE_INSTRUCTION(Return, "return") }; @@ -1545,13 +1529,15 @@ class LLoadNamedField V8_FINAL : public LTemplateInstruction<1, 1, 0> { }; -class LLoadNamedGeneric V8_FINAL : public LTemplateInstruction<1, 1, 0> { +class LLoadNamedGeneric V8_FINAL : public LTemplateInstruction<1, 2, 0> { public: - explicit LLoadNamedGeneric(LOperand* object) { - inputs_[0] = object; + LLoadNamedGeneric(LOperand* context, LOperand* object) { + inputs_[0] = context; + inputs_[1] = object; } - LOperand* object() { return inputs_[0]; } + LOperand* context() { return inputs_[0]; } + LOperand* object() { return inputs_[1]; } DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic") DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric) @@ -1573,6 +1559,15 @@ class LLoadFunctionPrototype V8_FINAL : public LTemplateInstruction<1, 1, 0> { }; +class LLoadRoot V8_FINAL : public LTemplateInstruction<1, 0, 0> { + public: + DECLARE_CONCRETE_INSTRUCTION(LoadRoot, "load-root") + DECLARE_HYDROGEN_ACCESSOR(LoadRoot) + + Heap::RootListIndex index() const { return hydrogen()->index(); } +}; + + class LLoadExternalArrayPointer V8_FINAL : public LTemplateInstruction<1, 1, 0> { public: @@ -1611,15 +1606,17 @@ class LLoadKeyed V8_FINAL : public LTemplateInstruction<1, 2, 0> { }; -class LLoadKeyedGeneric V8_FINAL : public LTemplateInstruction<1, 2, 0> { +class LLoadKeyedGeneric V8_FINAL : public LTemplateInstruction<1, 3, 0> { public: - LLoadKeyedGeneric(LOperand* object, LOperand* key) { - inputs_[0] = object; - inputs_[1] = key; + LLoadKeyedGeneric(LOperand* context, LOperand* object, LOperand* key) { + inputs_[0] = context; + inputs_[1] = object; + inputs_[2] = key; } - LOperand* object() { return inputs_[0]; } - LOperand* key() { return inputs_[1]; } + LOperand* context() { return inputs_[0]; } + LOperand* object() { return inputs_[1]; } + LOperand* key() { return inputs_[2]; } DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic") }; @@ -1632,13 +1629,15 @@ class LLoadGlobalCell V8_FINAL : public LTemplateInstruction<1, 0, 0> { }; -class LLoadGlobalGeneric V8_FINAL : public LTemplateInstruction<1, 1, 0> { +class LLoadGlobalGeneric V8_FINAL : public LTemplateInstruction<1, 2, 0> { public: - explicit LLoadGlobalGeneric(LOperand* global_object) { - inputs_[0] = global_object; + LLoadGlobalGeneric(LOperand* context, LOperand* global_object) { + inputs_[0] = context; + inputs_[1] = global_object; } - LOperand* global_object() { return inputs_[0]; } + LOperand* context() { return inputs_[0]; } + LOperand* global_object() { return inputs_[1]; } DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic") DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric) @@ -1663,16 +1662,19 @@ class LStoreGlobalCell V8_FINAL : public LTemplateInstruction<0, 1, 1> { }; -class LStoreGlobalGeneric V8_FINAL : public LTemplateInstruction<0, 2, 0> { +class LStoreGlobalGeneric V8_FINAL : public LTemplateInstruction<0, 3, 0> { public: - explicit LStoreGlobalGeneric(LOperand* 
global_object, - LOperand* value) { - inputs_[0] = global_object; - inputs_[1] = value; + LStoreGlobalGeneric(LOperand* context, + LOperand* global_object, + LOperand* value) { + inputs_[0] = context; + inputs_[1] = global_object; + inputs_[2] = value; } - LOperand* global_object() { return inputs_[0]; } - LOperand* value() { return inputs_[1]; } + LOperand* context() { return inputs_[0]; } + LOperand* global_object() { return inputs_[1]; } + LOperand* value() { return inputs_[2]; } DECLARE_CONCRETE_INSTRUCTION(StoreGlobalGeneric, "store-global-generic") DECLARE_HYDROGEN_ACCESSOR(StoreGlobalGeneric) @@ -1802,8 +1804,14 @@ class LOuterContext V8_FINAL : public LTemplateInstruction<1, 1, 0> { }; -class LDeclareGlobals V8_FINAL : public LTemplateInstruction<0, 0, 0> { +class LDeclareGlobals V8_FINAL : public LTemplateInstruction<0, 1, 0> { public: + explicit LDeclareGlobals(LOperand* context) { + inputs_[0] = context; + } + + LOperand* context() { return inputs_[0]; } + DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals, "declare-globals") DECLARE_HYDROGEN_ACCESSOR(DeclareGlobals) }; @@ -1845,13 +1853,15 @@ class LCallConstantFunction V8_FINAL : public LTemplateInstruction<1, 0, 0> { }; -class LInvokeFunction V8_FINAL : public LTemplateInstruction<1, 1, 0> { +class LInvokeFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> { public: - explicit LInvokeFunction(LOperand* function) { - inputs_[0] = function; + LInvokeFunction(LOperand* context, LOperand* function) { + inputs_[0] = context; + inputs_[1] = function; } - LOperand* function() { return inputs_[0]; } + LOperand* context() { return inputs_[0]; } + LOperand* function() { return inputs_[1]; } DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function") DECLARE_HYDROGEN_ACCESSOR(InvokeFunction) @@ -1862,13 +1872,15 @@ class LInvokeFunction V8_FINAL : public LTemplateInstruction<1, 1, 0> { }; -class LCallKeyed V8_FINAL : public LTemplateInstruction<1, 1, 0> { +class LCallKeyed V8_FINAL : public LTemplateInstruction<1, 2, 0> { public: - explicit LCallKeyed(LOperand* key) { - inputs_[0] = key; + LCallKeyed(LOperand* context, LOperand* key) { + inputs_[0] = context; + inputs_[1] = key; } - LOperand* key() { return inputs_[0]; } + LOperand* context() { return inputs_[0]; } + LOperand* key() { return inputs_[1]; } DECLARE_CONCRETE_INSTRUCTION(CallKeyed, "call-keyed") DECLARE_HYDROGEN_ACCESSOR(CallKeyed) @@ -1880,8 +1892,14 @@ class LCallKeyed V8_FINAL : public LTemplateInstruction<1, 1, 0> { -class LCallNamed V8_FINAL : public LTemplateInstruction<1, 0, 0> { +class LCallNamed V8_FINAL : public LTemplateInstruction<1, 1, 0> { public: + explicit LCallNamed(LOperand* context) { + inputs_[0] = context; + } + + LOperand* context() { return inputs_[0]; } + DECLARE_CONCRETE_INSTRUCTION(CallNamed, "call-named") DECLARE_HYDROGEN_ACCESSOR(CallNamed) @@ -1892,13 +1910,15 @@ class LCallNamed V8_FINAL : public LTemplateInstruction<1, 0, 0> { }; -class LCallFunction V8_FINAL : public LTemplateInstruction<1, 1, 0> { +class LCallFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> { public: - explicit LCallFunction(LOperand* function) { - inputs_[0] = function; + LCallFunction(LOperand* context, LOperand* function) { + inputs_[0] = context; + inputs_[1] = function; } - LOperand* function() { return inputs_[0]; } + LOperand* context() { return inputs_[0]; } + LOperand* function() { return inputs_[1]; } DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function") DECLARE_HYDROGEN_ACCESSOR(CallFunction) @@ -1907,8 +1927,14 @@ class LCallFunction V8_FINAL : 
public LTemplateInstruction<1, 1, 0> { }; -class LCallGlobal V8_FINAL : public LTemplateInstruction<1, 0, 0> { +class LCallGlobal V8_FINAL : public LTemplateInstruction<1, 1, 0> { public: + explicit LCallGlobal(LOperand* context) { + inputs_[0] = context; + } + + LOperand* context() { return inputs_[0]; } + DECLARE_CONCRETE_INSTRUCTION(CallGlobal, "call-global") DECLARE_HYDROGEN_ACCESSOR(CallGlobal) @@ -1930,13 +1956,15 @@ class LCallKnownGlobal V8_FINAL : public LTemplateInstruction<1, 0, 0> { }; -class LCallNew V8_FINAL : public LTemplateInstruction<1, 1, 0> { +class LCallNew V8_FINAL : public LTemplateInstruction<1, 2, 0> { public: - explicit LCallNew(LOperand* constructor) { - inputs_[0] = constructor; + LCallNew(LOperand* context, LOperand* constructor) { + inputs_[0] = context; + inputs_[1] = constructor; } - LOperand* constructor() { return inputs_[0]; } + LOperand* context() { return inputs_[0]; } + LOperand* constructor() { return inputs_[1]; } DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new") DECLARE_HYDROGEN_ACCESSOR(CallNew) @@ -1947,13 +1975,15 @@ class LCallNew V8_FINAL : public LTemplateInstruction<1, 1, 0> { }; -class LCallNewArray V8_FINAL : public LTemplateInstruction<1, 1, 0> { +class LCallNewArray V8_FINAL : public LTemplateInstruction<1, 2, 0> { public: - explicit LCallNewArray(LOperand* constructor) { - inputs_[0] = constructor; + LCallNewArray(LOperand* context, LOperand* constructor) { + inputs_[0] = context; + inputs_[1] = constructor; } - LOperand* constructor() { return inputs_[0]; } + LOperand* context() { return inputs_[0]; } + LOperand* constructor() { return inputs_[1]; } DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array") DECLARE_HYDROGEN_ACCESSOR(CallNewArray) @@ -1964,13 +1994,24 @@ class LCallNewArray V8_FINAL : public LTemplateInstruction<1, 1, 0> { }; -class LCallRuntime V8_FINAL : public LTemplateInstruction<1, 0, 0> { +class LCallRuntime V8_FINAL : public LTemplateInstruction<1, 1, 0> { public: + explicit LCallRuntime(LOperand* context) { + inputs_[0] = context; + } + + LOperand* context() { return inputs_[0]; } + DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime") DECLARE_HYDROGEN_ACCESSOR(CallRuntime) + virtual bool ClobbersDoubleRegisters() const V8_OVERRIDE { + return save_doubles() == kDontSaveFPRegs; + } + const Runtime::Function* function() const { return hydrogen()->function(); } int arity() const { return hydrogen()->argument_count(); } + SaveFPRegsMode save_doubles() const { return hydrogen()->save_doubles(); } }; @@ -2099,7 +2140,7 @@ class LTaggedToI V8_FINAL : public LTemplateInstruction<1, 1, 2> { LOperand* temp2() { return temps_[1]; } DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i") - DECLARE_HYDROGEN_ACCESSOR(UnaryOperation) + DECLARE_HYDROGEN_ACCESSOR(Change) bool truncating() { return hydrogen()->CanTruncateToInt32(); } }; @@ -2171,15 +2212,17 @@ class LStoreNamedField V8_FINAL : public LTemplateInstruction<0, 2, 1> { }; -class LStoreNamedGeneric V8_FINAL : public LTemplateInstruction<0, 2, 0> { +class LStoreNamedGeneric V8_FINAL : public LTemplateInstruction<0, 3, 0> { public: - LStoreNamedGeneric(LOperand* object, LOperand* value) { - inputs_[0] = object; - inputs_[1] = value; + LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value) { + inputs_[0] = context; + inputs_[1] = object; + inputs_[2] = value; } - LOperand* object() { return inputs_[0]; } - LOperand* value() { return inputs_[1]; } + LOperand* context() { return inputs_[0]; } + LOperand* object() { return inputs_[1]; } + LOperand* 
value() { return inputs_[2]; } DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic") DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric) @@ -2216,17 +2259,22 @@ class LStoreKeyed V8_FINAL : public LTemplateInstruction<0, 3, 0> { }; -class LStoreKeyedGeneric V8_FINAL : public LTemplateInstruction<0, 3, 0> { +class LStoreKeyedGeneric V8_FINAL : public LTemplateInstruction<0, 4, 0> { public: - LStoreKeyedGeneric(LOperand* obj, LOperand* key, LOperand* value) { - inputs_[0] = obj; - inputs_[1] = key; - inputs_[2] = value; + LStoreKeyedGeneric(LOperand* context, + LOperand* obj, + LOperand* key, + LOperand* value) { + inputs_[0] = context; + inputs_[1] = obj; + inputs_[2] = key; + inputs_[3] = value; } - LOperand* object() { return inputs_[0]; } - LOperand* key() { return inputs_[1]; } - LOperand* value() { return inputs_[2]; } + LOperand* context() { return inputs_[0]; } + LOperand* object() { return inputs_[1]; } + LOperand* key() { return inputs_[2]; } + LOperand* value() { return inputs_[3]; } DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic") DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric) @@ -2237,14 +2285,17 @@ class LStoreKeyedGeneric V8_FINAL : public LTemplateInstruction<0, 3, 0> { }; -class LTransitionElementsKind V8_FINAL : public LTemplateInstruction<0, 1, 1> { +class LTransitionElementsKind V8_FINAL : public LTemplateInstruction<0, 2, 1> { public: LTransitionElementsKind(LOperand* object, + LOperand* context, LOperand* new_map_temp) { inputs_[0] = object; + inputs_[1] = context; temps_[0] = new_map_temp; } + LOperand* context() { return inputs_[1]; } LOperand* object() { return inputs_[0]; } LOperand* new_map_temp() { return temps_[0]; } @@ -2254,8 +2305,10 @@ class LTransitionElementsKind V8_FINAL : public LTemplateInstruction<0, 1, 1> { virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; - Handle<Map> original_map() { return hydrogen()->original_map(); } - Handle<Map> transitioned_map() { return hydrogen()->transitioned_map(); } + Handle<Map> original_map() { return hydrogen()->original_map().handle(); } + Handle<Map> transitioned_map() { + return hydrogen()->transitioned_map().handle(); + } ElementsKind from_kind() { return hydrogen()->from_kind(); } ElementsKind to_kind() { return hydrogen()->to_kind(); } }; @@ -2277,15 +2330,17 @@ class LTrapAllocationMemento V8_FINAL : public LTemplateInstruction<0, 1, 1> { }; -class LStringAdd V8_FINAL : public LTemplateInstruction<1, 2, 0> { +class LStringAdd V8_FINAL : public LTemplateInstruction<1, 3, 0> { public: - LStringAdd(LOperand* left, LOperand* right) { - inputs_[0] = left; - inputs_[1] = right; + LStringAdd(LOperand* context, LOperand* left, LOperand* right) { + inputs_[0] = context; + inputs_[1] = left; + inputs_[2] = right; } - LOperand* left() { return inputs_[0]; } - LOperand* right() { return inputs_[1]; } + LOperand* context() { return inputs_[0]; } + LOperand* left() { return inputs_[1]; } + LOperand* right() { return inputs_[2]; } DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add") DECLARE_HYDROGEN_ACCESSOR(StringAdd) @@ -2293,28 +2348,32 @@ class LStringAdd V8_FINAL : public LTemplateInstruction<1, 2, 0> { -class LStringCharCodeAt V8_FINAL : public LTemplateInstruction<1, 2, 0> { +class LStringCharCodeAt V8_FINAL : public LTemplateInstruction<1, 3, 0> { public: - LStringCharCodeAt(LOperand* string, LOperand* index) { - inputs_[0] = string; - inputs_[1] = index; + LStringCharCodeAt(LOperand* context, LOperand* string, LOperand* index) { + inputs_[0] = context; + inputs_[1] = string; + 
inputs_[2] = index; } - LOperand* string() { return inputs_[0]; } - LOperand* index() { return inputs_[1]; } + LOperand* context() { return inputs_[0]; } + LOperand* string() { return inputs_[1]; } + LOperand* index() { return inputs_[2]; } DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string-char-code-at") DECLARE_HYDROGEN_ACCESSOR(StringCharCodeAt) }; -class LStringCharFromCode V8_FINAL : public LTemplateInstruction<1, 1, 0> { +class LStringCharFromCode V8_FINAL : public LTemplateInstruction<1, 2, 0> { public: - explicit LStringCharFromCode(LOperand* char_code) { - inputs_[0] = char_code; + explicit LStringCharFromCode(LOperand* context, LOperand* char_code) { + inputs_[0] = context; + inputs_[1] = char_code; } - LOperand* char_code() { return inputs_[0]; } + LOperand* context() { return inputs_[0]; } + LOperand* char_code() { return inputs_[1]; } DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode, "string-char-from-code") DECLARE_HYDROGEN_ACCESSOR(StringCharFromCode) @@ -2427,12 +2486,17 @@ class LClampTToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 1> { class LAllocate V8_FINAL : public LTemplateInstruction<1, 2, 2> { public: - LAllocate(LOperand* size, LOperand* temp1, LOperand* temp2) { + LAllocate(LOperand* context, + LOperand* size, + LOperand* temp1, + LOperand* temp2) { + inputs_[0] = context; inputs_[1] = size; temps_[0] = temp1; temps_[1] = temp2; } + LOperand* context() { return inputs_[0]; } LOperand* size() { return inputs_[1]; } LOperand* temp1() { return temps_[0]; } LOperand* temp2() { return temps_[1]; } @@ -2442,15 +2506,27 @@ class LAllocate V8_FINAL : public LTemplateInstruction<1, 2, 2> { }; -class LRegExpLiteral V8_FINAL : public LTemplateInstruction<1, 0, 0> { +class LRegExpLiteral V8_FINAL : public LTemplateInstruction<1, 1, 0> { public: + explicit LRegExpLiteral(LOperand* context) { + inputs_[0] = context; + } + + LOperand* context() { return inputs_[0]; } + DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral, "regexp-literal") DECLARE_HYDROGEN_ACCESSOR(RegExpLiteral) }; -class LFunctionLiteral V8_FINAL : public LTemplateInstruction<1, 0, 0> { +class LFunctionLiteral V8_FINAL : public LTemplateInstruction<1, 1, 0> { public: + explicit LFunctionLiteral(LOperand* context) { + inputs_[0] = context; + } + + LOperand* context() { return inputs_[0]; } + DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral, "function-literal") DECLARE_HYDROGEN_ACCESSOR(FunctionLiteral) }; @@ -2469,13 +2545,15 @@ class LToFastProperties V8_FINAL : public LTemplateInstruction<1, 1, 0> { }; -class LTypeof V8_FINAL : public LTemplateInstruction<1, 1, 0> { +class LTypeof V8_FINAL : public LTemplateInstruction<1, 2, 0> { public: - explicit LTypeof(LOperand* value) { - inputs_[0] = value; + LTypeof(LOperand* context, LOperand* value) { + inputs_[0] = context; + inputs_[1] = value; } - LOperand* value() { return inputs_[0]; } + LOperand* context() { return inputs_[0]; } + LOperand* value() { return inputs_[1]; } DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof") }; @@ -2522,8 +2600,14 @@ class LOsrEntry V8_FINAL : public LTemplateInstruction<0, 0, 0> { }; -class LStackCheck V8_FINAL : public LTemplateInstruction<0, 0, 0> { +class LStackCheck V8_FINAL : public LTemplateInstruction<0, 1, 0> { public: + explicit LStackCheck(LOperand* context) { + inputs_[0] = context; + } + + LOperand* context() { return inputs_[0]; } + DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check") DECLARE_HYDROGEN_ACCESSOR(StackCheck) @@ -2534,13 +2618,15 @@ class LStackCheck V8_FINAL : public LTemplateInstruction<0, 0, 0> { }; -class 
LForInPrepareMap V8_FINAL : public LTemplateInstruction<1, 1, 0> { +class LForInPrepareMap V8_FINAL : public LTemplateInstruction<1, 2, 0> { public: - explicit LForInPrepareMap(LOperand* object) { - inputs_[0] = object; + LForInPrepareMap(LOperand* context, LOperand* object) { + inputs_[0] = context; + inputs_[1] = object; } - LOperand* object() { return inputs_[0]; } + LOperand* context() { return inputs_[0]; } + LOperand* object() { return inputs_[1]; } DECLARE_CONCRETE_INSTRUCTION(ForInPrepareMap, "for-in-prepare-map") }; @@ -2596,8 +2682,8 @@ class LPlatformChunk V8_FINAL : public LChunk { LPlatformChunk(CompilationInfo* info, HGraph* graph) : LChunk(info, graph) { } - int GetNextSpillIndex(bool is_double); - LOperand* GetNextSpillSlot(bool is_double); + int GetNextSpillIndex(RegisterKind kind); + LOperand* GetNextSpillSlot(RegisterKind kind); }; @@ -2621,6 +2707,8 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED { // Build the sequence for the graph. LPlatformChunk* Build(); + LInstruction* CheckElideControlInstruction(HControlInstruction* instr); + // Declare methods that deal with the individual node types. #define DECLARE_DO(type) LInstruction* Do##type(H##type* node); HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO) @@ -2753,7 +2841,7 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED { LInstruction* DoArithmeticD(Token::Value op, HArithmeticBinaryOperation* instr); LInstruction* DoArithmeticT(Token::Value op, - HArithmeticBinaryOperation* instr); + HBinaryOperation* instr); LPlatformChunk* chunk_; CompilationInfo* info_; diff --git a/deps/v8/src/mips/macro-assembler-mips.cc b/deps/v8/src/mips/macro-assembler-mips.cc index a85b0d8034..e0cb1ba824 100644 --- a/deps/v8/src/mips/macro-assembler-mips.cc +++ b/deps/v8/src/mips/macro-assembler-mips.cc @@ -35,6 +35,7 @@ #include "codegen.h" #include "cpu-profiler.h" #include "debug.h" +#include "isolate-inl.h" #include "runtime.h" namespace v8 { @@ -248,10 +249,6 @@ void MacroAssembler::RecordWrite(Register object, SmiCheck smi_check) { ASSERT(!AreAliased(object, address, value, t8)); ASSERT(!AreAliased(object, address, value, t9)); - // The compiled code assumes that record write doesn't change the - // context register, so we check that none of the clobbered - // registers are cp. - ASSERT(!address.is(cp) && !value.is(cp)); if (emit_debug_code()) { lw(at, MemOperand(address)); @@ -3220,11 +3217,10 @@ void MacroAssembler::CopyBytes(Register src, Register dst, Register length, Register scratch) { - Label align_loop, align_loop_1, word_loop, byte_loop, byte_loop_1, done; + Label align_loop_1, word_loop, byte_loop, byte_loop_1, done; // Align src before copying in word size chunks. - bind(&align_loop); - Branch(&done, eq, length, Operand(zero_reg)); + Branch(&byte_loop, le, length, Operand(kPointerSize)); bind(&align_loop_1); And(scratch, src, kPointerSize - 1); Branch(&word_loop, eq, scratch, Operand(zero_reg)); @@ -3233,7 +3229,7 @@ void MacroAssembler::CopyBytes(Register src, sb(scratch, MemOperand(dst)); Addu(dst, dst, 1); Subu(length, length, Operand(1)); - Branch(&byte_loop_1, ne, length, Operand(zero_reg)); + Branch(&align_loop_1, ne, length, Operand(zero_reg)); // Copy bytes in word size chunks. 
bind(&word_loop); @@ -3847,12 +3843,14 @@ static int AddressOffset(ExternalReference ref0, ExternalReference ref1) { } -void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function, - Address function_address, - ExternalReference thunk_ref, - Register thunk_last_arg, - int stack_space, - int return_value_offset_from_fp) { +void MacroAssembler::CallApiFunctionAndReturn( + ExternalReference function, + Address function_address, + ExternalReference thunk_ref, + Register thunk_last_arg, + int stack_space, + MemOperand return_value_operand, + MemOperand* context_restore_operand) { ExternalReference next_address = ExternalReference::handle_scope_next_address(isolate()); const int kNextOffset = 0; @@ -3915,12 +3913,13 @@ void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function, } Label promote_scheduled_exception; + Label exception_handled; Label delete_allocated_handles; Label leave_exit_frame; Label return_value_loaded; // Load value from ReturnValue. - lw(v0, MemOperand(fp, return_value_offset_from_fp*kPointerSize)); + lw(v0, return_value_operand); bind(&return_value_loaded); // No more valid handles (the result handle was the last one). Restore @@ -3941,14 +3940,23 @@ void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function, li(at, Operand(ExternalReference::scheduled_exception_address(isolate()))); lw(t1, MemOperand(at)); Branch(&promote_scheduled_exception, ne, t0, Operand(t1)); + bind(&exception_handled); + + bool restore_context = context_restore_operand != NULL; + if (restore_context) { + lw(cp, *context_restore_operand); + } li(s0, Operand(stack_space)); - LeaveExitFrame(false, s0, true); + LeaveExitFrame(false, s0, !restore_context, EMIT_RETURN); bind(&promote_scheduled_exception); - TailCallExternalReference( - ExternalReference(Runtime::kPromoteScheduledException, isolate()), - 0, - 1); + { + FrameScope frame(this, StackFrame::INTERNAL); + CallExternalReference( + ExternalReference(Runtime::kPromoteScheduledException, isolate()), + 0); + } + jmp(&exception_handled); // HandleScope limit has changed. Delete allocated extensions. bind(&delete_allocated_handles); @@ -4125,7 +4133,8 @@ void MacroAssembler::SubuAndCheckForOverflow(Register dst, void MacroAssembler::CallRuntime(const Runtime::Function* f, - int num_arguments) { + int num_arguments, + SaveFPRegsMode save_doubles) { // All parameters are on the stack. v0 has the return value after call. // If the expected number of arguments of the runtime function is @@ -4142,25 +4151,11 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f, // smarter. 
PrepareCEntryArgs(num_arguments); PrepareCEntryFunction(ExternalReference(f, isolate())); - CEntryStub stub(1); - CallStub(&stub); -} - - -void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) { - const Runtime::Function* function = Runtime::FunctionForId(id); - PrepareCEntryArgs(function->nargs); - PrepareCEntryFunction(ExternalReference(function, isolate())); - CEntryStub stub(1, kSaveFPRegs); + CEntryStub stub(1, save_doubles); CallStub(&stub); } -void MacroAssembler::CallRuntime(Runtime::FunctionId fid, int num_arguments) { - CallRuntime(Runtime::FunctionForId(fid), num_arguments); -} - - void MacroAssembler::CallExternalReference(const ExternalReference& ext, int num_arguments, BranchDelaySlot bd) { @@ -4591,6 +4586,40 @@ void MacroAssembler::LoadNumberAsInt32(Register object, } +void MacroAssembler::Prologue(PrologueFrameMode frame_mode) { + if (frame_mode == BUILD_STUB_FRAME) { + Push(ra, fp, cp); + Push(Smi::FromInt(StackFrame::STUB)); + // Adjust FP to point to saved FP. + Addu(fp, sp, Operand(2 * kPointerSize)); + } else { + PredictableCodeSizeScope predictible_code_size_scope( + this, kNoCodeAgeSequenceLength * Assembler::kInstrSize); + // The following three instructions must remain together and unmodified + // for code aging to work properly. + if (isolate()->IsCodePreAgingActive()) { + // Pre-age the code. + Code* stub = Code::GetPreAgedCodeAgeStub(isolate()); + nop(Assembler::CODE_AGE_MARKER_NOP); + // Save the function's original return address + // (it will be clobbered by Call(t9)) + mov(at, ra); + // Load the stub address to t9 and call it + li(t9, + Operand(reinterpret_cast<uint32_t>(stub->instruction_start()))); + Call(t9); + // Record the stub address in the empty space for GetCodeAgeAndParity() + dd(reinterpret_cast<uint32_t>(stub->instruction_start())); + } else { + Push(ra, fp, cp, a1); + nop(Assembler::CODE_AGE_SEQUENCE_NOP); + // Adjust fp to point to caller's fp. + Addu(fp, sp, Operand(2 * kPointerSize)); + } + } +} + + void MacroAssembler::EnterFrame(StackFrame::Type type) { addiu(sp, sp, -5 * kPointerSize); li(t8, Operand(Smi::FromInt(type))); @@ -4684,6 +4713,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count, + bool restore_context, bool do_return) { // Optionally restore all double registers. if (save_doubles) { @@ -4700,9 +4730,12 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, sw(zero_reg, MemOperand(t8)); // Restore current context from top and clear it in debug mode. - li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate()))); - lw(cp, MemOperand(t8)); + if (restore_context) { + li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate()))); + lw(cp, MemOperand(t8)); + } #ifdef DEBUG + li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate()))); sw(a3, MemOperand(t8)); #endif @@ -4929,6 +4962,86 @@ void MacroAssembler::JumpIfNotHeapNumber(Register object, } +void MacroAssembler::LookupNumberStringCache(Register object, + Register result, + Register scratch1, + Register scratch2, + Register scratch3, + Label* not_found) { + // Use of registers. Register result is used as a temporary. + Register number_string_cache = result; + Register mask = scratch3; + + // Load the number string cache. + LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex); + + // Make the hash mask from the length of the number string cache. It + // contains two elements (number and string) for each cache entry. 
+ lw(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset)); + // Divide length by two (length is a smi). + sra(mask, mask, kSmiTagSize + 1); + Addu(mask, mask, -1); // Make mask. + + // Calculate the entry in the number string cache. The hash value in the + // number string cache for smis is just the smi value, and the hash for + // doubles is the xor of the upper and lower words. See + // Heap::GetNumberStringCache. + Label is_smi; + Label load_result_from_cache; + JumpIfSmi(object, &is_smi); + CheckMap(object, + scratch1, + Heap::kHeapNumberMapRootIndex, + not_found, + DONT_DO_SMI_CHECK); + + STATIC_ASSERT(8 == kDoubleSize); + Addu(scratch1, + object, + Operand(HeapNumber::kValueOffset - kHeapObjectTag)); + lw(scratch2, MemOperand(scratch1, kPointerSize)); + lw(scratch1, MemOperand(scratch1, 0)); + Xor(scratch1, scratch1, Operand(scratch2)); + And(scratch1, scratch1, Operand(mask)); + + // Calculate address of entry in string cache: each entry consists + // of two pointer sized fields. + sll(scratch1, scratch1, kPointerSizeLog2 + 1); + Addu(scratch1, number_string_cache, scratch1); + + Register probe = mask; + lw(probe, FieldMemOperand(scratch1, FixedArray::kHeaderSize)); + JumpIfSmi(probe, not_found); + ldc1(f12, FieldMemOperand(object, HeapNumber::kValueOffset)); + ldc1(f14, FieldMemOperand(probe, HeapNumber::kValueOffset)); + BranchF(&load_result_from_cache, NULL, eq, f12, f14); + Branch(not_found); + + bind(&is_smi); + Register scratch = scratch1; + sra(scratch, object, 1); // Shift away the tag. + And(scratch, mask, Operand(scratch)); + + // Calculate address of entry in string cache: each entry consists + // of two pointer sized fields. + sll(scratch, scratch, kPointerSizeLog2 + 1); + Addu(scratch, number_string_cache, scratch); + + // Check if the entry is the smi we are looking for. + lw(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize)); + Branch(not_found, ne, object, Operand(probe)); + + // Get the result from the cache. 
+ bind(&load_result_from_cache); + lw(result, FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize)); + + IncrementCounter(isolate()->counters()->number_to_string_native(), + 1, + scratch1, + scratch2); +} + + void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings( Register first, Register second, @@ -5492,23 +5605,24 @@ void MacroAssembler::ClampDoubleToUint8(Register result_reg, void MacroAssembler::TestJSArrayForAllocationMemento( Register receiver_reg, Register scratch_reg, + Label* no_memento_found, Condition cond, Label* allocation_memento_present) { - Label no_memento_available; ExternalReference new_space_start = ExternalReference::new_space_start(isolate()); ExternalReference new_space_allocation_top = ExternalReference::new_space_allocation_top_address(isolate()); Addu(scratch_reg, receiver_reg, Operand(JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag)); - Branch(&no_memento_available, lt, scratch_reg, Operand(new_space_start)); + Branch(no_memento_found, lt, scratch_reg, Operand(new_space_start)); li(at, Operand(new_space_allocation_top)); lw(at, MemOperand(at)); - Branch(&no_memento_available, gt, scratch_reg, Operand(at)); + Branch(no_memento_found, gt, scratch_reg, Operand(at)); lw(scratch_reg, MemOperand(scratch_reg, -AllocationMemento::kSize)); - Branch(allocation_memento_present, cond, scratch_reg, - Operand(Handle<Map>(isolate()->heap()->allocation_memento_map()))); - bind(&no_memento_available); + if (allocation_memento_present) { + Branch(allocation_memento_present, cond, scratch_reg, + Operand(isolate()->factory()->allocation_memento_map())); + } } diff --git a/deps/v8/src/mips/macro-assembler-mips.h b/deps/v8/src/mips/macro-assembler-mips.h index 75ded88490..0805bb9670 100644 --- a/deps/v8/src/mips/macro-assembler-mips.h +++ b/deps/v8/src/mips/macro-assembler-mips.h @@ -51,6 +51,12 @@ class JumpTarget; // MIPS generated code calls C code, it must be via t9 register. +// Flags used for LeaveExitFrame function. +enum LeaveExitFrameMode { + EMIT_RETURN = true, + NO_EMIT_RETURN = false +}; + // Flags used for AllocateHeapNumber enum TaggingMode { // Tag the result. @@ -848,7 +854,8 @@ class MacroAssembler: public Assembler { // Leave the current exit frame. void LeaveExitFrame(bool save_doubles, Register arg_count, - bool do_return = false); + bool restore_context, + bool do_return = NO_EMIT_RETURN); // Get the actual activation frame alignment for target environment. static int ActivationFrameAlignment(); @@ -1194,11 +1201,18 @@ class MacroAssembler: public Assembler { void CallJSExitStub(CodeStub* stub); // Call a runtime routine. - void CallRuntime(const Runtime::Function* f, int num_arguments); - void CallRuntimeSaveDoubles(Runtime::FunctionId id); + void CallRuntime(const Runtime::Function* f, + int num_arguments, + SaveFPRegsMode save_doubles = kDontSaveFPRegs); + void CallRuntimeSaveDoubles(Runtime::FunctionId id) { + const Runtime::Function* function = Runtime::FunctionForId(id); + CallRuntime(function, function->nargs, kSaveFPRegs); + } // Convenience function: Same as above, but takes the fid instead. - void CallRuntime(Runtime::FunctionId fid, int num_arguments); + void CallRuntime(Runtime::FunctionId id, int num_arguments) { + CallRuntime(Runtime::FunctionForId(id), num_arguments); + } // Convenience function: call an external reference. 
void CallExternalReference(const ExternalReference& ext, @@ -1271,7 +1285,8 @@ class MacroAssembler: public Assembler { ExternalReference thunk_ref, Register thunk_last_arg, int stack_space, - int return_value_offset_from_fp); + MemOperand return_value_operand, + MemOperand* context_restore_operand); // Jump to the builtin routine. void JumpToExternalReference(const ExternalReference& builtin, @@ -1419,6 +1434,18 @@ class MacroAssembler: public Assembler { // ------------------------------------------------------------------------- // String utilities. + // Generate code to do a lookup in the number string cache. If the number in + // the register object is found in the cache the generated code falls through + // with the result in the result register. The object and the result register + // can be the same. If the number is not found in the cache the code jumps to + // the label not_found with only the content of register object unchanged. + void LookupNumberStringCache(Register object, + Register result, + Register scratch1, + Register scratch2, + Register scratch3, + Label* not_found); + // Checks if both instance types are sequential ASCII strings and jumps to // label if either is not. void JumpIfBothInstanceTypesAreNotSequentialAscii( @@ -1471,6 +1498,9 @@ class MacroAssembler: public Assembler { And(reg, reg, Operand(mask)); } + // Generates function and stub prologue code. + void Prologue(PrologueFrameMode frame_mode); + // Activation support. void EnterFrame(StackFrame::Type type); void LeaveFrame(StackFrame::Type type); @@ -1493,11 +1523,22 @@ class MacroAssembler: public Assembler { // to another type. // On entry, receiver_reg should point to the array object. // scratch_reg gets clobbered. - // If allocation info is present, jump to allocation_info_present - void TestJSArrayForAllocationMemento(Register receiver_reg, - Register scratch_reg, - Condition cond, - Label* allocation_memento_present); + // If allocation info is present, jump to allocation_memento_present. + void TestJSArrayForAllocationMemento( + Register receiver_reg, + Register scratch_reg, + Label* no_memento_found, + Condition cond = al, + Label* allocation_memento_present = NULL); + + void JumpIfJSArrayHasAllocationMemento(Register receiver_reg, + Register scratch_reg, + Label* memento_found) { + Label no_memento_found; + TestJSArrayForAllocationMemento(receiver_reg, scratch_reg, + &no_memento_found, eq, memento_found); + bind(&no_memento_found); + } private: void CallCFunctionHelper(Register function, diff --git a/deps/v8/src/mips/regexp-macro-assembler-mips.cc b/deps/v8/src/mips/regexp-macro-assembler-mips.cc index 1a04fd1029..49dec3c024 100644 --- a/deps/v8/src/mips/regexp-macro-assembler-mips.cc +++ b/deps/v8/src/mips/regexp-macro-assembler-mips.cc @@ -1063,15 +1063,56 @@ bool RegExpMacroAssemblerMIPS::CanReadUnaligned() { // Private methods: void RegExpMacroAssemblerMIPS::CallCheckStackGuardState(Register scratch) { - static const int num_arguments = 3; - __ PrepareCallCFunction(num_arguments, scratch); + int stack_alignment = OS::ActivationFrameAlignment(); + + // Align the stack pointer and save the original sp value on the stack. + __ mov(scratch, sp); + __ Subu(sp, sp, Operand(kPointerSize)); + ASSERT(IsPowerOf2(stack_alignment)); + __ And(sp, sp, Operand(-stack_alignment)); + __ sw(scratch, MemOperand(sp)); + __ mov(a2, frame_pointer()); // Code* of self. __ li(a1, Operand(masm_->CodeObject()), CONSTANT_SIZE); - // a0 becomes return address pointer. 
+ + // We need to make room for the return address on the stack. + ASSERT(IsAligned(stack_alignment, kPointerSize)); + __ Subu(sp, sp, Operand(stack_alignment)); + + // Stack pointer now points to cell where return address is to be written. + // Arguments are in registers, meaning we treat the return address as + // argument 5. Since DirectCEntryStub will handle allocating space for the C + // argument slots, we don't need to care about that here. This is how the + // stack will look (sp meaning the value of sp at this moment): + // [sp + 3] - empty slot if needed for alignment. + // [sp + 2] - saved sp. + // [sp + 1] - second word reserved for return value. + // [sp + 0] - first word reserved for return value. + + // a0 will point to the return address, placed by DirectCEntry. + __ mov(a0, sp); + ExternalReference stack_guard_check = ExternalReference::re_check_stack_guard_state(masm_->isolate()); - CallCFunctionUsingStub(stack_guard_check, num_arguments); + __ li(t9, Operand(stack_guard_check)); + DirectCEntryStub stub; + stub.GenerateCall(masm_, t9); + + // DirectCEntryStub allocated space for the C argument slots, so we have to + // drop them, together with the return address, from the stack by loading + // the saved sp. At this point the stack must look like this: + // [sp + 7] - empty slot if needed for alignment. + // [sp + 6] - saved sp. + // [sp + 5] - second word reserved for return value. + // [sp + 4] - first word reserved for return value. + // [sp + 3] - C argument slot. + // [sp + 2] - C argument slot. + // [sp + 1] - C argument slot. + // [sp + 0] - C argument slot. + __ lw(sp, MemOperand(sp, stack_alignment + kCArgsSlotsSize)); + + __ li(code_pointer(), Operand(masm_->CodeObject())); } @@ -1276,21 +1317,6 @@ void RegExpMacroAssemblerMIPS::CheckStackLimit() { } -void RegExpMacroAssemblerMIPS::CallCFunctionUsingStub( - ExternalReference function, - int num_arguments) { - // Must pass all arguments in registers. The stub pushes on the stack. - ASSERT(num_arguments <= 4); - __ li(code_pointer(), Operand(function)); - RegExpCEntryStub stub; - __ CallStub(&stub); - if (OS::ActivationFrameAlignment() != 0) { - __ lw(sp, MemOperand(sp, 16)); - } - __ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE); -} - - void RegExpMacroAssemblerMIPS::LoadCurrentCharacterUnchecked(int cp_offset, int characters) { Register offset = current_input_offset(); @@ -1312,23 +1338,6 @@ void RegExpMacroAssemblerMIPS::LoadCurrentCharacterUnchecked(int cp_offset, } -void RegExpCEntryStub::Generate(MacroAssembler* masm_) { - int stack_alignment = OS::ActivationFrameAlignment(); - if (stack_alignment < kPointerSize) stack_alignment = kPointerSize; - // Stack is already aligned for call, so decrement by alignment - // to make room for storing the return address.
- __ Subu(sp, sp, Operand(stack_alignment + kCArgsSlotsSize)); - const int return_address_offset = kCArgsSlotsSize; - __ Addu(a0, sp, return_address_offset); - __ sw(ra, MemOperand(a0, 0)); - __ mov(t9, t1); - __ Call(t9); - __ lw(ra, MemOperand(sp, return_address_offset)); - __ Addu(sp, sp, Operand(stack_alignment + kCArgsSlotsSize)); - __ Jump(ra); -} - - #undef __ #endif // V8_INTERPRETED_REGEXP diff --git a/deps/v8/src/mips/regexp-macro-assembler-mips.h b/deps/v8/src/mips/regexp-macro-assembler-mips.h index 86ae4d45ee..063582c648 100644 --- a/deps/v8/src/mips/regexp-macro-assembler-mips.h +++ b/deps/v8/src/mips/regexp-macro-assembler-mips.h @@ -217,14 +217,6 @@ class RegExpMacroAssemblerMIPS: public NativeRegExpMacroAssembler { // and increments it by a word size. inline void Pop(Register target); - // Calls a C function and cleans up the frame alignment done by - // by FrameAlign. The called function *is* allowed to trigger a garbage - // collection, but may not take more than four arguments (no arguments - // passed on the stack), and the first argument will be a pointer to the - // return address. - inline void CallCFunctionUsingStub(ExternalReference function, - int num_arguments); - Isolate* isolate() const { return masm_->isolate(); } MacroAssembler* masm_; diff --git a/deps/v8/src/mips/simulator-mips.cc b/deps/v8/src/mips/simulator-mips.cc index ea8b65948a..5a96efe9c1 100644 --- a/deps/v8/src/mips/simulator-mips.cc +++ b/deps/v8/src/mips/simulator-mips.cc @@ -2274,9 +2274,13 @@ void Simulator::DecodeTypeRegister(Instruction* instr) { break; case DIV: // Divide by zero and overflow was not checked in the configuration - // step - div and divu do not raise exceptions. On division by 0 and - // on overflow (INT_MIN/-1), the result will be UNPREDICTABLE. - if (rt != 0 && !(rs == INT_MIN && rt == -1)) { + // step - div and divu do not raise exceptions. On division by 0 + // the result will be UNPREDICTABLE. On overflow (INT_MIN/-1), + // return INT_MIN which is what the hardware does. + if (rs == INT_MIN && rt == -1) { + set_register(LO, INT_MIN); + set_register(HI, 0); + } else if (rt != 0) { set_register(LO, rs / rt); set_register(HI, rs % rt); } diff --git a/deps/v8/src/mips/stub-cache-mips.cc b/deps/v8/src/mips/stub-cache-mips.cc index 58452cad1b..471c25ef82 100644 --- a/deps/v8/src/mips/stub-cache-mips.cc +++ b/deps/v8/src/mips/stub-cache-mips.cc @@ -374,30 +374,26 @@ void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm, Register receiver, Register scratch1, Register scratch2, - Label* miss, - bool support_wrappers) { + Label* miss) { Label check_wrapper; // Check if the object is a string leaving the instance type in the // scratch1 register. - GenerateStringCheck(masm, receiver, scratch1, scratch2, miss, - support_wrappers ? &check_wrapper : miss); + GenerateStringCheck(masm, receiver, scratch1, scratch2, miss, &check_wrapper); // Load length directly from the string. __ Ret(USE_DELAY_SLOT); __ lw(v0, FieldMemOperand(receiver, String::kLengthOffset)); - if (support_wrappers) { - // Check if the object is a JSValue wrapper. - __ bind(&check_wrapper); - __ Branch(miss, ne, scratch1, Operand(JS_VALUE_TYPE)); + // Check if the object is a JSValue wrapper. + __ bind(&check_wrapper); + __ Branch(miss, ne, scratch1, Operand(JS_VALUE_TYPE)); - // Unwrap the value and check if the wrapped value is a string. 
- __ lw(scratch1, FieldMemOperand(receiver, JSValue::kValueOffset)); - GenerateStringCheck(masm, scratch1, scratch2, scratch2, miss, miss); - __ Ret(USE_DELAY_SLOT); - __ lw(v0, FieldMemOperand(scratch1, String::kLengthOffset)); - } + // Unwrap the value and check if the wrapped value is a string. + __ lw(scratch1, FieldMemOperand(receiver, JSValue::kValueOffset)); + GenerateStringCheck(masm, scratch1, scratch2, scratch2, miss, miss); + __ Ret(USE_DELAY_SLOT); + __ lw(v0, FieldMemOperand(scratch1, String::kLengthOffset)); } @@ -429,7 +425,7 @@ static void GenerateCheckPropertyCell(MacroAssembler* masm, } -void BaseStoreStubCompiler::GenerateNegativeHolderLookup( +void StoreStubCompiler::GenerateNegativeHolderLookup( MacroAssembler* masm, Handle<JSObject> holder, Register holder_reg, @@ -448,19 +444,19 @@ void BaseStoreStubCompiler::GenerateNegativeHolderLookup( // Generate StoreTransition code, value is passed in a0 register. // After executing generated code, the receiver_reg and name_reg // may be clobbered. -void BaseStoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm, - Handle<JSObject> object, - LookupResult* lookup, - Handle<Map> transition, - Handle<Name> name, - Register receiver_reg, - Register storage_reg, - Register value_reg, - Register scratch1, - Register scratch2, - Register scratch3, - Label* miss_label, - Label* slow) { +void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm, + Handle<JSObject> object, + LookupResult* lookup, + Handle<Map> transition, + Handle<Name> name, + Register receiver_reg, + Register storage_reg, + Register value_reg, + Register scratch1, + Register scratch2, + Register scratch3, + Label* miss_label, + Label* slow) { // a0 : value. Label exit; @@ -612,15 +608,15 @@ void BaseStoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm, // When leaving generated code after success, the receiver_reg and name_reg // may be clobbered. Upon branch to miss_label, the receiver and name // registers have their original values. -void BaseStoreStubCompiler::GenerateStoreField(MacroAssembler* masm, - Handle<JSObject> object, - LookupResult* lookup, - Register receiver_reg, - Register name_reg, - Register value_reg, - Register scratch1, - Register scratch2, - Label* miss_label) { +void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm, + Handle<JSObject> object, + LookupResult* lookup, + Register receiver_reg, + Register name_reg, + Register value_reg, + Register scratch1, + Register scratch2, + Label* miss_label) { // a0 : value Label exit; @@ -733,9 +729,9 @@ void BaseStoreStubCompiler::GenerateStoreField(MacroAssembler* masm, } -void BaseStoreStubCompiler::GenerateRestoreName(MacroAssembler* masm, - Label* label, - Handle<Name> name) { +void StoreStubCompiler::GenerateRestoreName(MacroAssembler* masm, + Label* label, + Handle<Name> name) { if (!label->is_unused()) { __ bind(label); __ li(this->name(), Operand(name)); @@ -833,25 +829,26 @@ static void FreeSpaceForFastApiCall(MacroAssembler* masm) { static void GenerateFastApiDirectCall(MacroAssembler* masm, const CallOptimization& optimization, - int argc) { + int argc, + bool restore_context) { // ----------- S t a t e ------------- - // -- sp[0] : holder (set by CheckPrototypes) - // -- sp[4] : callee JS function - // -- sp[8] : call data - // -- sp[12] : isolate - // -- sp[16] : ReturnValue default value - // -- sp[20] : ReturnValue - // -- sp[24] : last JS argument + // -- sp[0] - sp[24] : FunctionCallbackInfo, incl. 
+ // : holder (set by CheckPrototypes) + // -- sp[28] : last JS argument // -- ... - // -- sp[(argc + 5) * 4] : first JS argument - // -- sp[(argc + 6) * 4] : receiver + // -- sp[(argc + 6) * 4] : first JS argument + // -- sp[(argc + 7) * 4] : receiver // ----------------------------------- + typedef FunctionCallbackArguments FCA; + // Save calling context. + __ sw(cp, MemOperand(sp, FCA::kContextSaveIndex * kPointerSize)); // Get the function and setup the context. Handle<JSFunction> function = optimization.constant_function(); __ LoadHeapObject(t1, function); __ lw(cp, FieldMemOperand(t1, JSFunction::kContextOffset)); + __ sw(t1, MemOperand(sp, FCA::kCalleeIndex * kPointerSize)); - // Pass the additional arguments. + // Construct the FunctionCallbackInfo. Handle<CallHandlerInfo> api_call_info = optimization.api_call_info(); Handle<Object> call_data(api_call_info->data(), masm->isolate()); if (masm->isolate()->heap()->InNewSpace(*call_data)) { @@ -860,18 +857,18 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm, } else { __ li(t2, call_data); } - + // Store call data. + __ sw(t2, MemOperand(sp, FCA::kDataIndex * kPointerSize)); + // Store isolate. __ li(t3, Operand(ExternalReference::isolate_address(masm->isolate()))); - // Store JS function, call data, isolate ReturnValue default and ReturnValue. - __ sw(t1, MemOperand(sp, 1 * kPointerSize)); - __ sw(t2, MemOperand(sp, 2 * kPointerSize)); - __ sw(t3, MemOperand(sp, 3 * kPointerSize)); + __ sw(t3, MemOperand(sp, FCA::kIsolateIndex * kPointerSize)); + // Store ReturnValue default and ReturnValue. __ LoadRoot(t1, Heap::kUndefinedValueRootIndex); - __ sw(t1, MemOperand(sp, 4 * kPointerSize)); - __ sw(t1, MemOperand(sp, 5 * kPointerSize)); + __ sw(t1, MemOperand(sp, FCA::kReturnValueOffset * kPointerSize)); + __ sw(t1, MemOperand(sp, FCA::kReturnValueDefaultValueIndex * kPointerSize)); // Prepare arguments. - __ Addu(a2, sp, Operand(5 * kPointerSize)); + __ Move(a2, sp); // Allocate the v8::Arguments structure in the arguments' space since // it's not controlled by GC. @@ -880,19 +877,18 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm, FrameScope frame_scope(masm, StackFrame::MANUAL); __ EnterExitFrame(false, kApiStackSpace); - // a0 = v8::Arguments& + // a0 = FunctionCallbackInfo& // Arguments is built at sp + 1 (sp is a reserved spot for ra). 
__ Addu(a0, sp, kPointerSize); - - // v8::Arguments::implicit_args_ + // FunctionCallbackInfo::implicit_args_ __ sw(a2, MemOperand(a0, 0 * kPointerSize)); - // v8::Arguments::values_ - __ Addu(t0, a2, Operand(argc * kPointerSize)); + // FunctionCallbackInfo::values_ + __ Addu(t0, a2, Operand((kFastApiCallArguments - 1 + argc) * kPointerSize)); __ sw(t0, MemOperand(a0, 1 * kPointerSize)); - // v8::Arguments::length_ = argc + // FunctionCallbackInfo::length_ = argc __ li(t0, Operand(argc)); __ sw(t0, MemOperand(a0, 2 * kPointerSize)); - // v8::Arguments::is_construct_call = 0 + // FunctionCallbackInfo::is_construct_call = 0 __ sw(zero_reg, MemOperand(a0, 3 * kPointerSize)); const int kStackUnwindSpace = argc + kFastApiCallArguments + 1; @@ -910,12 +906,19 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm, masm->isolate()); AllowExternalCallThatCantCauseGC scope(masm); + MemOperand context_restore_operand( + fp, (2 + FCA::kContextSaveIndex) * kPointerSize); + MemOperand return_value_operand( + fp, (2 + FCA::kReturnValueOffset) * kPointerSize); + __ CallApiFunctionAndReturn(ref, function_address, thunk_ref, a1, kStackUnwindSpace, - kFastApiCallArguments + 1); + return_value_operand, + restore_context ? + &context_restore_operand : NULL); } @@ -929,11 +932,12 @@ static void GenerateFastApiCall(MacroAssembler* masm, ASSERT(optimization.is_simple_api_call()); ASSERT(!receiver.is(scratch)); + typedef FunctionCallbackArguments FCA; const int stack_space = kFastApiCallArguments + argc + 1; // Assign stack space for the call arguments. __ Subu(sp, sp, Operand(stack_space * kPointerSize)); // Write holder to stack frame. - __ sw(receiver, MemOperand(sp, 0)); + __ sw(receiver, MemOperand(sp, FCA::kHolderIndex * kPointerSize)); // Write receiver to stack frame. int index = stack_space - 1; __ sw(receiver, MemOperand(sp, index * kPointerSize)); @@ -944,7 +948,7 @@ static void GenerateFastApiCall(MacroAssembler* masm, __ sw(receiver, MemOperand(sp, index-- * kPointerSize)); } - GenerateFastApiDirectCall(masm, optimization, argc); + GenerateFastApiDirectCall(masm, optimization, argc, true); } @@ -1058,7 +1062,8 @@ class CallInterceptorCompiler BASE_EMBEDDED { // Invoke function. if (can_do_fast_api_call) { - GenerateFastApiDirectCall(masm, optimization, arguments_.immediate()); + GenerateFastApiDirectCall( + masm, optimization, arguments_.immediate(), false); } else { CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_) ? CALL_AS_FUNCTION @@ -1199,8 +1204,9 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object, Register reg = object_reg; int depth = 0; + typedef FunctionCallbackArguments FCA; if (save_at_depth == depth) { - __ sw(reg, MemOperand(sp)); + __ sw(reg, MemOperand(sp, FCA::kHolderIndex * kPointerSize)); } // Check the maps in the prototype chain. @@ -1258,7 +1264,7 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object, } if (save_at_depth == depth) { - __ sw(reg, MemOperand(sp)); + __ sw(reg, MemOperand(sp, FCA::kHolderIndex * kPointerSize)); } // Go to the next object in the prototype chain. 
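
The GenerateFastApiDirectCall changes above stop hard-coding slot numbers and instead write through FunctionCallbackArguments (FCA) indices. Reconstructing the layout from the sp offsets in the state comment gives the following picture; the numeric values are inferred from this diff rather than quoted from V8's headers, so treat them as an assumption:

// One pointer-sized slot per index, growing upward from sp.
enum FunctionCallbackArgumentsIndexSketch {
  kHolderIndex = 0,                   // sp[0], written by CheckPrototypes
  kIsolateIndex = 1,                  // sp[4]
  kReturnValueDefaultValueIndex = 2,  // sp[8]
  kReturnValueOffset = 3,             // sp[12]
  kDataIndex = 4,                     // sp[16]
  kCalleeIndex = 5,                   // sp[20]
  kContextSaveIndex = 6,              // sp[24], saved cp
  kArgsLength = 7                     // == kFastApiCallArguments
};

With kArgsLength == 7, the last JS argument lands at sp[28] and the receiver at sp[(argc + 7) * 4], which matches the state comment in the hunk.
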
@@ -1290,9 +1296,9 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object, } -void BaseLoadStubCompiler::HandlerFrontendFooter(Handle<Name> name, - Label* success, - Label* miss) { +void LoadStubCompiler::HandlerFrontendFooter(Handle<Name> name, + Label* success, + Label* miss) { if (!miss->is_unused()) { __ Branch(success); __ bind(miss); @@ -1301,9 +1307,9 @@ void BaseLoadStubCompiler::HandlerFrontendFooter(Handle<Name> name, } -void BaseStoreStubCompiler::HandlerFrontendFooter(Handle<Name> name, - Label* success, - Label* miss) { +void StoreStubCompiler::HandlerFrontendFooter(Handle<Name> name, + Label* success, + Label* miss) { if (!miss->is_unused()) { __ b(success); GenerateRestoreName(masm(), miss, name); @@ -1312,7 +1318,7 @@ void BaseStoreStubCompiler::HandlerFrontendFooter(Handle<Name> name, } -Register BaseLoadStubCompiler::CallbackHandlerFrontend( +Register LoadStubCompiler::CallbackHandlerFrontend( Handle<JSObject> object, Register object_reg, Handle<JSObject> holder, @@ -1358,7 +1364,7 @@ Register BaseLoadStubCompiler::CallbackHandlerFrontend( } -void BaseLoadStubCompiler::NonexistentHandlerFrontend( +void LoadStubCompiler::NonexistentHandlerFrontend( Handle<JSObject> object, Handle<JSObject> last, Handle<Name> name, @@ -1378,10 +1384,10 @@ void BaseLoadStubCompiler::NonexistentHandlerFrontend( } -void BaseLoadStubCompiler::GenerateLoadField(Register reg, - Handle<JSObject> holder, - PropertyIndex field, - Representation representation) { +void LoadStubCompiler::GenerateLoadField(Register reg, + Handle<JSObject> holder, + PropertyIndex field, + Representation representation) { if (!reg.is(receiver())) __ mov(receiver(), reg); if (kind() == Code::LOAD_IC) { LoadFieldStub stub(field.is_inobject(holder), @@ -1397,36 +1403,36 @@ void BaseLoadStubCompiler::GenerateLoadField(Register reg, } -void BaseLoadStubCompiler::GenerateLoadConstant(Handle<Object> value) { +void LoadStubCompiler::GenerateLoadConstant(Handle<Object> value) { // Return the constant value. __ LoadObject(v0, value); __ Ret(); } -void BaseLoadStubCompiler::GenerateLoadCallback( +void LoadStubCompiler::GenerateLoadCallback( const CallOptimization& call_optimization) { GenerateFastApiCall( masm(), call_optimization, receiver(), scratch3(), 0, NULL); } -void BaseLoadStubCompiler::GenerateLoadCallback( +void LoadStubCompiler::GenerateLoadCallback( Register reg, Handle<ExecutableAccessorInfo> callback) { // Build AccessorInfo::args_ list on the stack and push property name below // the exit frame to make GC aware of them and store pointers to them. 
- STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 0); - STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == -1); - STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == -2); - STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == -3); - STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == -4); - STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == -5); + STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 0); + STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 1); + STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 2); + STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 3); + STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 4); + STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 5); + STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 6); ASSERT(!scratch2().is(reg)); ASSERT(!scratch3().is(reg)); ASSERT(!scratch4().is(reg)); __ push(receiver()); - __ mov(scratch2(), sp); // scratch2 = AccessorInfo::args_ if (heap()->InNewSpace(callback->data())) { __ li(scratch3(), callback); __ lw(scratch3(), FieldMemOperand(scratch3(), @@ -1444,6 +1450,7 @@ void BaseLoadStubCompiler::GenerateLoadCallback( __ sw(scratch4(), MemOperand(sp, 2 * kPointerSize)); __ sw(reg, MemOperand(sp, 1 * kPointerSize)); __ sw(name(), MemOperand(sp, 0 * kPointerSize)); + __ Addu(scratch2(), sp, 1 * kPointerSize); __ mov(a2, scratch2()); // Saved in case scratch2 == a1. __ mov(a0, sp); // (first argument - a0) = Handle<Name> @@ -1452,13 +1459,13 @@ void BaseLoadStubCompiler::GenerateLoadCallback( FrameScope frame_scope(masm(), StackFrame::MANUAL); __ EnterExitFrame(false, kApiStackSpace); - // Create AccessorInfo instance on the stack above the exit frame with + // Create PropertyAccessorInfo instance on the stack above the exit frame with // scratch2 (internal::Object** args_) as the data. __ sw(a2, MemOperand(sp, kPointerSize)); // (second argument - a1) = AccessorInfo& __ Addu(a1, sp, kPointerSize); - const int kStackUnwindSpace = kFastApiCallArguments + 1; + const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1; Address getter_address = v8::ToCData<Address>(callback->getter()); ApiFunction fun(getter_address); ExternalReference::Type type = ExternalReference::DIRECT_GETTER_CALL; @@ -1475,11 +1482,12 @@ void BaseLoadStubCompiler::GenerateLoadCallback( thunk_ref, a2, kStackUnwindSpace, - 6); + MemOperand(fp, 6 * kPointerSize), + NULL); } -void BaseLoadStubCompiler::GenerateLoadInterceptor( +void LoadStubCompiler::GenerateLoadInterceptor( Register holder_reg, Handle<JSObject> object, Handle<JSObject> interceptor_holder, @@ -2558,7 +2566,7 @@ Handle<Code> CallStubCompiler::CompileFastApiCall( CheckPrototypes(Handle<JSObject>::cast(object), a1, holder, a0, a3, t0, name, depth, &miss); - GenerateFastApiDirectCall(masm(), optimization, argc); + GenerateFastApiDirectCall(masm(), optimization, argc, false); __ bind(&miss); FreeSpaceForFastApiCall(masm()); @@ -3011,6 +3019,7 @@ void KeyedStoreStubCompiler::GenerateNameCheck(Handle<Name> name, void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm, + Register receiver, Handle<JSFunction> getter) { // ----------- S t a t e ------------- // -- a0 : receiver @@ -3022,7 +3031,7 @@ void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm, if (!getter.is_null()) { // Call the JavaScript getter with the receiver on the stack. 
- __ push(a0); + __ push(receiver); ParameterCount actual(0); ParameterCount expected(getter); __ InvokeFunction(getter, expected, actual, diff --git a/deps/v8/src/mirror-debugger.js b/deps/v8/src/mirror-debugger.js index 3b360bb5d7..4277136b60 100644 --- a/deps/v8/src/mirror-debugger.js +++ b/deps/v8/src/mirror-debugger.js @@ -117,7 +117,7 @@ function LookupMirror(handle) { * @returns {Mirror} the mirror reflects the undefined value */ function GetUndefinedMirror() { - return MakeMirror(void 0); + return MakeMirror(UNDEFINED); } @@ -482,7 +482,7 @@ ValueMirror.prototype.value = function() { * @extends ValueMirror */ function UndefinedMirror() { - %_CallFunction(this, UNDEFINED_TYPE, void 0, ValueMirror); + %_CallFunction(this, UNDEFINED_TYPE, UNDEFINED, ValueMirror); } inherits(UndefinedMirror, ValueMirror); @@ -957,7 +957,7 @@ FunctionMirror.prototype.scopeCount = function() { FunctionMirror.prototype.scope = function(index) { if (this.resolved()) { - return new ScopeMirror(void 0, this, index); + return new ScopeMirror(UNDEFINED, this, index); } }; @@ -1670,7 +1670,7 @@ FrameMirror.prototype.scopeCount = function() { FrameMirror.prototype.scope = function(index) { - return new ScopeMirror(this, void 0, index); + return new ScopeMirror(this, UNDEFINED, index); }; diff --git a/deps/v8/src/mksnapshot.cc b/deps/v8/src/mksnapshot.cc index 9cf9e2e8a4..95d3daada2 100644 --- a/deps/v8/src/mksnapshot.cc +++ b/deps/v8/src/mksnapshot.cc @@ -43,49 +43,6 @@ using namespace v8; -static const unsigned int kMaxCounters = 256; - -// A single counter in a counter collection. -class Counter { - public: - static const int kMaxNameSize = 64; - int32_t* Bind(const char* name) { - int i; - for (i = 0; i < kMaxNameSize - 1 && name[i]; i++) { - name_[i] = name[i]; - } - name_[i] = '\0'; - return &counter_; - } - private: - int32_t counter_; - uint8_t name_[kMaxNameSize]; -}; - - -// A set of counters and associated information. An instance of this -// class is stored directly in the memory-mapped counters file if -// the --save-counters options is used -class CounterCollection { - public: - CounterCollection() { - magic_number_ = 0xDEADFACE; - max_counters_ = kMaxCounters; - max_name_size_ = Counter::kMaxNameSize; - counters_in_use_ = 0; - } - Counter* GetNextCounter() { - if (counters_in_use_ == kMaxCounters) return NULL; - return &counters_[counters_in_use_++]; - } - private: - uint32_t magic_number_; - uint32_t max_counters_; - uint32_t max_name_size_; - uint32_t counters_in_use_; - Counter counters_[kMaxCounters]; -}; - class Compressor { public: @@ -310,6 +267,7 @@ void DumpException(Handle<Message> message) { int main(int argc, char** argv) { V8::InitializeICU(); + i::Isolate::SetCrashIfDefaultIsolateInitialized(); // By default, log code create information in the snapshot. i::FLAG_log_code = true; @@ -330,7 +288,10 @@ int main(int argc, char** argv) { exit(1); } #endif - Isolate* isolate = Isolate::GetCurrent(); + i::FLAG_logfile_per_isolate = false; + + Isolate* isolate = v8::Isolate::New(); + isolate->Enter(); i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate); i::Serializer::Enable(internal_isolate); Persistent<Context> context; diff --git a/deps/v8/src/msan.h b/deps/v8/src/msan.h new file mode 100644 index 0000000000..484c9fa397 --- /dev/null +++ b/deps/v8/src/msan.h @@ -0,0 +1,49 @@ +// Copyright 2013 the V8 project authors. All rights reserved. 
+// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// MemorySanitizer support. + +#ifndef V8_MSAN_H_ +#define V8_MSAN_H_ + +#ifndef __has_feature +# define __has_feature(x) 0 +#endif + +#if __has_feature(memory_sanitizer) && !defined(MEMORY_SANITIZER) +# define MEMORY_SANITIZER +#endif + +#ifdef MEMORY_SANITIZER +# include <sanitizer/msan_interface.h> +// Marks a memory range as fully initialized. 
+# define MSAN_MEMORY_IS_INITIALIZED(p, s) __msan_unpoison((p), (s)) +#else +# define MSAN_MEMORY_IS_INITIALIZED(p, s) +#endif + +#endif // V8_MSAN_H_ diff --git a/deps/v8/src/object-observe.js b/deps/v8/src/object-observe.js index 1035792e8b..9c7ac3889e 100644 --- a/deps/v8/src/object-observe.js +++ b/deps/v8/src/object-observe.js @@ -72,12 +72,12 @@ function ObservationWeakMap(map) { ObservationWeakMap.prototype = { get: function(key) { key = %UnwrapGlobalProxy(key); - if (!IS_SPEC_OBJECT(key)) return void 0; + if (!IS_SPEC_OBJECT(key)) return UNDEFINED; return %WeakCollectionGet(this.map_, key); }, set: function(key, value) { key = %UnwrapGlobalProxy(key); - if (!IS_SPEC_OBJECT(key)) return void 0; + if (!IS_SPEC_OBJECT(key)) return UNDEFINED; %WeakCollectionSet(this.map_, key, value); }, has: function(key) { @@ -284,11 +284,6 @@ function AcceptArgIsValid(arg) { arg.length < 0) return false; - var length = arg.length; - for (var i = 0; i < length; i++) { - if (!IS_STRING(arg[i])) - return false; - } return true; } @@ -497,7 +492,7 @@ function ObjectNotifierPerformChange(changeType, changeFn) { ObjectInfoAddPerformingType(objectInfo, changeType); try { - %_CallFunction(void 0, changeFn); + %_CallFunction(UNDEFINED, changeFn); } finally { ObjectInfoRemovePerformingType(objectInfo, changeType); } @@ -530,7 +525,7 @@ function CallbackDeliverPending(callback) { %MoveArrayContents(callbackInfo, delivered); try { - %_CallFunction(void 0, delivered, callback); + %_CallFunction(UNDEFINED, delivered, callback); } catch (ex) {} return true; } diff --git a/deps/v8/src/objects-debug.cc b/deps/v8/src/objects-debug.cc index 5d9e161a7e..6ab2ddffe2 100644 --- a/deps/v8/src/objects-debug.cc +++ b/deps/v8/src/objects-debug.cc @@ -95,6 +95,9 @@ void HeapObject::HeapObjectVerify() { case FIXED_DOUBLE_ARRAY_TYPE: FixedDoubleArray::cast(this)->FixedDoubleArrayVerify(); break; + case CONSTANT_POOL_ARRAY_TYPE: + ConstantPoolArray::cast(this)->ConstantPoolArrayVerify(); + break; case BYTE_ARRAY_TYPE: ByteArray::cast(this)->ByteArrayVerify(); break; @@ -303,6 +306,13 @@ void ExternalDoubleArray::ExternalDoubleArrayVerify() { } +bool JSObject::ElementsAreSafeToExamine() { + return (FLAG_use_gvn && FLAG_use_allocation_folding) || + reinterpret_cast<Map*>(elements()) != + GetHeap()->one_pointer_filler_map(); +} + + void JSObject::JSObjectVerify() { VerifyHeapPointer(properties()); VerifyHeapPointer(elements()); @@ -330,10 +340,9 @@ void JSObject::JSObjectVerify() { } } - // TODO(hpayer): deal gracefully with partially constructed JSObjects, when - // allocation folding is turned off. - if (reinterpret_cast<Map*>(elements()) != - GetHeap()->one_pointer_filler_map()) { + // If a GC was caused while constructing this object, the elements + // pointer may point to a one pointer filler map. 
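
The msan.h header added a little further up defines MSAN_MEMORY_IS_INITIALIZED as __msan_unpoison under MemorySanitizer and as a no-op otherwise. A short usage sketch (the function name and include path are illustrative):

#include <stddef.h>
#include "msan.h"

// After filling a buffer through a channel MemorySanitizer cannot track
// (e.g. memory written by generated code), mark the bytes as initialized
// so later reads are not reported as uses of uninitialized memory.
void PublishExternalBytes(void* start, size_t size) {
  MSAN_MEMORY_IS_INITIALIZED(start, size);
}
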
+ if (ElementsAreSafeToExamine()) { CHECK_EQ((map()->has_fast_smi_or_object_elements() || (elements() == GetHeap()->empty_fixed_array())), (elements()->map() == GetHeap()->fixed_array_map() || @@ -438,6 +447,11 @@ void FixedDoubleArray::FixedDoubleArrayVerify() { } +void ConstantPoolArray::ConstantPoolArrayVerify() { + CHECK(IsConstantPoolArray()); +} + + void JSGeneratorObject::JSGeneratorObjectVerify() { // In an expression like "new g()", there can be a point where a generator // object is allocated but its fields are all undefined, as it hasn't yet been @@ -664,16 +678,20 @@ void Code::CodeVerify() { } -void Code::VerifyEmbeddedMapsDependency() { +void Code::VerifyEmbeddedObjectsDependency() { int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT); for (RelocIterator it(this, mode_mask); !it.done(); it.next()) { - RelocInfo::Mode mode = it.rinfo()->rmode(); - if (mode == RelocInfo::EMBEDDED_OBJECT && - it.rinfo()->target_object()->IsMap()) { - Map* map = Map::cast(it.rinfo()->target_object()); - if (map->CanTransition()) { + Object* obj = it.rinfo()->target_object(); + if (IsWeakEmbeddedObject(kind(), obj)) { + if (obj->IsMap()) { + Map* map = Map::cast(obj); CHECK(map->dependent_code()->Contains( DependentCode::kWeaklyEmbeddedGroup, this)); + } else if (obj->IsJSObject()) { + Object* raw_table = GetIsolate()->heap()->weak_object_to_code_table(); + WeakHashTable* table = WeakHashTable::cast(raw_table); + CHECK(DependentCode::cast(table->Lookup(obj))->Contains( + DependentCode::kWeaklyEmbeddedGroup, this)); } } } @@ -683,10 +701,9 @@ void Code::VerifyEmbeddedMapsDependency() { void JSArray::JSArrayVerify() { JSObjectVerify(); CHECK(length()->IsNumber() || length()->IsUndefined()); - // TODO(hpayer): deal gracefully with partially constructed JSObjects, when - // allocation folding is turned off. - if (reinterpret_cast<Map*>(elements()) != - GetHeap()->one_pointer_filler_map()) { + // If a GC was caused while constructing this array, the elements + // pointer may point to a one pointer filler map. 
+ if (ElementsAreSafeToExamine()) { CHECK(elements()->IsUndefined() || elements()->IsFixedArray() || elements()->IsFixedDoubleArray()); diff --git a/deps/v8/src/objects-inl.h b/deps/v8/src/objects-inl.h index 89abe50433..deb33653f7 100644 --- a/deps/v8/src/objects-inl.h +++ b/deps/v8/src/objects-inl.h @@ -80,7 +80,7 @@ PropertyDetails PropertyDetails::AsDeleted() { #define CAST_ACCESSOR(type) \ type* type::cast(Object* object) { \ - ASSERT(object->Is##type()); \ + SLOW_ASSERT(object->Is##type()); \ return reinterpret_cast<type*>(object); \ } @@ -133,7 +133,7 @@ PropertyDetails PropertyDetails::AsDeleted() { bool Object::IsFixedArrayBase() { - return IsFixedArray() || IsFixedDoubleArray(); + return IsFixedArray() || IsFixedDoubleArray() || IsConstantPoolArray(); } @@ -285,14 +285,13 @@ bool Object::HasValidElements() { MaybeObject* Object::AllocateNewStorageFor(Heap* heap, - Representation representation, - PretenureFlag tenure) { + Representation representation) { if (!FLAG_track_double_fields) return this; if (!representation.IsDouble()) return this; if (IsUninitialized()) { - return heap->AllocateHeapNumber(0, tenure); + return heap->AllocateHeapNumber(0); } - return heap->AllocateHeapNumber(Number(), tenure); + return heap->AllocateHeapNumber(Number()); } @@ -572,6 +571,7 @@ TYPE_CHECKER(JSContextExtensionObject, JS_CONTEXT_EXTENSION_OBJECT_TYPE) TYPE_CHECKER(Map, MAP_TYPE) TYPE_CHECKER(FixedArray, FIXED_ARRAY_TYPE) TYPE_CHECKER(FixedDoubleArray, FIXED_DOUBLE_ARRAY_TYPE) +TYPE_CHECKER(ConstantPoolArray, CONSTANT_POOL_ARRAY_TYPE) bool Object::IsJSWeakCollection() { @@ -1028,6 +1028,12 @@ MaybeObject* Object::GetProperty(Name* key, PropertyAttributes* attributes) { #define WRITE_UINT32_FIELD(p, offset, value) \ (*reinterpret_cast<uint32_t*>(FIELD_ADDR(p, offset)) = value) +#define READ_INT32_FIELD(p, offset) \ + (*reinterpret_cast<int32_t*>(FIELD_ADDR(p, offset))) + +#define WRITE_INT32_FIELD(p, offset, value) \ + (*reinterpret_cast<int32_t*>(FIELD_ADDR(p, offset)) = value) + #define READ_INT64_FIELD(p, offset) \ (*reinterpret_cast<int64_t*>(FIELD_ADDR(p, offset))) @@ -1184,7 +1190,7 @@ void HeapObject::VerifySmiField(int offset) { Heap* HeapObject::GetHeap() { Heap* heap = MemoryChunk::FromAddress(reinterpret_cast<Address>(this))->heap(); - ASSERT(heap != NULL); + SLOW_ASSERT(heap != NULL); return heap; } @@ -1301,7 +1307,7 @@ FixedArrayBase* JSObject::elements() { void JSObject::ValidateElements() { -#if DEBUG +#ifdef ENABLE_SLOW_ASSERTS if (FLAG_enable_slow_asserts) { ElementsAccessor* accessor = GetElementsAccessor(); accessor->Validate(this); @@ -1323,6 +1329,14 @@ bool JSObject::ShouldTrackAllocationInfo() { } +void AllocationSite::Initialize() { + SetElementsKind(GetInitialFastElementsKind()); + set_nested_site(Smi::FromInt(0)); + set_dependent_code(DependentCode::cast(GetHeap()->empty_fixed_array()), + SKIP_WRITE_BARRIER); +} + + // Heuristic: We only need to create allocation site info if the boilerplate // elements kind is the initial elements kind. 
AllocationSiteMode AllocationSite::GetMode( @@ -1535,65 +1549,6 @@ MaybeObject* JSObject::ResetElements() { } -MaybeObject* JSObject::AllocateStorageForMap(Map* map) { - ASSERT(this->map()->inobject_properties() == map->inobject_properties()); - ElementsKind obj_kind = this->map()->elements_kind(); - ElementsKind map_kind = map->elements_kind(); - if (map_kind != obj_kind) { - ElementsKind to_kind = map_kind; - if (IsMoreGeneralElementsKindTransition(map_kind, obj_kind) || - IsDictionaryElementsKind(obj_kind)) { - to_kind = obj_kind; - } - MaybeObject* maybe_obj = - IsDictionaryElementsKind(to_kind) ? NormalizeElements() - : TransitionElementsKind(to_kind); - if (maybe_obj->IsFailure()) return maybe_obj; - MaybeObject* maybe_map = map->AsElementsKind(to_kind); - if (!maybe_map->To(&map)) return maybe_map; - } - int total_size = - map->NumberOfOwnDescriptors() + map->unused_property_fields(); - int out_of_object = total_size - map->inobject_properties(); - if (out_of_object != properties()->length()) { - FixedArray* new_properties; - MaybeObject* maybe_properties = properties()->CopySize(out_of_object); - if (!maybe_properties->To(&new_properties)) return maybe_properties; - set_properties(new_properties); - } - set_map(map); - return this; -} - - -MaybeObject* JSObject::MigrateInstance() { - // Converting any field to the most specific type will cause the - // GeneralizeFieldRepresentation algorithm to create the most general existing - // transition that matches the object. This achieves what is needed. - Map* original_map = map(); - MaybeObject* maybe_result = GeneralizeFieldRepresentation( - 0, Representation::None(), ALLOW_AS_CONSTANT); - JSObject* result; - if (FLAG_trace_migration && maybe_result->To(&result)) { - PrintInstanceMigration(stdout, original_map, result->map()); - } - return maybe_result; -} - - -MaybeObject* JSObject::TryMigrateInstance() { - Map* new_map = map()->CurrentMapForDeprecated(); - if (new_map == NULL) return Smi::FromInt(0); - Map* original_map = map(); - MaybeObject* maybe_result = MigrateToMap(new_map); - JSObject* result; - if (FLAG_trace_migration && maybe_result->To(&result)) { - PrintInstanceMigration(stdout, original_map, result->map()); - } - return maybe_result; -} - - Handle<String> JSObject::ExpectedTransitionKey(Handle<Map> map) { DisallowHeapAllocation no_gc; if (!map->HasTransitionArray()) return Handle<String>::null(); @@ -1629,13 +1584,6 @@ Handle<Map> JSObject::FindTransitionToField(Handle<Map> map, Handle<Name> key) { } -int JSObject::LastAddedFieldIndex() { - Map* map = this->map(); - int last_added = map->LastAdded(); - return map->instance_descriptors()->GetFieldIndex(last_added); -} - - ACCESSORS(Oddball, to_string, String, kToStringOffset) ACCESSORS(Oddball, to_number, Object, kToNumberOffset) @@ -1719,7 +1667,9 @@ int JSObject::GetHeaderSize() { case JS_MESSAGE_OBJECT_TYPE: return JSMessageObject::kSize; default: - UNREACHABLE(); + // TODO(jkummerow): Re-enable this. Blink currently hits this + // from its CustomElementConstructorBuilder. 
+ // UNREACHABLE(); return 0; } } @@ -1946,13 +1896,14 @@ void Object::VerifyApiCallResultType() { FixedArrayBase* FixedArrayBase::cast(Object* object) { - ASSERT(object->IsFixedArray() || object->IsFixedDoubleArray()); + ASSERT(object->IsFixedArray() || object->IsFixedDoubleArray() || + object->IsConstantPoolArray()); return reinterpret_cast<FixedArrayBase*>(object); } Object* FixedArray::get(int index) { - ASSERT(index >= 0 && index < this->length()); + SLOW_ASSERT(index >= 0 && index < this->length()); return READ_FIELD(this, kHeaderSize + index * kPointerSize); } @@ -2045,6 +1996,98 @@ bool FixedDoubleArray::is_the_hole(int index) { } +SMI_ACCESSORS(ConstantPoolArray, first_ptr_index, kFirstPointerIndexOffset) +SMI_ACCESSORS(ConstantPoolArray, first_int32_index, kFirstInt32IndexOffset) + + +int ConstantPoolArray::first_int64_index() { + return 0; +} + + +int ConstantPoolArray::count_of_int64_entries() { + return first_ptr_index(); +} + + +int ConstantPoolArray::count_of_ptr_entries() { + return first_int32_index() - first_ptr_index(); +} + + +int ConstantPoolArray::count_of_int32_entries() { + return length() - first_int32_index(); +} + + +void ConstantPoolArray::SetEntryCounts(int number_of_int64_entries, + int number_of_ptr_entries, + int number_of_int32_entries) { + set_first_ptr_index(number_of_int64_entries); + set_first_int32_index(number_of_int64_entries + number_of_ptr_entries); + set_length(number_of_int64_entries + number_of_ptr_entries + + number_of_int32_entries); +} + + +int64_t ConstantPoolArray::get_int64_entry(int index) { + ASSERT(map() == GetHeap()->constant_pool_array_map()); + ASSERT(index >= 0 && index < first_ptr_index()); + return READ_INT64_FIELD(this, OffsetOfElementAt(index)); +} + +double ConstantPoolArray::get_int64_entry_as_double(int index) { + STATIC_ASSERT(kDoubleSize == kInt64Size); + ASSERT(map() == GetHeap()->constant_pool_array_map()); + ASSERT(index >= 0 && index < first_ptr_index()); + return READ_DOUBLE_FIELD(this, OffsetOfElementAt(index)); +} + + +Object* ConstantPoolArray::get_ptr_entry(int index) { + ASSERT(map() == GetHeap()->constant_pool_array_map()); + ASSERT(index >= first_ptr_index() && index < first_int32_index()); + return READ_FIELD(this, OffsetOfElementAt(index)); +} + + +int32_t ConstantPoolArray::get_int32_entry(int index) { + ASSERT(map() == GetHeap()->constant_pool_array_map()); + ASSERT(index >= first_int32_index() && index < length()); + return READ_INT32_FIELD(this, OffsetOfElementAt(index)); +} + + +void ConstantPoolArray::set(int index, Object* value) { + ASSERT(map() == GetHeap()->constant_pool_array_map()); + ASSERT(index >= first_ptr_index() && index < first_int32_index()); + WRITE_FIELD(this, OffsetOfElementAt(index), value); + WRITE_BARRIER(GetHeap(), this, OffsetOfElementAt(index), value); +} + + +void ConstantPoolArray::set(int index, int64_t value) { + ASSERT(map() == GetHeap()->constant_pool_array_map()); + ASSERT(index >= first_int64_index() && index < first_ptr_index()); + WRITE_INT64_FIELD(this, OffsetOfElementAt(index), value); +} + + +void ConstantPoolArray::set(int index, double value) { + STATIC_ASSERT(kDoubleSize == kInt64Size); + ASSERT(map() == GetHeap()->constant_pool_array_map()); + ASSERT(index >= first_int64_index() && index < first_ptr_index()); + WRITE_DOUBLE_FIELD(this, OffsetOfElementAt(index), value); +} + + +void ConstantPoolArray::set(int index, int32_t value) { + ASSERT(map() == GetHeap()->constant_pool_array_map()); + ASSERT(index >= this->first_int32_index() && index < length()); + 
WRITE_INT32_FIELD(this, OffsetOfElementAt(index), value); +} + + WriteBarrierMode HeapObject::GetWriteBarrierMode( const DisallowHeapAllocation& promise) { Heap* heap = GetHeap(); @@ -2537,6 +2580,7 @@ void SeededNumberDictionary::set_requires_slow_elements() { CAST_ACCESSOR(FixedArray) CAST_ACCESSOR(FixedDoubleArray) +CAST_ACCESSOR(ConstantPoolArray) CAST_ACCESSOR(DescriptorArray) CAST_ACCESSOR(DeoptimizationInputData) CAST_ACCESSOR(DeoptimizationOutputData) @@ -3432,6 +3476,12 @@ int HeapObject::SizeFromMap(Map* map) { return FixedDoubleArray::SizeFor( reinterpret_cast<FixedDoubleArray*>(this)->length()); } + if (instance_type == CONSTANT_POOL_ARRAY_TYPE) { + return ConstantPoolArray::SizeFor( + reinterpret_cast<ConstantPoolArray*>(this)->count_of_int64_entries(), + reinterpret_cast<ConstantPoolArray*>(this)->count_of_ptr_entries(), + reinterpret_cast<ConstantPoolArray*>(this)->count_of_int32_entries()); + } ASSERT(instance_type == CODE_TYPE); return reinterpret_cast<Code*>(this)->CodeSize(); } @@ -3808,7 +3858,8 @@ Code::StubType Code::type() { int Code::arguments_count() { - ASSERT(is_call_stub() || is_keyed_call_stub() || kind() == STUB); + ASSERT(is_call_stub() || is_keyed_call_stub() || + kind() == STUB || is_handler()); return ExtractArgumentsCountFromFlags(flags()); } @@ -3828,6 +3879,7 @@ inline void Code::set_is_crankshafted(bool value) { int Code::major_key() { ASSERT(kind() == STUB || + kind() == HANDLER || kind() == BINARY_OP_IC || kind() == COMPARE_IC || kind() == COMPARE_NIL_IC || @@ -3842,6 +3894,7 @@ int Code::major_key() { void Code::set_major_key(int major) { ASSERT(kind() == STUB || + kind() == HANDLER || kind() == BINARY_OP_IC || kind() == COMPARE_IC || kind() == COMPARE_NIL_IC || @@ -4077,6 +4130,11 @@ bool Code::is_inline_cache_stub() { } +bool Code::is_keyed_stub() { + return is_keyed_load_stub() || is_keyed_store_stub() || is_keyed_call_stub(); +} + + bool Code::is_debug_stub() { return ic_state() == DEBUG_STUB; } @@ -4495,6 +4553,9 @@ ACCESSORS(SignatureInfo, args, Object, kArgsOffset) ACCESSORS(TypeSwitchInfo, types, Object, kTypesOffset) ACCESSORS(AllocationSite, transition_info, Object, kTransitionInfoOffset) +ACCESSORS(AllocationSite, nested_site, Object, kNestedSiteOffset) +ACCESSORS(AllocationSite, dependent_code, DependentCode, + kDependentCodeOffset) ACCESSORS(AllocationSite, weak_next, Object, kWeakNextOffset) ACCESSORS(AllocationMemento, allocation_site, Object, kAllocationSiteOffset) @@ -5457,19 +5518,24 @@ ElementsKind JSObject::GetElementsKind() { #if DEBUG FixedArrayBase* fixed_array = reinterpret_cast<FixedArrayBase*>(READ_FIELD(this, kElementsOffset)); - Map* map = fixed_array->map(); - ASSERT((IsFastSmiOrObjectElementsKind(kind) && - (map == GetHeap()->fixed_array_map() || - map == GetHeap()->fixed_cow_array_map())) || - (IsFastDoubleElementsKind(kind) && - (fixed_array->IsFixedDoubleArray() || - fixed_array == GetHeap()->empty_fixed_array())) || - (kind == DICTIONARY_ELEMENTS && + + // If a GC was caused while constructing this object, the elements + // pointer may point to a one pointer filler map. 
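
The ConstantPoolArray accessors added in objects-inl.h above encode a three-section layout: 64-bit entries first, then tagged pointers, then 32-bit entries. Only the two boundary indices are stored; every count falls out by subtraction. A plain-struct sketch of that arithmetic, for illustration only, not V8's class:

struct ConstantPoolLayoutSketch {
  int first_ptr_index;    // index of the first tagged-pointer entry
  int first_int32_index;  // index of the first 32-bit entry
  int length;             // total number of entries

  int count_of_int64_entries() const { return first_ptr_index; }
  int count_of_ptr_entries() const {
    return first_int32_index - first_ptr_index;
  }
  int count_of_int32_entries() const { return length - first_int32_index; }
};

SetEntryCounts in the hunk is the inverse mapping: given the three counts, it derives the two boundary indices and the total length.
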
+ if (ElementsAreSafeToExamine()) { + Map* map = fixed_array->map(); + ASSERT((IsFastSmiOrObjectElementsKind(kind) && + (map == GetHeap()->fixed_array_map() || + map == GetHeap()->fixed_cow_array_map())) || + (IsFastDoubleElementsKind(kind) && + (fixed_array->IsFixedDoubleArray() || + fixed_array == GetHeap()->empty_fixed_array())) || + (kind == DICTIONARY_ELEMENTS && fixed_array->IsFixedArray() && - fixed_array->IsDictionary()) || - (kind > DICTIONARY_ELEMENTS)); - ASSERT((kind != NON_STRICT_ARGUMENTS_ELEMENTS) || - (elements()->IsFixedArray() && elements()->length() >= 2)); + fixed_array->IsDictionary()) || + (kind > DICTIONARY_ELEMENTS)); + ASSERT((kind != NON_STRICT_ARGUMENTS_ELEMENTS) || + (elements()->IsFixedArray() && elements()->length() >= 2)); + } #endif return kind; } @@ -5729,19 +5795,23 @@ Object* JSReceiver::GetConstructor() { } -bool JSReceiver::HasProperty(Name* name) { - if (IsJSProxy()) { - return JSProxy::cast(this)->HasPropertyWithHandler(name); +bool JSReceiver::HasProperty(Handle<JSReceiver> object, + Handle<Name> name) { + if (object->IsJSProxy()) { + Handle<JSProxy> proxy = Handle<JSProxy>::cast(object); + return JSProxy::HasPropertyWithHandler(proxy, name); } - return GetPropertyAttribute(name) != ABSENT; + return object->GetPropertyAttribute(*name) != ABSENT; } -bool JSReceiver::HasLocalProperty(Name* name) { - if (IsJSProxy()) { - return JSProxy::cast(this)->HasPropertyWithHandler(name); +bool JSReceiver::HasLocalProperty(Handle<JSReceiver> object, + Handle<Name> name) { + if (object->IsJSProxy()) { + Handle<JSProxy> proxy = Handle<JSProxy>::cast(object); + return JSProxy::HasPropertyWithHandler(proxy, name); } - return GetLocalPropertyAttribute(name) != ABSENT; + return object->GetLocalPropertyAttribute(*name) != ABSENT; } @@ -5783,21 +5853,23 @@ MaybeObject* JSReceiver::GetIdentityHash(CreationFlag flag) { } -bool JSReceiver::HasElement(uint32_t index) { - if (IsJSProxy()) { - return JSProxy::cast(this)->HasElementWithHandler(index); +bool JSReceiver::HasElement(Handle<JSReceiver> object, uint32_t index) { + if (object->IsJSProxy()) { + Handle<JSProxy> proxy = Handle<JSProxy>::cast(object); + return JSProxy::HasElementWithHandler(proxy, index); } - return JSObject::cast(this)->GetElementAttributeWithReceiver( - this, index, true) != ABSENT; + return Handle<JSObject>::cast(object)->GetElementAttributeWithReceiver( + *object, index, true) != ABSENT; } -bool JSReceiver::HasLocalElement(uint32_t index) { - if (IsJSProxy()) { - return JSProxy::cast(this)->HasElementWithHandler(index); +bool JSReceiver::HasLocalElement(Handle<JSReceiver> object, uint32_t index) { + if (object->IsJSProxy()) { + Handle<JSProxy> proxy = Handle<JSProxy>::cast(object); + return JSProxy::HasElementWithHandler(proxy, index); } - return JSObject::cast(this)->GetElementAttributeWithReceiver( - this, index, false) != ABSENT; + return Handle<JSObject>::cast(object)->GetElementAttributeWithReceiver( + *object, index, false) != ABSENT; } @@ -5964,6 +6036,7 @@ uint32_t NameDictionaryShape::HashForObject(Name* key, Object* other) { MaybeObject* NameDictionaryShape::AsObject(Heap* heap, Name* key) { + ASSERT(key->IsUniqueName()); return key; } @@ -5996,6 +6069,34 @@ MaybeObject* ObjectHashTableShape<entrysize>::AsObject(Heap* heap, } +template <int entrysize> +bool WeakHashTableShape<entrysize>::IsMatch(Object* key, Object* other) { + return key->SameValue(other); +} + + +template <int entrysize> +uint32_t WeakHashTableShape<entrysize>::Hash(Object* key) { + intptr_t hash = 
reinterpret_cast<intptr_t>(key); + return (uint32_t)(hash & 0xFFFFFFFF); +} + + +template <int entrysize> +uint32_t WeakHashTableShape<entrysize>::HashForObject(Object* key, + Object* other) { + intptr_t hash = reinterpret_cast<intptr_t>(other); + return (uint32_t)(hash & 0xFFFFFFFF); +} + + +template <int entrysize> +MaybeObject* WeakHashTableShape<entrysize>::AsObject(Heap* heap, + Object* key) { + return key; +} + + void Map::ClearCodeCache(Heap* heap) { // No write barrier is needed since empty_fixed_array is not in new space. // Please note this function is used during marking: @@ -6065,6 +6166,12 @@ MaybeObject* FixedDoubleArray::Copy() { } +MaybeObject* ConstantPoolArray::Copy() { + if (length() == 0) return this; + return GetHeap()->CopyConstantPoolArray(this); +} + + void TypeFeedbackCells::SetAstId(int index, TypeFeedbackId id) { set(1 + index * 2, Smi::FromInt(id.ToInt())); } diff --git a/deps/v8/src/objects-printer.cc b/deps/v8/src/objects-printer.cc index 0b8fdfda03..60c1ef4c38 100644 --- a/deps/v8/src/objects-printer.cc +++ b/deps/v8/src/objects-printer.cc @@ -95,6 +95,9 @@ void HeapObject::HeapObjectPrint(FILE* out) { case FIXED_DOUBLE_ARRAY_TYPE: FixedDoubleArray::cast(this)->FixedDoubleArrayPrint(out); break; + case CONSTANT_POOL_ARRAY_TYPE: + ConstantPoolArray::cast(this)->ConstantPoolArrayPrint(out); + break; case FIXED_ARRAY_TYPE: FixedArray::cast(this)->FixedArrayPrint(out); break; @@ -630,6 +633,23 @@ void FixedDoubleArray::FixedDoubleArrayPrint(FILE* out) { } +void ConstantPoolArray::ConstantPoolArrayPrint(FILE* out) { + HeapObject::PrintHeader(out, "ConstantPoolArray"); + PrintF(out, " - length: %d", length()); + for (int i = 0; i < length(); i++) { + if (i < first_ptr_index()) { + PrintF(out, "\n [%d]: double: %g", i, get_int64_entry_as_double(i)); + } else if (i < first_int32_index()) { + PrintF(out, "\n [%d]: pointer: %p", i, + reinterpret_cast<void*>(get_ptr_entry(i))); + } else { + PrintF(out, "\n [%d]: int32: %d", i, get_int32_entry(i)); + } + } + PrintF(out, "\n"); +} + + void JSValue::JSValuePrint(FILE* out) { HeapObject::PrintHeader(out, "ValueObject"); value()->Print(out); @@ -1100,9 +1120,11 @@ void AllocationSite::AllocationSitePrint(FILE* out) { HeapObject::PrintHeader(out, "AllocationSite"); PrintF(out, " - weak_next: "); weak_next()->ShortPrint(out); - PrintF(out, "\n"); - - PrintF(out, " - transition_info: "); + PrintF(out, "\n - dependent code: "); + dependent_code()->ShortPrint(out); + PrintF(out, "\n - nested site: "); + nested_site()->ShortPrint(out); + PrintF(out, "\n - transition_info: "); if (transition_info()->IsCell()) { Cell* cell = Cell::cast(transition_info()); Object* cell_contents = cell->value(); diff --git a/deps/v8/src/objects-visiting-inl.h b/deps/v8/src/objects-visiting-inl.h index 46cc9d7989..93b7cb96ad 100644 --- a/deps/v8/src/objects-visiting-inl.h +++ b/deps/v8/src/objects-visiting-inl.h @@ -185,6 +185,8 @@ void StaticMarkingVisitor<StaticVisitor>::Initialize() { table_.Register(kVisitFixedDoubleArray, &DataObjectVisitor::Visit); + table_.Register(kVisitConstantPoolArray, &VisitConstantPoolArray); + table_.Register(kVisitNativeContext, &VisitNativeContext); table_.Register(kVisitAllocationSite, @@ -261,10 +263,8 @@ void StaticMarkingVisitor<StaticVisitor>::VisitEmbeddedPointer( ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT); ASSERT(!rinfo->target_object()->IsConsString()); HeapObject* object = HeapObject::cast(rinfo->target_object()); - if (!FLAG_weak_embedded_maps_in_optimized_code || !FLAG_collect_maps || - 
rinfo->host()->kind() != Code::OPTIMIZED_FUNCTION || - !object->IsMap() || !Map::cast(object)->CanTransition()) { - heap->mark_compact_collector()->RecordRelocSlot(rinfo, object); + heap->mark_compact_collector()->RecordRelocSlot(rinfo, object); + if (!Code::IsWeakEmbeddedObject(rinfo->host()->kind(), object)) { StaticVisitor::MarkObject(heap, object); } } @@ -452,6 +452,22 @@ void StaticMarkingVisitor<StaticVisitor>::VisitSharedFunctionInfo( template<typename StaticVisitor> +void StaticMarkingVisitor<StaticVisitor>::VisitConstantPoolArray( + Map* map, HeapObject* object) { + Heap* heap = map->GetHeap(); + ConstantPoolArray* constant_pool = ConstantPoolArray::cast(object); + int first_ptr_offset = constant_pool->OffsetOfElementAt( + constant_pool->first_ptr_index()); + int last_ptr_offset = constant_pool->OffsetOfElementAt( + constant_pool->first_ptr_index() + constant_pool->count_of_ptr_entries()); + StaticVisitor::VisitPointers( + heap, + HeapObject::RawField(object, first_ptr_offset), + HeapObject::RawField(object, last_ptr_offset)); +} + + +template<typename StaticVisitor> void StaticMarkingVisitor<StaticVisitor>::VisitJSFunction( Map* map, HeapObject* object) { Heap* heap = map->GetHeap(); diff --git a/deps/v8/src/objects-visiting.cc b/deps/v8/src/objects-visiting.cc index cd46013398..5ced2cf7a3 100644 --- a/deps/v8/src/objects-visiting.cc +++ b/deps/v8/src/objects-visiting.cc @@ -82,6 +82,9 @@ StaticVisitorBase::VisitorId StaticVisitorBase::GetVisitorId( case FIXED_DOUBLE_ARRAY_TYPE: return kVisitFixedDoubleArray; + case CONSTANT_POOL_ARRAY_TYPE: + return kVisitConstantPoolArray; + case ODDBALL_TYPE: return kVisitOddball; diff --git a/deps/v8/src/objects-visiting.h b/deps/v8/src/objects-visiting.h index 21757377a4..60e6f67471 100644 --- a/deps/v8/src/objects-visiting.h +++ b/deps/v8/src/objects-visiting.h @@ -54,6 +54,7 @@ class StaticVisitorBase : public AllStatic { V(FreeSpace) \ V(FixedArray) \ V(FixedDoubleArray) \ + V(ConstantPoolArray) \ V(NativeContext) \ V(AllocationSite) \ V(DataObject2) \ @@ -416,6 +417,7 @@ class StaticMarkingVisitor : public StaticVisitorBase { INLINE(static void VisitMap(Map* map, HeapObject* object)); INLINE(static void VisitCode(Map* map, HeapObject* object)); INLINE(static void VisitSharedFunctionInfo(Map* map, HeapObject* object)); + INLINE(static void VisitConstantPoolArray(Map* map, HeapObject* object)); INLINE(static void VisitJSFunction(Map* map, HeapObject* object)); INLINE(static void VisitJSRegExp(Map* map, HeapObject* object)); INLINE(static void VisitJSArrayBuffer(Map* map, HeapObject* object)); diff --git a/deps/v8/src/objects.cc b/deps/v8/src/objects.cc index 35646b8be5..f7c89175da 100644 --- a/deps/v8/src/objects.cc +++ b/deps/v8/src/objects.cc @@ -28,6 +28,7 @@ #include "v8.h" #include "accessors.h" +#include "allocation-site-scopes.h" #include "api.h" #include "arguments.h" #include "bootstrapper.h" @@ -142,6 +143,20 @@ void Object::Lookup(Name* name, LookupResult* result) { } +Handle<Object> Object::GetPropertyWithReceiver( + Handle<Object> object, + Handle<Object> receiver, + Handle<Name> name, + PropertyAttributes* attributes) { + LookupResult lookup(name->GetIsolate()); + object->Lookup(*name, &lookup); + Handle<Object> result = + GetProperty(object, receiver, &lookup, name, attributes); + ASSERT(*attributes <= ABSENT); + return result; +} + + MaybeObject* Object::GetPropertyWithReceiver(Object* receiver, Name* name, PropertyAttributes* attributes) { @@ -328,9 +343,18 @@ static MaybeObject* GetDeclaredAccessorProperty(Object* 
receiver, } -MaybeObject* JSObject::GetPropertyWithCallback(Object* receiver, - Object* structure, - Name* name) { +Handle<FixedArray> JSObject::EnsureWritableFastElements( + Handle<JSObject> object) { + CALL_HEAP_FUNCTION(object->GetIsolate(), + object->EnsureWritableFastElements(), + FixedArray); +} + + +Handle<Object> JSObject::GetPropertyWithCallback(Handle<JSObject> object, + Handle<Object> receiver, + Handle<Object> structure, + Handle<Name> name) { Isolate* isolate = name->GetIsolate(); // To accommodate both the old and the new api we switch on the // data structure used to store the callbacks. Eventually foreign @@ -338,66 +362,71 @@ MaybeObject* JSObject::GetPropertyWithCallback(Object* receiver, if (structure->IsForeign()) { AccessorDescriptor* callback = reinterpret_cast<AccessorDescriptor*>( - Foreign::cast(structure)->foreign_address()); - MaybeObject* value = (callback->getter)(isolate, receiver, callback->data); - RETURN_IF_SCHEDULED_EXCEPTION(isolate); - return value; + Handle<Foreign>::cast(structure)->foreign_address()); + CALL_HEAP_FUNCTION(isolate, + (callback->getter)(isolate, *receiver, callback->data), + Object); } // api style callbacks. if (structure->IsAccessorInfo()) { - if (!AccessorInfo::cast(structure)->IsCompatibleReceiver(receiver)) { - Handle<Object> name_handle(name, isolate); - Handle<Object> receiver_handle(receiver, isolate); - Handle<Object> args[2] = { name_handle, receiver_handle }; + Handle<AccessorInfo> accessor_info = Handle<AccessorInfo>::cast(structure); + if (!accessor_info->IsCompatibleReceiver(*receiver)) { + Handle<Object> args[2] = { name, receiver }; Handle<Object> error = isolate->factory()->NewTypeError("incompatible_method_receiver", HandleVector(args, ARRAY_SIZE(args))); - return isolate->Throw(*error); + isolate->Throw(*error); + return Handle<Object>::null(); } // TODO(rossberg): Handling symbols in the API requires changing the API, // so we do not support it for now. 
- if (name->IsSymbol()) return isolate->heap()->undefined_value(); + if (name->IsSymbol()) return isolate->factory()->undefined_value(); if (structure->IsDeclaredAccessorInfo()) { - return GetDeclaredAccessorProperty(receiver, - DeclaredAccessorInfo::cast(structure), - isolate); + CALL_HEAP_FUNCTION( + isolate, + GetDeclaredAccessorProperty(*receiver, + DeclaredAccessorInfo::cast(*structure), + isolate), + Object); } - ExecutableAccessorInfo* data = ExecutableAccessorInfo::cast(structure); - Object* fun_obj = data->getter(); + + Handle<ExecutableAccessorInfo> data = + Handle<ExecutableAccessorInfo>::cast(structure); v8::AccessorGetterCallback call_fun = - v8::ToCData<v8::AccessorGetterCallback>(fun_obj); - if (call_fun == NULL) return isolate->heap()->undefined_value(); + v8::ToCData<v8::AccessorGetterCallback>(data->getter()); + if (call_fun == NULL) return isolate->factory()->undefined_value(); + HandleScope scope(isolate); - JSObject* self = JSObject::cast(receiver); - Handle<String> key(String::cast(name)); - LOG(isolate, ApiNamedPropertyAccess("load", self, name)); - PropertyCallbackArguments args(isolate, data->data(), self, this); + Handle<JSObject> self = Handle<JSObject>::cast(receiver); + Handle<String> key = Handle<String>::cast(name); + LOG(isolate, ApiNamedPropertyAccess("load", *self, *name)); + PropertyCallbackArguments args(isolate, data->data(), *self, *object); v8::Handle<v8::Value> result = args.Call(call_fun, v8::Utils::ToLocal(key)); - RETURN_IF_SCHEDULED_EXCEPTION(isolate); + RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object); if (result.IsEmpty()) { - return isolate->heap()->undefined_value(); + return isolate->factory()->undefined_value(); } - Object* return_value = *v8::Utils::OpenHandle(*result); + Handle<Object> return_value = v8::Utils::OpenHandle(*result); return_value->VerifyApiCallResultType(); - return return_value; + return scope.CloseAndEscape(return_value); } // __defineGetter__ callback - if (structure->IsAccessorPair()) { - Object* getter = AccessorPair::cast(structure)->getter(); - if (getter->IsSpecFunction()) { - // TODO(rossberg): nicer would be to cast to some JSCallable here... - return GetPropertyWithDefinedGetter(receiver, JSReceiver::cast(getter)); - } - // Getter is not a function. - return isolate->heap()->undefined_value(); + Handle<Object> getter(Handle<AccessorPair>::cast(structure)->getter(), + isolate); + if (getter->IsSpecFunction()) { + // TODO(rossberg): nicer would be to cast to some JSCallable here... + CALL_HEAP_FUNCTION( + isolate, + object->GetPropertyWithDefinedGetter(*receiver, + JSReceiver::cast(*getter)), + Object); } - - UNREACHABLE(); - return NULL; + // Getter is not a function. 
+ return isolate->factory()->undefined_value(); } @@ -455,18 +484,15 @@ Handle<Object> JSProxy::SetElementWithHandler(Handle<JSProxy> proxy, StrictModeFlag strict_mode) { Isolate* isolate = proxy->GetIsolate(); Handle<String> name = isolate->factory()->Uint32ToString(index); - CALL_HEAP_FUNCTION(isolate, - proxy->SetPropertyWithHandler( - *receiver, *name, *value, NONE, strict_mode), - Object); + return SetPropertyWithHandler( + proxy, receiver, name, value, NONE, strict_mode); } -bool JSProxy::HasElementWithHandler(uint32_t index) { - String* name; - MaybeObject* maybe = GetHeap()->Uint32ToString(index); - if (!maybe->To<String>(&name)) return maybe; - return HasPropertyWithHandler(name); +bool JSProxy::HasElementWithHandler(Handle<JSProxy> proxy, uint32_t index) { + Isolate* isolate = proxy->GetIsolate(); + Handle<String> name = isolate->factory()->Uint32ToString(index); + return HasPropertyWithHandler(proxy, name); } @@ -496,56 +522,51 @@ MaybeObject* Object::GetPropertyWithDefinedGetter(Object* receiver, // Only deal with CALLBACKS and INTERCEPTOR -MaybeObject* JSObject::GetPropertyWithFailedAccessCheck( - Object* receiver, +Handle<Object> JSObject::GetPropertyWithFailedAccessCheck( + Handle<JSObject> object, + Handle<Object> receiver, LookupResult* result, - Name* name, + Handle<Name> name, PropertyAttributes* attributes) { + Isolate* isolate = name->GetIsolate(); if (result->IsProperty()) { switch (result->type()) { case CALLBACKS: { // Only allow API accessors. - Object* obj = result->GetCallbackObject(); - if (obj->IsAccessorInfo()) { - AccessorInfo* info = AccessorInfo::cast(obj); - if (info->all_can_read()) { - *attributes = result->GetAttributes(); - return result->holder()->GetPropertyWithCallback( - receiver, result->GetCallbackObject(), name); - } - } else if (obj->IsAccessorPair()) { - AccessorPair* pair = AccessorPair::cast(obj); - if (pair->all_can_read()) { - return result->holder()->GetPropertyWithCallback( - receiver, result->GetCallbackObject(), name); - } + Handle<Object> callback_obj(result->GetCallbackObject(), isolate); + if (callback_obj->IsAccessorInfo()) { + if (!AccessorInfo::cast(*callback_obj)->all_can_read()) break; + *attributes = result->GetAttributes(); + // Fall through to GetPropertyWithCallback. + } else if (callback_obj->IsAccessorPair()) { + if (!AccessorPair::cast(*callback_obj)->all_can_read()) break; + // Fall through to GetPropertyWithCallback. + } else { + break; } - break; + Handle<JSObject> holder(result->holder(), isolate); + return GetPropertyWithCallback(holder, receiver, callback_obj, name); } case NORMAL: case FIELD: case CONSTANT: { // Search ALL_CAN_READ accessors in prototype chain. - LookupResult r(GetIsolate()); - result->holder()->LookupRealNamedPropertyInPrototypes(name, &r); + LookupResult r(isolate); + result->holder()->LookupRealNamedPropertyInPrototypes(*name, &r); if (r.IsProperty()) { - return GetPropertyWithFailedAccessCheck(receiver, - &r, - name, - attributes); + return GetPropertyWithFailedAccessCheck( + object, receiver, &r, name, attributes); } break; } case INTERCEPTOR: { // If the object has an interceptor, try real named properties. // No access check in GetPropertyAttributeWithInterceptor. 
- LookupResult r(GetIsolate()); - result->holder()->LookupRealNamedProperty(name, &r); + LookupResult r(isolate); + result->holder()->LookupRealNamedProperty(*name, &r); if (r.IsProperty()) { - return GetPropertyWithFailedAccessCheck(receiver, - &r, - name, - attributes); + return GetPropertyWithFailedAccessCheck( + object, receiver, &r, name, attributes); } break; } @@ -556,11 +577,9 @@ MaybeObject* JSObject::GetPropertyWithFailedAccessCheck( // No accessible property found. *attributes = ABSENT; - Heap* heap = name->GetHeap(); - Isolate* isolate = heap->isolate(); - isolate->ReportFailedAccessCheck(this, v8::ACCESS_GET); - RETURN_IF_SCHEDULED_EXCEPTION(isolate); - return heap->undefined_value(); + isolate->ReportFailedAccessCheck(*object, v8::ACCESS_GET); + RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object); + return isolate->factory()->undefined_value(); } @@ -643,67 +662,63 @@ Object* JSObject::GetNormalizedProperty(LookupResult* result) { } -Handle<Object> JSObject::SetNormalizedProperty(Handle<JSObject> object, - LookupResult* result, - Handle<Object> value) { - CALL_HEAP_FUNCTION(object->GetIsolate(), - object->SetNormalizedProperty(result, *value), - Object); -} - - -MaybeObject* JSObject::SetNormalizedProperty(LookupResult* result, - Object* value) { - ASSERT(!HasFastProperties()); - if (IsGlobalObject()) { - PropertyCell* cell = PropertyCell::cast( - property_dictionary()->ValueAt(result->GetDictionaryEntry())); - MaybeObject* maybe_type = cell->SetValueInferType(value); - if (maybe_type->IsFailure()) return maybe_type; +void JSObject::SetNormalizedProperty(Handle<JSObject> object, + LookupResult* result, + Handle<Object> value) { + ASSERT(!object->HasFastProperties()); + NameDictionary* property_dictionary = object->property_dictionary(); + if (object->IsGlobalObject()) { + Handle<PropertyCell> cell(PropertyCell::cast( + property_dictionary->ValueAt(result->GetDictionaryEntry()))); + PropertyCell::SetValueInferType(cell, value); } else { - property_dictionary()->ValueAtPut(result->GetDictionaryEntry(), value); + property_dictionary->ValueAtPut(result->GetDictionaryEntry(), *value); } - return value; } -Handle<Object> JSObject::SetNormalizedProperty(Handle<JSObject> object, - Handle<Name> key, - Handle<Object> value, - PropertyDetails details) { - CALL_HEAP_FUNCTION(object->GetIsolate(), - object->SetNormalizedProperty(*key, *value, details), - Object); +// TODO(mstarzinger): Temporary wrapper until handlified. 
+static Handle<NameDictionary> NameDictionaryAdd(Handle<NameDictionary> dict, + Handle<Name> name, + Handle<Object> value, + PropertyDetails details) { + CALL_HEAP_FUNCTION(dict->GetIsolate(), + dict->Add(*name, *value, details), + NameDictionary); } -MaybeObject* JSObject::SetNormalizedProperty(Name* name, - Object* value, - PropertyDetails details) { - ASSERT(!HasFastProperties()); - int entry = property_dictionary()->FindEntry(name); +void JSObject::SetNormalizedProperty(Handle<JSObject> object, + Handle<Name> name, + Handle<Object> value, + PropertyDetails details) { + ASSERT(!object->HasFastProperties()); + Handle<NameDictionary> property_dictionary(object->property_dictionary()); + + if (!name->IsUniqueName()) { + name = object->GetIsolate()->factory()->InternalizedStringFromString( + Handle<String>::cast(name)); + } + + int entry = property_dictionary->FindEntry(*name); if (entry == NameDictionary::kNotFound) { - Object* store_value = value; - if (IsGlobalObject()) { - Heap* heap = name->GetHeap(); - MaybeObject* maybe_store_value = heap->AllocatePropertyCell(value); - if (!maybe_store_value->ToObject(&store_value)) return maybe_store_value; - } - Object* dict; - { MaybeObject* maybe_dict = - property_dictionary()->Add(name, store_value, details); - if (!maybe_dict->ToObject(&dict)) return maybe_dict; + Handle<Object> store_value = value; + if (object->IsGlobalObject()) { + store_value = object->GetIsolate()->factory()->NewPropertyCell(value); } - set_properties(NameDictionary::cast(dict)); - return value; + + property_dictionary = + NameDictionaryAdd(property_dictionary, name, store_value, details); + object->set_properties(*property_dictionary); + return; } - PropertyDetails original_details = property_dictionary()->DetailsAt(entry); + PropertyDetails original_details = property_dictionary->DetailsAt(entry); int enumeration_index; // Preserve the enumeration index unless the property was deleted. if (original_details.IsDeleted()) { - enumeration_index = property_dictionary()->NextEnumerationIndex(); - property_dictionary()->SetNextEnumerationIndex(enumeration_index + 1); + enumeration_index = property_dictionary->NextEnumerationIndex(); + property_dictionary->SetNextEnumerationIndex(enumeration_index + 1); } else { enumeration_index = original_details.dictionary_index(); ASSERT(enumeration_index > 0); @@ -712,17 +727,15 @@ MaybeObject* JSObject::SetNormalizedProperty(Name* name, details = PropertyDetails( details.attributes(), details.type(), enumeration_index); - if (IsGlobalObject()) { - PropertyCell* cell = - PropertyCell::cast(property_dictionary()->ValueAt(entry)); - MaybeObject* maybe_type = cell->SetValueInferType(value); - if (maybe_type->IsFailure()) return maybe_type; + if (object->IsGlobalObject()) { + Handle<PropertyCell> cell( + PropertyCell::cast(property_dictionary->ValueAt(entry))); + PropertyCell::SetValueInferType(cell, value); // Please note we have to update the property details. 
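// Note: NameDictionaryAdd above is the recurring idiom of this patch: a
// "temporary wrapper until handlified" that lifts a raw, allocation-failing
// MaybeObject* method behind a Handle<> interface. CALL_HEAP_FUNCTION retries
// the wrapped call after GC instead of propagating the failure. Generic
// sketch of the shape (FooDictionary/FooDictionaryAdd are placeholder names,
// not V8 identifiers):
static Handle<FooDictionary> FooDictionaryAdd(Handle<FooDictionary> dict,
                                              Handle<Name> name,
                                              Handle<Object> value) {
  // Placeholder types; the macro retries dict->Add on allocation failure.
  CALL_HEAP_FUNCTION(dict->GetIsolate(),
                     dict->Add(*name, *value),
                     FooDictionary);
}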
- property_dictionary()->DetailsAtPut(entry, details); + property_dictionary->DetailsAtPut(entry, details); } else { - property_dictionary()->SetEntry(entry, name, value, details); + property_dictionary->SetEntry(entry, *name, *value, details); } - return value; } @@ -733,12 +746,6 @@ Handle<NameDictionary> NameDictionaryShrink(Handle<NameDictionary> dict, } -static void CellSetValueInferType(Handle<PropertyCell> cell, - Handle<Object> value) { - CALL_HEAP_FUNCTION_VOID(cell->GetIsolate(), cell->SetValueInferType(*value)); -} - - Handle<Object> JSObject::DeleteNormalizedProperty(Handle<JSObject> object, Handle<Name> name, DeleteMode mode) { @@ -761,7 +768,8 @@ Handle<Object> JSObject::DeleteNormalizedProperty(Handle<JSObject> object, object->set_map(*new_map); } Handle<PropertyCell> cell(PropertyCell::cast(dictionary->ValueAt(entry))); - CellSetValueInferType(cell, isolate->factory()->the_hole_value()); + Handle<Object> value = isolate->factory()->the_hole_value(); + PropertyCell::SetValueInferType(cell, value); dictionary->DetailsAtPut(entry, details.AsDeleted()); } else { Handle<Object> deleted(dictionary->DeleteProperty(entry, mode), isolate); @@ -817,17 +825,24 @@ MaybeObject* Object::GetPropertyOrFail(Handle<Object> object, } +// TODO(yangguo): handlify this and get rid of. MaybeObject* Object::GetProperty(Object* receiver, LookupResult* result, Name* name, PropertyAttributes* attributes) { - // Make sure that the top context does not change when doing - // callbacks or interceptor calls. - AssertNoContextChangeWithHandleScope ncc; - Isolate* isolate = name->GetIsolate(); Heap* heap = isolate->heap(); +#ifdef DEBUG + // TODO(mstarzinger): Only because of the AssertNoContextChange, drop as soon + // as this method has been fully handlified. + HandleScope scope(isolate); +#endif + + // Make sure that the top context does not change when doing + // callbacks or interceptor calls. + AssertNoContextChange ncc(isolate); + // Traverse the prototype chain from the current object (this) to // the holder and check for access rights. This avoids traversing the // objects more than once in case of interceptors, because the @@ -849,11 +864,16 @@ MaybeObject* Object::GetProperty(Object* receiver, // property from the current object, we still check that we have // access to it. 
JSObject* checked = JSObject::cast(current); - if (!heap->isolate()->MayNamedAccess(checked, name, v8::ACCESS_GET)) { - return checked->GetPropertyWithFailedAccessCheck(receiver, - result, - name, - attributes); + if (!isolate->MayNamedAccess(checked, name, v8::ACCESS_GET)) { + HandleScope scope(isolate); + Handle<Object> value = JSObject::GetPropertyWithFailedAccessCheck( + handle(checked, isolate), + handle(receiver, isolate), + result, + handle(name, isolate), + attributes); + RETURN_IF_EMPTY_HANDLE(isolate, value); + return *value; } } // Stop traversing the chain once we reach the last object in the @@ -884,14 +904,28 @@ MaybeObject* Object::GetProperty(Object* receiver, } case CONSTANT: return result->GetConstant(); - case CALLBACKS: - return result->holder()->GetPropertyWithCallback( - receiver, result->GetCallbackObject(), name); + case CALLBACKS: { + HandleScope scope(isolate); + Handle<Object> value = JSObject::GetPropertyWithCallback( + handle(result->holder(), isolate), + handle(receiver, isolate), + handle(result->GetCallbackObject(), isolate), + handle(name, isolate)); + RETURN_IF_EMPTY_HANDLE(isolate, value); + return *value; + } case HANDLER: return result->proxy()->GetPropertyWithHandler(receiver, name); - case INTERCEPTOR: - return result->holder()->GetPropertyWithInterceptor( - receiver, name, attributes); + case INTERCEPTOR: { + HandleScope scope(isolate); + Handle<Object> value = JSObject::GetPropertyWithInterceptor( + handle(result->holder(), isolate), + handle(receiver, isolate), + handle(name, isolate), + attributes); + RETURN_IF_EMPTY_HANDLE(isolate, value); + return *value; + } case TRANSITION: case NONEXISTENT: UNREACHABLE(); @@ -1026,8 +1060,11 @@ bool Object::SameValue(Object* other) { if (IsNumber() && other->IsNumber()) { double this_value = Number(); double other_value = other->Number(); - return (this_value == other_value) || - (std::isnan(this_value) && std::isnan(other_value)); + bool equal = this_value == other_value; + // SameValue(NaN, NaN) is true. + if (!equal) return std::isnan(this_value) && std::isnan(other_value); + // SameValue(0.0, -0.0) is false. + return (this_value != 0) || ((1 / this_value) == (1 / other_value)); } if (IsString() && other->IsString()) { return String::cast(this)->Equals(String::cast(other)); @@ -1167,7 +1204,7 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) { // Externalizing twice leaks the external resource, so it's // prohibited by the API. ASSERT(!this->IsExternalString()); -#ifdef DEBUG +#ifdef ENABLE_SLOW_ASSERTS if (FLAG_enable_slow_asserts) { // Assert that the resource and the string are equivalent. ASSERT(static_cast<size_t>(this->length()) == resource->length()); @@ -1224,7 +1261,7 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) { bool String::MakeExternal(v8::String::ExternalAsciiStringResource* resource) { -#ifdef DEBUG +#ifdef ENABLE_SLOW_ASSERTS if (FLAG_enable_slow_asserts) { // Assert that the resource and the string are equivalent. 
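// Note: the rewritten Object::SameValue above now implements the full
// ES5 9.12 algorithm for numbers: NaN is equal to itself, while +0 and -0
// are distinguished via the sign of the reciprocal. Standalone sketch of
// the same predicate (not a V8 function; assumes <cmath>):
#include <cmath>
static bool SameValueDouble(double a, double b) {
  if (a == b) {
    // +0 == -0 under operator==, so compare 1/a and 1/b: they differ
    // (+inf vs -inf) exactly when a and b are zeros of opposite sign.
    return (a != 0) || ((1 / a) == (1 / b));
  }
  // SameValue(NaN, NaN) is true even though NaN != NaN.
  return std::isnan(a) && std::isnan(b);
}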
ASSERT(static_cast<size_t>(this->length()) == resource->length()); @@ -1709,6 +1746,9 @@ void HeapObject::IterateBody(InstanceType type, int object_size, case FIXED_ARRAY_TYPE: FixedArray::BodyDescriptor::IterateBody(this, object_size, v); break; + case CONSTANT_POOL_ARRAY_TYPE: + reinterpret_cast<ConstantPoolArray*>(this)->ConstantPoolIterateBody(v); + break; case FIXED_DOUBLE_ARRAY_TYPE: break; case JS_OBJECT_TYPE: @@ -1871,211 +1911,248 @@ String* JSReceiver::constructor_name() { } -MaybeObject* JSObject::AddFastPropertyUsingMap(Map* new_map, - Name* name, - Object* value, - int field_index, - Representation representation) { +// TODO(mstarzinger): Temporary wrapper until handlified. +static Handle<Object> NewStorageFor(Isolate* isolate, + Handle<Object> object, + Representation representation) { + Heap* heap = isolate->heap(); + CALL_HEAP_FUNCTION(isolate, + object->AllocateNewStorageFor(heap, representation), + Object); +} + + +void JSObject::AddFastPropertyUsingMap(Handle<JSObject> object, + Handle<Map> new_map, + Handle<Name> name, + Handle<Object> value, + int field_index, + Representation representation) { + Isolate* isolate = object->GetIsolate(); + // This method is used to transition to a field. If we are transitioning to a // double field, allocate new storage. - Object* storage; - MaybeObject* maybe_storage = - value->AllocateNewStorageFor(GetHeap(), representation); - if (!maybe_storage->To(&storage)) return maybe_storage; + Handle<Object> storage = NewStorageFor(isolate, value, representation); - if (map()->unused_property_fields() == 0) { + if (object->map()->unused_property_fields() == 0) { int new_unused = new_map->unused_property_fields(); - FixedArray* values; - MaybeObject* maybe_values = - properties()->CopySize(properties()->length() + new_unused + 1); - if (!maybe_values->To(&values)) return maybe_values; + Handle<FixedArray> properties(object->properties()); + Handle<FixedArray> values = isolate->factory()->CopySizeFixedArray( + properties, properties->length() + new_unused + 1); + object->set_properties(*values); + } - set_properties(values); + object->set_map(*new_map); + object->FastPropertyAtPut(field_index, *storage); +} + + +static MaybeObject* CopyAddFieldDescriptor(Map* map, + Name* name, + int index, + PropertyAttributes attributes, + Representation representation, + TransitionFlag flag) { + Map* new_map; + FieldDescriptor new_field_desc(name, index, attributes, representation); + MaybeObject* maybe_map = map->CopyAddDescriptor(&new_field_desc, flag); + if (!maybe_map->To(&new_map)) return maybe_map; + int unused_property_fields = map->unused_property_fields() - 1; + if (unused_property_fields < 0) { + unused_property_fields += JSObject::kFieldsAdded; } + new_map->set_unused_property_fields(unused_property_fields); + return new_map; +} - set_map(new_map); - FastPropertyAtPut(field_index, storage); - return value; +static Handle<Map> CopyAddFieldDescriptor(Handle<Map> map, + Handle<Name> name, + int index, + PropertyAttributes attributes, + Representation representation, + TransitionFlag flag) { + CALL_HEAP_FUNCTION(map->GetIsolate(), + CopyAddFieldDescriptor( + *map, *name, index, attributes, representation, flag), + Map); } -MaybeObject* JSObject::AddFastProperty(Name* name, - Object* value, - PropertyAttributes attributes, - StoreFromKeyed store_mode, - ValueType value_type, - TransitionFlag flag) { - ASSERT(!IsJSGlobalProxy()); +void JSObject::AddFastProperty(Handle<JSObject> object, + Handle<Name> name, + Handle<Object> value, + PropertyAttributes 
attributes, + StoreFromKeyed store_mode, + ValueType value_type, + TransitionFlag flag) { + ASSERT(!object->IsJSGlobalProxy()); ASSERT(DescriptorArray::kNotFound == - map()->instance_descriptors()->Search( - name, map()->NumberOfOwnDescriptors())); + object->map()->instance_descriptors()->Search( + *name, object->map()->NumberOfOwnDescriptors())); // Normalize the object if the name is an actual name (not the // hidden strings) and is not a real identifier. // Normalize the object if it will have too many fast properties. - Isolate* isolate = GetHeap()->isolate(); - if (!name->IsCacheable(isolate) || TooManyFastProperties(store_mode)) { - MaybeObject* maybe_failure = - NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0); - if (maybe_failure->IsFailure()) return maybe_failure; - return AddSlowProperty(name, value, attributes); + Isolate* isolate = object->GetIsolate(); + if (!name->IsCacheable(isolate) || + object->TooManyFastProperties(store_mode)) { + NormalizeProperties(object, CLEAR_INOBJECT_PROPERTIES, 0); + AddSlowProperty(object, name, value, attributes); + return; } // Compute the new index for new field. - int index = map()->NextFreePropertyIndex(); + int index = object->map()->NextFreePropertyIndex(); // Allocate new instance descriptors with (name, index) added - if (IsJSContextExtensionObject()) value_type = FORCE_TAGGED; + if (object->IsJSContextExtensionObject()) value_type = FORCE_TAGGED; Representation representation = value->OptimalRepresentation(value_type); + Handle<Map> new_map = CopyAddFieldDescriptor( + handle(object->map()), name, index, attributes, representation, flag); - FieldDescriptor new_field(name, index, attributes, representation); - - Map* new_map; - MaybeObject* maybe_new_map = map()->CopyAddDescriptor(&new_field, flag); - if (!maybe_new_map->To(&new_map)) return maybe_new_map; + AddFastPropertyUsingMap(object, new_map, name, value, index, representation); +} - int unused_property_fields = map()->unused_property_fields() - 1; - if (unused_property_fields < 0) { - unused_property_fields += kFieldsAdded; - } - new_map->set_unused_property_fields(unused_property_fields); - return AddFastPropertyUsingMap(new_map, name, value, index, representation); +static MaybeObject* CopyAddConstantDescriptor(Map* map, + Name* name, + Object* value, + PropertyAttributes attributes, + TransitionFlag flag) { + ConstantDescriptor new_constant_desc(name, value, attributes); + return map->CopyAddDescriptor(&new_constant_desc, flag); } -MaybeObject* JSObject::AddConstantProperty( - Name* name, - Object* constant, - PropertyAttributes attributes, - TransitionFlag initial_flag) { - // Allocate new instance descriptors with (name, constant) added - ConstantDescriptor d(name, constant, attributes); +static Handle<Map> CopyAddConstantDescriptor(Handle<Map> map, + Handle<Name> name, + Handle<Object> value, + PropertyAttributes attributes, + TransitionFlag flag) { + CALL_HEAP_FUNCTION(map->GetIsolate(), + CopyAddConstantDescriptor( + *map, *name, *value, attributes, flag), + Map); +} + +void JSObject::AddConstantProperty(Handle<JSObject> object, + Handle<Name> name, + Handle<Object> constant, + PropertyAttributes attributes, + TransitionFlag initial_flag) { TransitionFlag flag = // Do not add transitions to global objects. - (IsGlobalObject() || + (object->IsGlobalObject() || // Don't add transitions to special properties with non-trivial // attributes. attributes != NONE) ? 
OMIT_TRANSITION : initial_flag; - Map* new_map; - MaybeObject* maybe_new_map = map()->CopyAddDescriptor(&d, flag); - if (!maybe_new_map->To(&new_map)) return maybe_new_map; + // Allocate new instance descriptors with (name, constant) added. + Handle<Map> new_map = CopyAddConstantDescriptor( + handle(object->map()), name, constant, attributes, flag); - set_map(new_map); - return constant; + object->set_map(*new_map); } -// Add property in slow mode -MaybeObject* JSObject::AddSlowProperty(Name* name, - Object* value, - PropertyAttributes attributes) { - ASSERT(!HasFastProperties()); - NameDictionary* dict = property_dictionary(); - Object* store_value = value; - if (IsGlobalObject()) { +void JSObject::AddSlowProperty(Handle<JSObject> object, + Handle<Name> name, + Handle<Object> value, + PropertyAttributes attributes) { + ASSERT(!object->HasFastProperties()); + Isolate* isolate = object->GetIsolate(); + Handle<NameDictionary> dict(object->property_dictionary()); + if (object->IsGlobalObject()) { // In case name is an orphaned property reuse the cell. - int entry = dict->FindEntry(name); + int entry = dict->FindEntry(*name); if (entry != NameDictionary::kNotFound) { - store_value = dict->ValueAt(entry); - MaybeObject* maybe_type = - PropertyCell::cast(store_value)->SetValueInferType(value); - if (maybe_type->IsFailure()) return maybe_type; + Handle<PropertyCell> cell(PropertyCell::cast(dict->ValueAt(entry))); + PropertyCell::SetValueInferType(cell, value); // Assign an enumeration index to the property and update // SetNextEnumerationIndex. int index = dict->NextEnumerationIndex(); PropertyDetails details = PropertyDetails(attributes, NORMAL, index); dict->SetNextEnumerationIndex(index + 1); - dict->SetEntry(entry, name, store_value, details); - return value; - } - Heap* heap = GetHeap(); - { MaybeObject* maybe_store_value = - heap->AllocatePropertyCell(value); - if (!maybe_store_value->ToObject(&store_value)) return maybe_store_value; + dict->SetEntry(entry, *name, *cell, details); + return; } - MaybeObject* maybe_type = - PropertyCell::cast(store_value)->SetValueInferType(value); - if (maybe_type->IsFailure()) return maybe_type; + Handle<PropertyCell> cell = isolate->factory()->NewPropertyCell(value); + PropertyCell::SetValueInferType(cell, value); + value = cell; } PropertyDetails details = PropertyDetails(attributes, NORMAL, 0); - Object* result; - { MaybeObject* maybe_result = dict->Add(name, store_value, details); - if (!maybe_result->ToObject(&result)) return maybe_result; - } - if (dict != result) set_properties(NameDictionary::cast(result)); - return value; + Handle<NameDictionary> result = NameDictionaryAdd(dict, name, value, details); + if (*dict != *result) object->set_properties(*result); } -MaybeObject* JSObject::AddProperty(Name* name, - Object* value, - PropertyAttributes attributes, - StrictModeFlag strict_mode, - JSReceiver::StoreFromKeyed store_mode, - ExtensibilityCheck extensibility_check, - ValueType value_type, - StoreMode mode, - TransitionFlag transition_flag) { - ASSERT(!IsJSGlobalProxy()); - Map* map_of_this = map(); - Heap* heap = GetHeap(); - Isolate* isolate = heap->isolate(); - MaybeObject* result; +Handle<Object> JSObject::AddProperty(Handle<JSObject> object, + Handle<Name> name, + Handle<Object> value, + PropertyAttributes attributes, + StrictModeFlag strict_mode, + JSReceiver::StoreFromKeyed store_mode, + ExtensibilityCheck extensibility_check, + ValueType value_type, + StoreMode mode, + TransitionFlag transition_flag) { + ASSERT(!object->IsJSGlobalProxy()); 
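// Note: AddSlowProperty above gives global objects an extra level of
// indirection: the dictionary stores a PropertyCell rather than the value,
// so code holding the cell observes later updates in place, and the cell of
// an orphaned (deleted) property is reused instead of reallocated. Toy model
// of that indirection (std:: types, not V8's):
#include <map>
#include <memory>
#include <string>
struct ToyCell { int value; };
static std::map<std::string, std::shared_ptr<ToyCell> > toy_global_dict;
static void ToyAddSlow(const std::string& name, int value) {
  std::map<std::string, std::shared_ptr<ToyCell> >::iterator it =
      toy_global_dict.find(name);
  if (it != toy_global_dict.end()) {
    it->second->value = value;  // reuse the existing (possibly orphaned) cell
    return;
  }
  std::shared_ptr<ToyCell> cell(new ToyCell);
  cell->value = value;
  toy_global_dict[name] = cell;
}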
+ Isolate* isolate = object->GetIsolate(); + + if (!name->IsUniqueName()) { + name = isolate->factory()->InternalizedStringFromString( + Handle<String>::cast(name)); + } + if (extensibility_check == PERFORM_EXTENSIBILITY_CHECK && - !map_of_this->is_extensible()) { + !object->map()->is_extensible()) { if (strict_mode == kNonStrictMode) { return value; } else { - Handle<Object> args[1] = {Handle<Name>(name)}; - return isolate->Throw( - *isolate->factory()->NewTypeError("object_not_extensible", - HandleVector(args, 1))); + Handle<Object> args[1] = { name }; + Handle<Object> error = isolate->factory()->NewTypeError( + "object_not_extensible", HandleVector(args, ARRAY_SIZE(args))); + isolate->Throw(*error); + return Handle<Object>(); } } - if (HasFastProperties()) { + if (object->HasFastProperties()) { // Ensure the descriptor array does not get too big. - if (map_of_this->NumberOfOwnDescriptors() < + if (object->map()->NumberOfOwnDescriptors() < DescriptorArray::kMaxNumberOfDescriptors) { // TODO(verwaest): Support other constants. // if (mode == ALLOW_AS_CONSTANT && // !value->IsTheHole() && // !value->IsConsString()) { if (value->IsJSFunction()) { - result = AddConstantProperty(name, value, attributes, transition_flag); + AddConstantProperty(object, name, value, attributes, transition_flag); } else { - result = AddFastProperty( - name, value, attributes, store_mode, value_type, transition_flag); + AddFastProperty(object, name, value, attributes, store_mode, + value_type, transition_flag); } } else { // Normalize the object to prevent very large instance descriptors. // This eliminates unwanted N^2 allocation and lookup behavior. - Object* obj; - MaybeObject* maybe = NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0); - if (!maybe->To(&obj)) return maybe; - result = AddSlowProperty(name, value, attributes); + NormalizeProperties(object, CLEAR_INOBJECT_PROPERTIES, 0); + AddSlowProperty(object, name, value, attributes); } } else { - result = AddSlowProperty(name, value, attributes); + AddSlowProperty(object, name, value, attributes); } - Handle<Object> hresult; - if (!result->ToHandle(&hresult, isolate)) return result; - - if (FLAG_harmony_observation && map()->is_observed()) { - EnqueueChangeRecord(handle(this, isolate), - "new", - handle(name, isolate), - handle(heap->the_hole_value(), isolate)); + if (FLAG_harmony_observation && + object->map()->is_observed() && + *name != isolate->heap()->hidden_string()) { + Handle<Object> old_value = isolate->factory()->the_hole_value(); + EnqueueChangeRecord(object, "new", name, old_value); } - return *hresult; + return value; } @@ -2115,37 +2192,39 @@ void JSObject::DeliverChangeRecords(Isolate* isolate) { } -MaybeObject* JSObject::SetPropertyPostInterceptor( - Name* name, - Object* value, +Handle<Object> JSObject::SetPropertyPostInterceptor( + Handle<JSObject> object, + Handle<Name> name, + Handle<Object> value, PropertyAttributes attributes, - StrictModeFlag strict_mode, - StoreMode mode) { + StrictModeFlag strict_mode) { // Check local property, ignore interceptor. - LookupResult result(GetIsolate()); - LocalLookupRealNamedProperty(name, &result); - if (!result.IsFound()) map()->LookupTransition(this, name, &result); + LookupResult result(object->GetIsolate()); + object->LocalLookupRealNamedProperty(*name, &result); + if (!result.IsFound()) { + object->map()->LookupTransition(*object, *name, &result); + } if (result.IsFound()) { // An existing property or a map transition was found. Use set property to // handle all these cases. 
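// Note: AddProperty above shows the error convention of the handlified
// methods in this patch: instead of returning a Failure, the method throws
// on the isolate and returns an empty handle. Hedged sketch of the caller
// side (AddAndCheck is a hypothetical helper; the 5-argument AddProperty
// call matches the call sites in this file):
static Handle<Object> AddAndCheck(Handle<JSObject> object,
                                  Handle<Name> name,
                                  Handle<Object> value) {
  Handle<Object> result = JSObject::AddProperty(
      object, name, value, NONE, kNonStrictMode);
  // Empty handle == an exception is already scheduled; just propagate.
  if (result.is_null()) return Handle<Object>();
  return result;
}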
- return SetProperty(&result, name, value, attributes, strict_mode); + return SetPropertyForResult(object, &result, name, value, attributes, + strict_mode, MAY_BE_STORE_FROM_KEYED); } bool done = false; - MaybeObject* result_object = - SetPropertyViaPrototypes(name, value, attributes, strict_mode, &done); + Handle<Object> result_object = SetPropertyViaPrototypes( + object, name, value, attributes, strict_mode, &done); if (done) return result_object; // Add a new real property. - return AddProperty(name, value, attributes, strict_mode, - MAY_BE_STORE_FROM_KEYED, PERFORM_EXTENSIBILITY_CHECK, - OPTIMAL_REPRESENTATION, mode); + return AddProperty(object, name, value, attributes, strict_mode); } -MaybeObject* JSObject::ReplaceSlowProperty(Name* name, - Object* value, - PropertyAttributes attributes) { - NameDictionary* dictionary = property_dictionary(); - int old_index = dictionary->FindEntry(name); +static void ReplaceSlowProperty(Handle<JSObject> object, + Handle<Name> name, + Handle<Object> value, + PropertyAttributes attributes) { + NameDictionary* dictionary = object->property_dictionary(); + int old_index = dictionary->FindEntry(*name); int new_enumeration_index = 0; // 0 means "Use the next available index." if (old_index != -1) { // All calls to ReplaceSlowProperty have had all transitions removed. @@ -2153,7 +2232,7 @@ MaybeObject* JSObject::ReplaceSlowProperty(Name* name, } PropertyDetails new_details(attributes, NORMAL, new_enumeration_index); - return SetNormalizedProperty(name, value, new_details); + JSObject::SetNormalizedProperty(object, name, value, new_details); } @@ -2219,6 +2298,13 @@ static void RightTrimFixedArray(Heap* heap, FixedArray* elms, int to_trim) { MemoryChunk::IncrementLiveBytesFromMutator(elms->address(), -size_delta); } } + + // The array may not be moved during GC, + // and size has to be adjusted nevertheless. + HeapProfiler* profiler = heap->isolate()->heap_profiler(); + if (profiler->is_tracking_allocations()) { + profiler->UpdateObjectSizeEvent(elms->address(), elms->Size()); + } } @@ -2275,28 +2361,27 @@ bool Map::InstancesNeedRewriting(Map* target, // to temporarily store the inobject properties. // * If there are properties left in the backing store, install the backing // store. -MaybeObject* JSObject::MigrateToMap(Map* new_map) { - Heap* heap = GetHeap(); - Map* old_map = map(); +void JSObject::MigrateToMap(Handle<JSObject> object, Handle<Map> new_map) { + Isolate* isolate = object->GetIsolate(); + Handle<Map> old_map(object->map()); int number_of_fields = new_map->NumberOfFields(); int inobject = new_map->inobject_properties(); int unused = new_map->unused_property_fields(); - // Nothing to do if no functions were converted to fields. + // Nothing to do if no functions were converted to fields and no smis were + // converted to doubles. 
if (!old_map->InstancesNeedRewriting( - new_map, number_of_fields, inobject, unused)) { - set_map(new_map); - return this; + *new_map, number_of_fields, inobject, unused)) { + object->set_map(*new_map); + return; } int total_size = number_of_fields + unused; int external = total_size - inobject; - FixedArray* array; - MaybeObject* maybe_array = heap->AllocateFixedArray(total_size); - if (!maybe_array->To(&array)) return maybe_array; + Handle<FixedArray> array = isolate->factory()->NewFixedArray(total_size); - DescriptorArray* old_descriptors = old_map->instance_descriptors(); - DescriptorArray* new_descriptors = new_map->instance_descriptors(); + Handle<DescriptorArray> old_descriptors(old_map->instance_descriptors()); + Handle<DescriptorArray> new_descriptors(new_map->instance_descriptors()); int descriptors = new_map->NumberOfOwnDescriptors(); for (int i = 0; i < descriptors; i++) { @@ -2309,69 +2394,72 @@ MaybeObject* JSObject::MigrateToMap(Map* new_map) { } ASSERT(old_details.type() == CONSTANT || old_details.type() == FIELD); - Object* value = old_details.type() == CONSTANT + Object* raw_value = old_details.type() == CONSTANT ? old_descriptors->GetValue(i) - : RawFastPropertyAt(old_descriptors->GetFieldIndex(i)); + : object->RawFastPropertyAt(old_descriptors->GetFieldIndex(i)); + Handle<Object> value(raw_value, isolate); if (FLAG_track_double_fields && !old_details.representation().IsDouble() && details.representation().IsDouble()) { - if (old_details.representation().IsNone()) value = Smi::FromInt(0); - // Objects must be allocated in the old object space, since the - // overall number of HeapNumbers needed for the conversion might - // exceed the capacity of new space, and we would fail repeatedly - // trying to migrate the instance. - MaybeObject* maybe_storage = - value->AllocateNewStorageFor(heap, details.representation(), TENURED); - if (!maybe_storage->To(&value)) return maybe_storage; + if (old_details.representation().IsNone()) { + value = handle(Smi::FromInt(0), isolate); + } + value = NewStorageFor(isolate, value, details.representation()); } ASSERT(!(FLAG_track_double_fields && details.representation().IsDouble() && value->IsSmi())); int target_index = new_descriptors->GetFieldIndex(i) - inobject; if (target_index < 0) target_index += total_size; - array->set(target_index, value); + array->set(target_index, *value); } - // From here on we cannot fail anymore. + // From here on we cannot fail and we shouldn't GC anymore. + DisallowHeapAllocation no_allocation; // Copy (real) inobject properties. If necessary, stop at number_of_fields to // avoid overwriting |one_pointer_filler_map|. int limit = Min(inobject, number_of_fields); for (int i = 0; i < limit; i++) { - FastPropertyAtPut(i, array->get(external + i)); + object->FastPropertyAtPut(i, array->get(external + i)); } // Create filler object past the new instance size. int new_instance_size = new_map->instance_size(); int instance_size_delta = old_map->instance_size() - new_instance_size; ASSERT(instance_size_delta >= 0); - Address address = this->address() + new_instance_size; - heap->CreateFillerObjectAt(address, instance_size_delta); + Address address = object->address() + new_instance_size; + isolate->heap()->CreateFillerObjectAt(address, instance_size_delta); // If there are properties in the new backing store, trim it to the correct // size and install the backing store into the object. 
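// Note: the rewrite above preserves MigrateToMap's two-phase structure: a
// gather phase that may allocate (copying every field and boxing new doubles
// via NewStorageFor), then, after DisallowHeapAllocation, a scatter phase of
// plain stores that cannot fail. Standalone toy of the pattern (std:: types,
// not V8's; assumes new_inobject_count <= total field count):
#include <cstddef>
#include <vector>
static void ToyMigrateFields(std::vector<int>* inobject,
                             std::vector<int>* backing,
                             std::size_t new_inobject_count) {
  // Gather: all copying happens into scratch first.
  std::vector<int> scratch(*inobject);
  scratch.insert(scratch.end(), backing->begin(), backing->end());
  // Scatter: from here on, only plain writes back into the object.
  inobject->assign(scratch.begin(), scratch.begin() + new_inobject_count);
  backing->assign(scratch.begin() + new_inobject_count, scratch.end());
}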
if (external > 0) { - RightTrimFixedArray<FROM_MUTATOR>(heap, array, inobject); - set_properties(array); + RightTrimFixedArray<FROM_MUTATOR>(isolate->heap(), *array, inobject); + object->set_properties(*array); } - set_map(new_map); - - return this; + object->set_map(*new_map); } -MaybeObject* JSObject::GeneralizeFieldRepresentation( - int modify_index, - Representation new_representation, - StoreMode store_mode) { - Map* new_map; - MaybeObject* maybe_new_map = map()->GeneralizeRepresentation( - modify_index, new_representation, store_mode); - if (!maybe_new_map->To(&new_map)) return maybe_new_map; - if (map() == new_map) return this; +Handle<TransitionArray> Map::AddTransition(Handle<Map> map, + Handle<Name> key, + Handle<Map> target, + SimpleTransitionFlag flag) { + CALL_HEAP_FUNCTION(map->GetIsolate(), + map->AddTransition(*key, *target, flag), + TransitionArray); +} + - return MigrateToMap(new_map); +void JSObject::GeneralizeFieldRepresentation(Handle<JSObject> object, + int modify_index, + Representation new_representation, + StoreMode store_mode) { + Handle<Map> new_map = Map::GeneralizeRepresentation( + handle(object->map()), modify_index, new_representation, store_mode); + if (object->map() == *new_map) return; + return MigrateToMap(object, new_map); } @@ -2385,14 +2473,12 @@ int Map::NumberOfFields() { } -MaybeObject* Map::CopyGeneralizeAllRepresentations( - int modify_index, - StoreMode store_mode, - PropertyAttributes attributes, - const char* reason) { - Map* new_map; - MaybeObject* maybe_map = this->Copy(); - if (!maybe_map->To(&new_map)) return maybe_map; +Handle<Map> Map::CopyGeneralizeAllRepresentations(Handle<Map> map, + int modify_index, + StoreMode store_mode, + PropertyAttributes attributes, + const char* reason) { + Handle<Map> new_map = Copy(map); DescriptorArray* descriptors = new_map->instance_descriptors(); descriptors->InitializeRepresentations(Representation::Tagged()); @@ -2414,7 +2500,7 @@ MaybeObject* Map::CopyGeneralizeAllRepresentations( } if (FLAG_trace_generalization) { - PrintGeneralization(stdout, reason, modify_index, + map->PrintGeneralization(stdout, reason, modify_index, new_map->NumberOfOwnDescriptors(), new_map->NumberOfOwnDescriptors(), details.type() == CONSTANT && store_mode == FORCE_FIELD, @@ -2562,11 +2648,11 @@ Map* Map::FindLastMatchMap(int verbatim, // - If |updated| == |split_map|, |updated| is in the expected state. Return it. // - Otherwise, invalidate the outdated transition target from |updated|, and // replace its transition tree with a new branch for the updated descriptors. -MaybeObject* Map::GeneralizeRepresentation(int modify_index, - Representation new_representation, - StoreMode store_mode) { - Map* old_map = this; - DescriptorArray* old_descriptors = old_map->instance_descriptors(); +Handle<Map> Map::GeneralizeRepresentation(Handle<Map> old_map, + int modify_index, + Representation new_representation, + StoreMode store_mode) { + Handle<DescriptorArray> old_descriptors(old_map->instance_descriptors()); PropertyDetails old_details = old_descriptors->GetDetails(modify_index); Representation old_representation = old_details.representation(); @@ -2582,37 +2668,37 @@ MaybeObject* Map::GeneralizeRepresentation(int modify_index, } int descriptors = old_map->NumberOfOwnDescriptors(); - Map* root_map = old_map->FindRootMap(); + Handle<Map> root_map(old_map->FindRootMap()); // Check the state of the root map. 
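// Note: GeneralizeRepresentation below widens one field's representation and
// then reconciles the transition tree. Its "fits into" checks walk a small
// lattice; toy model of that ordering (deliberately simplified, the real
// definition is V8's Representation class):
enum class ToyRep { kNone, kSmi, kDouble, kHeapObject, kTagged };
static bool ToyFitsInto(ToyRep from, ToyRep to) {
  // None fits everywhere (no value seen yet); Tagged accepts everything.
  if (from == to || from == ToyRep::kNone || to == ToyRep::kTagged) return true;
  // Smis can be widened to doubles; all other widenings require Tagged.
  return from == ToyRep::kSmi && to == ToyRep::kDouble;
}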
- if (!old_map->EquivalentToForTransition(root_map)) { - return CopyGeneralizeAllRepresentations( - modify_index, store_mode, old_details.attributes(), "not equivalent"); + if (!old_map->EquivalentToForTransition(*root_map)) { + return CopyGeneralizeAllRepresentations(old_map, modify_index, store_mode, + old_details.attributes(), "not equivalent"); } int verbatim = root_map->NumberOfOwnDescriptors(); if (store_mode != ALLOW_AS_CONSTANT && modify_index < verbatim) { - return CopyGeneralizeAllRepresentations( - modify_index, store_mode, + return CopyGeneralizeAllRepresentations(old_map, modify_index, store_mode, old_details.attributes(), "root modification"); } - Map* updated = root_map->FindUpdatedMap( - verbatim, descriptors, old_descriptors); - if (updated == NULL) { - return CopyGeneralizeAllRepresentations( - modify_index, store_mode, old_details.attributes(), "incompatible"); + Map* raw_updated = root_map->FindUpdatedMap( + verbatim, descriptors, *old_descriptors); + if (raw_updated == NULL) { + return CopyGeneralizeAllRepresentations(old_map, modify_index, store_mode, + old_details.attributes(), "incompatible"); } - DescriptorArray* updated_descriptors = updated->instance_descriptors(); + Handle<Map> updated(raw_updated); + Handle<DescriptorArray> updated_descriptors(updated->instance_descriptors()); int valid = updated->NumberOfOwnDescriptors(); // Directly change the map if the target map is more general. Ensure that the // target type of the modify_index is a FIELD, unless we are migrating. if (updated_descriptors->IsMoreGeneralThan( - verbatim, valid, descriptors, old_descriptors) && + verbatim, valid, descriptors, *old_descriptors) && (store_mode == ALLOW_AS_CONSTANT || updated_descriptors->GetDetails(modify_index).type() == FIELD)) { Representation updated_representation = @@ -2620,10 +2706,9 @@ MaybeObject* Map::GeneralizeRepresentation(int modify_index, if (new_representation.fits_into(updated_representation)) return updated; } - DescriptorArray* new_descriptors; - MaybeObject* maybe_descriptors = updated_descriptors->Merge( - verbatim, valid, descriptors, modify_index, store_mode, old_descriptors); - if (!maybe_descriptors->To(&new_descriptors)) return maybe_descriptors; + Handle<DescriptorArray> new_descriptors = DescriptorArray::Merge( + updated_descriptors, verbatim, valid, descriptors, modify_index, + store_mode, old_descriptors); ASSERT(store_mode == ALLOW_AS_CONSTANT || new_descriptors->GetDetails(modify_index).type() == FIELD); @@ -2635,8 +2720,8 @@ MaybeObject* Map::GeneralizeRepresentation(int modify_index, new_descriptors->SetRepresentation(modify_index, updated_representation); } - Map* split_map = root_map->FindLastMatchMap( - verbatim, descriptors, new_descriptors); + Handle<Map> split_map(root_map->FindLastMatchMap( + verbatim, descriptors, *new_descriptors)); int split_descriptors = split_map->NumberOfOwnDescriptors(); // This is shadowed by |updated_descriptors| being more general than @@ -2645,28 +2730,20 @@ MaybeObject* Map::GeneralizeRepresentation(int modify_index, int descriptor = split_descriptors; split_map->DeprecateTarget( - old_descriptors->GetKey(descriptor), new_descriptors); + old_descriptors->GetKey(descriptor), *new_descriptors); if (FLAG_trace_generalization) { - PrintGeneralization( + old_map->PrintGeneralization( stdout, "", modify_index, descriptor, descriptors, old_descriptors->GetDetails(modify_index).type() == CONSTANT && store_mode == FORCE_FIELD, old_representation, updated_representation); } - Map* new_map = split_map; // Add missing 
transitions. + Handle<Map> new_map = split_map; for (; descriptor < descriptors; descriptor++) { - MaybeObject* maybe_map = new_map->CopyInstallDescriptors( - descriptor, new_descriptors); - if (!maybe_map->To(&new_map)) { - // Create a handle for the last created map to ensure it stays alive - // during GC. Its descriptor array is too large, but it will be - // overwritten during retry anyway. - Handle<Map>(new_map); - return maybe_map; - } + new_map = Map::CopyInstallDescriptors(new_map, descriptor, new_descriptors); new_map->set_migration_target(true); } @@ -2675,6 +2752,21 @@ MaybeObject* Map::GeneralizeRepresentation(int modify_index, } +// Generalize the representation of all FIELD descriptors. +Handle<Map> Map::GeneralizeAllFieldRepresentations( + Handle<Map> map, + Representation new_representation) { + Handle<DescriptorArray> descriptors(map->instance_descriptors()); + for (int i = 0; i < map->NumberOfOwnDescriptors(); i++) { + PropertyDetails details = descriptors->GetDetails(i); + if (details.type() == FIELD) { + map = GeneralizeRepresentation(map, i, new_representation, FORCE_FIELD); + } + } + return map; +} + + Map* Map::CurrentMapForDeprecated() { DisallowHeapAllocation no_allocation; if (!is_deprecated()) return this; @@ -2703,94 +2795,66 @@ Map* Map::CurrentMapForDeprecated() { } -MaybeObject* JSObject::SetPropertyWithInterceptor( - Name* name, - Object* value, +Handle<Object> JSObject::SetPropertyWithInterceptor( + Handle<JSObject> object, + Handle<Name> name, + Handle<Object> value, PropertyAttributes attributes, StrictModeFlag strict_mode) { // TODO(rossberg): Support symbols in the API. if (name->IsSymbol()) return value; - Isolate* isolate = GetIsolate(); - HandleScope scope(isolate); - Handle<JSObject> this_handle(this); - Handle<String> name_handle(String::cast(name)); - Handle<Object> value_handle(value, isolate); - Handle<InterceptorInfo> interceptor(GetNamedInterceptor()); + Isolate* isolate = object->GetIsolate(); + Handle<String> name_string = Handle<String>::cast(name); + Handle<InterceptorInfo> interceptor(object->GetNamedInterceptor()); if (!interceptor->setter()->IsUndefined()) { - LOG(isolate, ApiNamedPropertyAccess("interceptor-named-set", this, name)); - PropertyCallbackArguments args(isolate, interceptor->data(), this, this); + LOG(isolate, + ApiNamedPropertyAccess("interceptor-named-set", *object, *name)); + PropertyCallbackArguments args( + isolate, interceptor->data(), *object, *object); v8::NamedPropertySetterCallback setter = v8::ToCData<v8::NamedPropertySetterCallback>(interceptor->setter()); - Handle<Object> value_unhole(value->IsTheHole() ? - isolate->heap()->undefined_value() : - value, - isolate); + Handle<Object> value_unhole = value->IsTheHole() + ? 
Handle<Object>(isolate->factory()->undefined_value()) : value; v8::Handle<v8::Value> result = args.Call(setter, - v8::Utils::ToLocal(name_handle), + v8::Utils::ToLocal(name_string), v8::Utils::ToLocal(value_unhole)); - RETURN_IF_SCHEDULED_EXCEPTION(isolate); - if (!result.IsEmpty()) return *value_handle; + RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object); + if (!result.IsEmpty()) return value; } - MaybeObject* raw_result = - this_handle->SetPropertyPostInterceptor(*name_handle, - *value_handle, - attributes, - strict_mode); - RETURN_IF_SCHEDULED_EXCEPTION(isolate); - return raw_result; + Handle<Object> result = + SetPropertyPostInterceptor(object, name, value, attributes, strict_mode); + RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object); + return result; } Handle<Object> JSReceiver::SetProperty(Handle<JSReceiver> object, - Handle<Name> key, + Handle<Name> name, Handle<Object> value, PropertyAttributes attributes, - StrictModeFlag strict_mode) { - CALL_HEAP_FUNCTION(object->GetIsolate(), - object->SetProperty(*key, *value, attributes, strict_mode), - Object); -} - - -MaybeObject* JSReceiver::SetPropertyOrFail( - Handle<JSReceiver> object, - Handle<Name> key, - Handle<Object> value, - PropertyAttributes attributes, - StrictModeFlag strict_mode, - JSReceiver::StoreFromKeyed store_mode) { - CALL_HEAP_FUNCTION_PASS_EXCEPTION( - object->GetIsolate(), - object->SetProperty(*key, *value, attributes, strict_mode, store_mode)); -} - - -MaybeObject* JSReceiver::SetProperty(Name* name, - Object* value, - PropertyAttributes attributes, - StrictModeFlag strict_mode, - JSReceiver::StoreFromKeyed store_mode) { - LookupResult result(GetIsolate()); - LocalLookup(name, &result, true); + StrictModeFlag strict_mode, + StoreFromKeyed store_mode) { + LookupResult result(object->GetIsolate()); + object->LocalLookup(*name, &result, true); if (!result.IsFound()) { - map()->LookupTransition(JSObject::cast(this), name, &result); + object->map()->LookupTransition(JSObject::cast(*object), *name, &result); } - return SetProperty(&result, name, value, attributes, strict_mode, store_mode); + return SetProperty(object, &result, name, value, attributes, strict_mode, + store_mode); } -MaybeObject* JSObject::SetPropertyWithCallback(Object* structure, - Name* name, - Object* value, - JSObject* holder, - StrictModeFlag strict_mode) { - Isolate* isolate = GetIsolate(); - HandleScope scope(isolate); +Handle<Object> JSObject::SetPropertyWithCallback(Handle<JSObject> object, + Handle<Object> structure, + Handle<Name> name, + Handle<Object> value, + Handle<JSObject> holder, + StrictModeFlag strict_mode) { + Isolate* isolate = object->GetIsolate(); // We should never get here to initialize a const with the hole // value since a const declaration would conflict with the setter. ASSERT(!value->IsTheHole()); - Handle<Object> value_handle(value, isolate); // To accommodate both the old and the new api we switch on the // data structure used to store the callbacks. 
Eventually foreign @@ -2798,26 +2862,27 @@ MaybeObject* JSObject::SetPropertyWithCallback(Object* structure, if (structure->IsForeign()) { AccessorDescriptor* callback = reinterpret_cast<AccessorDescriptor*>( - Foreign::cast(structure)->foreign_address()); - MaybeObject* obj = (callback->setter)( - isolate, this, value, callback->data); - RETURN_IF_SCHEDULED_EXCEPTION(isolate); - if (obj->IsFailure()) return obj; - return *value_handle; + Handle<Foreign>::cast(structure)->foreign_address()); + CALL_AND_RETRY_OR_DIE(isolate, + (callback->setter)( + isolate, *object, *value, callback->data), + break, + return Handle<Object>()); + RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object); + return value; } if (structure->IsExecutableAccessorInfo()) { // api style callbacks - ExecutableAccessorInfo* data = ExecutableAccessorInfo::cast(structure); - if (!data->IsCompatibleReceiver(this)) { - Handle<Object> name_handle(name, isolate); - Handle<Object> receiver_handle(this, isolate); - Handle<Object> args[2] = { name_handle, receiver_handle }; + ExecutableAccessorInfo* data = ExecutableAccessorInfo::cast(*structure); + if (!data->IsCompatibleReceiver(*object)) { + Handle<Object> args[2] = { name, object }; Handle<Object> error = isolate->factory()->NewTypeError("incompatible_method_receiver", HandleVector(args, ARRAY_SIZE(args))); - return isolate->Throw(*error); + isolate->Throw(*error); + return Handle<Object>(); } // TODO(rossberg): Support symbols in the API. if (name->IsSymbol()) return value; @@ -2825,32 +2890,33 @@ MaybeObject* JSObject::SetPropertyWithCallback(Object* structure, v8::AccessorSetterCallback call_fun = v8::ToCData<v8::AccessorSetterCallback>(call_obj); if (call_fun == NULL) return value; - Handle<String> key(String::cast(name)); - LOG(isolate, ApiNamedPropertyAccess("store", this, name)); + Handle<String> key = Handle<String>::cast(name); + LOG(isolate, ApiNamedPropertyAccess("store", *object, *name)); PropertyCallbackArguments args( - isolate, data->data(), this, JSObject::cast(holder)); + isolate, data->data(), *object, JSObject::cast(*holder)); args.Call(call_fun, v8::Utils::ToLocal(key), - v8::Utils::ToLocal(value_handle)); - RETURN_IF_SCHEDULED_EXCEPTION(isolate); - return *value_handle; + v8::Utils::ToLocal(value)); + RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object); + return value; } if (structure->IsAccessorPair()) { - Object* setter = AccessorPair::cast(structure)->setter(); + Handle<Object> setter(AccessorPair::cast(*structure)->setter(), isolate); if (setter->IsSpecFunction()) { // TODO(rossberg): nicer would be to cast to some JSCallable here... 
- return SetPropertyWithDefinedSetter(JSReceiver::cast(setter), value); + return SetPropertyWithDefinedSetter( + object, Handle<JSReceiver>::cast(setter), value); } else { if (strict_mode == kNonStrictMode) { return value; } - Handle<Name> key(name); - Handle<Object> holder_handle(holder, isolate); - Handle<Object> args[2] = { key, holder_handle }; - return isolate->Throw( - *isolate->factory()->NewTypeError("no_setter_in_callback", - HandleVector(args, 2))); + Handle<Object> args[2] = { name, holder }; + Handle<Object> error = + isolate->factory()->NewTypeError("no_setter_in_callback", + HandleVector(args, 2)); + isolate->Throw(*error); + return Handle<Object>(); } } @@ -2860,32 +2926,33 @@ MaybeObject* JSObject::SetPropertyWithCallback(Object* structure, } UNREACHABLE(); - return NULL; + return Handle<Object>(); } -MaybeObject* JSReceiver::SetPropertyWithDefinedSetter(JSReceiver* setter, - Object* value) { - Isolate* isolate = GetIsolate(); - Handle<Object> value_handle(value, isolate); - Handle<JSReceiver> fun(setter, isolate); - Handle<JSReceiver> self(this, isolate); +Handle<Object> JSReceiver::SetPropertyWithDefinedSetter( + Handle<JSReceiver> object, + Handle<JSReceiver> setter, + Handle<Object> value) { + Isolate* isolate = object->GetIsolate(); + #ifdef ENABLE_DEBUGGER_SUPPORT Debug* debug = isolate->debug(); // Handle stepping into a setter if step into is active. // TODO(rossberg): should this apply to getters that are function proxies? - if (debug->StepInActive() && fun->IsJSFunction()) { + if (debug->StepInActive() && setter->IsJSFunction()) { debug->HandleStepIn( - Handle<JSFunction>::cast(fun), Handle<Object>::null(), 0, false); + Handle<JSFunction>::cast(setter), Handle<Object>::null(), 0, false); } #endif + bool has_pending_exception; - Handle<Object> argv[] = { value_handle }; + Handle<Object> argv[] = { value }; Execution::Call( - isolate, fun, self, ARRAY_SIZE(argv), argv, &has_pending_exception); + isolate, setter, object, ARRAY_SIZE(argv), argv, &has_pending_exception); // Check for pending exception and return the result. 
- if (has_pending_exception) return Failure::Exception(); - return *value_handle; + if (has_pending_exception) return Handle<Object>(); + return value; } @@ -2899,14 +2966,16 @@ MaybeObject* JSObject::SetElementWithCallbackSetterInPrototypes( pt != heap->null_value(); pt = pt->GetPrototype(GetIsolate())) { if (pt->IsJSProxy()) { - String* name; - MaybeObject* maybe = heap->Uint32ToString(index); - if (!maybe->To<String>(&name)) { - *found = true; // Force abort - return maybe; - } - return JSProxy::cast(pt)->SetPropertyViaPrototypesWithHandler( - this, name, value, NONE, strict_mode, found); + Isolate* isolate = GetIsolate(); + HandleScope scope(isolate); + Handle<JSProxy> proxy(JSProxy::cast(pt)); + Handle<JSObject> self(this, isolate); + Handle<String> name = isolate->factory()->Uint32ToString(index); + Handle<Object> value_handle(value, isolate); + Handle<Object> result = JSProxy::SetPropertyViaPrototypesWithHandler( + proxy, self, name, value_handle, NONE, strict_mode, found); + RETURN_IF_EMPTY_HANDLE(isolate, result); + return *result; } if (!JSObject::cast(pt)->HasDictionaryElements()) { continue; @@ -2918,11 +2987,16 @@ MaybeObject* JSObject::SetElementWithCallbackSetterInPrototypes( PropertyDetails details = dictionary->DetailsAt(entry); if (details.type() == CALLBACKS) { *found = true; - return SetElementWithCallback(dictionary->ValueAt(entry), - index, - value, - JSObject::cast(pt), - strict_mode); + Isolate* isolate = GetIsolate(); + HandleScope scope(isolate); + Handle<JSObject> self(this, isolate); + Handle<Object> structure(dictionary->ValueAt(entry), isolate); + Handle<Object> value_handle(value, isolate); + Handle<JSObject> holder(JSObject::cast(pt)); + Handle<Object> result = SetElementWithCallback( + self, structure, index, value_handle, holder, strict_mode); + RETURN_IF_EMPTY_HANDLE(isolate, result); + return *result; } } } @@ -2930,21 +3004,21 @@ MaybeObject* JSObject::SetElementWithCallbackSetterInPrototypes( return heap->the_hole_value(); } -MaybeObject* JSObject::SetPropertyViaPrototypes( - Name* name, - Object* value, - PropertyAttributes attributes, - StrictModeFlag strict_mode, - bool* done) { - Heap* heap = GetHeap(); - Isolate* isolate = heap->isolate(); + +Handle<Object> JSObject::SetPropertyViaPrototypes(Handle<JSObject> object, + Handle<Name> name, + Handle<Object> value, + PropertyAttributes attributes, + StrictModeFlag strict_mode, + bool* done) { + Isolate* isolate = object->GetIsolate(); *done = false; // We could not find a local property so let's check whether there is an // accessor that wants to handle the property, or whether the property is // read-only on the prototype chain. 
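// Note: SetPropertyWithDefinedSetter above boils down to one guarded call:
// the setter runs as an ordinary call with the receiver as 'this' and the
// stored value as the single argument, and a pending exception surfaces as
// an empty handle. Condensed sketch (hypothetical free function, same calls):
static Handle<Object> CallDefinedSetter(Isolate* isolate,
                                        Handle<JSReceiver> receiver,
                                        Handle<JSReceiver> setter,
                                        Handle<Object> value) {
  bool has_pending_exception = false;
  Handle<Object> argv[] = { value };
  Execution::Call(isolate, setter, receiver,
                  ARRAY_SIZE(argv), argv, &has_pending_exception);
  // Empty handle == exception already scheduled on the isolate.
  if (has_pending_exception) return Handle<Object>();
  return value;
}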
LookupResult result(isolate); - LookupRealNamedPropertyInPrototypes(name, &result); + object->LookupRealNamedPropertyInPrototypes(*name, &result); if (result.IsFound()) { switch (result.type()) { case NORMAL: @@ -2955,19 +3029,21 @@ MaybeObject* JSObject::SetPropertyViaPrototypes( case INTERCEPTOR: { PropertyAttributes attr = result.holder()->GetPropertyAttributeWithInterceptor( - this, name, true); + *object, *name, true); *done = !!(attr & READ_ONLY); break; } case CALLBACKS: { if (!FLAG_es5_readonly && result.IsReadOnly()) break; *done = true; - return SetPropertyWithCallback(result.GetCallbackObject(), - name, value, result.holder(), strict_mode); + Handle<Object> callback_object(result.GetCallbackObject(), isolate); + return SetPropertyWithCallback(object, callback_object, name, value, + handle(result.holder()), strict_mode); } case HANDLER: { - return result.proxy()->SetPropertyViaPrototypesWithHandler( - this, name, value, attributes, strict_mode, done); + Handle<JSProxy> proxy(result.proxy()); + return JSProxy::SetPropertyViaPrototypesWithHandler( + proxy, object, name, value, attributes, strict_mode, done); } case TRANSITION: case NONEXISTENT: @@ -2980,12 +3056,13 @@ MaybeObject* JSObject::SetPropertyViaPrototypes( if (!FLAG_es5_readonly) *done = false; if (*done) { if (strict_mode == kNonStrictMode) return value; - Handle<Object> args[] = { Handle<Object>(name, isolate), - Handle<Object>(this, isolate)}; - return isolate->Throw(*isolate->factory()->NewTypeError( - "strict_read_only_property", HandleVector(args, ARRAY_SIZE(args)))); + Handle<Object> args[] = { name, object }; + Handle<Object> error = isolate->factory()->NewTypeError( + "strict_read_only_property", HandleVector(args, ARRAY_SIZE(args))); + isolate->Throw(*error); + return Handle<Object>(); } - return heap->the_hole_value(); + return isolate->factory()->the_hole_value(); } @@ -3340,14 +3417,15 @@ void JSObject::LookupRealNamedPropertyInPrototypes(Name* name, // We only need to deal with CALLBACKS and INTERCEPTORS -MaybeObject* JSObject::SetPropertyWithFailedAccessCheck( +Handle<Object> JSObject::SetPropertyWithFailedAccessCheck( + Handle<JSObject> object, LookupResult* result, - Name* name, - Object* value, + Handle<Name> name, + Handle<Object> value, bool check_prototype, StrictModeFlag strict_mode) { if (check_prototype && !result->IsProperty()) { - LookupRealNamedPropertyInPrototypes(name, result); + object->LookupRealNamedPropertyInPrototypes(*name, result); } if (result->IsProperty()) { @@ -3356,21 +3434,23 @@ MaybeObject* JSObject::SetPropertyWithFailedAccessCheck( case CALLBACKS: { Object* obj = result->GetCallbackObject(); if (obj->IsAccessorInfo()) { - AccessorInfo* info = AccessorInfo::cast(obj); + Handle<AccessorInfo> info(AccessorInfo::cast(obj)); if (info->all_can_write()) { - return SetPropertyWithCallback(result->GetCallbackObject(), + return SetPropertyWithCallback(object, + info, name, value, - result->holder(), + handle(result->holder()), strict_mode); } } else if (obj->IsAccessorPair()) { - AccessorPair* pair = AccessorPair::cast(obj); + Handle<AccessorPair> pair(AccessorPair::cast(obj)); if (pair->all_can_read()) { - return SetPropertyWithCallback(result->GetCallbackObject(), + return SetPropertyWithCallback(object, + pair, name, value, - result->holder(), + handle(result->holder()), strict_mode); } } @@ -3379,10 +3459,11 @@ MaybeObject* JSObject::SetPropertyWithFailedAccessCheck( case INTERCEPTOR: { // Try lookup real named properties. 
Note that only property can be // set is callbacks marked as ALL_CAN_WRITE on the prototype chain. - LookupResult r(GetIsolate()); - LookupRealNamedProperty(name, &r); + LookupResult r(object->GetIsolate()); + object->LookupRealNamedProperty(*name, &r); if (r.IsProperty()) { - return SetPropertyWithFailedAccessCheck(&r, + return SetPropertyWithFailedAccessCheck(object, + &r, name, value, check_prototype, @@ -3397,42 +3478,38 @@ MaybeObject* JSObject::SetPropertyWithFailedAccessCheck( } } - Isolate* isolate = GetIsolate(); - HandleScope scope(isolate); - Handle<Object> value_handle(value, isolate); - isolate->ReportFailedAccessCheck(this, v8::ACCESS_SET); - RETURN_IF_SCHEDULED_EXCEPTION(isolate); - return *value_handle; + Isolate* isolate = object->GetIsolate(); + isolate->ReportFailedAccessCheck(*object, v8::ACCESS_SET); + RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object); + return value; } -MaybeObject* JSReceiver::SetProperty(LookupResult* result, - Name* key, - Object* value, - PropertyAttributes attributes, - StrictModeFlag strict_mode, - JSReceiver::StoreFromKeyed store_mode) { +Handle<Object> JSReceiver::SetProperty(Handle<JSReceiver> object, + LookupResult* result, + Handle<Name> key, + Handle<Object> value, + PropertyAttributes attributes, + StrictModeFlag strict_mode, + StoreFromKeyed store_mode) { if (result->IsHandler()) { - return result->proxy()->SetPropertyWithHandler( - this, key, value, attributes, strict_mode); + return JSProxy::SetPropertyWithHandler(handle(result->proxy()), + object, key, value, attributes, strict_mode); } else { - return JSObject::cast(this)->SetPropertyForResult( + return JSObject::SetPropertyForResult(Handle<JSObject>::cast(object), result, key, value, attributes, strict_mode, store_mode); } } -bool JSProxy::HasPropertyWithHandler(Name* name_raw) { - Isolate* isolate = GetIsolate(); - HandleScope scope(isolate); - Handle<Object> receiver(this, isolate); - Handle<Object> name(name_raw, isolate); +bool JSProxy::HasPropertyWithHandler(Handle<JSProxy> proxy, Handle<Name> name) { + Isolate* isolate = proxy->GetIsolate(); // TODO(rossberg): adjust once there is a story for symbols vs proxies. if (name->IsSymbol()) return false; Handle<Object> args[] = { name }; - Handle<Object> result = CallTrap( + Handle<Object> result = proxy->CallTrap( "has", isolate->derived_has_trap(), ARRAY_SIZE(args), args); if (isolate->has_pending_exception()) return false; @@ -3440,58 +3517,51 @@ bool JSProxy::HasPropertyWithHandler(Name* name_raw) { } -MUST_USE_RESULT MaybeObject* JSProxy::SetPropertyWithHandler( - JSReceiver* receiver_raw, - Name* name_raw, - Object* value_raw, - PropertyAttributes attributes, - StrictModeFlag strict_mode) { - Isolate* isolate = GetIsolate(); - HandleScope scope(isolate); - Handle<JSReceiver> receiver(receiver_raw); - Handle<Object> name(name_raw, isolate); - Handle<Object> value(value_raw, isolate); +Handle<Object> JSProxy::SetPropertyWithHandler(Handle<JSProxy> proxy, + Handle<JSReceiver> receiver, + Handle<Name> name, + Handle<Object> value, + PropertyAttributes attributes, + StrictModeFlag strict_mode) { + Isolate* isolate = proxy->GetIsolate(); // TODO(rossberg): adjust once there is a story for symbols vs proxies. 
- if (name->IsSymbol()) return *value; + if (name->IsSymbol()) return value; Handle<Object> args[] = { receiver, name, value }; - CallTrap("set", isolate->derived_set_trap(), ARRAY_SIZE(args), args); - if (isolate->has_pending_exception()) return Failure::Exception(); + proxy->CallTrap("set", isolate->derived_set_trap(), ARRAY_SIZE(args), args); + if (isolate->has_pending_exception()) return Handle<Object>(); - return *value; + return value; } -MUST_USE_RESULT MaybeObject* JSProxy::SetPropertyViaPrototypesWithHandler( - JSReceiver* receiver_raw, - Name* name_raw, - Object* value_raw, +Handle<Object> JSProxy::SetPropertyViaPrototypesWithHandler( + Handle<JSProxy> proxy, + Handle<JSReceiver> receiver, + Handle<Name> name, + Handle<Object> value, PropertyAttributes attributes, StrictModeFlag strict_mode, bool* done) { - Isolate* isolate = GetIsolate(); - Handle<JSProxy> proxy(this); - Handle<JSReceiver> receiver(receiver_raw); - Handle<Name> name(name_raw); - Handle<Object> value(value_raw, isolate); - Handle<Object> handler(this->handler(), isolate); // Trap might morph proxy. + Isolate* isolate = proxy->GetIsolate(); + Handle<Object> handler(proxy->handler(), isolate); // Trap might morph proxy. // TODO(rossberg): adjust once there is a story for symbols vs proxies. if (name->IsSymbol()) { *done = false; - return isolate->heap()->the_hole_value(); + return isolate->factory()->the_hole_value(); } *done = true; // except where redefined... Handle<Object> args[] = { name }; Handle<Object> result = proxy->CallTrap( "getPropertyDescriptor", Handle<Object>(), ARRAY_SIZE(args), args); - if (isolate->has_pending_exception()) return Failure::Exception(); + if (isolate->has_pending_exception()) return Handle<Object>(); if (result->IsUndefined()) { *done = false; - return isolate->heap()->the_hole_value(); + return isolate->factory()->the_hole_value(); } // Emulate [[GetProperty]] semantics for proxies. @@ -3500,7 +3570,7 @@ MUST_USE_RESULT MaybeObject* JSProxy::SetPropertyViaPrototypesWithHandler( Handle<Object> desc = Execution::Call( isolate, isolate->to_complete_property_descriptor(), result, ARRAY_SIZE(argv), argv, &has_pending_exception); - if (has_pending_exception) return Failure::Exception(); + if (has_pending_exception) return Handle<Object>(); // [[GetProperty]] requires to check that all properties are configurable. Handle<String> configurable_name = @@ -3517,7 +3587,8 @@ MUST_USE_RESULT MaybeObject* JSProxy::SetPropertyViaPrototypesWithHandler( Handle<Object> args[] = { handler, trap, name }; Handle<Object> error = isolate->factory()->NewTypeError( "proxy_prop_not_configurable", HandleVector(args, ARRAY_SIZE(args))); - return isolate->Throw(*error); + isolate->Throw(*error); + return Handle<Object>(); } ASSERT(configurable->IsTrue()); @@ -3538,12 +3609,13 @@ MUST_USE_RESULT MaybeObject* JSProxy::SetPropertyViaPrototypesWithHandler( ASSERT(!isolate->has_pending_exception()); ASSERT(writable->IsTrue() || writable->IsFalse()); *done = writable->IsFalse(); - if (!*done) return GetHeap()->the_hole_value(); - if (strict_mode == kNonStrictMode) return *value; + if (!*done) return isolate->factory()->the_hole_value(); + if (strict_mode == kNonStrictMode) return value; Handle<Object> args[] = { name, receiver }; Handle<Object> error = isolate->factory()->NewTypeError( "strict_read_only_property", HandleVector(args, ARRAY_SIZE(args))); - return isolate->Throw(*error); + isolate->Throw(*error); + return Handle<Object>(); } // We have an AccessorDescriptor. 
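// Note: the proxy write path above mirrors [[GetProperty]] followed by the
// ES5 [[Put]] gates: fetch a completed descriptor from the
// "getPropertyDescriptor" trap, require it to be configurable, then either
// gate on [[Writable]] (data descriptor) or dispatch to the setter (accessor
// descriptor, handled in the hunk below). Toy decision table (standalone
// C++, not V8 types):
struct ToyDescriptor {
  bool is_data;
  bool writable;    // meaningful for data descriptors only
  bool has_setter;  // meaningful for accessor descriptors only
};
// Returns true if the write may proceed (or would be performed by a setter);
// false corresponds to the strict-mode TypeError cases above.
static bool ToyMayWrite(const ToyDescriptor& desc) {
  if (desc.is_data) return desc.writable;
  return desc.has_setter;  // no setter => "no_setter_in_callback"
}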
@@ -3553,15 +3625,16 @@ MUST_USE_RESULT MaybeObject* JSProxy::SetPropertyViaPrototypesWithHandler( ASSERT(!isolate->has_pending_exception()); if (!setter->IsUndefined()) { // TODO(rossberg): nicer would be to cast to some JSCallable here... - return receiver->SetPropertyWithDefinedSetter( - JSReceiver::cast(*setter), *value); + return SetPropertyWithDefinedSetter( + receiver, Handle<JSReceiver>::cast(setter), value); } - if (strict_mode == kNonStrictMode) return *value; + if (strict_mode == kNonStrictMode) return value; Handle<Object> args2[] = { name, proxy }; Handle<Object> error = isolate->factory()->NewTypeError( "no_setter_in_callback", HandleVector(args2, ARRAY_SIZE(args2))); - return isolate->Throw(*error); + isolate->Throw(*error); + return Handle<Object>(); } @@ -3726,44 +3799,74 @@ MUST_USE_RESULT Handle<Object> JSProxy::CallTrap(const char* name, } -void JSObject::AllocateStorageForMap(Handle<JSObject> object, Handle<Map> map) { - CALL_HEAP_FUNCTION_VOID( - object->GetIsolate(), - object->AllocateStorageForMap(*map)); +// TODO(mstarzinger): Temporary wrapper until handlified. +static Handle<Map> MapAsElementsKind(Handle<Map> map, ElementsKind kind) { + CALL_HEAP_FUNCTION(map->GetIsolate(), map->AsElementsKind(kind), Map); } -void JSObject::MigrateInstance(Handle<JSObject> object) { - CALL_HEAP_FUNCTION_VOID( - object->GetIsolate(), - object->MigrateInstance()); +void JSObject::AllocateStorageForMap(Handle<JSObject> object, Handle<Map> map) { + ASSERT(object->map()->inobject_properties() == map->inobject_properties()); + ElementsKind obj_kind = object->map()->elements_kind(); + ElementsKind map_kind = map->elements_kind(); + if (map_kind != obj_kind) { + ElementsKind to_kind = map_kind; + if (IsMoreGeneralElementsKindTransition(map_kind, obj_kind) || + IsDictionaryElementsKind(obj_kind)) { + to_kind = obj_kind; + } + if (IsDictionaryElementsKind(to_kind)) { + NormalizeElements(object); + } else { + TransitionElementsKind(object, to_kind); + } + map = MapAsElementsKind(map, to_kind); + } + int total_size = + map->NumberOfOwnDescriptors() + map->unused_property_fields(); + int out_of_object = total_size - map->inobject_properties(); + if (out_of_object != object->properties()->length()) { + Isolate* isolate = object->GetIsolate(); + Handle<FixedArray> new_properties = isolate->factory()->CopySizeFixedArray( + handle(object->properties()), out_of_object); + object->set_properties(*new_properties); + } + object->set_map(*map); } -Handle<Object> JSObject::TryMigrateInstance(Handle<JSObject> object) { - CALL_HEAP_FUNCTION( - object->GetIsolate(), - object->MigrateInstance(), - Object); +void JSObject::MigrateInstance(Handle<JSObject> object) { + // Converting any field to the most specific type will cause the + // GeneralizeFieldRepresentation algorithm to create the most general existing + // transition that matches the object. This achieves what is needed. 
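
MigrateInstance above relies on field representations forming a lattice: generalizing a field computes a join, so (per the comment) requesting the most specific representation makes GeneralizeFieldRepresentation land on the most general existing transition. A sketch of such a join, assuming a simplified subset of the lattice (illustrative, not V8's exact rules):

    #include <cstdio>

    enum Representation { kNone, kSmi, kDouble, kHeapObject, kTagged };

    // Join on the lattice: None is the identity, equal stays put, Smi
    // widens to Double, and any other mix widens all the way to Tagged.
    Representation Generalize(Representation a, Representation b) {
      if (a == b) return a;
      if (a == kNone) return b;
      if (b == kNone) return a;
      if ((a == kSmi && b == kDouble) || (a == kDouble && b == kSmi)) {
        return kDouble;
      }
      return kTagged;
    }

    int main() {
      printf("%d\n", Generalize(kSmi, kDouble));         // 2 (kDouble)
      printf("%d\n", Generalize(kDouble, kHeapObject));  // 4 (kTagged)
    }
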
+ Handle<Map> original_map(object->map()); + GeneralizeFieldRepresentation( + object, 0, Representation::None(), ALLOW_AS_CONSTANT); + if (FLAG_trace_migration) { + object->PrintInstanceMigration(stdout, *original_map, object->map()); + } } -Handle<Map> Map::GeneralizeRepresentation(Handle<Map> map, - int modify_index, - Representation representation, - StoreMode store_mode) { - CALL_HEAP_FUNCTION( - map->GetIsolate(), - map->GeneralizeRepresentation(modify_index, representation, store_mode), - Map); +Handle<Object> JSObject::TryMigrateInstance(Handle<JSObject> object) { + Map* new_map = object->map()->CurrentMapForDeprecated(); + if (new_map == NULL) return Handle<Object>(); + Handle<Map> original_map(object->map()); + JSObject::MigrateToMap(object, handle(new_map)); + if (FLAG_trace_migration) { + object->PrintInstanceMigration(stdout, *original_map, object->map()); + } + return object; } -static MaybeObject* SetPropertyUsingTransition(LookupResult* lookup, - Handle<Name> name, - Handle<Object> value, - PropertyAttributes attributes) { - Map* transition_map = lookup->GetTransitionTarget(); +Handle<Object> JSObject::SetPropertyUsingTransition( + Handle<JSObject> object, + LookupResult* lookup, + Handle<Name> name, + Handle<Object> value, + PropertyAttributes attributes) { + Handle<Map> transition_map(lookup->GetTransitionTarget()); int descriptor = transition_map->LastAdded(); DescriptorArray* descriptors = transition_map->instance_descriptors(); @@ -3773,8 +3876,8 @@ static MaybeObject* SetPropertyUsingTransition(LookupResult* lookup, // AddProperty will either normalize the object, or create a new fast copy // of the map. If we get a fast copy of the map, all field representations // will be tagged since the transition is omitted. - return lookup->holder()->AddProperty( - *name, *value, attributes, kNonStrictMode, + return JSObject::AddProperty( + object, name, value, attributes, kNonStrictMode, JSReceiver::CERTAINLY_NOT_STORE_FROM_KEYED, JSReceiver::OMIT_EXTENSIBILITY_CHECK, JSObject::FORCE_TAGGED, FORCE_FIELD, OMIT_TRANSITION); @@ -3785,45 +3888,41 @@ static MaybeObject* SetPropertyUsingTransition(LookupResult* lookup, // (value->IsUninitialized) as constant. 
if (details.type() == CONSTANT && descriptors->GetValue(descriptor) == *value) { - lookup->holder()->set_map(transition_map); - return *value; + object->set_map(*transition_map); + return value; } Representation representation = details.representation(); if (!value->FitsRepresentation(representation) || details.type() == CONSTANT) { - MaybeObject* maybe_map = transition_map->GeneralizeRepresentation( + transition_map = Map::GeneralizeRepresentation(transition_map, descriptor, value->OptimalRepresentation(), FORCE_FIELD); - if (!maybe_map->To(&transition_map)) return maybe_map; Object* back = transition_map->GetBackPointer(); if (back->IsMap()) { - MaybeObject* maybe_failure = - lookup->holder()->MigrateToMap(Map::cast(back)); - if (maybe_failure->IsFailure()) return maybe_failure; + MigrateToMap(object, handle(Map::cast(back))); } descriptors = transition_map->instance_descriptors(); representation = descriptors->GetDetails(descriptor).representation(); } int field_index = descriptors->GetFieldIndex(descriptor); - return lookup->holder()->AddFastPropertyUsingMap( - transition_map, *name, *value, field_index, representation); + AddFastPropertyUsingMap( + object, transition_map, name, value, field_index, representation); + return value; } -static MaybeObject* SetPropertyToField(LookupResult* lookup, - Handle<Name> name, - Handle<Object> value) { +static void SetPropertyToField(LookupResult* lookup, + Handle<Name> name, + Handle<Object> value) { Representation representation = lookup->representation(); if (!value->FitsRepresentation(representation) || lookup->type() == CONSTANT) { - MaybeObject* maybe_failure = - lookup->holder()->GeneralizeFieldRepresentation( - lookup->GetDescriptorIndex(), - value->OptimalRepresentation(), - FORCE_FIELD); - if (maybe_failure->IsFailure()) return maybe_failure; + JSObject::GeneralizeFieldRepresentation(handle(lookup->holder()), + lookup->GetDescriptorIndex(), + value->OptimalRepresentation(), + FORCE_FIELD); DescriptorArray* desc = lookup->holder()->map()->instance_descriptors(); int descriptor = lookup->GetDescriptorIndex(); representation = desc->GetDetails(descriptor).representation(); @@ -3833,199 +3932,182 @@ static MaybeObject* SetPropertyToField(LookupResult* lookup, HeapNumber* storage = HeapNumber::cast(lookup->holder()->RawFastPropertyAt( lookup->GetFieldIndex().field_index())); storage->set_value(value->Number()); - return *value; + return; } lookup->holder()->FastPropertyAtPut( lookup->GetFieldIndex().field_index(), *value); - return *value; } -static MaybeObject* ConvertAndSetLocalProperty(LookupResult* lookup, - Name* name, - Object* value, - PropertyAttributes attributes) { - JSObject* object = lookup->holder(); +static void ConvertAndSetLocalProperty(LookupResult* lookup, + Handle<Name> name, + Handle<Object> value, + PropertyAttributes attributes) { + Handle<JSObject> object(lookup->holder()); if (object->TooManyFastProperties()) { - MaybeObject* maybe_failure = object->NormalizeProperties( - CLEAR_INOBJECT_PROPERTIES, 0); - if (maybe_failure->IsFailure()) return maybe_failure; + JSObject::NormalizeProperties(object, CLEAR_INOBJECT_PROPERTIES, 0); } if (!object->HasFastProperties()) { - return object->ReplaceSlowProperty(name, value, attributes); + ReplaceSlowProperty(object, name, value, attributes); + return; } int descriptor_index = lookup->GetDescriptorIndex(); if (lookup->GetAttributes() == attributes) { - MaybeObject* maybe_failure = object->GeneralizeFieldRepresentation( - descriptor_index, Representation::Tagged(), FORCE_FIELD); - 
if (maybe_failure->IsFailure()) return maybe_failure; + JSObject::GeneralizeFieldRepresentation( + object, descriptor_index, Representation::Tagged(), FORCE_FIELD); } else { - Map* map; - MaybeObject* maybe_map = object->map()->CopyGeneralizeAllRepresentations( + Handle<Map> old_map(object->map()); + Handle<Map> new_map = Map::CopyGeneralizeAllRepresentations(old_map, descriptor_index, FORCE_FIELD, attributes, "attributes mismatch"); - if (!maybe_map->To(&map)) return maybe_map; - MaybeObject* maybe_failure = object->MigrateToMap(map); - if (maybe_failure->IsFailure()) return maybe_failure; + JSObject::MigrateToMap(object, new_map); } DescriptorArray* descriptors = object->map()->instance_descriptors(); int index = descriptors->GetDetails(descriptor_index).field_index(); - object->FastPropertyAtPut(index, value); - return value; + object->FastPropertyAtPut(index, *value); } -static MaybeObject* SetPropertyToFieldWithAttributes( - LookupResult* lookup, - Handle<Name> name, - Handle<Object> value, - PropertyAttributes attributes) { +static void SetPropertyToFieldWithAttributes(LookupResult* lookup, + Handle<Name> name, + Handle<Object> value, + PropertyAttributes attributes) { if (lookup->GetAttributes() == attributes) { - if (value->IsUninitialized()) return *value; - return SetPropertyToField(lookup, name, value); + if (value->IsUninitialized()) return; + SetPropertyToField(lookup, name, value); } else { - return ConvertAndSetLocalProperty(lookup, *name, *value, attributes); + ConvertAndSetLocalProperty(lookup, name, value, attributes); } } -MaybeObject* JSObject::SetPropertyForResult(LookupResult* lookup, - Name* name_raw, - Object* value_raw, - PropertyAttributes attributes, - StrictModeFlag strict_mode, - StoreFromKeyed store_mode) { - Heap* heap = GetHeap(); - Isolate* isolate = heap->isolate(); +Handle<Object> JSObject::SetPropertyForResult(Handle<JSObject> object, + LookupResult* lookup, + Handle<Name> name, + Handle<Object> value, + PropertyAttributes attributes, + StrictModeFlag strict_mode, + StoreFromKeyed store_mode) { + Isolate* isolate = object->GetIsolate(); // Make sure that the top context does not change when doing callbacks or // interceptor calls. - AssertNoContextChangeWithHandleScope ncc; + AssertNoContextChange ncc(isolate); // Optimization for 2-byte strings often used as keys in a decompression // dictionary. We internalize these short keys to avoid constantly // reallocating them. - if (name_raw->IsString() && !name_raw->IsInternalizedString() && - String::cast(name_raw)->length() <= 2) { - Object* internalized_version; - { MaybeObject* maybe_string_version = - heap->InternalizeString(String::cast(name_raw)); - if (maybe_string_version->ToObject(&internalized_version)) { - name_raw = String::cast(internalized_version); - } - } + if (name->IsString() && !name->IsInternalizedString() && + Handle<String>::cast(name)->length() <= 2) { + name = isolate->factory()->InternalizeString(Handle<String>::cast(name)); } // Check access rights if needed. 
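
The short-key optimization above (internalizing one- and two-character string keys) is classic string interning: keep one canonical copy of each string so later lookups can compare by identity. A standalone sketch using std::unordered_set as the string table (not V8's implementation):

    #include <cstdio>
    #include <string>
    #include <unordered_set>

    // insert() returns the canonical copy whether or not the string was
    // new; element addresses in an unordered_set are stable, so pointer
    // equality now implies string equality for interned keys.
    const std::string* Internalize(std::unordered_set<std::string>* table,
                                   const std::string& s) {
      return &*table->insert(s).first;
    }

    int main() {
      std::unordered_set<std::string> table;
      const std::string* a = Internalize(&table, "ab");
      const std::string* b = Internalize(&table, "ab");
      printf("%s\n", a == b ? "same object" : "different");  // same object
    }
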
- if (IsAccessCheckNeeded()) { - if (!isolate->MayNamedAccess(this, name_raw, v8::ACCESS_SET)) { - return SetPropertyWithFailedAccessCheck( - lookup, name_raw, value_raw, true, strict_mode); + if (object->IsAccessCheckNeeded()) { + if (!isolate->MayNamedAccess(*object, *name, v8::ACCESS_SET)) { + return SetPropertyWithFailedAccessCheck(object, lookup, name, value, + true, strict_mode); } } - if (IsJSGlobalProxy()) { - Object* proto = GetPrototype(); - if (proto->IsNull()) return value_raw; + if (object->IsJSGlobalProxy()) { + Handle<Object> proto(object->GetPrototype(), isolate); + if (proto->IsNull()) return value; ASSERT(proto->IsJSGlobalObject()); - return JSObject::cast(proto)->SetPropertyForResult( - lookup, name_raw, value_raw, attributes, strict_mode, store_mode); + return SetPropertyForResult(Handle<JSObject>::cast(proto), + lookup, name, value, attributes, strict_mode, store_mode); } - ASSERT(!lookup->IsFound() || lookup->holder() == this || + ASSERT(!lookup->IsFound() || lookup->holder() == *object || lookup->holder()->map()->is_hidden_prototype()); - // From this point on everything needs to be handlified, because - // SetPropertyViaPrototypes might call back into JavaScript. - HandleScope scope(isolate); - Handle<JSObject> self(this); - Handle<Name> name(name_raw); - Handle<Object> value(value_raw, isolate); - - if (!lookup->IsProperty() && !self->IsJSContextExtensionObject()) { + if (!lookup->IsProperty() && !object->IsJSContextExtensionObject()) { bool done = false; - MaybeObject* result_object = self->SetPropertyViaPrototypes( - *name, *value, attributes, strict_mode, &done); + Handle<Object> result_object = SetPropertyViaPrototypes( + object, name, value, attributes, strict_mode, &done); if (done) return result_object; } if (!lookup->IsFound()) { // Neither properties nor transitions found. - return self->AddProperty( - *name, *value, attributes, strict_mode, store_mode); + return AddProperty( + object, name, value, attributes, strict_mode, store_mode); } if (lookup->IsProperty() && lookup->IsReadOnly()) { if (strict_mode == kStrictMode) { - Handle<Object> args[] = { name, self }; - return isolate->Throw(*isolate->factory()->NewTypeError( - "strict_read_only_property", HandleVector(args, ARRAY_SIZE(args)))); + Handle<Object> args[] = { name, object }; + Handle<Object> error = isolate->factory()->NewTypeError( + "strict_read_only_property", HandleVector(args, ARRAY_SIZE(args))); + isolate->Throw(*error); + return Handle<Object>(); } else { - return *value; + return value; } } - Handle<Object> old_value(heap->the_hole_value(), isolate); - if (FLAG_harmony_observation && - map()->is_observed() && lookup->IsDataProperty()) { - old_value = Object::GetProperty(self, name); + Handle<Object> old_value = isolate->factory()->the_hole_value(); + bool is_observed = FLAG_harmony_observation && + object->map()->is_observed() && + *name != isolate->heap()->hidden_string(); + if (is_observed && lookup->IsDataProperty()) { + old_value = Object::GetProperty(object, name); } // This is a real property that is not read-only, or it is a // transition or null descriptor and there are no setters in the prototypes. 
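
When the receiver is a JSGlobalProxy, the hunk above re-dispatches the write to the global object found on the proxy's prototype, and a detached proxy (null prototype) silently absorbs the write. A toy sketch of that delegation step (invented structs, not V8 types):

    #include <cstdio>

    struct GlobalObject { int property; };

    struct GlobalProxy { GlobalObject* prototype; };  // null when detached

    // Mirrors the pattern: a detached proxy swallows the write; otherwise
    // the operation is retried on the object the proxy stands for.
    void SetOnProxy(GlobalProxy* proxy, int value) {
      if (proxy->prototype == nullptr) return;  // proto->IsNull(): no-op
      proxy->prototype->property = value;
    }

    int main() {
      GlobalObject global = {0};
      GlobalProxy proxy = {&global};
      SetOnProxy(&proxy, 42);
      printf("%d\n", global.property);  // 42
    }
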
- MaybeObject* result = *value; + Handle<Object> result = value; switch (lookup->type()) { case NORMAL: - result = lookup->holder()->SetNormalizedProperty(lookup, *value); + SetNormalizedProperty(handle(lookup->holder()), lookup, value); break; case FIELD: - result = SetPropertyToField(lookup, name, value); + SetPropertyToField(lookup, name, value); break; case CONSTANT: // Only replace the constant if necessary. - if (*value == lookup->GetConstant()) return *value; - result = SetPropertyToField(lookup, name, value); + if (*value == lookup->GetConstant()) return value; + SetPropertyToField(lookup, name, value); break; case CALLBACKS: { - Object* callback_object = lookup->GetCallbackObject(); - return self->SetPropertyWithCallback( - callback_object, *name, *value, lookup->holder(), strict_mode); + Handle<Object> callback_object(lookup->GetCallbackObject(), isolate); + return SetPropertyWithCallback(object, callback_object, name, value, + handle(lookup->holder()), strict_mode); } case INTERCEPTOR: - result = lookup->holder()->SetPropertyWithInterceptor( - *name, *value, attributes, strict_mode); + result = SetPropertyWithInterceptor(handle(lookup->holder()), name, value, + attributes, strict_mode); break; - case TRANSITION: { - result = SetPropertyUsingTransition(lookup, name, value, attributes); + case TRANSITION: + result = SetPropertyUsingTransition(handle(lookup->holder()), lookup, + name, value, attributes); break; - } case HANDLER: case NONEXISTENT: UNREACHABLE(); } - Handle<Object> hresult; - if (!result->ToHandle(&hresult, isolate)) return result; + RETURN_IF_EMPTY_HANDLE_VALUE(isolate, result, Handle<Object>()); - if (FLAG_harmony_observation && self->map()->is_observed()) { + if (is_observed) { if (lookup->IsTransition()) { - EnqueueChangeRecord(self, "new", name, old_value); + EnqueueChangeRecord(object, "new", name, old_value); } else { LookupResult new_lookup(isolate); - self->LocalLookup(*name, &new_lookup, true); + object->LocalLookup(*name, &new_lookup, true); if (new_lookup.IsDataProperty()) { - Handle<Object> new_value = Object::GetProperty(self, name); + Handle<Object> new_value = Object::GetProperty(object, name); if (!new_value->SameValue(*old_value)) { - EnqueueChangeRecord(self, "updated", name, old_value); + EnqueueChangeRecord(object, "updated", name, old_value); } } } } - return *hresult; + return result; } @@ -4063,142 +4145,116 @@ MaybeObject* JSObject::SetLocalPropertyIgnoreAttributesTrampoline( // doesn't handle function prototypes correctly. Handle<Object> JSObject::SetLocalPropertyIgnoreAttributes( Handle<JSObject> object, - Handle<Name> key, + Handle<Name> name, Handle<Object> value, PropertyAttributes attributes, ValueType value_type, StoreMode mode, ExtensibilityCheck extensibility_check) { - CALL_HEAP_FUNCTION( - object->GetIsolate(), - object->SetLocalPropertyIgnoreAttributes( - *key, *value, attributes, value_type, mode, extensibility_check), - Object); -} - + Isolate* isolate = object->GetIsolate(); -MaybeObject* JSObject::SetLocalPropertyIgnoreAttributes( - Name* name_raw, - Object* value_raw, - PropertyAttributes attributes, - ValueType value_type, - StoreMode mode, - ExtensibilityCheck extensibility_check) { // Make sure that the top context does not change when doing callbacks or // interceptor calls. 
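
The observation plumbing above enqueues an "updated" change record only when the stored value really changed, compared with SameValue rather than ==. A self-contained sketch of that check for doubles (illustrative types; V8 compares full JS values, and the divisions below assume IEEE 754 semantics):

    #include <cstdio>
    #include <string>
    #include <vector>

    struct ChangeRecord { std::string type, name; double old_value; };

    void SetObserved(std::vector<ChangeRecord>* queue,
                     double* slot, const std::string& name, double value) {
      double old_value = *slot;   // captured before the write
      *slot = value;
      // SameValue, not ==: distinguishes +0 from -0 (via 1/x) and treats
      // NaN as equal to itself.
      bool same = (old_value == value)
          ? !(old_value == 0 && (1 / old_value != 1 / value))
          : (old_value != old_value && value != value);
      if (!same) queue->push_back({"updated", name, old_value});
    }

    int main() {
      std::vector<ChangeRecord> queue;
      double x = 1.0;
      SetObserved(&queue, &x, "x", 1.0);  // unchanged: no record
      SetObserved(&queue, &x, "x", 2.0);  // one "updated" record
      printf("%zu\n", queue.size());      // 1
    }
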
- AssertNoContextChangeWithHandleScope ncc; - Isolate* isolate = GetIsolate(); + AssertNoContextChange ncc(isolate); + LookupResult lookup(isolate); - LocalLookup(name_raw, &lookup, true); - if (!lookup.IsFound()) map()->LookupTransition(this, name_raw, &lookup); + object->LocalLookup(*name, &lookup, true); + if (!lookup.IsFound()) { + object->map()->LookupTransition(*object, *name, &lookup); + } + // Check access rights if needed. - if (IsAccessCheckNeeded()) { - if (!isolate->MayNamedAccess(this, name_raw, v8::ACCESS_SET)) { - return SetPropertyWithFailedAccessCheck(&lookup, - name_raw, - value_raw, - false, - kNonStrictMode); + if (object->IsAccessCheckNeeded()) { + if (!isolate->MayNamedAccess(*object, *name, v8::ACCESS_SET)) { + return SetPropertyWithFailedAccessCheck(object, &lookup, name, value, + false, kNonStrictMode); } } - if (IsJSGlobalProxy()) { - Object* proto = GetPrototype(); - if (proto->IsNull()) return value_raw; + if (object->IsJSGlobalProxy()) { + Handle<Object> proto(object->GetPrototype(), isolate); + if (proto->IsNull()) return value; ASSERT(proto->IsJSGlobalObject()); - return JSObject::cast(proto)->SetLocalPropertyIgnoreAttributes( - name_raw, - value_raw, - attributes, - value_type, - mode, - extensibility_check); + return SetLocalPropertyIgnoreAttributes(Handle<JSObject>::cast(proto), + name, value, attributes, value_type, mode, extensibility_check); } if (lookup.IsFound() && (lookup.type() == INTERCEPTOR || lookup.type() == CALLBACKS)) { - LocalLookupRealNamedProperty(name_raw, &lookup); + object->LocalLookupRealNamedProperty(*name, &lookup); } // Check for accessor in prototype chain removed here in clone. if (!lookup.IsFound()) { // Neither properties nor transitions found. - return AddProperty( - name_raw, value_raw, attributes, kNonStrictMode, + return AddProperty(object, name, value, attributes, kNonStrictMode, MAY_BE_STORE_FROM_KEYED, extensibility_check, value_type, mode); } - // From this point on everything needs to be handlified. - HandleScope scope(isolate); - Handle<JSObject> self(this); - Handle<Name> name(name_raw); - Handle<Object> value(value_raw, isolate); - - Handle<Object> old_value(isolate->heap()->the_hole_value(), isolate); + Handle<Object> old_value = isolate->factory()->the_hole_value(); PropertyAttributes old_attributes = ABSENT; - bool is_observed = FLAG_harmony_observation && self->map()->is_observed(); + bool is_observed = FLAG_harmony_observation && + object->map()->is_observed() && + *name != isolate->heap()->hidden_string(); if (is_observed && lookup.IsProperty()) { if (lookup.IsDataProperty()) old_value = - Object::GetProperty(self, name); + Object::GetProperty(object, name); old_attributes = lookup.GetAttributes(); } // Check of IsReadOnly removed from here in clone. - MaybeObject* result = *value; switch (lookup.type()) { case NORMAL: - result = self->ReplaceSlowProperty(*name, *value, attributes); + ReplaceSlowProperty(object, name, value, attributes); break; case FIELD: - result = SetPropertyToFieldWithAttributes( - &lookup, name, value, attributes); + SetPropertyToFieldWithAttributes(&lookup, name, value, attributes); break; case CONSTANT: // Only replace the constant if necessary. 
if (lookup.GetAttributes() != attributes || *value != lookup.GetConstant()) { - result = SetPropertyToFieldWithAttributes( - &lookup, name, value, attributes); + SetPropertyToFieldWithAttributes(&lookup, name, value, attributes); } break; case CALLBACKS: - result = ConvertAndSetLocalProperty(&lookup, *name, *value, attributes); + ConvertAndSetLocalProperty(&lookup, name, value, attributes); break; - case TRANSITION: - result = SetPropertyUsingTransition(&lookup, name, value, attributes); + case TRANSITION: { + Handle<Object> result = SetPropertyUsingTransition( + handle(lookup.holder()), &lookup, name, value, attributes); + RETURN_IF_EMPTY_HANDLE_VALUE(isolate, result, Handle<Object>()); break; + } case NONEXISTENT: case HANDLER: case INTERCEPTOR: UNREACHABLE(); } - Handle<Object> hresult; - if (!result->ToHandle(&hresult, isolate)) return result; - if (is_observed) { if (lookup.IsTransition()) { - EnqueueChangeRecord(self, "new", name, old_value); + EnqueueChangeRecord(object, "new", name, old_value); } else if (old_value->IsTheHole()) { - EnqueueChangeRecord(self, "reconfigured", name, old_value); + EnqueueChangeRecord(object, "reconfigured", name, old_value); } else { LookupResult new_lookup(isolate); - self->LocalLookup(*name, &new_lookup, true); + object->LocalLookup(*name, &new_lookup, true); bool value_changed = false; if (new_lookup.IsDataProperty()) { - Handle<Object> new_value = Object::GetProperty(self, name); + Handle<Object> new_value = Object::GetProperty(object, name); value_changed = !old_value->SameValue(*new_value); } if (new_lookup.GetAttributes() != old_attributes) { if (!value_changed) old_value = isolate->factory()->the_hole_value(); - EnqueueChangeRecord(self, "reconfigured", name, old_value); + EnqueueChangeRecord(object, "reconfigured", name, old_value); } else if (value_changed) { - EnqueueChangeRecord(self, "updated", name, old_value); + EnqueueChangeRecord(object, "updated", name, old_value); } } } - return *hresult; + return value; } @@ -4235,7 +4291,7 @@ PropertyAttributes JSObject::GetPropertyAttributeWithInterceptor( // Make sure that the top context does not change when doing // callbacks or interceptor calls. - AssertNoContextChange ncc; + AssertNoContextChange ncc(isolate); Handle<InterceptorInfo> interceptor(GetNamedInterceptor()); Handle<JSObject> receiver_handle(receiver); @@ -4370,7 +4426,7 @@ PropertyAttributes JSObject::GetElementAttributeWithInterceptor( // Make sure that the top context does not change when doing // callbacks or interceptor calls. 
- AssertNoContextChange ncc; + AssertNoContextChange ncc(isolate); Handle<InterceptorInfo> interceptor(GetIndexedInterceptor()); Handle<JSReceiver> hreceiver(receiver); @@ -4422,52 +4478,49 @@ PropertyAttributes JSObject::GetElementAttributeWithoutInterceptor( } -MaybeObject* NormalizedMapCache::Get(JSObject* obj, - PropertyNormalizationMode mode) { - Isolate* isolate = obj->GetIsolate(); - Map* fast = obj->map(); - int index = fast->Hash() % kEntries; - Object* result = get(index); +Handle<Map> NormalizedMapCache::Get(Handle<NormalizedMapCache> cache, + Handle<JSObject> obj, + PropertyNormalizationMode mode) { + int index = obj->map()->Hash() % kEntries; + Handle<Object> result = handle(cache->get(index), cache->GetIsolate()); if (result->IsMap() && - Map::cast(result)->EquivalentToForNormalization(fast, mode)) { + Handle<Map>::cast(result)->EquivalentToForNormalization(obj->map(), + mode)) { #ifdef VERIFY_HEAP if (FLAG_verify_heap) { - Map::cast(result)->SharedMapVerify(); + Handle<Map>::cast(result)->SharedMapVerify(); } #endif -#ifdef DEBUG +#ifdef ENABLE_SLOW_ASSERTS if (FLAG_enable_slow_asserts) { // The cached map should match newly created normalized map bit-by-bit, // except for the code cache, which can contain some ics which can be // applied to the shared map. - Object* fresh; - MaybeObject* maybe_fresh = - fast->CopyNormalized(mode, SHARED_NORMALIZED_MAP); - if (maybe_fresh->ToObject(&fresh)) { - ASSERT(memcmp(Map::cast(fresh)->address(), - Map::cast(result)->address(), - Map::kCodeCacheOffset) == 0); - STATIC_ASSERT(Map::kDependentCodeOffset == - Map::kCodeCacheOffset + kPointerSize); - int offset = Map::kDependentCodeOffset + kPointerSize; - ASSERT(memcmp(Map::cast(fresh)->address() + offset, - Map::cast(result)->address() + offset, - Map::kSize - offset) == 0); - } + Handle<Map> fresh = Map::CopyNormalized(handle(obj->map()), mode, + SHARED_NORMALIZED_MAP); + + ASSERT(memcmp(fresh->address(), + Handle<Map>::cast(result)->address(), + Map::kCodeCacheOffset) == 0); + STATIC_ASSERT(Map::kDependentCodeOffset == + Map::kCodeCacheOffset + kPointerSize); + int offset = Map::kDependentCodeOffset + kPointerSize; + ASSERT(memcmp(fresh->address() + offset, + Handle<Map>::cast(result)->address() + offset, + Map::kSize - offset) == 0); } #endif - return result; + return Handle<Map>::cast(result); } - { MaybeObject* maybe_result = - fast->CopyNormalized(mode, SHARED_NORMALIZED_MAP); - if (!maybe_result->ToObject(&result)) return maybe_result; - } - ASSERT(Map::cast(result)->is_dictionary_map()); - set(index, result); + Isolate* isolate = cache->GetIsolate(); + Handle<Map> map = Map::CopyNormalized(handle(obj->map()), mode, + SHARED_NORMALIZED_MAP); + ASSERT(map->is_dictionary_map()); + cache->set(index, *map); isolate->counters()->normalized_maps()->Increment(); - return result; + return map; } @@ -4483,16 +4536,6 @@ void HeapObject::UpdateMapCodeCache(Handle<HeapObject> object, Handle<Name> name, Handle<Code> code) { Handle<Map> map(object->map()); - if (map->is_shared()) { - Handle<JSObject> receiver = Handle<JSObject>::cast(object); - // Fast case maps are never marked as shared. - ASSERT(!receiver->HasFastProperties()); - // Replace the map with an identical copy that can be safely modified. 
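
NormalizedMapCache::Get, handlified above, is a direct-mapped cache: Hash() % kEntries selects a single slot, an equivalence test decides hit or miss, and a miss simply overwrites the slot. A sketch of that cache shape with illustrative key/value types:

    #include <cstdio>
    #include <string>

    const int kEntries = 64;

    struct Cache {
      std::string keys[kEntries];
      int values[kEntries];
      bool used[kEntries] = {};
    };

    int GetOrCreate(Cache* cache, const std::string& key,
                    int (*create)(const std::string&)) {
      size_t index = std::hash<std::string>()(key) % kEntries;
      if (cache->used[index] && cache->keys[index] == key) {
        return cache->values[index];        // hit: reuse the shared entry
      }
      cache->values[index] = create(key);   // miss: build and overwrite slot
      cache->keys[index] = key;
      cache->used[index] = true;
      return cache->values[index];
    }

    int main() {
      Cache cache;
      auto create = [](const std::string& k) { return (int)k.size(); };
      int first = GetOrCreate(&cache, "ab", create);
      int second = GetOrCreate(&cache, "ab", create);  // cache hit
      printf("%d %d\n", first, second);                // 2 2
    }
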
- map = Map::CopyNormalized(map, KEEP_INOBJECT_PROPERTIES, - UNIQUE_NORMALIZED_MAP); - receiver->GetIsolate()->counters()->normalized_maps()->Increment(); - receiver->set_map(*map); - } Map::UpdateCodeCache(map, name, code); } @@ -4500,65 +4543,55 @@ void HeapObject::UpdateMapCodeCache(Handle<HeapObject> object, void JSObject::NormalizeProperties(Handle<JSObject> object, PropertyNormalizationMode mode, int expected_additional_properties) { - CALL_HEAP_FUNCTION_VOID(object->GetIsolate(), - object->NormalizeProperties( - mode, expected_additional_properties)); -} - - -MaybeObject* JSObject::NormalizeProperties(PropertyNormalizationMode mode, - int expected_additional_properties) { - if (!HasFastProperties()) return this; + if (!object->HasFastProperties()) return; // The global object is always normalized. - ASSERT(!IsGlobalObject()); + ASSERT(!object->IsGlobalObject()); // JSGlobalProxy must never be normalized - ASSERT(!IsJSGlobalProxy()); + ASSERT(!object->IsJSGlobalProxy()); - Map* map_of_this = map(); + Isolate* isolate = object->GetIsolate(); + HandleScope scope(isolate); + Handle<Map> map(object->map()); // Allocate new content. - int real_size = map_of_this->NumberOfOwnDescriptors(); + int real_size = map->NumberOfOwnDescriptors(); int property_count = real_size; if (expected_additional_properties > 0) { property_count += expected_additional_properties; } else { property_count += 2; // Make space for two more properties. } - NameDictionary* dictionary; - MaybeObject* maybe_dictionary = - NameDictionary::Allocate(GetHeap(), property_count); - if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary; + Handle<NameDictionary> dictionary = + isolate->factory()->NewNameDictionary(property_count); - DescriptorArray* descs = map_of_this->instance_descriptors(); + Handle<DescriptorArray> descs(map->instance_descriptors()); for (int i = 0; i < real_size; i++) { PropertyDetails details = descs->GetDetails(i); switch (details.type()) { case CONSTANT: { + Handle<Name> key(descs->GetKey(i)); + Handle<Object> value(descs->GetConstant(i), isolate); PropertyDetails d = PropertyDetails( details.attributes(), NORMAL, i + 1); - Object* value = descs->GetConstant(i); - MaybeObject* maybe_dictionary = - dictionary->Add(descs->GetKey(i), value, d); - if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary; + dictionary = NameDictionaryAdd(dictionary, key, value, d); break; } case FIELD: { + Handle<Name> key(descs->GetKey(i)); + Handle<Object> value( + object->RawFastPropertyAt(descs->GetFieldIndex(i)), isolate); PropertyDetails d = PropertyDetails(details.attributes(), NORMAL, i + 1); - Object* value = RawFastPropertyAt(descs->GetFieldIndex(i)); - MaybeObject* maybe_dictionary = - dictionary->Add(descs->GetKey(i), value, d); - if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary; + dictionary = NameDictionaryAdd(dictionary, key, value, d); break; } case CALLBACKS: { - Object* value = descs->GetCallbacksObject(i); + Handle<Name> key(descs->GetKey(i)); + Handle<Object> value(descs->GetCallbacksObject(i), isolate); PropertyDetails d = PropertyDetails( details.attributes(), CALLBACKS, i + 1); - MaybeObject* maybe_dictionary = - dictionary->Add(descs->GetKey(i), value, d); - if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary; + dictionary = NameDictionaryAdd(dictionary, key, value, d); break; } case INTERCEPTOR: @@ -4572,62 +4605,52 @@ MaybeObject* JSObject::NormalizeProperties(PropertyNormalizationMode mode, } } - Heap* current_heap = GetHeap(); - // Copy the next 
enumeration index from instance descriptor. dictionary->SetNextEnumerationIndex(real_size + 1); - Map* new_map; - MaybeObject* maybe_map = - current_heap->isolate()->context()->native_context()-> - normalized_map_cache()->Get(this, mode); - if (!maybe_map->To(&new_map)) return maybe_map; + Handle<NormalizedMapCache> cache( + isolate->context()->native_context()->normalized_map_cache()); + Handle<Map> new_map = NormalizedMapCache::Get(cache, object, mode); ASSERT(new_map->is_dictionary_map()); - // We have now successfully allocated all the necessary objects. - // Changes can now be made with the guarantee that all of them take effect. + // From here on we cannot fail and we shouldn't GC anymore. + DisallowHeapAllocation no_allocation; // Resize the object in the heap if necessary. int new_instance_size = new_map->instance_size(); - int instance_size_delta = map_of_this->instance_size() - new_instance_size; + int instance_size_delta = map->instance_size() - new_instance_size; ASSERT(instance_size_delta >= 0); - current_heap->CreateFillerObjectAt(this->address() + new_instance_size, - instance_size_delta); - if (Marking::IsBlack(Marking::MarkBitFrom(this))) { - MemoryChunk::IncrementLiveBytesFromMutator(this->address(), + isolate->heap()->CreateFillerObjectAt(object->address() + new_instance_size, + instance_size_delta); + if (Marking::IsBlack(Marking::MarkBitFrom(*object))) { + MemoryChunk::IncrementLiveBytesFromMutator(object->address(), -instance_size_delta); } - set_map(new_map); - map_of_this->NotifyLeafMapLayoutChange(); + object->set_map(*new_map); + map->NotifyLeafMapLayoutChange(); - set_properties(dictionary); + object->set_properties(*dictionary); - current_heap->isolate()->counters()->props_to_dictionary()->Increment(); + isolate->counters()->props_to_dictionary()->Increment(); #ifdef DEBUG if (FLAG_trace_normalization) { PrintF("Object properties have been normalized:\n"); - Print(); + object->Print(); } #endif - return this; } void JSObject::TransformToFastProperties(Handle<JSObject> object, int unused_property_fields) { + if (object->HasFastProperties()) return; + ASSERT(!object->IsGlobalObject()); CALL_HEAP_FUNCTION_VOID( object->GetIsolate(), - object->TransformToFastProperties(unused_property_fields)); -} - - -MaybeObject* JSObject::TransformToFastProperties(int unused_property_fields) { - if (HasFastProperties()) return this; - ASSERT(!IsGlobalObject()); - return property_dictionary()-> - TransformPropertiesToFastFor(this, unused_property_fields); + object->property_dictionary()->TransformPropertiesToFastFor( + *object, unused_property_fields)); } @@ -4667,6 +4690,18 @@ static MUST_USE_RESULT MaybeObject* CopyFastElementsToDictionary( } +static Handle<SeededNumberDictionary> CopyFastElementsToDictionary( + Handle<FixedArrayBase> array, + int length, + Handle<SeededNumberDictionary> dict) { + Isolate* isolate = array->GetIsolate(); + CALL_HEAP_FUNCTION(isolate, + CopyFastElementsToDictionary( + isolate, *array, length, *dict), + SeededNumberDictionary); +} + + Handle<SeededNumberDictionary> JSObject::NormalizeElements( Handle<JSObject> object) { CALL_HEAP_FUNCTION(object->GetIsolate(), @@ -5089,7 +5124,7 @@ Handle<Object> JSObject::DeleteElementWithInterceptor(Handle<JSObject> object, // Make sure that the top context does not change when doing // callbacks or interceptor calls. 
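
The handlified NormalizeProperties walks the descriptor array and re-adds each property to a fresh dictionary with PropertyDetails carrying enumeration index i + 1, so iteration order survives the fast-to-slow switch. A sketch of that copy step (std::map standing in for NameDictionary; all names illustrative):

    #include <cstdio>
    #include <map>
    #include <string>

    struct Entry { double value; int enum_index; };

    int main() {
      // "Fast" layout: parallel arrays, position == enumeration order.
      const char* keys[] = {"x", "y", "z"};
      double values[] = {1.0, 2.0, 3.0};

      std::map<std::string, Entry> dictionary;
      for (int i = 0; i < 3; i++) {
        // Mirrors d = PropertyDetails(attributes, NORMAL, i + 1)
        dictionary[keys[i]] = Entry{values[i], i + 1};
      }
      // SetNextEnumerationIndex(real_size + 1) analogue
      int next_enum_index = 3 + 1;
      printf("y -> %.0f (enum %d), next %d\n",
             dictionary["y"].value, dictionary["y"].enum_index,
             next_enum_index);
    }
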
- AssertNoContextChange ncc; + AssertNoContextChange ncc(isolate); Handle<InterceptorInfo> interceptor(object->GetIndexedInterceptor()); if (interceptor->deleter()->IsUndefined()) return factory->false_value(); @@ -5152,7 +5187,7 @@ Handle<Object> JSObject::DeleteElement(Handle<JSObject> object, Handle<Object> old_value; bool should_enqueue_change_record = false; if (FLAG_harmony_observation && object->map()->is_observed()) { - should_enqueue_change_record = object->HasLocalElement(index); + should_enqueue_change_record = HasLocalElement(object, index); if (should_enqueue_change_record) { old_value = object->GetLocalElementAccessorPair(index) != NULL ? Handle<Object>::cast(factory->the_hole_value()) @@ -5168,7 +5203,7 @@ Handle<Object> JSObject::DeleteElement(Handle<JSObject> object, result = AccessorDelete(object, index, mode); } - if (should_enqueue_change_record && !object->HasLocalElement(index)) { + if (should_enqueue_change_record && !HasLocalElement(object, index)) { Handle<String> name = factory->Uint32ToString(index); EnqueueChangeRecord(object, "deleted", name, old_value); } @@ -5222,7 +5257,9 @@ Handle<Object> JSObject::DeleteProperty(Handle<JSObject> object, } Handle<Object> old_value = isolate->factory()->the_hole_value(); - bool is_observed = FLAG_harmony_observation && object->map()->is_observed(); + bool is_observed = FLAG_harmony_observation && + object->map()->is_observed() && + *name != isolate->heap()->hidden_string(); if (is_observed && lookup.IsDataProperty()) { old_value = Object::GetProperty(object, name); } @@ -5243,7 +5280,7 @@ Handle<Object> JSObject::DeleteProperty(Handle<JSObject> object, result = DeleteNormalizedProperty(object, name, mode); } - if (is_observed && !object->HasLocalProperty(*name)) { + if (is_observed && !HasLocalProperty(object, name)) { EnqueueChangeRecord(object, "deleted", name, old_value); } @@ -5405,59 +5442,50 @@ bool JSObject::ReferencesObject(Object* obj) { Handle<Object> JSObject::PreventExtensions(Handle<JSObject> object) { - CALL_HEAP_FUNCTION(object->GetIsolate(), object->PreventExtensions(), Object); -} - - -MaybeObject* JSObject::PreventExtensions() { - Isolate* isolate = GetIsolate(); - if (IsAccessCheckNeeded() && - !isolate->MayNamedAccess(this, + Isolate* isolate = object->GetIsolate(); + if (object->IsAccessCheckNeeded() && + !isolate->MayNamedAccess(*object, isolate->heap()->undefined_value(), v8::ACCESS_KEYS)) { - isolate->ReportFailedAccessCheck(this, v8::ACCESS_KEYS); - RETURN_IF_SCHEDULED_EXCEPTION(isolate); - return isolate->heap()->false_value(); + isolate->ReportFailedAccessCheck(*object, v8::ACCESS_KEYS); + RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object); + return isolate->factory()->false_value(); } - if (IsJSGlobalProxy()) { - Object* proto = GetPrototype(); - if (proto->IsNull()) return this; + if (object->IsJSGlobalProxy()) { + Handle<Object> proto(object->GetPrototype(), isolate); + if (proto->IsNull()) return object; ASSERT(proto->IsJSGlobalObject()); - return JSObject::cast(proto)->PreventExtensions(); + return PreventExtensions(Handle<JSObject>::cast(proto)); } // It's not possible to seal objects with external array elements - if (HasExternalArrayElements()) { - HandleScope scope(isolate); - Handle<Object> object(this, isolate); + if (object->HasExternalArrayElements()) { Handle<Object> error = isolate->factory()->NewTypeError( "cant_prevent_ext_external_array_elements", HandleVector(&object, 1)); - return isolate->Throw(*error); + isolate->Throw(*error); + return Handle<Object>(); } // If there are 
fast elements we normalize. - SeededNumberDictionary* dictionary = NULL; - { MaybeObject* maybe = NormalizeElements(); - if (!maybe->To<SeededNumberDictionary>(&dictionary)) return maybe; - } - ASSERT(HasDictionaryElements() || HasDictionaryArgumentsElements()); + Handle<SeededNumberDictionary> dictionary = NormalizeElements(object); + ASSERT(object->HasDictionaryElements() || + object->HasDictionaryArgumentsElements()); + // Make sure that we never go back to fast case. dictionary->set_requires_slow_elements(); // Do a map transition, other objects with this map may still // be extensible. // TODO(adamk): Extend the NormalizedMapCache to handle non-extensible maps. - Map* new_map; - MaybeObject* maybe = map()->Copy(); - if (!maybe->To(&new_map)) return maybe; + Handle<Map> new_map = Map::Copy(handle(object->map())); new_map->set_is_extensible(false); - set_map(new_map); - ASSERT(!map()->is_extensible()); - return new_map; + object->set_map(*new_map); + ASSERT(!object->map()->is_extensible()); + return object; } @@ -5482,223 +5510,318 @@ static void FreezeDictionary(Dictionary* dictionary) { } -MUST_USE_RESULT MaybeObject* JSObject::Freeze(Isolate* isolate) { +Handle<Object> JSObject::Freeze(Handle<JSObject> object) { // Freezing non-strict arguments should be handled elsewhere. - ASSERT(!HasNonStrictArgumentsElements()); - - Heap* heap = isolate->heap(); + ASSERT(!object->HasNonStrictArgumentsElements()); - if (map()->is_frozen()) return this; + if (object->map()->is_frozen()) return object; - if (IsAccessCheckNeeded() && - !isolate->MayNamedAccess(this, - heap->undefined_value(), + Isolate* isolate = object->GetIsolate(); + if (object->IsAccessCheckNeeded() && + !isolate->MayNamedAccess(*object, + isolate->heap()->undefined_value(), v8::ACCESS_KEYS)) { - isolate->ReportFailedAccessCheck(this, v8::ACCESS_KEYS); - RETURN_IF_SCHEDULED_EXCEPTION(isolate); - return heap->false_value(); + isolate->ReportFailedAccessCheck(*object, v8::ACCESS_KEYS); + RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object); + return isolate->factory()->false_value(); } - if (IsJSGlobalProxy()) { - Object* proto = GetPrototype(); - if (proto->IsNull()) return this; + if (object->IsJSGlobalProxy()) { + Handle<Object> proto(object->GetPrototype(), isolate); + if (proto->IsNull()) return object; ASSERT(proto->IsJSGlobalObject()); - return JSObject::cast(proto)->Freeze(isolate); + return Freeze(Handle<JSObject>::cast(proto)); } // It's not possible to freeze objects with external array elements - if (HasExternalArrayElements()) { - HandleScope scope(isolate); - Handle<Object> object(this, isolate); + if (object->HasExternalArrayElements()) { Handle<Object> error = isolate->factory()->NewTypeError( "cant_prevent_ext_external_array_elements", HandleVector(&object, 1)); - return isolate->Throw(*error); + isolate->Throw(*error); + return Handle<Object>(); } - SeededNumberDictionary* new_element_dictionary = NULL; - if (!elements()->IsDictionary()) { - int length = IsJSArray() - ? Smi::cast(JSArray::cast(this)->length())->value() - : elements()->length(); + Handle<SeededNumberDictionary> new_element_dictionary; + if (!object->elements()->IsDictionary()) { + int length = object->IsJSArray() + ? 
Smi::cast(Handle<JSArray>::cast(object)->length())->value() + : object->elements()->length(); if (length > 0) { int capacity = 0; int used = 0; - GetElementsCapacityAndUsage(&capacity, &used); - MaybeObject* maybe_dict = SeededNumberDictionary::Allocate(heap, used); - if (!maybe_dict->To(&new_element_dictionary)) return maybe_dict; + object->GetElementsCapacityAndUsage(&capacity, &used); + new_element_dictionary = + isolate->factory()->NewSeededNumberDictionary(used); // Move elements to a dictionary; avoid calling NormalizeElements to avoid // unnecessary transitions. - maybe_dict = CopyFastElementsToDictionary(isolate, elements(), length, - new_element_dictionary); - if (!maybe_dict->To(&new_element_dictionary)) return maybe_dict; + new_element_dictionary = CopyFastElementsToDictionary( + handle(object->elements()), length, new_element_dictionary); } else { // No existing elements, use a pre-allocated empty backing store - new_element_dictionary = heap->empty_slow_element_dictionary(); + new_element_dictionary = + isolate->factory()->empty_slow_element_dictionary(); } } LookupResult result(isolate); - map()->LookupTransition(this, heap->frozen_symbol(), &result); + Handle<Map> old_map(object->map()); + old_map->LookupTransition(*object, isolate->heap()->frozen_symbol(), &result); if (result.IsTransition()) { Map* transition_map = result.GetTransitionTarget(); ASSERT(transition_map->has_dictionary_elements()); ASSERT(transition_map->is_frozen()); ASSERT(!transition_map->is_extensible()); - set_map(transition_map); - } else if (HasFastProperties() && map()->CanHaveMoreTransitions()) { + object->set_map(transition_map); + } else if (object->HasFastProperties() && old_map->CanHaveMoreTransitions()) { // Create a new descriptor array with fully-frozen properties - int num_descriptors = map()->NumberOfOwnDescriptors(); - DescriptorArray* new_descriptors; - MaybeObject* maybe_descriptors = - map()->instance_descriptors()->CopyUpToAddAttributes(num_descriptors, - FROZEN); - if (!maybe_descriptors->To(&new_descriptors)) return maybe_descriptors; - - Map* new_map; - MaybeObject* maybe_new_map = map()->CopyReplaceDescriptors( - new_descriptors, INSERT_TRANSITION, heap->frozen_symbol()); - if (!maybe_new_map->To(&new_map)) return maybe_new_map; + int num_descriptors = old_map->NumberOfOwnDescriptors(); + Handle<DescriptorArray> new_descriptors = + DescriptorArray::CopyUpToAddAttributes( + handle(old_map->instance_descriptors()), num_descriptors, FROZEN); + Handle<Map> new_map = Map::CopyReplaceDescriptors( + old_map, new_descriptors, INSERT_TRANSITION, + isolate->factory()->frozen_symbol()); new_map->freeze(); new_map->set_is_extensible(false); new_map->set_elements_kind(DICTIONARY_ELEMENTS); - set_map(new_map); + object->set_map(*new_map); } else { // Slow path: need to normalize properties for safety - MaybeObject* maybe = NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0); - if (maybe->IsFailure()) return maybe; + NormalizeProperties(object, CLEAR_INOBJECT_PROPERTIES, 0); // Create a new map, since other objects with this map may be extensible. // TODO(adamk): Extend the NormalizedMapCache to handle non-extensible maps. 
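
Freeze above first probes for an existing transition keyed by the frozen symbol; only on a miss does it copy the map and insert the transition, so freezing many objects of one shape converges on a single shared frozen map. A sketch of that lookup-before-copy pattern (invented structures; ownership is ignored in this sketch):

    #include <cstdio>
    #include <map>
    #include <string>

    struct Shape {
      bool frozen = false;
      std::map<std::string, Shape*> transitions;  // symbol -> successor
    };

    Shape* Freeze(Shape* shape) {
      auto it = shape->transitions.find("frozen");
      if (it != shape->transitions.end()) {
        return it->second;               // reuse the cached transition
      }
      Shape* copy = new Shape(*shape);   // Map::Copy analogue
      copy->frozen = true;
      copy->transitions.clear();
      shape->transitions["frozen"] = copy;  // INSERT_TRANSITION analogue
      return copy;
    }

    int main() {
      Shape base;
      printf("%s\n", Freeze(&base) == Freeze(&base) ? "shared" : "distinct");
    }
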
- Map* new_map; - MaybeObject* maybe_copy = map()->Copy(); - if (!maybe_copy->To(&new_map)) return maybe_copy; + Handle<Map> new_map = Map::Copy(handle(object->map())); new_map->freeze(); new_map->set_is_extensible(false); new_map->set_elements_kind(DICTIONARY_ELEMENTS); - set_map(new_map); + object->set_map(*new_map); // Freeze dictionary-mode properties - FreezeDictionary(property_dictionary()); + FreezeDictionary(object->property_dictionary()); } - ASSERT(map()->has_dictionary_elements()); - if (new_element_dictionary != NULL) { - set_elements(new_element_dictionary); + ASSERT(object->map()->has_dictionary_elements()); + if (!new_element_dictionary.is_null()) { + object->set_elements(*new_element_dictionary); } - if (elements() != heap->empty_slow_element_dictionary()) { - SeededNumberDictionary* dictionary = element_dictionary(); + if (object->elements() != isolate->heap()->empty_slow_element_dictionary()) { + SeededNumberDictionary* dictionary = object->element_dictionary(); // Make sure we never go back to the fast case dictionary->set_requires_slow_elements(); // Freeze all elements in the dictionary FreezeDictionary(dictionary); } - return this; + return object; } -MUST_USE_RESULT MaybeObject* JSObject::SetObserved(Isolate* isolate) { - if (map()->is_observed()) - return isolate->heap()->undefined_value(); +void JSObject::SetObserved(Handle<JSObject> object) { + Isolate* isolate = object->GetIsolate(); - Heap* heap = isolate->heap(); + if (object->map()->is_observed()) + return; - if (!HasExternalArrayElements()) { + if (!object->HasExternalArrayElements()) { // Go to dictionary mode, so that we don't skip map checks. - MaybeObject* maybe = NormalizeElements(); - if (maybe->IsFailure()) return maybe; - ASSERT(!HasFastElements()); + NormalizeElements(object); + ASSERT(!object->HasFastElements()); } LookupResult result(isolate); - map()->LookupTransition(this, heap->observed_symbol(), &result); + object->map()->LookupTransition(*object, + isolate->heap()->observed_symbol(), + &result); - Map* new_map; + Handle<Map> new_map; if (result.IsTransition()) { - new_map = result.GetTransitionTarget(); + new_map = handle(result.GetTransitionTarget()); ASSERT(new_map->is_observed()); - } else if (map()->CanHaveMoreTransitions()) { - MaybeObject* maybe_new_map = map()->CopyForObserved(); - if (!maybe_new_map->To(&new_map)) return maybe_new_map; + } else if (object->map()->CanHaveMoreTransitions()) { + new_map = Map::CopyForObserved(handle(object->map())); } else { - MaybeObject* maybe_copy = map()->Copy(); - if (!maybe_copy->To(&new_map)) return maybe_copy; + new_map = Map::Copy(handle(object->map())); new_map->set_is_observed(true); } - set_map(new_map); + object->set_map(*new_map); +} - return heap->undefined_value(); + +Handle<JSObject> JSObject::Copy(Handle<JSObject> object, + Handle<AllocationSite> site) { + Isolate* isolate = object->GetIsolate(); + CALL_HEAP_FUNCTION(isolate, + isolate->heap()->CopyJSObject(*object, *site), JSObject); } -MUST_USE_RESULT MaybeObject* JSObject::DeepCopy(Isolate* isolate) { - StackLimitCheck check(isolate); - if (check.HasOverflowed()) return isolate->StackOverflow(); +Handle<JSObject> JSObject::Copy(Handle<JSObject> object) { + Isolate* isolate = object->GetIsolate(); + CALL_HEAP_FUNCTION(isolate, + isolate->heap()->CopyJSObject(*object), JSObject); +} - if (map()->is_deprecated()) { - MaybeObject* maybe_failure = MigrateInstance(); - if (maybe_failure->IsFailure()) return maybe_failure; + +class JSObjectWalkVisitor { + public: + explicit 
JSObjectWalkVisitor(AllocationSiteContext* site_context) : + site_context_(site_context) {} + virtual ~JSObjectWalkVisitor() {} + + Handle<JSObject> Visit(Handle<JSObject> object) { + return StructureWalk(object); } - Heap* heap = isolate->heap(); - Object* result; - { MaybeObject* maybe_result = heap->CopyJSObject(this); - if (!maybe_result->ToObject(&result)) return maybe_result; + virtual bool is_copying() = 0; + + protected: + Handle<JSObject> StructureWalk(Handle<JSObject> object); + + // The returned handle will be used for the object in all subsequent usages. + // This allows VisitObject to make a copy of the object if desired. + virtual Handle<JSObject> VisitObject(Handle<JSObject> object) = 0; + virtual Handle<JSObject> VisitElementOrProperty(Handle<JSObject> object, + Handle<JSObject> value) = 0; + + AllocationSiteContext* site_context() { return site_context_; } + + private: + AllocationSiteContext* site_context_; +}; + + +class JSObjectCopyVisitor: public JSObjectWalkVisitor { + public: + explicit JSObjectCopyVisitor(AllocationSiteContext* site_context) + : JSObjectWalkVisitor(site_context) {} + + virtual bool is_copying() V8_OVERRIDE { return true; } + + // The returned handle will be used for the object in all + // subsequent usages. This allows VisitObject to make a copy + // of the object if desired. + virtual Handle<JSObject> VisitObject(Handle<JSObject> object) V8_OVERRIDE { + // Only create a memento if + // 1) we have a JSArray, and + // 2) the elements kind is palatable + // 3) allow_mementos is true + Handle<JSObject> copy; + if (site_context()->activated() && + AllocationSite::CanTrack(object->map()->instance_type()) && + AllocationSite::GetMode(object->GetElementsKind()) == + TRACK_ALLOCATION_SITE) { + copy = JSObject::Copy(object, site_context()->current()); + } else { + copy = JSObject::Copy(object); + } + + return copy; + } + + virtual Handle<JSObject> VisitElementOrProperty( + Handle<JSObject> object, + Handle<JSObject> value) V8_OVERRIDE { + Handle<AllocationSite> current_site = site_context()->EnterNewScope(); + Handle<JSObject> copy_of_value = StructureWalk(value); + site_context()->ExitScope(current_site, value); + return copy_of_value; + } +}; + + +class JSObjectCreateAllocationSitesVisitor: public JSObjectWalkVisitor { + public: + explicit JSObjectCreateAllocationSitesVisitor( + AllocationSiteContext* site_context) + : JSObjectWalkVisitor(site_context) {} + + virtual bool is_copying() V8_OVERRIDE { return false; } + + // The returned handle will be used for the object in all + // subsequent usages. This allows VisitObject to make a copy + // of the object if desired. 
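
The new walker splits the old DeepCopy into one traversal and two visitors: JSObjectCopyVisitor substitutes a copy for every object it visits, while JSObjectCreateAllocationSitesVisitor returns the object unchanged and only records allocation sites along the way. A condensed sketch of that copying/non-copying visitor split (plain tree instead of JSObject):

    #include <cstdio>
    #include <vector>

    struct Node { int value; std::vector<Node*> children; };

    class Visitor {
     public:
      virtual ~Visitor() {}
      virtual Node* VisitNode(Node* n) = 0;  // may substitute a copy
      Node* Walk(Node* n) {
        Node* out = VisitNode(n);
        for (size_t i = 0; i < out->children.size(); i++) {
          out->children[i] = Walk(out->children[i]);
        }
        return out;
      }
    };

    class CopyVisitor : public Visitor {
     public:
      virtual Node* VisitNode(Node* n) { return new Node(*n); }  // copy
    };

    class CountVisitor : public Visitor {
     public:
      int count = 0;
      virtual Node* VisitNode(Node* n) { count++; return n; }  // in place
    };

    int main() {
      Node leaf = {2, {}};
      Node root = {1, {&leaf}};
      CountVisitor counter;
      counter.Walk(&root);
      CopyVisitor copier;
      Node* copy = copier.Walk(&root);
      printf("%d %s\n", counter.count,
             copy != &root ? "copied" : "same");  // 2 copied
    }
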
+ virtual Handle<JSObject> VisitObject(Handle<JSObject> object) V8_OVERRIDE { + return object; + } + + virtual Handle<JSObject> VisitElementOrProperty( + Handle<JSObject> object, + Handle<JSObject> value) V8_OVERRIDE { + Handle<AllocationSite> current_site = site_context()->EnterNewScope(); + value = StructureWalk(value); + site_context()->ExitScope(current_site, value); + return value; + } +}; + + +Handle<JSObject> JSObjectWalkVisitor::StructureWalk(Handle<JSObject> object) { + bool copying = is_copying(); + Isolate* isolate = object->GetIsolate(); + StackLimitCheck check(isolate); + if (check.HasOverflowed()) { + isolate->StackOverflow(); + return Handle<JSObject>::null(); } - JSObject* copy = JSObject::cast(result); + + if (object->map()->is_deprecated()) { + JSObject::MigrateInstance(object); + } + + Handle<JSObject> copy = VisitObject(object); + ASSERT(copying || copy.is_identical_to(object)); + + HandleScope scope(isolate); // Deep copy local properties. if (copy->HasFastProperties()) { - DescriptorArray* descriptors = copy->map()->instance_descriptors(); + Handle<DescriptorArray> descriptors(copy->map()->instance_descriptors()); int limit = copy->map()->NumberOfOwnDescriptors(); for (int i = 0; i < limit; i++) { PropertyDetails details = descriptors->GetDetails(i); if (details.type() != FIELD) continue; int index = descriptors->GetFieldIndex(i); - Object* value = RawFastPropertyAt(index); + Handle<Object> value(object->RawFastPropertyAt(index), isolate); if (value->IsJSObject()) { - JSObject* js_object = JSObject::cast(value); - MaybeObject* maybe_copy = js_object->DeepCopy(isolate); - if (!maybe_copy->To(&value)) return maybe_copy; + value = VisitElementOrProperty(copy, Handle<JSObject>::cast(value)); + RETURN_IF_EMPTY_HANDLE_VALUE(isolate, value, Handle<JSObject>()); } else { Representation representation = details.representation(); - MaybeObject* maybe_storage = - value->AllocateNewStorageFor(heap, representation); - if (!maybe_storage->To(&value)) return maybe_storage; + value = NewStorageFor(isolate, value, representation); + } + if (copying) { + copy->FastPropertyAtPut(index, *value); } - copy->FastPropertyAtPut(index, value); } } else { - { MaybeObject* maybe_result = - heap->AllocateFixedArray(copy->NumberOfLocalProperties()); - if (!maybe_result->ToObject(&result)) return maybe_result; - } - FixedArray* names = FixedArray::cast(result); - copy->GetLocalPropertyNames(names, 0); + Handle<FixedArray> names = + isolate->factory()->NewFixedArray(copy->NumberOfLocalProperties()); + copy->GetLocalPropertyNames(*names, 0); for (int i = 0; i < names->length(); i++) { ASSERT(names->get(i)->IsString()); - String* key_string = String::cast(names->get(i)); + Handle<String> key_string(String::cast(names->get(i))); PropertyAttributes attributes = - copy->GetLocalPropertyAttribute(key_string); + copy->GetLocalPropertyAttribute(*key_string); // Only deep copy fields from the object literal expression. // In particular, don't try to copy the length attribute of // an array. if (attributes != NONE) continue; - Object* value = - copy->GetProperty(key_string, &attributes)->ToObjectUnchecked(); + Handle<Object> value( + copy->GetProperty(*key_string, &attributes)->ToObjectUnchecked(), + isolate); if (value->IsJSObject()) { - JSObject* js_object = JSObject::cast(value); - { MaybeObject* maybe_result = js_object->DeepCopy(isolate); - if (!maybe_result->ToObject(&result)) return maybe_result; - } - { MaybeObject* maybe_result = - // Creating object copy for literals. No strict mode needed. 
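
StructureWalk keeps the old stack-overflow guard: deeply nested literals abort the walk, and the failure travels outward as an empty handle. A sketch using an explicit depth budget in place of a real stack limit check (the budget and all types here are invented):

    #include <cstdio>
    #include <vector>

    struct Node { std::vector<Node> children; };

    // Returns false when the depth budget is exhausted, mirroring the
    // "empty handle on StackOverflow" convention in the diff.
    bool Walk(const Node& n, int depth_left) {
      if (depth_left == 0) return false;  // HasOverflowed() analogue
      for (const Node& child : n.children) {
        if (!Walk(child, depth_left - 1)) return false;  // propagate
      }
      return true;
    }

    int main() {
      Node chain;
      Node* tip = &chain;
      for (int i = 0; i < 10; i++) {      // build a 10-deep chain
        tip->children.push_back(Node());
        tip = &tip->children[0];
      }
      printf("%s %s\n", Walk(chain, 100) ? "ok" : "overflow",
             Walk(chain, 5) ? "ok" : "overflow");  // ok overflow
    }
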
- copy->SetProperty(key_string, result, NONE, kNonStrictMode); - if (!maybe_result->ToObject(&result)) return maybe_result; + Handle<JSObject> result = VisitElementOrProperty( + copy, Handle<JSObject>::cast(value)); + RETURN_IF_EMPTY_HANDLE_VALUE(isolate, result, Handle<JSObject>()); + if (copying) { + // Creating object copy for literals. No strict mode needed. + CHECK_NOT_EMPTY_HANDLE(isolate, JSObject::SetProperty( + copy, key_string, result, NONE, kNonStrictMode)); } } } @@ -5712,8 +5835,8 @@ MUST_USE_RESULT MaybeObject* JSObject::DeepCopy(Isolate* isolate) { case FAST_ELEMENTS: case FAST_HOLEY_SMI_ELEMENTS: case FAST_HOLEY_ELEMENTS: { - FixedArray* elements = FixedArray::cast(copy->elements()); - if (elements->map() == heap->fixed_cow_array_map()) { + Handle<FixedArray> elements(FixedArray::cast(copy->elements())); + if (elements->map() == isolate->heap()->fixed_cow_array_map()) { isolate->counters()->cow_arrays_created_runtime()->Increment(); #ifdef DEBUG for (int i = 0; i < elements->length(); i++) { @@ -5722,34 +5845,37 @@ MUST_USE_RESULT MaybeObject* JSObject::DeepCopy(Isolate* isolate) { #endif } else { for (int i = 0; i < elements->length(); i++) { - Object* value = elements->get(i); + Handle<Object> value(elements->get(i), isolate); ASSERT(value->IsSmi() || value->IsTheHole() || (IsFastObjectElementsKind(copy->GetElementsKind()))); if (value->IsJSObject()) { - JSObject* js_object = JSObject::cast(value); - { MaybeObject* maybe_result = js_object->DeepCopy(isolate); - if (!maybe_result->ToObject(&result)) return maybe_result; + Handle<JSObject> result = VisitElementOrProperty( + copy, Handle<JSObject>::cast(value)); + RETURN_IF_EMPTY_HANDLE_VALUE(isolate, result, Handle<JSObject>()); + if (copying) { + elements->set(i, *result); } - elements->set(i, result); } } } break; } case DICTIONARY_ELEMENTS: { - SeededNumberDictionary* element_dictionary = copy->element_dictionary(); + Handle<SeededNumberDictionary> element_dictionary( + copy->element_dictionary()); int capacity = element_dictionary->Capacity(); for (int i = 0; i < capacity; i++) { Object* k = element_dictionary->KeyAt(i); if (element_dictionary->IsKey(k)) { - Object* value = element_dictionary->ValueAt(i); + Handle<Object> value(element_dictionary->ValueAt(i), isolate); if (value->IsJSObject()) { - JSObject* js_object = JSObject::cast(value); - { MaybeObject* maybe_result = js_object->DeepCopy(isolate); - if (!maybe_result->ToObject(&result)) return maybe_result; + Handle<JSObject> result = VisitElementOrProperty( + copy, Handle<JSObject>::cast(value)); + RETURN_IF_EMPTY_HANDLE_VALUE(isolate, result, Handle<JSObject>()); + if (copying) { + element_dictionary->ValueAtPut(i, *result); } - element_dictionary->ValueAtPut(i, result); } } } @@ -5776,6 +5902,25 @@ MUST_USE_RESULT MaybeObject* JSObject::DeepCopy(Isolate* isolate) { } +Handle<JSObject> JSObject::DeepWalk(Handle<JSObject> object, + AllocationSiteContext* site_context) { + JSObjectCreateAllocationSitesVisitor v(site_context); + Handle<JSObject> result = v.Visit(object); + ASSERT(!v.is_copying() && + (result.is_null() || result.is_identical_to(object))); + return result; +} + + +Handle<JSObject> JSObject::DeepCopy(Handle<JSObject> object, + AllocationSiteContext* site_context) { + JSObjectCopyVisitor v(site_context); + Handle<JSObject> copy = v.Visit(object); + ASSERT(v.is_copying() && !copy.is_identical_to(object)); + return copy; +} + + // Tests for the fast common case for property enumeration: // - This object and all prototypes has an enum cache (which means 
that // it is not a proxy, has no interceptors and needs no access checks). @@ -6175,7 +6320,7 @@ void JSObject::DefineAccessor(Handle<JSObject> object, // Make sure that the top context does not change when doing callbacks or // interceptor calls. - AssertNoContextChangeWithHandleScope ncc; + AssertNoContextChange ncc(isolate); // Try to flatten before operating on the string. if (name->IsString()) String::cast(*name)->TryFlatten(); @@ -6186,11 +6331,13 @@ void JSObject::DefineAccessor(Handle<JSObject> object, bool is_element = name->AsArrayIndex(&index); Handle<Object> old_value = isolate->factory()->the_hole_value(); - bool is_observed = FLAG_harmony_observation && object->map()->is_observed(); + bool is_observed = FLAG_harmony_observation && + object->map()->is_observed() && + *name != isolate->heap()->hidden_string(); bool preexists = false; if (is_observed) { if (is_element) { - preexists = object->HasLocalElement(index); + preexists = HasLocalElement(object, index); if (preexists && object->GetLocalElementAccessorPair(index) == NULL) { old_value = Object::GetElement(isolate, object, index); } @@ -6361,7 +6508,7 @@ Handle<Object> JSObject::SetAccessor(Handle<JSObject> object, // Make sure that the top context does not change when doing callbacks or // interceptor calls. - AssertNoContextChange ncc; + AssertNoContextChange ncc(isolate); // Try to flatten before operating on the string. if (name->IsString()) FlattenString(Handle<String>::cast(name)); @@ -6420,58 +6567,62 @@ Handle<Object> JSObject::SetAccessor(Handle<JSObject> object, } -MaybeObject* JSObject::LookupAccessor(Name* name, AccessorComponent component) { - Heap* heap = GetHeap(); +Handle<Object> JSObject::GetAccessor(Handle<JSObject> object, + Handle<Name> name, + AccessorComponent component) { + Isolate* isolate = object->GetIsolate(); // Make sure that the top context does not change when doing callbacks or // interceptor calls. - AssertNoContextChangeWithHandleScope ncc; + AssertNoContextChange ncc(isolate); // Check access rights if needed. - if (IsAccessCheckNeeded() && - !heap->isolate()->MayNamedAccess(this, name, v8::ACCESS_HAS)) { - heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_HAS); - RETURN_IF_SCHEDULED_EXCEPTION(heap->isolate()); - return heap->undefined_value(); + if (object->IsAccessCheckNeeded() && + !isolate->MayNamedAccess(*object, *name, v8::ACCESS_HAS)) { + isolate->ReportFailedAccessCheck(*object, v8::ACCESS_HAS); + RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object); + return isolate->factory()->undefined_value(); } // Make the lookup and include prototypes.
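The LookupAccessor to GetAccessor conversion just above is the pattern this patch applies file-wide: raw MaybeObject*-returning instance methods become static helpers that accept and return Handle<>s, so intermediate pointers survive an allocation that may move objects. The lookup itself continues below; first, a minimal stand-alone sketch of why the handle indirection matters, using invented toy types rather than V8's real Handle machinery:

    struct ToyObject { int payload; };

    // Fixed arena of slots; a toy HandleScope would hand these out. The
    // collector rewrites the slot contents when it moves an object, so code
    // holding a slot address (a "handle") never sees a stale pointer.
    static ToyObject* g_slots[1024];
    static int g_next_slot = 0;

    struct ToyHandle {
      explicit ToyHandle(ToyObject* obj) : location_(&g_slots[g_next_slot++]) {
        *location_ = obj;
      }
      ToyObject* operator->() const { return *location_; }
      ToyObject** location_;  // stable address, unlike the object itself
    };

    // A moving GC updates the slot; a raw ToyObject* captured earlier would
    // keep pointing at the old address.
    void ToyMoveObject(ToyObject** slot, ToyObject* new_home) {
      *slot = new_home;
    }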
uint32_t index = 0; if (name->AsArrayIndex(&index)) { - for (Object* obj = this; - obj != heap->null_value(); - obj = JSReceiver::cast(obj)->GetPrototype()) { - if (obj->IsJSObject() && JSObject::cast(obj)->HasDictionaryElements()) { - JSObject* js_object = JSObject::cast(obj); + for (Handle<Object> obj = object; + !obj->IsNull(); + obj = handle(JSReceiver::cast(*obj)->GetPrototype(), isolate)) { + if (obj->IsJSObject() && JSObject::cast(*obj)->HasDictionaryElements()) { + JSObject* js_object = JSObject::cast(*obj); SeededNumberDictionary* dictionary = js_object->element_dictionary(); int entry = dictionary->FindEntry(index); if (entry != SeededNumberDictionary::kNotFound) { Object* element = dictionary->ValueAt(entry); if (dictionary->DetailsAt(entry).type() == CALLBACKS && element->IsAccessorPair()) { - return AccessorPair::cast(element)->GetComponent(component); + return handle(AccessorPair::cast(element)->GetComponent(component), + isolate); } } } } } else { - for (Object* obj = this; - obj != heap->null_value(); - obj = JSReceiver::cast(obj)->GetPrototype()) { - LookupResult result(heap->isolate()); - JSReceiver::cast(obj)->LocalLookup(name, &result); + for (Handle<Object> obj = object; + !obj->IsNull(); + obj = handle(JSReceiver::cast(*obj)->GetPrototype(), isolate)) { + LookupResult result(isolate); + JSReceiver::cast(*obj)->LocalLookup(*name, &result); if (result.IsFound()) { - if (result.IsReadOnly()) return heap->undefined_value(); + if (result.IsReadOnly()) return isolate->factory()->undefined_value(); if (result.IsPropertyCallbacks()) { Object* obj = result.GetCallbackObject(); if (obj->IsAccessorPair()) { - return AccessorPair::cast(obj)->GetComponent(component); + return handle(AccessorPair::cast(obj)->GetComponent(component), + isolate); } } } } } - return heap->undefined_value(); + return isolate->factory()->undefined_value(); } @@ -6504,6 +6655,14 @@ Object* JSObject::SlowReverseLookup(Object* value) { } +Handle<Map> Map::RawCopy(Handle<Map> map, + int instance_size) { + CALL_HEAP_FUNCTION(map->GetIsolate(), + map->RawCopy(instance_size), + Map); +} + + MaybeObject* Map::RawCopy(int instance_size) { Map* result; MaybeObject* maybe_result = @@ -6528,25 +6687,15 @@ MaybeObject* Map::RawCopy(int instance_size) { Handle<Map> Map::CopyNormalized(Handle<Map> map, PropertyNormalizationMode mode, NormalizedMapSharingMode sharing) { - CALL_HEAP_FUNCTION(map->GetIsolate(), - map->CopyNormalized(mode, sharing), - Map); -} - - -MaybeObject* Map::CopyNormalized(PropertyNormalizationMode mode, - NormalizedMapSharingMode sharing) { - int new_instance_size = instance_size(); + int new_instance_size = map->instance_size(); if (mode == CLEAR_INOBJECT_PROPERTIES) { - new_instance_size -= inobject_properties() * kPointerSize; + new_instance_size -= map->inobject_properties() * kPointerSize; } - Map* result; - MaybeObject* maybe_result = RawCopy(new_instance_size); - if (!maybe_result->To(&result)) return maybe_result; + Handle<Map> result = Map::RawCopy(map, new_instance_size); if (mode != CLEAR_INOBJECT_PROPERTIES) { - result->set_inobject_properties(inobject_properties()); + result->set_inobject_properties(map->inobject_properties()); } result->set_is_shared(sharing == SHARED_NORMALIZED_MAP); @@ -6660,6 +6809,16 @@ MaybeObject* Map::ShareDescriptor(DescriptorArray* descriptors, } +Handle<Map> Map::CopyReplaceDescriptors(Handle<Map> map, + Handle<DescriptorArray> descriptors, + TransitionFlag flag, + Handle<Name> name) { + CALL_HEAP_FUNCTION(map->GetIsolate(), + 
map->CopyReplaceDescriptors(*descriptors, flag, *name), + Map); +} + + MaybeObject* Map::CopyReplaceDescriptors(DescriptorArray* descriptors, TransitionFlag flag, Name* name, @@ -6688,20 +6847,19 @@ MaybeObject* Map::CopyReplaceDescriptors(DescriptorArray* descriptors, // Since this method is used to rewrite an existing transition tree, it can // always insert transitions without checking. -MaybeObject* Map::CopyInstallDescriptors(int new_descriptor, - DescriptorArray* descriptors) { +Handle<Map> Map::CopyInstallDescriptors(Handle<Map> map, + int new_descriptor, + Handle<DescriptorArray> descriptors) { ASSERT(descriptors->IsSortedNoDuplicates()); - Map* result; - MaybeObject* maybe_result = CopyDropDescriptors(); - if (!maybe_result->To(&result)) return maybe_result; + Handle<Map> result = Map::CopyDropDescriptors(map); - result->InitializeDescriptors(descriptors); + result->InitializeDescriptors(*descriptors); result->SetNumberOfOwnDescriptors(new_descriptor + 1); - int unused_property_fields = this->unused_property_fields(); + int unused_property_fields = map->unused_property_fields(); if (descriptors->GetDetails(new_descriptor).type() == FIELD) { - unused_property_fields = this->unused_property_fields() - 1; + unused_property_fields = map->unused_property_fields() - 1; if (unused_property_fields < 0) { unused_property_fields += JSObject::kFieldsAdded; } @@ -6710,14 +6868,12 @@ MaybeObject* Map::CopyInstallDescriptors(int new_descriptor, result->set_unused_property_fields(unused_property_fields); result->set_owns_descriptors(false); - Name* name = descriptors->GetKey(new_descriptor); - TransitionArray* transitions; - MaybeObject* maybe_transitions = - AddTransition(name, result, SIMPLE_TRANSITION); - if (!maybe_transitions->To(&transitions)) return maybe_transitions; + Handle<Name> name = handle(descriptors->GetKey(new_descriptor)); + Handle<TransitionArray> transitions = Map::AddTransition(map, name, result, + SIMPLE_TRANSITION); - set_transitions(transitions); - result->SetBackPointer(this); + map->set_transitions(*transitions); + result->SetBackPointer(*map); return result; } @@ -6775,35 +6931,34 @@ MaybeObject* Map::CopyAsElementsKind(ElementsKind kind, TransitionFlag flag) { } -MaybeObject* Map::CopyForObserved() { - ASSERT(!is_observed()); +Handle<Map> Map::CopyForObserved(Handle<Map> map) { + ASSERT(!map->is_observed()); + + Isolate* isolate = map->GetIsolate(); // In case the map owned its own descriptors, share the descriptors and // transfer ownership to the new map. 
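The ownership comment here is the crux of several Copy* helpers in this patch: a map that owns its descriptors lends them to the new child map, and parent and child are then linked by a transition entry plus a back pointer, as the rest of the function below shows. A toy rendition of that linking, with invented names rather than V8's Map and TransitionArray:

    #include <map>
    #include <string>

    struct ToyMap {
      std::map<std::string, ToyMap*> transitions;  // parent -> child edges
      ToyMap* back_pointer = nullptr;              // child -> parent edge
      bool owns_descriptors = true;

      ToyMap* CopyWithTransition(const std::string& name) {
        ToyMap* child = new ToyMap;
        child->back_pointer = this;   // mirrors result->SetBackPointer(*map)
        transitions[name] = child;    // mirrors map->set_transitions(...)
        owns_descriptors = false;     // descriptor ownership moves to child
        return child;
      }
    };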
- Map* new_map; - MaybeObject* maybe_new_map; - if (owns_descriptors()) { - maybe_new_map = CopyDropDescriptors(); + Handle<Map> new_map; + if (map->owns_descriptors()) { + new_map = Map::CopyDropDescriptors(map); } else { - maybe_new_map = Copy(); + new_map = Map::Copy(map); } - if (!maybe_new_map->To(&new_map)) return maybe_new_map; - TransitionArray* transitions; - MaybeObject* maybe_transitions = AddTransition(GetHeap()->observed_symbol(), - new_map, - FULL_TRANSITION); - if (!maybe_transitions->To(&transitions)) return maybe_transitions; - set_transitions(transitions); + Handle<TransitionArray> transitions = + Map::AddTransition(map, isolate->factory()->observed_symbol(), new_map, + FULL_TRANSITION); + + map->set_transitions(*transitions); new_map->set_is_observed(true); - if (owns_descriptors()) { - new_map->InitializeDescriptors(instance_descriptors()); - set_owns_descriptors(false); + if (map->owns_descriptors()) { + new_map->InitializeDescriptors(map->instance_descriptors()); + map->set_owns_descriptors(false); } - new_map->SetBackPointer(this); + new_map->SetBackPointer(*map); return new_map; } @@ -6904,6 +7059,16 @@ MaybeObject* Map::CopyInsertDescriptor(Descriptor* descriptor, } +Handle<DescriptorArray> DescriptorArray::CopyUpToAddAttributes( + Handle<DescriptorArray> desc, + int enumeration_index, + PropertyAttributes attributes) { + CALL_HEAP_FUNCTION(desc->GetIsolate(), + desc->CopyUpToAddAttributes(enumeration_index, attributes), + DescriptorArray); +} + + MaybeObject* DescriptorArray::CopyUpToAddAttributes( int enumeration_index, PropertyAttributes attributes) { if (enumeration_index == 0) return GetHeap()->empty_descriptor_array(); @@ -6992,8 +7157,6 @@ void Map::UpdateCodeCache(Handle<Map> map, MaybeObject* Map::UpdateCodeCache(Name* name, Code* code) { - ASSERT(!is_shared() || code->allowed_in_shared_map_code_cache()); - // Allocate the code cache if not present. 
if (code_cache()->IsFixedArray()) { Object* result; @@ -7320,11 +7483,10 @@ MaybeObject* CodeCache::UpdateNormalTypeCache(Name* name, Code* code) { Object* CodeCache::Lookup(Name* name, Code::Flags flags) { - if (Code::ExtractTypeFromFlags(flags) == Code::NORMAL) { - return LookupNormalTypeCache(name, flags); - } else { - return LookupDefaultCache(name, flags); - } + flags = Code::RemoveTypeFromFlags(flags); + Object* result = LookupDefaultCache(name, flags); + if (result->IsCode()) return result; + return LookupNormalTypeCache(name, flags); } @@ -7338,7 +7500,7 @@ Object* CodeCache::LookupDefaultCache(Name* name, Code::Flags flags) { if (key->IsUndefined()) return key; if (name->Equals(Name::cast(key))) { Code* code = Code::cast(cache->get(i + kCodeCacheEntryCodeOffset)); - if (code->flags() == flags) { + if (Code::RemoveTypeFromFlags(code->flags()) == flags) { return code; } } @@ -7402,9 +7564,7 @@ class CodeCacheHashTableKey : public HashTableKey { : name_(name), flags_(flags), code_(NULL) { } CodeCacheHashTableKey(Name* name, Code* code) - : name_(name), - flags_(code->flags()), - code_(code) { } + : name_(name), flags_(code->flags()), code_(code) { } bool IsMatch(Object* other) { @@ -7676,7 +7836,7 @@ MaybeObject* FixedArray::AddKeysFromJSArray(JSArray* array) { accessor->AddElementsToFixedArray(array, array, this); FixedArray* result; if (!maybe_result->To<FixedArray>(&result)) return maybe_result; -#ifdef DEBUG +#ifdef ENABLE_SLOW_ASSERTS if (FLAG_enable_slow_asserts) { for (int i = 0; i < result->length(); i++) { Object* current = result->get(i); @@ -7694,7 +7854,7 @@ MaybeObject* FixedArray::UnionOfKeys(FixedArray* other) { accessor->AddElementsToFixedArray(NULL, NULL, this, other); FixedArray* result; if (!maybe_result->To(&result)) return maybe_result; -#ifdef DEBUG +#ifdef ENABLE_SLOW_ASSERTS if (FLAG_enable_slow_asserts) { for (int i = 0; i < result->length(); i++) { Object* current = result->get(i); @@ -7706,11 +7866,11 @@ MaybeObject* FixedArray::UnionOfKeys(FixedArray* other) { } -MaybeObject* FixedArray::CopySize(int new_length) { +MaybeObject* FixedArray::CopySize(int new_length, PretenureFlag pretenure) { Heap* heap = GetHeap(); if (new_length == 0) return heap->empty_fixed_array(); Object* obj; - { MaybeObject* maybe_obj = heap->AllocateFixedArray(new_length); + { MaybeObject* maybe_obj = heap->AllocateFixedArray(new_length, pretenure); if (!maybe_obj->ToObject(&obj)) return maybe_obj; } FixedArray* result = FixedArray::cast(obj); @@ -7798,6 +7958,20 @@ void DescriptorArray::CopyFrom(int dst_index, } +Handle<DescriptorArray> DescriptorArray::Merge(Handle<DescriptorArray> desc, + int verbatim, + int valid, + int new_size, + int modify_index, + StoreMode store_mode, + Handle<DescriptorArray> other) { + CALL_HEAP_FUNCTION(desc->GetIsolate(), + desc->Merge(verbatim, valid, new_size, modify_index, + store_mode, *other), + DescriptorArray); +} + + // Generalize the |other| descriptor array by merging it into the (at least // partly) updated |this| descriptor array. // The method merges two descriptor arrays in three parts. Both descriptor arrays @@ -8735,7 +8909,7 @@ bool String::SlowEquals(String* other) { // Fast check: if hash code is computed for both strings // a fast negative check can be performed.
if (HasHashCode() && other->HasHashCode()) { -#ifdef DEBUG +#ifdef ENABLE_SLOW_ASSERTS if (FLAG_enable_slow_asserts) { if (Hash() != other->Hash()) { bool found_difference = false; @@ -8990,7 +9164,7 @@ Handle<String> SeqString::Truncate(Handle<SeqString> string, int new_length) { if (newspace->Contains(start_of_string) && newspace->top() == start_of_string + old_size) { // Last allocated object in new space. Simply lower allocation top. - *(newspace->allocation_top_address()) = start_of_string + new_size; + newspace->set_top(start_of_string + new_size); } else { // Sizes are pointer size aligned, so that we can use filler objects // that are a multiple of pointer size. @@ -9006,17 +9180,22 @@ Handle<String> SeqString::Truncate(Handle<SeqString> string, int new_length) { } -AllocationMemento* AllocationMemento::FindForJSObject(JSObject* object) { +AllocationMemento* AllocationMemento::FindForJSObject(JSObject* object, + bool in_GC) { // Currently, AllocationMemento objects are only allocated immediately // after JSArrays in NewSpace, and detecting whether a JSArray has one // involves carefully checking the object immediately after the JSArray // (if there is one) to see if it's an AllocationMemento. if (FLAG_track_allocation_sites && object->GetHeap()->InNewSpace(object)) { - ASSERT(object->GetHeap()->InToSpace(object)); Address ptr_end = (reinterpret_cast<Address>(object) - kHeapObjectTag) + object->Size(); - if ((ptr_end + AllocationMemento::kSize) <= - object->GetHeap()->NewSpaceTop()) { + Address top; + if (in_GC) { + top = object->GetHeap()->new_space()->FromSpacePageHigh(); + } else { + top = object->GetHeap()->NewSpaceTop(); + } + if ((ptr_end + AllocationMemento::kSize) <= top) { // There is room in newspace for allocation info. Do we have some? Map** possible_allocation_memento_map = reinterpret_cast<Map**>(ptr_end); @@ -9221,6 +9400,7 @@ void Map::ClearNonLiveTransitions(Heap* heap) { if (number_of_own_descriptors > 0) { TrimDescriptorArray(heap, this, descriptors, number_of_own_descriptors); ASSERT(descriptors->number_of_descriptors() == number_of_own_descriptors); + set_owns_descriptors(true); } else { ASSERT(descriptors == GetHeap()->empty_descriptor_array()); } @@ -9277,6 +9457,16 @@ bool Map::EquivalentToForNormalization(Map* other, } +void ConstantPoolArray::ConstantPoolIterateBody(ObjectVisitor* v) { + int first_ptr_offset = OffsetOfElementAt(first_ptr_index()); + int last_ptr_offset = + OffsetOfElementAt(first_ptr_index() + count_of_ptr_entries()); + v->VisitPointers( + HeapObject::RawField(this, first_ptr_offset), + HeapObject::RawField(this, last_ptr_offset)); +} + + void JSFunction::JSFunctionIterateBody(int object_size, ObjectVisitor* v) { // Iterate over all fields in the body but take care in dealing with // the code entry. 
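FindForJSObject above gains an in_GC flag because the notion of "top of new space" differs during a scavenge: the probe must compare against FromSpacePageHigh() rather than NewSpaceTop(). The underlying layout test is simple and worth spelling out; a toy version under assumed types, not the real heap classes:

    #include <cstdint>

    // A memento, when present, is allocated directly behind the object in the
    // same new-space page, so the probe is: does [object end, object end +
    // memento size) still lie below the current allocation top?
    struct ToyMementoProbe {
      uintptr_t object_start;
      uintptr_t object_size;
      uintptr_t space_top;  // NewSpaceTop(), or FromSpacePageHigh() during GC
      static const uintptr_t kMementoSize = 2 * sizeof(void*);

      bool MayHaveMemento() const {
        uintptr_t ptr_end = object_start + object_size;
        return ptr_end + kMementoSize <= space_top;
        // The real code additionally checks that the map word at ptr_end is
        // the allocation-memento map before trusting the match.
      }
    };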
@@ -9722,9 +9912,13 @@ bool JSFunction::PassesFilter(const char* raw_filter) { String* name = shared()->DebugName(); Vector<const char> filter = CStrVector(raw_filter); if (filter.length() == 0) return name->length() == 0; - if (filter[0] != '-' && name->IsUtf8EqualTo(filter)) return true; - if (filter[0] == '-' && - !name->IsUtf8EqualTo(filter.SubVector(1, filter.length()))) { + if (filter[0] == '-') { + if (filter.length() == 1) { + return (name->length() != 0); + } else if (!name->IsUtf8EqualTo(filter.SubVector(1, filter.length()))) { + return true; + } + } else if (name->IsUtf8EqualTo(filter)) { return true; } if (filter[filter.length() - 1] == '*' && @@ -9768,7 +9962,8 @@ bool SharedFunctionInfo::HasSourceCode() { Handle<Object> SharedFunctionInfo::GetSourceCode() { if (!HasSourceCode()) return GetIsolate()->factory()->undefined_value(); Handle<String> source(String::cast(Script::cast(script())->source())); - return SubString(source, start_position(), end_position()); + return GetIsolate()->factory()->NewSubString( + source, start_position(), end_position()); } @@ -10128,7 +10323,7 @@ void ObjectVisitor::VisitEmbeddedPointer(RelocInfo* rinfo) { void ObjectVisitor::VisitExternalReference(RelocInfo* rinfo) { Address* p = rinfo->target_reference_address(); - VisitExternalReferences(p, p + 1); + VisitExternalReference(p); } @@ -10185,6 +10380,10 @@ void Code::CopyFrom(const CodeDesc& desc) { } else if (RelocInfo::IsRuntimeEntry(mode)) { Address p = it.rinfo()->target_runtime_entry(origin); it.rinfo()->set_target_runtime_entry(p, SKIP_WRITE_BARRIER); + } else if (mode == RelocInfo::CODE_AGE_SEQUENCE) { + Handle<Object> p = it.rinfo()->code_age_stub_handle(origin); + Code* code = Code::cast(*p); + it.rinfo()->set_code_age_stub(code); } else { it.rinfo()->apply(delta); } @@ -10317,31 +10516,35 @@ void Code::ReplaceFirstMap(Map* replace_with) { } -Code* Code::FindFirstCode() { +Code* Code::FindFirstHandler() { ASSERT(is_inline_cache_stub()); DisallowHeapAllocation no_allocation; int mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET); for (RelocIterator it(this, mask); !it.done(); it.next()) { RelocInfo* info = it.rinfo(); - return Code::GetCodeFromTargetAddress(info->target_address()); + Code* code = Code::GetCodeFromTargetAddress(info->target_address()); + if (code->kind() == Code::HANDLER) return code; } return NULL; } -void Code::FindAllCode(CodeHandleList* code_list, int length) { +bool Code::FindHandlers(CodeHandleList* code_list, int length) { ASSERT(is_inline_cache_stub()); DisallowHeapAllocation no_allocation; int mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET); int i = 0; for (RelocIterator it(this, mask); !it.done(); it.next()) { - if (i++ == length) return; + if (i == length) return true; RelocInfo* info = it.rinfo(); Code* code = Code::GetCodeFromTargetAddress(info->target_address()); - ASSERT(code->kind() == Code::STUB); + // IC stubs with handlers never contain non-handler code objects before + // handler targets. 
+ if (code->kind() != Code::HANDLER) break; code_list->Add(Handle<Code>(code)); + i++; } - UNREACHABLE(); + return i == length; } @@ -10409,24 +10612,22 @@ void Code::ClearTypeFeedbackCells(Heap* heap) { BailoutId Code::TranslatePcOffsetToAstId(uint32_t pc_offset) { DisallowHeapAllocation no_gc; ASSERT(kind() == FUNCTION); - for (FullCodeGenerator::BackEdgeTableIterator it(this, &no_gc); - !it.Done(); - it.Next()) { - if (it.pc_offset() == pc_offset) return it.ast_id(); + BackEdgeTable back_edges(this, &no_gc); + for (uint32_t i = 0; i < back_edges.length(); i++) { + if (back_edges.pc_offset(i) == pc_offset) return back_edges.ast_id(i); } return BailoutId::None(); } -bool Code::allowed_in_shared_map_code_cache() { - return is_keyed_load_stub() || is_keyed_store_stub() || - (is_compare_ic_stub() && - ICCompareStub::CompareState(stub_info()) == CompareIC::KNOWN_OBJECT); +void Code::MakeCodeAgeSequenceYoung(byte* sequence, Isolate* isolate) { + PatchPlatformCodeAge(isolate, sequence, kNoAgeCodeAge, NO_MARKING_PARITY); } -void Code::MakeCodeAgeSequenceYoung(byte* sequence) { - PatchPlatformCodeAge(sequence, kNoAge, NO_MARKING_PARITY); +void Code::MarkCodeAsExecuted(byte* sequence, Isolate* isolate) { + PatchPlatformCodeAge(isolate, sequence, kExecutedOnceCodeAge, + NO_MARKING_PARITY); } @@ -10437,7 +10638,9 @@ void Code::MakeOlder(MarkingParity current_parity) { MarkingParity code_parity; GetCodeAgeAndParity(sequence, &age, &code_parity); if (age != kLastCodeAge && code_parity != current_parity) { - PatchPlatformCodeAge(sequence, static_cast<Age>(age + 1), + PatchPlatformCodeAge(GetIsolate(), + sequence, + static_cast<Age>(age + 1), current_parity); } } @@ -10445,18 +10648,14 @@ void Code::MakeOlder(MarkingParity current_parity) { bool Code::IsOld() { - byte* sequence = FindCodeAgeSequence(); - if (sequence == NULL) return false; - Age age; - MarkingParity parity; - GetCodeAgeAndParity(sequence, &age, &parity); - return age >= kSexagenarianCodeAge; + Age age = GetAge(); + return age >= kIsOldCodeAge; } byte* Code::FindCodeAgeSequence() { return FLAG_age_code && - prologue_offset() != kPrologueOffsetNotSet && + prologue_offset() != Code::kPrologueOffsetNotSet && (kind() == OPTIMIZED_FUNCTION || (kind() == FUNCTION && !has_debug_break_slots())) ? instruction_start() + prologue_offset() @@ -10464,10 +10663,10 @@ byte* Code::FindCodeAgeSequence() { } -int Code::GetAge() { +Code::Age Code::GetAge() { byte* sequence = FindCodeAgeSequence(); if (sequence == NULL) { - return Code::kNoAge; + return Code::kNoAgeCodeAge; } Age age; MarkingParity parity; @@ -10496,12 +10695,25 @@ void Code::GetCodeAgeAndParity(Code* code, Age* age, } CODE_AGE_LIST(HANDLE_CODE_AGE) #undef HANDLE_CODE_AGE + stub = *builtins->MarkCodeAsExecutedOnce(); + if (code == stub) { + // Treat code that's never been executed as old immediately. + *age = kIsOldCodeAge; + *parity = NO_MARKING_PARITY; + return; + } + stub = *builtins->MarkCodeAsExecutedTwice(); + if (code == stub) { + // Pre-age code that has only been executed once.
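That pre-aging branch concludes just below. Taken together, the *CodeAge changes in this hunk implement a small saturating counter with two special entry points: each mark-compact moves young code one step older (guarded by a parity bit so a sequence is not aged twice in one cycle), while the MarkCodeAsExecutedOnce/Twice builtins let never-run code start out old and once-run code start out pre-aged. The stepping rule, as a toy with invented names:

    enum ToyAge { kToyNoAge, kToyQuadragenarian, kToyQuinquagenarian,
                  kToySexagenarian, kToyLastAge = kToySexagenarian };

    // One GC cycle: advance young code by one step; the parity check keeps a
    // sequence from aging twice within the same cycle.
    ToyAge ToyMakeOlder(ToyAge age, bool seq_parity, bool current_parity) {
      if (age != kToyLastAge && seq_parity != current_parity) {
        return static_cast<ToyAge>(age + 1);
      }
      return age;
    }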
+ *age = kPreAgedCodeAge; + *parity = NO_MARKING_PARITY; + return; + } UNREACHABLE(); } -Code* Code::GetCodeAgeStub(Age age, MarkingParity parity) { - Isolate* isolate = Isolate::Current(); +Code* Code::GetCodeAgeStub(Isolate* isolate, Age age, MarkingParity parity) { Builtins* builtins = isolate->builtins(); switch (age) { #define HANDLE_CODE_AGE(AGE) \ @@ -10513,6 +10725,14 @@ Code* Code::GetCodeAgeStub(Age age, MarkingParity parity) { } CODE_AGE_LIST(HANDLE_CODE_AGE) #undef HANDLE_CODE_AGE + case kNotExecutedCodeAge: { + ASSERT(parity == NO_MARKING_PARITY); + return *builtins->MarkCodeAsExecutedOnce(); + } + case kExecutedOnceCodeAge: { + ASSERT(parity == NO_MARKING_PARITY); + return *builtins->MarkCodeAsExecutedTwice(); + } default: UNREACHABLE(); break; @@ -10772,7 +10992,7 @@ const char* Code::StubType2String(StubType type) { case CONSTANT: return "CONSTANT"; case CALLBACKS: return "CALLBACKS"; case INTERCEPTOR: return "INTERCEPTOR"; - case MAP_TRANSITION: return "MAP_TRANSITION"; + case TRANSITION: return "TRANSITION"; case NONEXISTENT: return "NONEXISTENT"; } UNREACHABLE(); // keep the compiler happy @@ -10879,15 +11099,15 @@ void Code::Disassemble(const char* name, FILE* out) { // (due to alignment) the end of the instruction stream. if (static_cast<int>(offset) < instruction_size()) { DisallowHeapAllocation no_gc; - FullCodeGenerator::BackEdgeTableIterator back_edges(this, &no_gc); + BackEdgeTable back_edges(this, &no_gc); - PrintF(out, "Back edges (size = %u)\n", back_edges.table_length()); + PrintF(out, "Back edges (size = %u)\n", back_edges.length()); PrintF(out, "ast_id pc_offset loop_depth\n"); - for ( ; !back_edges.Done(); back_edges.Next()) { - PrintF(out, "%6d %9u %10u\n", back_edges.ast_id().ToInt(), - back_edges.pc_offset(), - back_edges.loop_depth()); + for (uint32_t i = 0; i < back_edges.length(); i++) { + PrintF(out, "%6d %9u %10u\n", back_edges.ast_id(i).ToInt(), + back_edges.pc_offset(i), + back_edges.loop_depth(i)); } PrintF(out, "\n"); @@ -10958,6 +11178,10 @@ MaybeObject* JSObject::SetFastElementsCapacityAndLength( } ValidateElements(); set_map_and_elements(new_map, new_elements); + + // Transition through the allocation site as well if present. + maybe_obj = UpdateAllocationSite(new_elements_kind); + if (maybe_obj->IsFailure()) return maybe_obj; } else { FixedArray* parameter_map = FixedArray::cast(old_elements); parameter_map->set(1, new_elements); @@ -10975,6 +11199,22 @@ MaybeObject* JSObject::SetFastElementsCapacityAndLength( } +bool Code::IsWeakEmbeddedObject(Kind kind, Object* object) { + if (kind != Code::OPTIMIZED_FUNCTION) return false; + + if (object->IsMap()) { + return Map::cast(object)->CanTransition() && + FLAG_collect_maps && + FLAG_weak_embedded_maps_in_optimized_code; + } + + if (object->IsJSObject()) { + return FLAG_weak_embedded_objects_in_optimized_code; + } + + return false; +} + MaybeObject* JSObject::SetFastDoubleElementsCapacityAndLength( int capacity, int length) { @@ -11335,7 +11575,7 @@ Handle<DependentCode> DependentCode::Insert(Handle<DependentCode> entries, int capacity = kCodesStartIndex + number_of_entries + 1; if (capacity > 5) capacity = capacity * 5 / 4; Handle<DependentCode> new_entries = Handle<DependentCode>::cast( - factory->CopySizeFixedArray(entries, capacity)); + factory->CopySizeFixedArray(entries, capacity, TENURED)); // The number of codes can change after GC. 
starts.Recompute(*entries); start = starts.at(group); @@ -11569,22 +11809,6 @@ MaybeObject* JSObject::EnsureCanContainElements(Arguments* args, } -PropertyType JSObject::GetLocalPropertyType(Name* name) { - uint32_t index = 0; - if (name->AsArrayIndex(&index)) { - return GetLocalElementType(index); - } - LookupResult lookup(GetIsolate()); - LocalLookup(name, &lookup, true); - return lookup.type(); -} - - -PropertyType JSObject::GetLocalElementType(uint32_t index) { - return GetElementsAccessor()->GetType(this, this, index); -} - - AccessorPair* JSObject::GetLocalPropertyAccessorPair(Name* name) { uint32_t index = 0; if (name->AsArrayIndex(&index)) { @@ -11628,7 +11852,7 @@ MaybeObject* JSObject::SetElementWithInterceptor(uint32_t index, // Make sure that the top context does not change when doing // callbacks or interceptor calls. - AssertNoContextChange ncc; + AssertNoContextChange ncc(isolate); Handle<InterceptorInfo> interceptor(GetIndexedInterceptor()); Handle<JSObject> this_handle(this); @@ -11709,18 +11933,17 @@ MaybeObject* JSObject::GetElementWithCallback(Object* receiver, } -MaybeObject* JSObject::SetElementWithCallback(Object* structure, - uint32_t index, - Object* value, - JSObject* holder, - StrictModeFlag strict_mode) { - Isolate* isolate = GetIsolate(); - HandleScope scope(isolate); +Handle<Object> JSObject::SetElementWithCallback(Handle<JSObject> object, + Handle<Object> structure, + uint32_t index, + Handle<Object> value, + Handle<JSObject> holder, + StrictModeFlag strict_mode) { + Isolate* isolate = object->GetIsolate(); // We should never get here to initialize a const with the hole // value since a const declaration would conflict with the setter. ASSERT(!value->IsTheHole()); - Handle<Object> value_handle(value, isolate); // To accommodate both the old and the new api we switch on the // data structure used to store the callbacks. Eventually foreign @@ -11729,41 +11952,40 @@ MaybeObject* JSObject::SetElementWithCallback(Object* structure, if (structure->IsExecutableAccessorInfo()) { // api style callbacks - Handle<JSObject> self(this); - Handle<JSObject> holder_handle(JSObject::cast(holder)); - Handle<ExecutableAccessorInfo> data( - ExecutableAccessorInfo::cast(structure)); + Handle<ExecutableAccessorInfo> data = + Handle<ExecutableAccessorInfo>::cast(structure); Object* call_obj = data->setter(); v8::AccessorSetterCallback call_fun = v8::ToCData<v8::AccessorSetterCallback>(call_obj); if (call_fun == NULL) return value; Handle<Object> number = isolate->factory()->NewNumberFromUint(index); Handle<String> key(isolate->factory()->NumberToString(number)); - LOG(isolate, ApiNamedPropertyAccess("store", *self, *key)); + LOG(isolate, ApiNamedPropertyAccess("store", *object, *key)); PropertyCallbackArguments - args(isolate, data->data(), *self, *holder_handle); + args(isolate, data->data(), *object, *holder); args.Call(call_fun, v8::Utils::ToLocal(key), - v8::Utils::ToLocal(value_handle)); - RETURN_IF_SCHEDULED_EXCEPTION(isolate); - return *value_handle; + v8::Utils::ToLocal(value)); + RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object); + return value; } if (structure->IsAccessorPair()) { - Handle<Object> setter(AccessorPair::cast(structure)->setter(), isolate); + Handle<Object> setter(AccessorPair::cast(*structure)->setter(), isolate); if (setter->IsSpecFunction()) { // TODO(rossberg): nicer would be to cast to some JSCallable here... 
- return SetPropertyWithDefinedSetter(JSReceiver::cast(*setter), value); + return SetPropertyWithDefinedSetter( + object, Handle<JSReceiver>::cast(setter), value); } else { if (strict_mode == kNonStrictMode) { return value; } - Handle<Object> holder_handle(holder, isolate); Handle<Object> key(isolate->factory()->NewNumberFromUint(index)); - Handle<Object> args[2] = { key, holder_handle }; - return isolate->Throw( - *isolate->factory()->NewTypeError("no_setter_in_callback", - HandleVector(args, 2))); + Handle<Object> args[2] = { key, holder }; + Handle<Object> error = isolate->factory()->NewTypeError( + "no_setter_in_callback", HandleVector(args, 2)); + isolate->Throw(*error); + return Handle<Object>(); } } @@ -11771,7 +11993,7 @@ MaybeObject* JSObject::SetElementWithCallback(Object* structure, if (structure->IsDeclaredAccessorInfo()) return value; UNREACHABLE(); - return NULL; + return Handle<Object>(); } @@ -11968,10 +12190,13 @@ MaybeObject* JSObject::SetDictionaryElement(uint32_t index, int entry = dictionary->FindEntry(index); if (entry != SeededNumberDictionary::kNotFound) { - Object* element = dictionary->ValueAt(entry); + Handle<Object> element(dictionary->ValueAt(entry), isolate); PropertyDetails details = dictionary->DetailsAt(entry); if (details.type() == CALLBACKS && set_mode == SET_PROPERTY) { - return SetElementWithCallback(element, index, *value, this, strict_mode); + Handle<Object> result = SetElementWithCallback(self, element, index, + value, self, strict_mode); + RETURN_IF_EMPTY_HANDLE(isolate, result); + return *result; } else { dictionary->UpdateMaxNumberKey(index); // If a value has not been initialized we allow writing to it even if it @@ -11996,13 +12221,13 @@ MaybeObject* JSObject::SetDictionaryElement(uint32_t index, } // Elements of the arguments object in slow mode might be slow aliases. if (is_arguments && element->IsAliasedArgumentsEntry()) { - AliasedArgumentsEntry* entry = AliasedArgumentsEntry::cast(element); + AliasedArgumentsEntry* entry = AliasedArgumentsEntry::cast(*element); Context* context = Context::cast(elements->get(0)); int context_index = entry->aliased_context_slot(); ASSERT(!context->get(context_index)->IsTheHole()); context->set(context_index, *value); // For elements that are still writable we keep slow aliasing. 
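The aliasing handled here (the writability check follows below) means an arguments element may not hold its value directly: the dictionary stores an AliasedArgumentsEntry whose payload is a context-slot index, and the store is forwarded into the function's context. In miniature, with toy types standing in for the real ones:

    #include <vector>

    struct ToyContext { std::vector<int> slots; };

    // Stand-in for AliasedArgumentsEntry: the element just names a slot.
    struct ToyAliasedEntry { int context_slot; };

    // Mirrors context->set(context_index, *value) in the hunk above: the
    // write lands in the context, not in the arguments backing store.
    void ToyStoreAliased(ToyContext* context, const ToyAliasedEntry& entry,
                         int value) {
      context->slots[entry.context_slot] = value;
    }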
- if (!details.IsReadOnly()) value = handle(element, isolate); + if (!details.IsReadOnly()) value = element; } dictionary->ValueAtPut(entry, *value); } @@ -12465,11 +12690,24 @@ MaybeObject* JSObject::SetElementWithoutInterceptor(uint32_t index, } -Handle<Object> JSObject::TransitionElementsKind(Handle<JSObject> object, - ElementsKind to_kind) { - CALL_HEAP_FUNCTION(object->GetIsolate(), - object->TransitionElementsKind(to_kind), - Object); +void JSObject::TransitionElementsKind(Handle<JSObject> object, + ElementsKind to_kind) { + CALL_HEAP_FUNCTION_VOID(object->GetIsolate(), + object->TransitionElementsKind(to_kind)); +} + + +bool AllocationSite::IsNestedSite() { + ASSERT(FLAG_trace_track_allocation_sites); + Object* current = GetHeap()->allocation_sites_list(); + while (current != NULL && current->IsAllocationSite()) { + AllocationSite* current_site = AllocationSite::cast(current); + if (current_site->nested_site() == this) { + return true; + } + current = current_site->weak_next(); + } + return false; } @@ -12485,23 +12723,26 @@ MaybeObject* JSObject::UpdateAllocationSite(ElementsKind to_kind) { // Walk through to the Allocation Site AllocationSite* site = memento->GetAllocationSite(); - if (site->IsLiteralSite()) { + if (site->SitePointsToLiteral() && + site->transition_info()->IsJSArray()) { JSArray* transition_info = JSArray::cast(site->transition_info()); ElementsKind kind = transition_info->GetElementsKind(); // if kind is holey ensure that to_kind is as well. if (IsHoleyElementsKind(kind)) { to_kind = GetHoleyElementsKind(to_kind); } - if (AllocationSite::GetMode(kind, to_kind) == TRACK_ALLOCATION_SITE) { + if (IsMoreGeneralElementsKindTransition(kind, to_kind)) { // If the array is huge, it's not likely to be defined in a local // function, so we shouldn't make new instances of it very often. uint32_t length = 0; CHECK(transition_info->length()->ToArrayIndex(&length)); if (length <= AllocationSite::kMaximumArrayBytesToPretransition) { if (FLAG_trace_track_allocation_sites) { + bool is_nested = site->IsNestedSite(); PrintF( - "AllocationSite: JSArray %p boilerplate updated %s->%s\n", + "AllocationSite: JSArray %p boilerplate %s updated %s->%s\n", reinterpret_cast<void*>(this), + is_nested ? "(nested)" : "", ElementsKindToString(kind), ElementsKindToString(to_kind)); } @@ -12514,7 +12755,7 @@ MaybeObject* JSObject::UpdateAllocationSite(ElementsKind to_kind) { if (IsHoleyElementsKind(kind)) { to_kind = GetHoleyElementsKind(to_kind); } - if (AllocationSite::GetMode(kind, to_kind) == TRACK_ALLOCATION_SITE) { + if (IsMoreGeneralElementsKindTransition(kind, to_kind)) { if (FLAG_trace_track_allocation_sites) { PrintF("AllocationSite: JSArray %p site updated %s->%s\n", reinterpret_cast<void*>(this), @@ -12640,7 +12881,7 @@ MaybeObject* JSObject::GetElementWithInterceptor(Object* receiver, // Make sure that the top context does not change when doing // callbacks or interceptor calls. - AssertNoContextChange ncc; + AssertNoContextChange ncc(isolate); Handle<InterceptorInfo> interceptor(GetIndexedInterceptor(), isolate); Handle<Object> this_handle(receiver, isolate); @@ -12904,21 +13145,26 @@ InterceptorInfo* JSObject::GetIndexedInterceptor() { } -MaybeObject* JSObject::GetPropertyPostInterceptor( - Object* receiver, - Name* name, +Handle<Object> JSObject::GetPropertyPostInterceptor( + Handle<JSObject> object, + Handle<Object> receiver, + Handle<Name> name, PropertyAttributes* attributes) { // Check local property in holder, ignore interceptor. 
- LookupResult result(GetIsolate()); - LocalLookupRealNamedProperty(name, &result); - if (result.IsFound()) { - return GetProperty(receiver, &result, name, attributes); + Isolate* isolate = object->GetIsolate(); + LookupResult lookup(isolate); + object->LocalLookupRealNamedProperty(*name, &lookup); + Handle<Object> result; + if (lookup.IsFound()) { + result = GetProperty(object, receiver, &lookup, name, attributes); + } else { + // Continue searching via the prototype chain. + Handle<Object> prototype(object->GetPrototype(), isolate); + *attributes = ABSENT; + if (prototype->IsNull()) return isolate->factory()->undefined_value(); + result = GetPropertyWithReceiver(prototype, receiver, name, attributes); } - // Continue searching via the prototype chain. - Object* pt = GetPrototype(); - *attributes = ABSENT; - if (pt->IsNull()) return GetHeap()->undefined_value(); - return pt->GetPropertyWithReceiver(receiver, name, attributes); + return result; } @@ -12936,93 +13182,98 @@ MaybeObject* JSObject::GetLocalPropertyPostInterceptor( } -MaybeObject* JSObject::GetPropertyWithInterceptor( - Object* receiver, - Name* name, +Handle<Object> JSObject::GetPropertyWithInterceptor( + Handle<JSObject> object, + Handle<Object> receiver, + Handle<Name> name, PropertyAttributes* attributes) { + Isolate* isolate = object->GetIsolate(); + // TODO(rossberg): Support symbols in the API. - if (name->IsSymbol()) return GetHeap()->undefined_value(); + if (name->IsSymbol()) return isolate->factory()->undefined_value(); - Isolate* isolate = GetIsolate(); - InterceptorInfo* interceptor = GetNamedInterceptor(); - HandleScope scope(isolate); - Handle<Object> receiver_handle(receiver, isolate); - Handle<JSObject> holder_handle(this); - Handle<String> name_handle(String::cast(name)); + Handle<InterceptorInfo> interceptor(object->GetNamedInterceptor(), isolate); + Handle<String> name_string = Handle<String>::cast(name); if (!interceptor->getter()->IsUndefined()) { v8::NamedPropertyGetterCallback getter = v8::ToCData<v8::NamedPropertyGetterCallback>(interceptor->getter()); LOG(isolate, - ApiNamedPropertyAccess("interceptor-named-get", *holder_handle, name)); + ApiNamedPropertyAccess("interceptor-named-get", *object, *name)); PropertyCallbackArguments - args(isolate, interceptor->data(), receiver, this); + args(isolate, interceptor->data(), *receiver, *object); v8::Handle<v8::Value> result = - args.Call(getter, v8::Utils::ToLocal(name_handle)); - RETURN_IF_SCHEDULED_EXCEPTION(isolate); + args.Call(getter, v8::Utils::ToLocal(name_string)); + RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object); if (!result.IsEmpty()) { *attributes = NONE; Handle<Object> result_internal = v8::Utils::OpenHandle(*result); result_internal->VerifyApiCallResultType(); - return *result_internal; + // Rebox handle to escape this scope. + return handle(*result_internal, isolate); } } - MaybeObject* result = holder_handle->GetPropertyPostInterceptor( - *receiver_handle, - *name_handle, - attributes); - RETURN_IF_SCHEDULED_EXCEPTION(isolate); - return result; + return GetPropertyPostInterceptor(object, receiver, name, attributes); } -bool JSObject::HasRealNamedProperty(Isolate* isolate, Name* key) { +bool JSObject::HasRealNamedProperty(Handle<JSObject> object, + Handle<Name> key) { + Isolate* isolate = object->GetIsolate(); + SealHandleScope shs(isolate); // Check access rights if needed. 
- if (IsAccessCheckNeeded()) { - if (!isolate->MayNamedAccess(this, key, v8::ACCESS_HAS)) { - isolate->ReportFailedAccessCheck(this, v8::ACCESS_HAS); + if (object->IsAccessCheckNeeded()) { + if (!isolate->MayNamedAccess(*object, *key, v8::ACCESS_HAS)) { + isolate->ReportFailedAccessCheck(*object, v8::ACCESS_HAS); return false; } } LookupResult result(isolate); - LocalLookupRealNamedProperty(key, &result); + object->LocalLookupRealNamedProperty(*key, &result); return result.IsFound() && !result.IsInterceptor(); } -bool JSObject::HasRealElementProperty(Isolate* isolate, uint32_t index) { +bool JSObject::HasRealElementProperty(Handle<JSObject> object, uint32_t index) { + Isolate* isolate = object->GetIsolate(); + SealHandleScope shs(isolate); // Check access rights if needed. - if (IsAccessCheckNeeded()) { - if (!isolate->MayIndexedAccess(this, index, v8::ACCESS_HAS)) { - isolate->ReportFailedAccessCheck(this, v8::ACCESS_HAS); + if (object->IsAccessCheckNeeded()) { + if (!isolate->MayIndexedAccess(*object, index, v8::ACCESS_HAS)) { + isolate->ReportFailedAccessCheck(*object, v8::ACCESS_HAS); return false; } } - if (IsJSGlobalProxy()) { - Object* proto = GetPrototype(); + if (object->IsJSGlobalProxy()) { + HandleScope scope(isolate); + Handle<Object> proto(object->GetPrototype(), isolate); if (proto->IsNull()) return false; ASSERT(proto->IsJSGlobalObject()); - return JSObject::cast(proto)->HasRealElementProperty(isolate, index); + return HasRealElementProperty(Handle<JSObject>::cast(proto), index); } - return GetElementAttributeWithoutInterceptor(this, index, false) != ABSENT; + return object->GetElementAttributeWithoutInterceptor( + *object, index, false) != ABSENT; } -bool JSObject::HasRealNamedCallbackProperty(Isolate* isolate, Name* key) { +bool JSObject::HasRealNamedCallbackProperty(Handle<JSObject> object, + Handle<Name> key) { + Isolate* isolate = object->GetIsolate(); + SealHandleScope shs(isolate); // Check access rights if needed. - if (IsAccessCheckNeeded()) { - if (!isolate->MayNamedAccess(this, key, v8::ACCESS_HAS)) { - isolate->ReportFailedAccessCheck(this, v8::ACCESS_HAS); + if (object->IsAccessCheckNeeded()) { + if (!isolate->MayNamedAccess(*object, *key, v8::ACCESS_HAS)) { + isolate->ReportFailedAccessCheck(*object, v8::ACCESS_HAS); return false; } } LookupResult result(isolate); - LocalLookupRealNamedProperty(key, &result); + object->LocalLookupRealNamedProperty(*key, &result); return result.IsPropertyCallbacks(); } @@ -13856,7 +14107,9 @@ void HashTable<Shape, Key>::Rehash(Key key) { template<typename Shape, typename Key> -MaybeObject* HashTable<Shape, Key>::EnsureCapacity(int n, Key key) { +MaybeObject* HashTable<Shape, Key>::EnsureCapacity(int n, + Key key, + PretenureFlag pretenure) { int capacity = Capacity(); int nof = NumberOfElements() + n; int nod = NumberOfDeletedElements(); @@ -13869,14 +14122,14 @@ MaybeObject* HashTable<Shape, Key>::EnsureCapacity(int n, Key key) { } const int kMinCapacityForPretenure = 256; - bool pretenure = - (capacity > kMinCapacityForPretenure) && !GetHeap()->InNewSpace(this); + bool should_pretenure = pretenure == TENURED || + ((capacity > kMinCapacityForPretenure) && !GetHeap()->InNewSpace(this)); Object* obj; { MaybeObject* maybe_obj = Allocate(GetHeap(), nof * 2, USE_DEFAULT_MINIMUM_CAPACITY, - pretenure ? TENURED : NOT_TENURED); + should_pretenure ? 
TENURED : NOT_TENURED); if (!maybe_obj->ToObject(&obj)) return maybe_obj; } @@ -13944,6 +14197,8 @@ template class HashTable<ObjectHashTableShape<1>, Object*>; template class HashTable<ObjectHashTableShape<2>, Object*>; +template class HashTable<WeakHashTableShape<2>, Object*>; + template class Dictionary<NameDictionaryShape, Name*>; template class Dictionary<SeededNumberDictionaryShape, uint32_t>; @@ -14044,6 +14299,14 @@ template int HashTable<SeededNumberDictionaryShape, uint32_t>::FindEntry(uint32_t); +Handle<Object> JSObject::PrepareSlowElementsForSort( + Handle<JSObject> object, uint32_t limit) { + CALL_HEAP_FUNCTION(object->GetIsolate(), + object->PrepareSlowElementsForSort(limit), + Object); +} + + // Collates undefined and nonexistent elements below limit from position // zero of the elements. The object stays in Dictionary mode. MaybeObject* JSObject::PrepareSlowElementsForSort(uint32_t limit) { @@ -14146,74 +14409,57 @@ MaybeObject* JSObject::PrepareSlowElementsForSort(uint32_t limit) { // the start of the elements array. // If the object is in dictionary mode, it is converted to fast elements // mode. -MaybeObject* JSObject::PrepareElementsForSort(uint32_t limit) { - Heap* heap = GetHeap(); +Handle<Object> JSObject::PrepareElementsForSort(Handle<JSObject> object, + uint32_t limit) { + Isolate* isolate = object->GetIsolate(); - ASSERT(!map()->is_observed()); - if (HasDictionaryElements()) { + ASSERT(!object->map()->is_observed()); + if (object->HasDictionaryElements()) { // Convert to fast elements containing only the existing properties. // Ordering is irrelevant, since we are going to sort anyway. - SeededNumberDictionary* dict = element_dictionary(); - if (IsJSArray() || dict->requires_slow_elements() || + Handle<SeededNumberDictionary> dict(object->element_dictionary()); + if (object->IsJSArray() || dict->requires_slow_elements() || dict->max_number_key() >= limit) { - return PrepareSlowElementsForSort(limit); + return JSObject::PrepareSlowElementsForSort(object, limit); } // Convert to fast elements. - Object* obj; - MaybeObject* maybe_obj = GetElementsTransitionMap(GetIsolate(), - FAST_HOLEY_ELEMENTS); - if (!maybe_obj->ToObject(&obj)) return maybe_obj; - Map* new_map = Map::cast(obj); + Handle<Map> new_map = + JSObject::GetElementsTransitionMap(object, FAST_HOLEY_ELEMENTS); - PretenureFlag tenure = heap->InNewSpace(this) ? NOT_TENURED: TENURED; - Object* new_array; - { MaybeObject* maybe_new_array = - heap->AllocateFixedArray(dict->NumberOfElements(), tenure); - if (!maybe_new_array->ToObject(&new_array)) return maybe_new_array; - } - FixedArray* fast_elements = FixedArray::cast(new_array); - dict->CopyValuesTo(fast_elements); - ValidateElements(); + PretenureFlag tenure = isolate->heap()->InNewSpace(*object) ? + NOT_TENURED: TENURED; + Handle<FixedArray> fast_elements = + isolate->factory()->NewFixedArray(dict->NumberOfElements(), tenure); + dict->CopyValuesTo(*fast_elements); + object->ValidateElements(); - set_map_and_elements(new_map, fast_elements); - } else if (HasExternalArrayElements()) { + object->set_map_and_elements(*new_map, *fast_elements); + } else if (object->HasExternalArrayElements()) { // External arrays cannot have holes or undefined elements.
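The handlified PrepareElementsForSort, which continues below, preserves the old contract: elements are partitioned into defined values, then undefined, then holes, and the count of defined values is returned as the sortable length. The same invariant in miniature, with integer sentinels standing in for undefined and the_hole:

    #include <algorithm>
    #include <cstddef>
    #include <vector>

    const int kToyUndefined = -1;  // stand-in for undefined
    const int kToyTheHole = -2;    // stand-in for the_hole

    // Afterwards the layout is [defined..., undefined..., holes...]; the
    // return value is the number of defined (sortable) elements.
    size_t ToyPartitionForSort(std::vector<int>* elements) {
      std::vector<int>::iterator defined_end = std::stable_partition(
          elements->begin(), elements->end(),
          [](int v) { return v != kToyUndefined && v != kToyTheHole; });
      std::stable_partition(defined_end, elements->end(),
                            [](int v) { return v == kToyUndefined; });
      return static_cast<size_t>(defined_end - elements->begin());
    }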
- return Smi::FromInt(ExternalArray::cast(elements())->length()); - } else if (!HasFastDoubleElements()) { - Object* obj; - { MaybeObject* maybe_obj = EnsureWritableFastElements(); - if (!maybe_obj->ToObject(&obj)) return maybe_obj; - } + return handle(Smi::FromInt( + ExternalArray::cast(object->elements())->length()), isolate); + } else if (!object->HasFastDoubleElements()) { + JSObject::EnsureWritableFastElements(object); } - ASSERT(HasFastSmiOrObjectElements() || HasFastDoubleElements()); + ASSERT(object->HasFastSmiOrObjectElements() || + object->HasFastDoubleElements()); // Collect holes at the end, undefined before that and the rest at the // start, and return the number of non-hole, non-undefined values. - FixedArrayBase* elements_base = FixedArrayBase::cast(this->elements()); + Handle<FixedArrayBase> elements_base(object->elements()); uint32_t elements_length = static_cast<uint32_t>(elements_base->length()); if (limit > elements_length) { limit = elements_length ; } if (limit == 0) { - return Smi::FromInt(0); - } - - HeapNumber* result_double = NULL; - if (limit > static_cast<uint32_t>(Smi::kMaxValue)) { - // Pessimistically allocate space for return value before - // we start mutating the array. - Object* new_double; - { MaybeObject* maybe_new_double = heap->AllocateHeapNumber(0.0); - if (!maybe_new_double->ToObject(&new_double)) return maybe_new_double; - } - result_double = HeapNumber::cast(new_double); + return handle(Smi::FromInt(0), isolate); } uint32_t result = 0; - if (elements_base->map() == heap->fixed_double_array_map()) { - FixedDoubleArray* elements = FixedDoubleArray::cast(elements_base); + if (elements_base->map() == isolate->heap()->fixed_double_array_map()) { + FixedDoubleArray* elements = FixedDoubleArray::cast(*elements_base); // Split elements into defined and the_hole, in that order. unsigned int holes = limit; // Assume most arrays contain no holes and undefined values, so minimize the @@ -14240,7 +14486,7 @@ MaybeObject* JSObject::PrepareElementsForSort(uint32_t limit) { holes++; } } else { - FixedArray* elements = FixedArray::cast(elements_base); + FixedArray* elements = FixedArray::cast(*elements_base); DisallowHeapAllocation no_gc; // Split elements into defined, undefined and the_hole, in that order. Only @@ -14285,12 +14531,7 @@ MaybeObject* JSObject::PrepareElementsForSort(uint32_t limit) { } } - if (result <= static_cast<uint32_t>(Smi::kMaxValue)) { - return Smi::FromInt(static_cast<int>(result)); - } - ASSERT_NE(NULL, result_double); - result_double->set_value(static_cast<double>(result)); - return result_double; + return isolate->factory()->NewNumberFromUint(result); } @@ -14508,17 +14749,6 @@ PropertyCell* GlobalObject::GetPropertyCell(LookupResult* result) { } -// TODO(mstarzinger): Temporary wrapper until handlified. 
-static Handle<NameDictionary> NameDictionaryAdd(Handle<NameDictionary> dict, - Handle<Name> name, - Handle<Object> value, - PropertyDetails details) { - CALL_HEAP_FUNCTION(dict->GetIsolate(), - dict->Add(*name, *value, details), - NameDictionary); -} - - Handle<PropertyCell> GlobalObject::EnsurePropertyCell( Handle<GlobalObject> global, Handle<Name> name) { @@ -15597,6 +15827,41 @@ void ObjectHashTable::RemoveEntry(int entry) { } +Object* WeakHashTable::Lookup(Object* key) { + ASSERT(IsKey(key)); + int entry = FindEntry(key); + if (entry == kNotFound) return GetHeap()->the_hole_value(); + return get(EntryToValueIndex(entry)); +} + + +MaybeObject* WeakHashTable::Put(Object* key, Object* value) { + ASSERT(IsKey(key)); + int entry = FindEntry(key); + // Key is already in table, just overwrite value. + if (entry != kNotFound) { + set(EntryToValueIndex(entry), value); + return this; + } + + // Check whether the hash table should be extended. + Object* obj; + { MaybeObject* maybe_obj = EnsureCapacity(1, key, TENURED); + if (!maybe_obj->ToObject(&obj)) return maybe_obj; + } + WeakHashTable* table = WeakHashTable::cast(obj); + table->AddEntry(table->FindInsertionEntry(Hash(key)), key, value); + return table; +} + + +void WeakHashTable::AddEntry(int entry, Object* key, Object* value) { + set(EntryToIndex(entry), key); + set(EntryToValueIndex(entry), value); + ElementAdded(); +} + + DeclaredAccessorDescriptorIterator::DeclaredAccessorDescriptorIterator( DeclaredAccessorDescriptor* descriptor) : array_(descriptor->serialized_data()->GetDataStartAddress()), @@ -16072,8 +16337,8 @@ void PropertyCell::set_type(Type* type, WriteBarrierMode ignored) { } -Type* PropertyCell::UpdateType(Handle<PropertyCell> cell, - Handle<Object> value) { +Handle<Type> PropertyCell::UpdatedType(Handle<PropertyCell> cell, + Handle<Object> value) { Isolate* isolate = cell->GetIsolate(); Handle<Type> old_type(cell->type(), isolate); // TODO(2803): Do not track ConsString as constant because they cannot be @@ -16083,34 +16348,27 @@ Type* PropertyCell::UpdateType(Handle<PropertyCell> cell, : Type::Constant(value, isolate), isolate); if (new_type->Is(old_type)) { - return *old_type; + return old_type; } cell->dependent_code()->DeoptimizeDependentCodeGroup( isolate, DependentCode::kPropertyCellChangedGroup); if (old_type->Is(Type::None()) || old_type->Is(Type::Undefined())) { - return *new_type; + return new_type; } - return Type::Any(); + return handle(Type::Any(), isolate); } -MaybeObject* PropertyCell::SetValueInferType(Object* value, - WriteBarrierMode ignored) { - set_value(value, ignored); - if (!Type::Any()->Is(type())) { - IdempotentPointerToHandleCodeTrampoline trampoline(GetIsolate()); - MaybeObject* maybe_type = trampoline.CallWithReturnValue( - &PropertyCell::UpdateType, - Handle<PropertyCell>(this), - Handle<Object>(value, GetIsolate())); - Type* new_type = NULL; - if (!maybe_type->To(&new_type)) return maybe_type; - set_type(new_type); +void PropertyCell::SetValueInferType(Handle<PropertyCell> cell, + Handle<Object> value) { + cell->set_value(*value); + if (!Type::Any()->Is(cell->type())) { + Handle<Type> new_type = UpdatedType(cell, value); + cell->set_type(*new_type); } - return value; } diff --git a/deps/v8/src/objects.h b/deps/v8/src/objects.h index d3593b6edc..e8c9850484 100644 --- a/deps/v8/src/objects.h +++ b/deps/v8/src/objects.h @@ -333,7 +333,7 @@ const int kStubMinorKeyBits = kBitsPerInt - kSmiTagSize - kStubMajorKeyBits; // NOTE: Everything following JS_VALUE_TYPE is considered a // JSObject for GC 
purposes. The first four entries here have typeof // 'object', whereas JS_FUNCTION_TYPE has typeof 'function'. -#define INSTANCE_TYPE_LIST_ALL(V) \ +#define INSTANCE_TYPE_LIST(V) \ V(STRING_TYPE) \ V(ASCII_STRING_TYPE) \ V(CONS_STRING_TYPE) \ @@ -405,6 +405,7 @@ const int kStubMinorKeyBits = kBitsPerInt - kSmiTagSize - kStubMajorKeyBits; \ V(FIXED_ARRAY_TYPE) \ V(FIXED_DOUBLE_ARRAY_TYPE) \ + V(CONSTANT_POOL_ARRAY_TYPE) \ V(SHARED_FUNCTION_INFO_TYPE) \ \ V(JS_MESSAGE_OBJECT_TYPE) \ @@ -431,18 +432,8 @@ const int kStubMinorKeyBits = kBitsPerInt - kSmiTagSize - kStubMajorKeyBits; \ V(JS_FUNCTION_TYPE) \ V(JS_FUNCTION_PROXY_TYPE) \ - -#ifdef ENABLE_DEBUGGER_SUPPORT -#define INSTANCE_TYPE_LIST_DEBUGGER(V) \ V(DEBUG_INFO_TYPE) \ V(BREAK_POINT_INFO_TYPE) -#else -#define INSTANCE_TYPE_LIST_DEBUGGER(V) -#endif - -#define INSTANCE_TYPE_LIST(V) \ - INSTANCE_TYPE_LIST_ALL(V) \ - INSTANCE_TYPE_LIST_DEBUGGER(V) // Since string types are not consecutive, this macro is used to @@ -725,6 +716,7 @@ enum InstanceType { EXTERNAL_DOUBLE_ARRAY_TYPE, EXTERNAL_PIXEL_ARRAY_TYPE, // LAST_EXTERNAL_ARRAY_TYPE FIXED_DOUBLE_ARRAY_TYPE, + CONSTANT_POOL_ARRAY_TYPE, FILLER_TYPE, // LAST_DATA_TYPE // Structs. @@ -873,8 +865,9 @@ enum CompareResult { inline void set_##name(type* value, \ WriteBarrierMode mode = UPDATE_WRITE_BARRIER); \ - class AccessorPair; +class AllocationSite; +class AllocationSiteContext; class DictionaryElementsAccessor; class ElementsAccessor; class Failure; @@ -1010,6 +1003,7 @@ class MaybeObject BASE_EMBEDDED { V(TypeFeedbackCells) \ V(FixedArray) \ V(FixedDoubleArray) \ + V(ConstantPoolArray) \ V(Context) \ V(NativeContext) \ V(ScopeInfo) \ @@ -1054,7 +1048,8 @@ class MaybeObject BASE_EMBEDDED { V(AccessCheckNeeded) \ V(Cell) \ V(PropertyCell) \ - V(ObjectHashTable) + V(ObjectHashTable) \ + V(WeakHashTable) #define ERROR_MESSAGES_LIST(V) \ @@ -1206,6 +1201,7 @@ class MaybeObject BASE_EMBEDDED { V(kModuleStatement, "Module statement") \ V(kModuleVariable, "Module variable") \ V(kModuleUrl, "Module url") \ + V(kNativeFunctionLiteral, "Native function literal") \ V(kNoCasesLeft, "no cases left") \ V(kNoEmptyArraysHereInEmitFastAsciiArrayJoin, \ "No empty arrays here in EmitFastAsciiArrayJoin") \ @@ -1249,7 +1245,6 @@ class MaybeObject BASE_EMBEDDED { V(kRegisterDidNotMatchExpectedRoot, "Register did not match expected root") \ V(kRegisterWasClobbered, "register was clobbered") \ V(kScopedBlock, "ScopedBlock") \ - V(kSharedFunctionInfoLiteral, "Shared function info literal") \ V(kSmiAdditionOverflow, "Smi addition overflow") \ V(kSmiSubtractionOverflow, "Smi subtraction overflow") \ V(kStackFrameTypesMustMatch, "stack frame types must match") \ @@ -1440,8 +1435,7 @@ class Object : public MaybeObject { } inline MaybeObject* AllocateNewStorageFor(Heap* heap, - Representation representation, - PretenureFlag tenure = NOT_TENURED); + Representation representation); // Returns true if the object is of the correct type to be used as a // implementation of a JSObject's elements. @@ -1467,6 +1461,12 @@ class Object : public MaybeObject { MUST_USE_RESULT inline MaybeObject* GetProperty( Name* key, PropertyAttributes* attributes); + + // TODO(yangguo): this should eventually replace the non-handlified version. 
+ static Handle<Object> GetPropertyWithReceiver(Handle<Object> object, + Handle<Object> receiver, + Handle<Name> name, + PropertyAttributes* attributes); MUST_USE_RESULT MaybeObject* GetPropertyWithReceiver( Object* receiver, Name* key, @@ -1950,42 +1950,27 @@ class JSReceiver: public HeapObject { // Casting. static inline JSReceiver* cast(Object* obj); + // Implementation of [[Put]], ECMA-262 5th edition, section 8.12.5. static Handle<Object> SetProperty(Handle<JSReceiver> object, Handle<Name> key, Handle<Object> value, PropertyAttributes attributes, - StrictModeFlag strict_mode); + StrictModeFlag strict_mode, + StoreFromKeyed store_mode = + MAY_BE_STORE_FROM_KEYED); static Handle<Object> SetElement(Handle<JSReceiver> object, uint32_t index, Handle<Object> value, PropertyAttributes attributes, StrictModeFlag strict_mode); - MUST_USE_RESULT static MaybeObject* SetPropertyOrFail( - Handle<JSReceiver> object, - Handle<Name> key, - Handle<Object> value, - PropertyAttributes attributes, - StrictModeFlag strict_mode, - StoreFromKeyed store_from_keyed = MAY_BE_STORE_FROM_KEYED); - - // Can cause GC. - MUST_USE_RESULT MaybeObject* SetProperty( - Name* key, - Object* value, - PropertyAttributes attributes, - StrictModeFlag strict_mode, - StoreFromKeyed store_from_keyed = MAY_BE_STORE_FROM_KEYED); - MUST_USE_RESULT MaybeObject* SetProperty( - LookupResult* result, - Name* key, - Object* value, - PropertyAttributes attributes, - StrictModeFlag strict_mode, - StoreFromKeyed store_from_keyed = MAY_BE_STORE_FROM_KEYED); - MUST_USE_RESULT MaybeObject* SetPropertyWithDefinedSetter(JSReceiver* setter, - Object* value); + // Implementation of [[HasProperty]], ECMA-262 5th edition, section 8.12.6. + static inline bool HasProperty(Handle<JSReceiver> object, Handle<Name> name); + static inline bool HasLocalProperty(Handle<JSReceiver>, Handle<Name> name); + static inline bool HasElement(Handle<JSReceiver> object, uint32_t index); + static inline bool HasLocalElement(Handle<JSReceiver> object, uint32_t index); + // Implementation of [[Delete]], ECMA-262 5th edition, section 8.12.7. static Handle<Object> DeleteProperty(Handle<JSReceiver> object, Handle<Name> name, DeleteMode mode = NORMAL_DELETION); @@ -2011,12 +1996,6 @@ class JSReceiver: public HeapObject { inline PropertyAttributes GetElementAttribute(uint32_t index); inline PropertyAttributes GetLocalElementAttribute(uint32_t index); - // Can cause a GC. - inline bool HasProperty(Name* name); - inline bool HasLocalProperty(Name* name); - inline bool HasElement(uint32_t index); - inline bool HasLocalElement(uint32_t index); - // Return the object's prototype (might be Heap::null_value()). inline Object* GetPrototype(); @@ -2036,12 +2015,24 @@ class JSReceiver: public HeapObject { protected: Smi* GenerateIdentityHash(); + static Handle<Object> SetPropertyWithDefinedSetter(Handle<JSReceiver> object, + Handle<JSReceiver> setter, + Handle<Object> value); + private: PropertyAttributes GetPropertyAttributeForResult(JSReceiver* receiver, LookupResult* result, Name* name, bool continue_search); + static Handle<Object> SetProperty(Handle<JSReceiver> receiver, + LookupResult* result, + Handle<Name> key, + Handle<Object> value, + PropertyAttributes attributes, + StrictModeFlag strict_mode, + StoreFromKeyed store_from_keyed); + DISALLOW_IMPLICIT_CONSTRUCTORS(JSReceiver); }; @@ -2121,50 +2112,49 @@ class JSObject: public JSReceiver { WriteBarrierMode mode = UPDATE_WRITE_BARRIER); // Requires: HasFastElements(). 
+ static Handle<FixedArray> EnsureWritableFastElements( + Handle<JSObject> object); MUST_USE_RESULT inline MaybeObject* EnsureWritableFastElements(); // Collects elements starting at index 0. // Undefined values are placed after non-undefined values. // Returns the number of non-undefined values. - MUST_USE_RESULT MaybeObject* PrepareElementsForSort(uint32_t limit); + static Handle<Object> PrepareElementsForSort(Handle<JSObject> object, + uint32_t limit); // As PrepareElementsForSort, but only on objects where elements is // a dictionary, and it will stay a dictionary. + static Handle<Object> PrepareSlowElementsForSort(Handle<JSObject> object, + uint32_t limit); MUST_USE_RESULT MaybeObject* PrepareSlowElementsForSort(uint32_t limit); - MUST_USE_RESULT MaybeObject* GetPropertyWithCallback(Object* receiver, - Object* structure, - Name* name); + static Handle<Object> GetPropertyWithCallback(Handle<JSObject> object, + Handle<Object> receiver, + Handle<Object> structure, + Handle<Name> name); - // Can cause GC. - MUST_USE_RESULT MaybeObject* SetPropertyForResult(LookupResult* result, - Name* key, - Object* value, - PropertyAttributes attributes, - StrictModeFlag strict_mode, - StoreFromKeyed store_mode); - MUST_USE_RESULT MaybeObject* SetPropertyWithFailedAccessCheck( - LookupResult* result, - Name* name, - Object* value, - bool check_prototype, - StrictModeFlag strict_mode); - MUST_USE_RESULT MaybeObject* SetPropertyWithCallback( - Object* structure, - Name* name, - Object* value, - JSObject* holder, + static Handle<Object> SetPropertyWithCallback( + Handle<JSObject> object, + Handle<Object> structure, + Handle<Name> name, + Handle<Object> value, + Handle<JSObject> holder, StrictModeFlag strict_mode); - MUST_USE_RESULT MaybeObject* SetPropertyWithInterceptor( - Name* name, - Object* value, + + static Handle<Object> SetPropertyWithInterceptor( + Handle<JSObject> object, + Handle<Name> name, + Handle<Object> value, PropertyAttributes attributes, StrictModeFlag strict_mode); - MUST_USE_RESULT MaybeObject* SetPropertyPostInterceptor( - Name* name, - Object* value, + + static Handle<Object> SetPropertyForResult( + Handle<JSObject> object, + LookupResult* result, + Handle<Name> name, + Handle<Object> value, PropertyAttributes attributes, StrictModeFlag strict_mode, - StoreMode mode = ALLOW_AS_CONSTANT); + StoreFromKeyed store_mode = MAY_BE_STORE_FROM_KEYED); static Handle<Object> SetLocalPropertyIgnoreAttributes( Handle<JSObject> object, @@ -2183,18 +2173,17 @@ class JSObject: public JSReceiver { static inline Handle<Map> FindTransitionToField(Handle<Map> map, Handle<Name> key); - inline int LastAddedFieldIndex(); - // Extend the receiver with a single fast property appeared first in the // passed map. This also extends the property backing store if necessary. static void AllocateStorageForMap(Handle<JSObject> object, Handle<Map> map); - inline MUST_USE_RESULT MaybeObject* AllocateStorageForMap(Map* map); + // Migrates the given object to a map whose field representations are the + // lowest upper bound of all known representations for that field. static void MigrateInstance(Handle<JSObject> instance); - inline MUST_USE_RESULT MaybeObject* MigrateInstance(); + // Migrates the given object only if the target map is already available, + // or returns an empty handle if such a map is not yet available. static Handle<Object> TryMigrateInstance(Handle<JSObject> instance); - inline MUST_USE_RESULT MaybeObject* TryMigrateInstance(); // Can cause GC. 
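  // [Editor's aside, not part of the patch] "Lowest upper bound" above refers
  // to the field representation lattice: storing a double into a field
  // tracked as Smi generalizes it to Double, and storing an arbitrary heap
  // object generalizes it to Tagged. Illustrative JS behavior:
  //
  //   var o = {x: 1};   // x tracked as Smi
  //   o.x = 1.5;        // field generalized to Double, old map deprecated
  //   o.x = "s";        // generalized again to Tagged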
MUST_USE_RESULT MaybeObject* SetLocalPropertyIgnoreAttributesTrampoline( @@ -2209,27 +2198,18 @@ class JSObject: public JSReceiver { // Handles the special representation of JS global objects. Object* GetNormalizedProperty(LookupResult* result); - // Sets the property value in a normalized object given (key, value). - // Handles the special representation of JS global objects. - static Handle<Object> SetNormalizedProperty(Handle<JSObject> object, - LookupResult* result, - Handle<Object> value); - // Sets the property value in a normalized object given a lookup result. // Handles the special representation of JS global objects. - MUST_USE_RESULT MaybeObject* SetNormalizedProperty(LookupResult* result, - Object* value); + static void SetNormalizedProperty(Handle<JSObject> object, + LookupResult* result, + Handle<Object> value); // Sets the property value in a normalized object given (key, value, details). // Handles the special representation of JS global objects. - static Handle<Object> SetNormalizedProperty(Handle<JSObject> object, - Handle<Name> key, - Handle<Object> value, - PropertyDetails details); - - MUST_USE_RESULT MaybeObject* SetNormalizedProperty(Name* name, - Object* value, - PropertyDetails details); + static void SetNormalizedProperty(Handle<JSObject> object, + Handle<Name> key, + Handle<Object> value, + PropertyDetails details); static void OptimizeAsPrototype(Handle<JSObject> object); @@ -2253,6 +2233,15 @@ class JSObject: public JSReceiver { uint32_t index, bool continue_search); + // Retrieves an AccessorPair property from the given object. Might return + // undefined if the property doesn't exist or is of a different kind. + static Handle<Object> GetAccessor(Handle<JSObject> object, + Handle<Name> name, + AccessorComponent component); + + // Defines an AccessorPair property on the given object. + // TODO(mstarzinger): Rename to SetAccessor() and return empty handle on + // exception instead of letting callers check for scheduled exception. static void DefineAccessor(Handle<JSObject> object, Handle<Name> name, Handle<Object> getter, @@ -2260,24 +2249,19 @@ class JSObject: public JSReceiver { PropertyAttributes attributes, v8::AccessControl access_control = v8::DEFAULT); - MaybeObject* LookupAccessor(Name* name, AccessorComponent component); - + // Defines an AccessorInfo property on the given object. static Handle<Object> SetAccessor(Handle<JSObject> object, Handle<AccessorInfo> info); - // Used from Object::GetProperty(). - MUST_USE_RESULT MaybeObject* GetPropertyWithFailedAccessCheck( - Object* receiver, - LookupResult* result, - Name* name, - PropertyAttributes* attributes); - MUST_USE_RESULT MaybeObject* GetPropertyWithInterceptor( - Object* receiver, - Name* name, + static Handle<Object> GetPropertyWithInterceptor( + Handle<JSObject> object, + Handle<Object> receiver, + Handle<Name> name, PropertyAttributes* attributes); - MUST_USE_RESULT MaybeObject* GetPropertyPostInterceptor( - Object* receiver, - Name* name, + static Handle<Object> GetPropertyPostInterceptor( + Handle<JSObject> object, + Handle<Object> receiver, + Handle<Name> name, PropertyAttributes* attributes); MUST_USE_RESULT MaybeObject* GetLocalPropertyPostInterceptor( Object* receiver, @@ -2361,9 +2345,6 @@ class JSObject: public JSReceiver { return old_capacity + (old_capacity >> 1) + 16; } - PropertyType GetLocalPropertyType(Name* name); - PropertyType GetLocalElementType(uint32_t index); - // These methods do not perform access checks! 
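  // [Editor's aside, not part of the patch] How the accessor API above pairs
  // up, as a hypothetical caller (ACCESSOR_GETTER/ACCESSOR_SETTER being the
  // AccessorComponent values):
  //
  //   JSObject::DefineAccessor(object, name, getter, setter, NONE);
  //   Handle<Object> g = JSObject::GetAccessor(object, name, ACCESSOR_GETTER);
  //   // g is undefined if no accessor of that kind is installed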
AccessorPair* GetLocalPropertyAccessorPair(Name* name); AccessorPair* GetLocalElementAccessorPair(uint32_t index); @@ -2438,9 +2419,11 @@ class JSObject: public JSReceiver { inline bool HasIndexedInterceptor(); // Support functions for v8 api (needed for correct interceptor behavior). - bool HasRealNamedProperty(Isolate* isolate, Name* key); - bool HasRealElementProperty(Isolate* isolate, uint32_t index); - bool HasRealNamedCallbackProperty(Isolate* isolate, Name* key); + static bool HasRealNamedProperty(Handle<JSObject> object, + Handle<Name> key); + static bool HasRealElementProperty(Handle<JSObject> object, uint32_t index); + static bool HasRealNamedCallbackProperty(Handle<JSObject> object, + Handle<Name> key); // Get the header size for a JSObject. Used to compute the index of // internal fields as well as the number of internal fields. @@ -2456,8 +2439,6 @@ class JSObject: public JSReceiver { void LocalLookupRealNamedProperty(Name* name, LookupResult* result); void LookupRealNamedProperty(Name* name, LookupResult* result); void LookupRealNamedPropertyInPrototypes(Name* name, LookupResult* result); - MUST_USE_RESULT MaybeObject* SetElementWithCallbackSetterInPrototypes( - uint32_t index, Object* value, bool* found, StrictModeFlag strict_mode); void LookupCallbackProperty(Name* name, LookupResult* result); // Returns the number of properties on this object filtering out properties @@ -2483,32 +2464,6 @@ class JSObject: public JSReceiver { // Returns the number of enumerable elements. int GetEnumElementKeys(FixedArray* storage); - // Add a property to a fast-case object using a map transition to - // new_map. - MUST_USE_RESULT MaybeObject* AddFastPropertyUsingMap( - Map* new_map, - Name* name, - Object* value, - int field_index, - Representation representation); - - // Add a constant function property to a fast-case object. - // This leaves a CONSTANT_TRANSITION in the old map, and - // if it is called on a second object with this map, a - // normal property is added instead, with a map transition. - // This avoids the creation of many maps with the same constant - // function, all orphaned. - MUST_USE_RESULT MaybeObject* AddConstantProperty( - Name* name, - Object* constant, - PropertyAttributes attributes, - TransitionFlag flag); - - MUST_USE_RESULT MaybeObject* ReplaceSlowProperty( - Name* name, - Object* value, - PropertyAttributes attributes); - // Returns a new map with all transitions dropped from the object's current // map and the ElementsKind set. static Handle<Map> GetElementsTransitionMap(Handle<JSObject> object, @@ -2519,43 +2474,18 @@ class JSObject: public JSReceiver { MUST_USE_RESULT MaybeObject* GetElementsTransitionMapSlow( ElementsKind elements_kind); - static Handle<Object> TransitionElementsKind(Handle<JSObject> object, - ElementsKind to_kind); + static void TransitionElementsKind(Handle<JSObject> object, + ElementsKind to_kind); MUST_USE_RESULT MaybeObject* TransitionElementsKind(ElementsKind to_kind); MUST_USE_RESULT MaybeObject* UpdateAllocationSite(ElementsKind to_kind); - MUST_USE_RESULT MaybeObject* MigrateToMap(Map* new_map); - MUST_USE_RESULT MaybeObject* GeneralizeFieldRepresentation( - int modify_index, - Representation new_representation, - StoreMode store_mode); - - // Add a property to a fast-case object. 
- MUST_USE_RESULT MaybeObject* AddFastProperty( - Name* name, - Object* value, - PropertyAttributes attributes, - StoreFromKeyed store_mode = MAY_BE_STORE_FROM_KEYED, - ValueType value_type = OPTIMAL_REPRESENTATION, - TransitionFlag flag = INSERT_TRANSITION); - - // Add a property to a slow-case object. - MUST_USE_RESULT MaybeObject* AddSlowProperty(Name* name, - Object* value, - PropertyAttributes attributes); - - // Add a property to an object. May cause GC. - MUST_USE_RESULT MaybeObject* AddProperty( - Name* name, - Object* value, - PropertyAttributes attributes, - StrictModeFlag strict_mode, - StoreFromKeyed store_mode = MAY_BE_STORE_FROM_KEYED, - ExtensibilityCheck extensibility_check = PERFORM_EXTENSIBILITY_CHECK, - ValueType value_type = OPTIMAL_REPRESENTATION, - StoreMode mode = ALLOW_AS_CONSTANT, - TransitionFlag flag = INSERT_TRANSITION); + // TODO(mstarzinger): Both public because of ConvertAndSetLocalProperty(). + static void MigrateToMap(Handle<JSObject> object, Handle<Map> new_map); + static void GeneralizeFieldRepresentation(Handle<JSObject> object, + int modify_index, + Representation new_representation, + StoreMode store_mode); // Convert the object to use the canonical dictionary // representation. If the object is expected to have additional properties @@ -2565,10 +2495,6 @@ class JSObject: public JSReceiver { PropertyNormalizationMode mode, int expected_additional_properties); - MUST_USE_RESULT MaybeObject* NormalizeProperties( - PropertyNormalizationMode mode, - int expected_additional_properties); - // Convert and update the elements backing store to be a // SeededNumberDictionary dictionary. Returns the backing after conversion. static Handle<SeededNumberDictionary> NormalizeElements( @@ -2577,13 +2503,9 @@ class JSObject: public JSReceiver { MUST_USE_RESULT MaybeObject* NormalizeElements(); // Transform slow named properties to fast variants. - // Returns failure if allocation failed. static void TransformToFastProperties(Handle<JSObject> object, int unused_property_fields); - MUST_USE_RESULT MaybeObject* TransformToFastProperties( - int unused_property_fields); - // Access fast-case object properties at index. MUST_USE_RESULT inline MaybeObject* FastPropertyAt( Representation representation, @@ -2616,22 +2538,26 @@ class JSObject: public JSReceiver { // Check whether this object references another object bool ReferencesObject(Object* obj); - // Casting. - static inline JSObject* cast(Object* obj); - // Disallow further properties to be added to the object. static Handle<Object> PreventExtensions(Handle<JSObject> object); - MUST_USE_RESULT MaybeObject* PreventExtensions(); // ES5 Object.freeze - MUST_USE_RESULT MaybeObject* Freeze(Isolate* isolate); - + static Handle<Object> Freeze(Handle<JSObject> object); // Called the first time an object is observed with ES7 Object.observe. - MUST_USE_RESULT MaybeObject* SetObserved(Isolate* isolate); + static void SetObserved(Handle<JSObject> object); - // Copy object - MUST_USE_RESULT MaybeObject* DeepCopy(Isolate* isolate); + // Copy object. + static Handle<JSObject> Copy(Handle<JSObject> object, + Handle<AllocationSite> site); + static Handle<JSObject> Copy(Handle<JSObject> object); + static Handle<JSObject> DeepCopy(Handle<JSObject> object, + AllocationSiteContext* site_context); + static Handle<JSObject> DeepWalk(Handle<JSObject> object, + AllocationSiteContext* site_context); + + // Casting. + static inline JSObject* cast(Object* obj); // Dispatched behavior.
void JSObjectShortPrint(StringStream* accumulator); @@ -2670,6 +2596,14 @@ class JSObject: public JSReceiver { void IncrementSpillStatistics(SpillInformation* info); #endif + +#ifdef VERIFY_HEAP + // If a GC was caused while constructing this object, the elements pointer + // may point to a one pointer filler map. The object won't be rooted, but + // our heap verification code could stumble across it. + bool ElementsAreSafeToExamine(); +#endif + Object* SlowReverseLookup(Object* value); // Maximal number of fast properties for the JSObject. Used to @@ -2733,15 +2667,15 @@ class JSObject: public JSReceiver { private: friend class DictionaryElementsAccessor; friend class JSReceiver; + friend class Object; - // TODO(mstarzinger): Soon to be handlified. - MUST_USE_RESULT MaybeObject* SetLocalPropertyIgnoreAttributes( - Name* key, - Object* value, - PropertyAttributes attributes, - ValueType value_type = OPTIMAL_REPRESENTATION, - StoreMode mode = ALLOW_AS_CONSTANT, - ExtensibilityCheck extensibility_check = PERFORM_EXTENSIBILITY_CHECK); + // Used from Object::GetProperty(). + static Handle<Object> GetPropertyWithFailedAccessCheck( + Handle<JSObject> object, + Handle<Object> receiver, + LookupResult* result, + Handle<Name> name, + PropertyAttributes* attributes); MUST_USE_RESULT MaybeObject* GetElementWithCallback(Object* receiver, Object* structure, @@ -2755,11 +2689,12 @@ class JSObject: public JSReceiver { JSReceiver* receiver, uint32_t index, bool continue_search); - MUST_USE_RESULT MaybeObject* SetElementWithCallback( - Object* structure, + static Handle<Object> SetElementWithCallback( + Handle<JSObject> object, + Handle<Object> structure, uint32_t index, - Object* value, - JSObject* holder, + Handle<Object> value, + Handle<JSObject> holder, StrictModeFlag strict_mode); MUST_USE_RESULT MaybeObject* SetElementWithInterceptor( uint32_t index, @@ -2775,17 +2710,91 @@ class JSObject: public JSReceiver { StrictModeFlag strict_mode, bool check_prototype, SetPropertyMode set_mode); + MUST_USE_RESULT MaybeObject* SetElementWithCallbackSetterInPrototypes( + uint32_t index, + Object* value, + bool* found, + StrictModeFlag strict_mode); // Searches the prototype chain for property 'name'. If it is found and // has a setter, invoke it and set '*done' to true. If it is found and is // read-only, reject and set '*done' to true. Otherwise, set '*done' to - // false. Can cause GC and can return a failure result with '*done==true'. - MUST_USE_RESULT MaybeObject* SetPropertyViaPrototypes( - Name* name, - Object* value, + // false. Can throw and return an empty handle with '*done==true'. + static Handle<Object> SetPropertyViaPrototypes( + Handle<JSObject> object, + Handle<Name> name, + Handle<Object> value, PropertyAttributes attributes, StrictModeFlag strict_mode, bool* done); + static Handle<Object> SetPropertyPostInterceptor( + Handle<JSObject> object, + Handle<Name> name, + Handle<Object> value, + PropertyAttributes attributes, + StrictModeFlag strict_mode); + static Handle<Object> SetPropertyUsingTransition( + Handle<JSObject> object, + LookupResult* lookup, + Handle<Name> name, + Handle<Object> value, + PropertyAttributes attributes); + static Handle<Object> SetPropertyWithFailedAccessCheck( + Handle<JSObject> object, + LookupResult* result, + Handle<Name> name, + Handle<Object> value, + bool check_prototype, + StrictModeFlag strict_mode); + + // Add a property to an object. 
+ static Handle<Object> AddProperty( + Handle<JSObject> object, + Handle<Name> name, + Handle<Object> value, + PropertyAttributes attributes, + StrictModeFlag strict_mode, + StoreFromKeyed store_mode = MAY_BE_STORE_FROM_KEYED, + ExtensibilityCheck extensibility_check = PERFORM_EXTENSIBILITY_CHECK, + ValueType value_type = OPTIMAL_REPRESENTATION, + StoreMode mode = ALLOW_AS_CONSTANT, + TransitionFlag flag = INSERT_TRANSITION); + + // Add a constant function property to a fast-case object. + // This leaves a CONSTANT_TRANSITION in the old map, and + // if it is called on a second object with this map, a + // normal property is added instead, with a map transition. + // This avoids the creation of many maps with the same constant + // function, all orphaned. + static void AddConstantProperty(Handle<JSObject> object, + Handle<Name> name, + Handle<Object> constant, + PropertyAttributes attributes, + TransitionFlag flag); + + // Add a property to a fast-case object. + static void AddFastProperty(Handle<JSObject> object, + Handle<Name> name, + Handle<Object> value, + PropertyAttributes attributes, + StoreFromKeyed store_mode, + ValueType value_type, + TransitionFlag flag); + + // Add a property to a fast-case object using a map transition to + // new_map. + static void AddFastPropertyUsingMap(Handle<JSObject> object, + Handle<Map> new_map, + Handle<Name> name, + Handle<Object> value, + int field_index, + Representation representation); + + // Add a property to a slow-case object. + static void AddSlowProperty(Handle<JSObject> object, + Handle<Name> name, + Handle<Object> value, + PropertyAttributes attributes); static Handle<Object> DeleteProperty(Handle<JSObject> object, Handle<Name> name, @@ -2919,7 +2928,8 @@ class FixedArray: public FixedArrayBase { // Copy operations. MUST_USE_RESULT inline MaybeObject* Copy(); - MUST_USE_RESULT MaybeObject* CopySize(int new_length); + MUST_USE_RESULT MaybeObject* CopySize(int new_length, + PretenureFlag pretenure = NOT_TENURED); // Add the elements of a JSArray to this FixedArray. MUST_USE_RESULT MaybeObject* AddKeysFromJSArray(JSArray* array); @@ -3042,6 +3052,100 @@ class FixedDoubleArray: public FixedArrayBase { }; +// ConstantPoolArray describes a fixed-size array containing constant pool +// entries. +// The format of the pool is: +// [0]: Field holding the first index which is a pointer entry +// [1]: Field holding the first index which is an int32 entry +// [2] ... [first_ptr_index() - 1]: 64 bit entries +// [first_ptr_index()] ... [first_int32_index() - 1]: pointer entries +// [first_int32_index()] ... [length - 1]: 32 bit entries +class ConstantPoolArray: public FixedArrayBase { + public: + // Getters for the field storing the first index for different type entries. + inline int first_ptr_index(); + inline int first_int64_index(); + inline int first_int32_index(); + + // Getters for counts of different type entries. + inline int count_of_ptr_entries(); + inline int count_of_int64_entries(); + inline int count_of_int32_entries(); + + // Setter and getter for pool elements. + inline Object* get_ptr_entry(int index); + inline int64_t get_int64_entry(int index); + inline int32_t get_int32_entry(int index); + inline double get_int64_entry_as_double(int index); + + inline void set(int index, Object* value); + inline void set(int index, int64_t value); + inline void set(int index, double value); + inline void set(int index, int32_t value); + + // Set up initial state.
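  // [Editor's aside, not part of the patch] Worked example of the layout
  // described above, assuming a 64-bit target (kPointerSize == 8): a pool
  // set up with SetEntryCounts(2, 3, 4) below, i.e. 2 int64, 3 pointer and
  // 4 int32 entries, yields
  //   first_ptr_index()   == 2
  //   first_int32_index() == 5
  //   OffsetAt(2, 3, 4)   == kFirstOffset + 2*8 + 3*8 + 4*4
  // and SizeFor(2, 3, 4) rounds that sum up to a multiple of kPointerSize.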
+ inline void SetEntryCounts(int number_of_int64_entries, + int number_of_ptr_entries, + int number_of_int32_entries); + + // Copy operations + MUST_USE_RESULT inline MaybeObject* Copy(); + + // Garbage collection support. + inline static int SizeFor(int number_of_int64_entries, + int number_of_ptr_entries, + int number_of_int32_entries) { + return RoundUp(OffsetAt(number_of_int64_entries, + number_of_ptr_entries, + number_of_int32_entries), + kPointerSize); + } + + // Code Generation support. + inline int OffsetOfElementAt(int index) { + ASSERT(index < length()); + if (index >= first_int32_index()) { + return OffsetAt(count_of_int64_entries(), count_of_ptr_entries(), + index - first_int32_index()); + } else if (index >= first_ptr_index()) { + return OffsetAt(count_of_int64_entries(), index - first_ptr_index(), 0); + } else { + return OffsetAt(index, 0, 0); + } + } + + // Casting. + static inline ConstantPoolArray* cast(Object* obj); + + // Layout description. + static const int kFirstPointerIndexOffset = FixedArray::kHeaderSize; + static const int kFirstInt32IndexOffset = + kFirstPointerIndexOffset + kPointerSize; + static const int kFirstOffset = kFirstInt32IndexOffset + kPointerSize; + + // Dispatched behavior. + void ConstantPoolIterateBody(ObjectVisitor* v); + + DECLARE_PRINTER(ConstantPoolArray) + DECLARE_VERIFIER(ConstantPoolArray) + + private: + inline void set_first_ptr_index(int value); + inline void set_first_int32_index(int value); + + inline static int OffsetAt(int number_of_int64_entries, + int number_of_ptr_entries, + int number_of_int32_entries) { + return kFirstOffset + + (number_of_int64_entries * kInt64Size) + + (number_of_ptr_entries * kPointerSize) + + (number_of_int32_entries * kInt32Size); + } + + DISALLOW_IMPLICIT_CONSTRUCTORS(ConstantPoolArray); +}; + + // DescriptorArrays are fixed arrays used to hold instance descriptors. // The format of these objects is: // [0]: Number of descriptors @@ -3175,6 +3279,13 @@ class DescriptorArray: public FixedArray { DescriptorArray* src, int src_index, const WhitenessWitness&); + static Handle<DescriptorArray> Merge(Handle<DescriptorArray> desc, + int verbatim, + int valid, + int new_size, + int modify_index, + StoreMode store_mode, + Handle<DescriptorArray> other); MUST_USE_RESULT MaybeObject* Merge(int verbatim, int valid, int new_size, @@ -3191,6 +3302,10 @@ return CopyUpToAddAttributes(enumeration_index, NONE); } + static Handle<DescriptorArray> CopyUpToAddAttributes( + Handle<DescriptorArray> desc, + int enumeration_index, + PropertyAttributes attributes); MUST_USE_RESULT MaybeObject* CopyUpToAddAttributes( int enumeration_index, PropertyAttributes attributes); @@ -3543,7 +3658,10 @@ class HashTable: public FixedArray { MUST_USE_RESULT MaybeObject* Shrink(Key key); // Ensure enough space for n additional elements.
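  // [Editor's aside, not part of the patch] The PretenureFlag parameters this
  // patch threads through CopySize above and EnsureCapacity below let callers
  // grow long-lived backing stores directly in old space, e.g. a hypothetical
  // call:
  //
  //   table->EnsureCapacity(n, key, TENURED);  // allocate the grown copy in old space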
- MUST_USE_RESULT MaybeObject* EnsureCapacity(int n, Key key); + MUST_USE_RESULT MaybeObject* EnsureCapacity( + int n, + Key key, + PretenureFlag pretenure = NOT_TENURED); }; @@ -3982,6 +4100,58 @@ class ObjectHashTable: public HashTable<ObjectHashTableShape<2>, Object*> { }; +template <int entrysize> +class WeakHashTableShape : public BaseShape<Object*> { + public: + static inline bool IsMatch(Object* key, Object* other); + static inline uint32_t Hash(Object* key); + static inline uint32_t HashForObject(Object* key, Object* object); + MUST_USE_RESULT static inline MaybeObject* AsObject(Heap* heap, + Object* key); + static const int kPrefixSize = 0; + static const int kEntrySize = entrysize; +}; + + +// WeakHashTable maps keys that are arbitrary objects to object values. +// It is used for the global weak hash table that maps objects +// embedded in optimized code to dependent code lists. +class WeakHashTable: public HashTable<WeakHashTableShape<2>, Object*> { + public: + static inline WeakHashTable* cast(Object* obj) { + ASSERT(obj->IsHashTable()); + return reinterpret_cast<WeakHashTable*>(obj); + } + + // Looks up the value associated with the given key. The hole value is + // returned in case the key is not present. + Object* Lookup(Object* key); + + // Adds (or overwrites) the value associated with the given key. Mapping a + // key to the hole value causes removal of the whole entry. + MUST_USE_RESULT MaybeObject* Put(Object* key, Object* value); + + // This function is called when heap verification is turned on. + void Zap(Object* value) { + int capacity = Capacity(); + for (int i = 0; i < capacity; i++) { + set(EntryToIndex(i), value); + set(EntryToValueIndex(i), value); + } + } + + private: + friend class MarkCompactCollector; + + void AddEntry(int entry, Object* key, Object* value); + + // Returns the index to the value of an entry. + static inline int EntryToValueIndex(int entry) { + return EntryToIndex(entry) + 1; + } +}; + + // JSFunctionResultCache caches results of some JSFunction invocation. // It is a fixed array with fixed structure: // [0]: factory function @@ -4120,9 +4290,9 @@ class ScopeInfo : public FixedArray { // Copies all the context locals into an object used to materialize a scope. - bool CopyContextLocalsToScopeObject(Isolate* isolate, - Handle<Context> context, - Handle<JSObject> scope_object); + static bool CopyContextLocalsToScopeObject(Handle<ScopeInfo> scope_info, + Handle<Context> context, + Handle<JSObject> scope_object); static Handle<ScopeInfo> Create(Scope* scope, Zone* zone); @@ -4233,8 +4403,9 @@ class NormalizedMapCache: public FixedArray { public: static const int kEntries = 64; - MUST_USE_RESULT MaybeObject* Get(JSObject* object, - PropertyNormalizationMode mode); + static Handle<Map> Get(Handle<NormalizedMapCache> cache, + Handle<JSObject> object, + PropertyNormalizationMode mode); void Clear(); @@ -4772,6 +4943,7 @@ class Code: public HeapObject { V(FUNCTION) \ V(OPTIMIZED_FUNCTION) \ V(STUB) \ + V(HANDLER) \ V(BUILTIN) \ V(REGEXP) @@ -4811,19 +4983,16 @@ class Code: public HeapObject { CONSTANT, CALLBACKS, INTERCEPTOR, - MAP_TRANSITION, + TRANSITION, NONEXISTENT }; - enum StubHolder { - OWN_STUB, - PROTOTYPE_STUB - }; - typedef int ExtraICState; static const ExtraICState kNoExtraICState = 0; + static const int kPrologueOffsetNotSet = -1; + #ifdef ENABLE_DISASSEMBLER // Printing static const char* ICState2String(InlineCacheState state); @@ -4886,6 +5055,9 @@ class Code: public HeapObject { // [flags]: Access to specific code flags. 
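  // [Editor's aside, not part of the patch] HANDLER is the code kind this
  // patch introduces for IC property handlers. Handlers take no dynamic
  // argument count, so handler_kind() just below appears to reuse the
  // arguments_count() bits of the flags word to record which IC kind a
  // handler serves; a hypothetical caller:
  //
  //   if (code->is_handler() && code->handler_kind() == Code::LOAD_IC) { ... }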
inline Kind kind(); + inline Kind handler_kind() { + return static_cast<Kind>(arguments_count()); + } inline InlineCacheState ic_state(); // Only valid for IC stubs. inline ExtraICState extra_ic_state(); // Only valid for IC stubs. @@ -4895,7 +5067,8 @@ // TODO(danno): This is a bit of a hack right now since there are still // clients of this API that pass "extra" values in for argc. These clients // should be retrofitted to use ExtendedExtraICState. - return kind == COMPARE_NIL_IC || kind == TO_BOOLEAN_IC; + return kind == COMPARE_NIL_IC || kind == TO_BOOLEAN_IC || + kind == BINARY_OP_IC; } inline StubType type(); // Only valid for monomorphic IC stubs. @@ -4904,6 +5077,7 @@ // Testers for IC stub kinds. inline bool is_inline_cache_stub(); inline bool is_debug_stub(); + inline bool is_handler() { return kind() == HANDLER; } inline bool is_load_stub() { return kind() == LOAD_IC; } inline bool is_keyed_load_stub() { return kind() == KEYED_LOAD_IC; } inline bool is_store_stub() { return kind() == STORE_IC; } @@ -4914,6 +5088,7 @@ inline bool is_compare_ic_stub() { return kind() == COMPARE_IC; } inline bool is_compare_nil_ic_stub() { return kind() == COMPARE_NIL_IC; } inline bool is_to_boolean_ic_stub() { return kind() == TO_BOOLEAN_IC; } + inline bool is_keyed_stub(); // [major_key]: For kind STUB or BINARY_OP_IC, the major key. inline int major_key(); @@ -4997,8 +5172,6 @@ inline bool marked_for_deoptimization(); inline void set_marked_for_deoptimization(bool flag); - bool allowed_in_shared_map_code_cache(); - // Get the safepoint entry for the given pc. SafepointEntry GetSafepointEntry(Address pc); @@ -5011,9 +5184,12 @@ void FindAllMaps(MapHandleList* maps); void ReplaceFirstMap(Map* replace); - // Find the first code in an IC stub. - Code* FindFirstCode(); - void FindAllCode(CodeHandleList* code_list, int length); + // Find the first handler in an IC stub. + Code* FindFirstHandler(); + + // Find |length| handlers and put them into |code_list|. Returns false if not + // enough handlers can be found. + bool FindHandlers(CodeHandleList* code_list, int length = -1); // Find the first name in an IC stub. Name* FindFirstName(); @@ -5024,8 +5200,6 @@ class ExtraICStateKeyedAccessStoreMode: public BitField<KeyedAccessStoreMode, 1, 4> {}; // NOLINT - class ExtraICStateStubHolder: public BitField<StubHolder, 0, 1> {}; - static inline StrictModeFlag GetStrictMode(ExtraICState extra_ic_state) { return ExtraICStateStrictMode::decode(extra_ic_state); } @@ -5042,10 +5216,6 @@ ExtraICStateStrictMode::encode(strict_mode); } - static inline ExtraICState ComputeExtraICState(StubHolder stub_holder) { - return ExtraICStateStubHolder::encode(stub_holder); - } - // Flags operations.
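  // [Editor's aside, not part of the patch] Worked example of the bit packing
  // above: ExtraICStateStrictMode sits at bit 0 and
  // ExtraICStateKeyedAccessStoreMode at bits 1..4, so a single ExtraICState
  // value carries both, e.g. (assuming the era's KeyedAccessStoreMode values):
  //
  //   ExtraICState s = ComputeExtraICState(STANDARD_STORE, kStrictMode);
  //   GetStrictMode(s);  // decodes bit 0 back to kStrictMode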
static inline Flags ComputeFlags( Kind kind, @@ -5142,11 +5312,15 @@ #define DECLARE_CODE_AGE_ENUM(X) k##X##CodeAge, enum Age { - kNoAge = 0, + kNotExecutedCodeAge = -2, + kExecutedOnceCodeAge = -1, + kNoAgeCodeAge = 0, CODE_AGE_LIST(DECLARE_CODE_AGE_ENUM) kAfterLastCodeAge, kLastCodeAge = kAfterLastCodeAge - 1, - kCodeAgeCount = kAfterLastCodeAge - 1 + kCodeAgeCount = kAfterLastCodeAge - 1, + kIsOldCodeAge = kSexagenarianCodeAge, + kPreAgedCodeAge = kIsOldCodeAge - 1 }; #undef DECLARE_CODE_AGE_ENUM @@ -5154,19 +5328,25 @@ // being entered through the prologue. Used to determine when it is // relatively safe to flush this code object and replace it with the lazy // compilation stub. - static void MakeCodeAgeSequenceYoung(byte* sequence); + static void MakeCodeAgeSequenceYoung(byte* sequence, Isolate* isolate); + static void MarkCodeAsExecuted(byte* sequence, Isolate* isolate); void MakeOlder(MarkingParity); static bool IsYoungSequence(byte* sequence); bool IsOld(); - int GetAge(); + Age GetAge(); + static inline Code* GetPreAgedCodeAgeStub(Isolate* isolate) { + return GetCodeAgeStub(isolate, kNotExecutedCodeAge, NO_MARKING_PARITY); + } void PrintDeoptLocation(int bailout_id); bool CanDeoptAt(Address pc); #ifdef VERIFY_HEAP - void VerifyEmbeddedMapsDependency(); + void VerifyEmbeddedObjectsDependency(); #endif + static bool IsWeakEmbeddedObject(Kind kind, Object* object); + // Max loop nesting marker used to postpone OSR. We don't take loop // nesting that is deeper than 5 levels into account. static const int kMaxLoopNestingMarker = 6; @@ -5300,10 +5480,11 @@ MarkingParity* parity); static void GetCodeAgeAndParity(byte* sequence, Age* age, MarkingParity* parity); - static Code* GetCodeAgeStub(Age age, MarkingParity parity); + static Code* GetCodeAgeStub(Isolate* isolate, Age age, MarkingParity parity); // Code aging -- platform-specific - static void PatchPlatformCodeAge(byte* sequence, Age age, + static void PatchPlatformCodeAge(Isolate* isolate, + byte* sequence, Age age, MarkingParity parity); DISALLOW_IMPLICIT_CONSTRUCTORS(Code); @@ -5591,6 +5772,12 @@ class Map: public HeapObject { Map* transitioned_map); inline void SetTransition(int transition_index, Map* target); inline Map* GetTransition(int transition_index); + + static Handle<TransitionArray> AddTransition(Handle<Map> map, + Handle<Name> key, + Handle<Map> target, + SimpleTransitionFlag flag); + MUST_USE_RESULT inline MaybeObject* AddTransition(Name* key, Map* target, SimpleTransitionFlag flag); @@ -5611,16 +5798,16 @@ int target_number_of_fields, int target_inobject, int target_unused); + static Handle<Map> GeneralizeAllFieldRepresentations( + Handle<Map> map, + Representation new_representation); static Handle<Map> GeneralizeRepresentation( Handle<Map> map, int modify_index, Representation new_representation, StoreMode store_mode); - MUST_USE_RESULT MaybeObject* GeneralizeRepresentation( - int modify_index, - Representation representation, - StoreMode store_mode); - MUST_USE_RESULT MaybeObject* CopyGeneralizeAllRepresentations( + static Handle<Map> CopyGeneralizeAllRepresentations( + Handle<Map> map, int modify_index, StoreMode store_mode, PropertyAttributes attributes, @@ -5791,18 +5978,24 @@ // descriptor array of the map. Returns NULL if no updated map is found.
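  // [Editor's aside, not part of the patch] Typical use of the deprecation
  // machinery declared around here, as a hypothetical caller: field
  // generalization marks old maps deprecated, and objects still carrying one
  // are migrated lazily:
  //
  //   if (object->map()->is_deprecated()) JSObject::MigrateInstance(object);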
Map* CurrentMapForDeprecated(); + static Handle<Map> RawCopy(Handle<Map> map, int instance_size); MUST_USE_RESULT MaybeObject* RawCopy(int instance_size); MUST_USE_RESULT MaybeObject* CopyWithPreallocatedFieldDescriptors(); static Handle<Map> CopyDropDescriptors(Handle<Map> map); MUST_USE_RESULT MaybeObject* CopyDropDescriptors(); + static Handle<Map> CopyReplaceDescriptors(Handle<Map> map, + Handle<DescriptorArray> descriptors, + TransitionFlag flag, + Handle<Name> name); MUST_USE_RESULT MaybeObject* CopyReplaceDescriptors( DescriptorArray* descriptors, TransitionFlag flag, Name* name = NULL, SimpleTransitionFlag simple_flag = FULL_TRANSITION); - MUST_USE_RESULT MaybeObject* CopyInstallDescriptors( + static Handle<Map> CopyInstallDescriptors( + Handle<Map> map, int new_descriptor, - DescriptorArray* descriptors); + Handle<DescriptorArray> descriptors); MUST_USE_RESULT MaybeObject* ShareDescriptor(DescriptorArray* descriptors, Descriptor* descriptor); MUST_USE_RESULT MaybeObject* CopyAddDescriptor(Descriptor* descriptor, @@ -5818,13 +6011,12 @@ class Map: public HeapObject { MUST_USE_RESULT MaybeObject* CopyAsElementsKind(ElementsKind kind, TransitionFlag flag); - MUST_USE_RESULT MaybeObject* CopyForObserved(); + + static Handle<Map> CopyForObserved(Handle<Map> map); static Handle<Map> CopyNormalized(Handle<Map> map, PropertyNormalizationMode mode, NormalizedMapSharingMode sharing); - MUST_USE_RESULT MaybeObject* CopyNormalized(PropertyNormalizationMode mode, - NormalizedMapSharingMode sharing); inline void AppendDescriptor(Descriptor* desc, const DescriptorArray::WhitenessWitness&); @@ -6221,9 +6413,6 @@ class Script: public Struct { // // Installation of ids for the selected builtin functions is handled // by the bootstrapper. -// -// NOTE: Order is important: math functions should be at the end of -// the list and MathFloor should be the first math function. #define FUNCTIONS_WITH_ID_LIST(V) \ V(Array.prototype, push, ArrayPush) \ V(Array.prototype, pop, ArrayPop) \ @@ -6258,8 +6447,7 @@ enum BuiltinFunctionId { #undef DECLARE_FUNCTION_ID // Fake id for a special case of Math.pow. Note, it continues the // list of math functions. - kMathPowHalf, - kFirstMathFunctionId = kMathFloor + kMathPowHalf }; @@ -7841,14 +8029,24 @@ class AllocationSite: public Struct { static const uint32_t kMaximumArrayBytesToPretransition = 8 * 1024; DECL_ACCESSORS(transition_info, Object) + // nested_site threads a list of sites that represent nested literals + // walked in a particular order. So [[1, 2], 1, 2] will have one + // nested_site, but [[1, 2], 3, [4]] will have a list of two. + DECL_ACCESSORS(nested_site, Object) + DECL_ACCESSORS(dependent_code, DependentCode) DECL_ACCESSORS(weak_next, Object) - void Initialize() { - SetElementsKind(GetInitialFastElementsKind()); + inline void Initialize(); + + bool HasNestedSites() { + return nested_site()->IsAllocationSite(); } + // This method is expensive, it should only be called for reporting. + bool IsNestedSite(); + ElementsKind GetElementsKind() { - ASSERT(!IsLiteralSite()); + ASSERT(!SitePointsToLiteral()); return static_cast<ElementsKind>(Smi::cast(transition_info())->value()); } @@ -7856,11 +8054,11 @@ class AllocationSite: public Struct { set_transition_info(Smi::FromInt(static_cast<int>(kind))); } - bool IsLiteralSite() { + bool SitePointsToLiteral() { // If transition_info is a smi, then it represents an ElementsKind // for a constructed array. 
Otherwise, it must be a boilerplate - // for an array literal - return transition_info()->IsJSArray(); + // for an object or array literal. + return transition_info()->IsJSArray() || transition_info()->IsJSObject(); } DECLARE_PRINTER(AllocationSite) @@ -7873,11 +8071,13 @@ class AllocationSite: public Struct { static inline bool CanTrack(InstanceType type); static const int kTransitionInfoOffset = HeapObject::kHeaderSize; - static const int kWeakNextOffset = kTransitionInfoOffset + kPointerSize; + static const int kNestedSiteOffset = kTransitionInfoOffset + kPointerSize; + static const int kDependentCodeOffset = kNestedSiteOffset + kPointerSize; + static const int kWeakNextOffset = kDependentCodeOffset + kPointerSize; static const int kSize = kWeakNextOffset + kPointerSize; typedef FixedBodyDescriptor<HeapObject::kHeaderSize, - kTransitionInfoOffset + kPointerSize, + kDependentCodeOffset + kPointerSize, kSize> BodyDescriptor; private: @@ -7902,7 +8102,8 @@ class AllocationMemento: public Struct { DECLARE_VERIFIER(AllocationMemento) // Returns NULL if no AllocationMemento is available for object. - static AllocationMemento* FindForJSObject(JSObject* object); + static AllocationMemento* FindForJSObject(JSObject* object, + bool in_GC = false); static inline AllocationMemento* cast(Object* obj); private: @@ -9018,9 +9219,17 @@ class PropertyCell: public Cell { // of the cell's current type and the value's type. If the change causes // a change of the type of the cell's contents, code dependent on the cell // will be deoptimized. - MUST_USE_RESULT MaybeObject* SetValueInferType( - Object* value, - WriteBarrierMode mode = UPDATE_WRITE_BARRIER); + static void SetValueInferType(Handle<PropertyCell> cell, + Handle<Object> value); + + // Computes the new type of the cell's contents for the given value, but + // without actually modifying the 'type' field. + static Handle<Type> UpdatedType(Handle<PropertyCell> cell, + Handle<Object> value); + + void AddDependentCompilationInfo(CompilationInfo* info); + + void AddDependentCode(Handle<Code> code); // Casting. static inline PropertyCell* cast(Object* obj); @@ -9045,13 +9254,6 @@ class PropertyCell: public Cell { kSize, kSize> BodyDescriptor; - void AddDependentCompilationInfo(CompilationInfo* info); - - void AddDependentCode(Handle<Code> code); - - static Type* UpdateType(Handle<PropertyCell> cell, - Handle<Object> value); - private: DECL_ACCESSORS(type_raw, Object) DISALLOW_IMPLICIT_CONSTRUCTORS(PropertyCell); @@ -9070,9 +9272,6 @@ class JSProxy: public JSReceiver { // Casting. static inline JSProxy* cast(Object* obj); - bool HasPropertyWithHandler(Name* name); - bool HasElementWithHandler(uint32_t index); - MUST_USE_RESULT MaybeObject* GetPropertyWithHandler( Object* receiver, Name* name); @@ -9080,21 +9279,15 @@ class JSProxy: public JSReceiver { Object* receiver, uint32_t index); - MUST_USE_RESULT MaybeObject* SetPropertyWithHandler( - JSReceiver* receiver, - Name* name, - Object* value, - PropertyAttributes attributes, - StrictModeFlag strict_mode); - // If the handler defines an accessor property with a setter, invoke it. // If it defines an accessor property without a setter, or a data property // that is read-only, throw. In all these cases set '*done' to true, // otherwise set it to false. 
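  // [Editor's aside, not part of the patch] Intended flow of the PropertyCell
  // API above, sketched as a hypothetical caller: UpdatedType() computes the
  // union of the cell's current type and the value's type without storing it,
  // and SetValueInferType() performs the store; a widening deoptimizes code
  // registered via AddDependentCompilationInfo()/AddDependentCode():
  //
  //   Handle<Type> widened = PropertyCell::UpdatedType(cell, value);
  //   PropertyCell::SetValueInferType(cell, value);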
- MUST_USE_RESULT MaybeObject* SetPropertyViaPrototypesWithHandler( - JSReceiver* receiver, - Name* name, - Object* value, + static Handle<Object> SetPropertyViaPrototypesWithHandler( + Handle<JSProxy> proxy, + Handle<JSReceiver> receiver, + Handle<Name> name, + Handle<Object> value, PropertyAttributes attributes, StrictModeFlag strict_mode, bool* done); @@ -9142,12 +9335,21 @@ private: friend class JSReceiver; + static Handle<Object> SetPropertyWithHandler(Handle<JSProxy> proxy, + Handle<JSReceiver> receiver, + Handle<Name> name, + Handle<Object> value, + PropertyAttributes attributes, + StrictModeFlag strict_mode); static Handle<Object> SetElementWithHandler(Handle<JSProxy> proxy, Handle<JSReceiver> receiver, uint32_t index, Handle<Object> value, StrictModeFlag strict_mode); + + static bool HasPropertyWithHandler(Handle<JSProxy> proxy, Handle<Name> name); + static bool HasElementWithHandler(Handle<JSProxy> proxy, uint32_t index); + static Handle<Object> DeletePropertyWithHandler(Handle<JSProxy> proxy, Handle<Name> name, DeleteMode mode); @@ -10157,6 +10359,9 @@ class ObjectVisitor BASE_EMBEDDED { // [start, end). Any or all of the values may be modified on return. virtual void VisitPointers(Object** start, Object** end) = 0; + // Handy shorthand for visiting a single pointer. + virtual void VisitPointer(Object** p) { VisitPointers(p, p + 1); } + // To allow lazy clearing of inline caches the visitor has // a rich interface for iterating over Code objects. @@ -10185,22 +10390,14 @@ // about the code's age. virtual void VisitCodeAgeSequence(RelocInfo* rinfo); - // Handy shorthand for visiting a single pointer. - virtual void VisitPointer(Object** p) { VisitPointers(p, p + 1); } - // Visit pointer embedded into a code object. virtual void VisitEmbeddedPointer(RelocInfo* rinfo); - // Visits contiguous arrays of external references (references to the C++ - // heap) in the half-open range [start, end). Any or all of the values - // may be modified on return. - virtual void VisitExternalReferences(Address* start, Address* end) {} - + // Visits an external reference embedded into a code object. virtual void VisitExternalReference(RelocInfo* rinfo); - inline void VisitExternalReference(Address* p) { - VisitExternalReferences(p, p + 1); - } + // Visits an external reference. The value may be modified on return. + virtual void VisitExternalReference(Address* p) {} // Visits a handle that has an embedder-assigned class ID.
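  // [Editor's aside, not part of the patch] Minimal sketch of a concrete
  // visitor: VisitPointers is the only pure virtual member, and the
  // VisitPointer shorthand added above forwards single slots to it.
  //
  //   class CountingVisitor : public ObjectVisitor {
  //    public:
  //     CountingVisitor() : count_(0) {}
  //     virtual void VisitPointers(Object** start, Object** end) {
  //       count_ += static_cast<int>(end - start);  // count visited slots
  //     }
  //     int count_;
  //   };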
virtual void VisitEmbedderReference(Object** p, uint16_t class_id) {} diff --git a/deps/v8/src/optimizing-compiler-thread.cc b/deps/v8/src/optimizing-compiler-thread.cc index 085143d998..e9c0254522 100644 --- a/deps/v8/src/optimizing-compiler-thread.cc +++ b/deps/v8/src/optimizing-compiler-thread.cc @@ -29,6 +29,7 @@ #include "v8.h" +#include "full-codegen.h" #include "hydrogen.h" #include "isolate.h" #include "v8threads.h" @@ -36,6 +37,19 @@ namespace v8 { namespace internal { +OptimizingCompilerThread::~OptimizingCompilerThread() { + ASSERT_EQ(0, input_queue_length_); + DeleteArray(input_queue_); + if (FLAG_concurrent_osr) { +#ifdef DEBUG + for (int i = 0; i < osr_buffer_capacity_; i++) { + CHECK_EQ(NULL, osr_buffer_[i]); + } +#endif + DeleteArray(osr_buffer_); + } +} + void OptimizingCompilerThread::Run() { #ifdef DEBUG @@ -74,7 +88,6 @@ void OptimizingCompilerThread::Run() { { AllowHandleDereference allow_handle_dereference; FlushInputQueue(true); } - Release_Store(&queue_length_, static_cast<AtomicWord>(0)); Release_Store(&stop_thread_, static_cast<AtomicWord>(CONTINUE)); stop_semaphore_.Signal(); // Return to start of consumer loop. @@ -93,99 +106,125 @@ void OptimizingCompilerThread::Run() { } +RecompileJob* OptimizingCompilerThread::NextInput() { + LockGuard<Mutex> access_input_queue_(&input_queue_mutex_); + if (input_queue_length_ == 0) return NULL; + RecompileJob* job = input_queue_[InputQueueIndex(0)]; + ASSERT_NE(NULL, job); + input_queue_shift_ = InputQueueIndex(1); + input_queue_length_--; + return job; +} + + void OptimizingCompilerThread::CompileNext() { - OptimizingCompiler* optimizing_compiler = NULL; - bool result = input_queue_.Dequeue(&optimizing_compiler); - USE(result); - ASSERT(result); - Barrier_AtomicIncrement(&queue_length_, static_cast<Atomic32>(-1)); + RecompileJob* job = NextInput(); + ASSERT_NE(NULL, job); // The function may have already been optimized by OSR. Simply continue. - OptimizingCompiler::Status status = optimizing_compiler->OptimizeGraph(); + RecompileJob::Status status = job->OptimizeGraph(); USE(status); // Prevent an unused-variable error in release mode. - ASSERT(status != OptimizingCompiler::FAILED); + ASSERT(status != RecompileJob::FAILED); // The function may have already been optimized by OSR. Simply continue. // Use a mutex to make sure that functions marked for install // are always also queued. - if (!optimizing_compiler->info()->osr_ast_id().IsNone()) { - ASSERT(FLAG_concurrent_osr); - LockGuard<Mutex> access_osr_lists(&osr_list_mutex_); - osr_candidates_.RemoveElement(optimizing_compiler); - ready_for_osr_.Add(optimizing_compiler); - } else { - output_queue_.Enqueue(optimizing_compiler); - isolate_->stack_guard()->RequestInstallCode(); + output_queue_.Enqueue(job); + isolate_->stack_guard()->RequestInstallCode(); +} + + +static void DisposeRecompileJob(RecompileJob* job, + bool restore_function_code) { + // The recompile job is allocated in the CompilationInfo's zone. + CompilationInfo* info = job->info(); + if (restore_function_code) { + if (info->is_osr()) { + if (!job->IsWaitingForInstall()) BackEdgeTable::RemoveStackCheck(info); + } else { + Handle<JSFunction> function = info->closure(); + function->ReplaceCode(function->shared()->code()); + } } + delete info; } void OptimizingCompilerThread::FlushInputQueue(bool restore_function_code) { - OptimizingCompiler* optimizing_compiler; - // The optimizing compiler is allocated in the CompilationInfo's zone. 
- while (input_queue_.Dequeue(&optimizing_compiler)) { + RecompileJob* job; + while ((job = NextInput())) { // This should not block, since we have one signal on the input queue // semaphore corresponding to each element in the input queue. input_queue_semaphore_.Wait(); - CompilationInfo* info = optimizing_compiler->info(); - if (restore_function_code) { - Handle<JSFunction> function = info->closure(); - function->ReplaceCode(function->shared()->code()); + // OSR jobs are dealt with separately. + if (!job->info()->is_osr()) { + DisposeRecompileJob(job, restore_function_code); } - delete info; } } void OptimizingCompilerThread::FlushOutputQueue(bool restore_function_code) { - OptimizingCompiler* optimizing_compiler; - // The optimizing compiler is allocated in the CompilationInfo's zone. - while (output_queue_.Dequeue(&optimizing_compiler)) { - CompilationInfo* info = optimizing_compiler->info(); - if (restore_function_code) { - Handle<JSFunction> function = info->closure(); - function->ReplaceCode(function->shared()->code()); + RecompileJob* job; + while (output_queue_.Dequeue(&job)) { + // OSR jobs are dealt with separately. + if (!job->info()->is_osr()) { + DisposeRecompileJob(job, restore_function_code); } - delete info; } +} + - osr_candidates_.Clear(); - RemoveStaleOSRCandidates(0); +void OptimizingCompilerThread::FlushOsrBuffer(bool restore_function_code) { + for (int i = 0; i < osr_buffer_capacity_; i++) { + if (osr_buffer_[i] != NULL) { + DisposeRecompileJob(osr_buffer_[i], restore_function_code); + osr_buffer_[i] = NULL; + } + } } void OptimizingCompilerThread::Flush() { ASSERT(!IsOptimizerThread()); Release_Store(&stop_thread_, static_cast<AtomicWord>(FLUSH)); + if (FLAG_block_concurrent_recompilation) Unblock(); input_queue_semaphore_.Signal(); stop_semaphore_.Wait(); FlushOutputQueue(true); + if (FLAG_concurrent_osr) FlushOsrBuffer(true); + if (FLAG_trace_concurrent_recompilation) { + PrintF(" ** Flushed concurrent recompilation queues.\n"); + } } void OptimizingCompilerThread::Stop() { ASSERT(!IsOptimizerThread()); Release_Store(&stop_thread_, static_cast<AtomicWord>(STOP)); + if (FLAG_block_concurrent_recompilation) Unblock(); input_queue_semaphore_.Signal(); stop_semaphore_.Wait(); if (FLAG_concurrent_recompilation_delay != 0) { - // Barrier when loading queue length is not necessary since the write - // happens in CompileNext on the same thread. - // This is used only for testing. - while (NoBarrier_Load(&queue_length_) > 0) CompileNext(); + // At this point the optimizing compiler thread's event loop has stopped. + // There is no need for a mutex when reading input_queue_length_. 
+ while (input_queue_length_ > 0) CompileNext(); InstallOptimizedFunctions(); } else { FlushInputQueue(false); FlushOutputQueue(false); } + if (FLAG_concurrent_osr) FlushOsrBuffer(false); + if (FLAG_trace_concurrent_recompilation) { double percentage = time_spent_compiling_.PercentOf(time_spent_total_); PrintF(" ** Compiler thread did %.2f%% useful work\n", percentage); } - if (FLAG_trace_osr && FLAG_concurrent_osr) { + if ((FLAG_trace_osr || FLAG_trace_concurrent_recompilation) && + FLAG_concurrent_osr) { PrintF("[COSR hit rate %d / %d]\n", osr_hits_, osr_attempts_); } @@ -196,60 +235,96 @@ void OptimizingCompilerThread::Stop() { void OptimizingCompilerThread::InstallOptimizedFunctions() { ASSERT(!IsOptimizerThread()); HandleScope handle_scope(isolate_); - OptimizingCompiler* compiler; - while (true) { - if (!output_queue_.Dequeue(&compiler)) return; - Compiler::InstallOptimizedCode(compiler); - } - // Remove the oldest OSR candidates that are ready so that we - // only have a limited number of them waiting. - if (FLAG_concurrent_osr) RemoveStaleOSRCandidates(); + RecompileJob* job; + while (output_queue_.Dequeue(&job)) { + CompilationInfo* info = job->info(); + if (info->is_osr()) { + if (FLAG_trace_osr) { + PrintF("[COSR - "); + info->closure()->PrintName(); + PrintF(" is ready for install and entry at AST id %d]\n", + info->osr_ast_id().ToInt()); + } + job->WaitForInstall(); + BackEdgeTable::RemoveStackCheck(info); + } else { + Compiler::InstallOptimizedCode(job); + } + } } -void OptimizingCompilerThread::QueueForOptimization( - OptimizingCompiler* optimizing_compiler) { +void OptimizingCompilerThread::QueueForOptimization(RecompileJob* job) { ASSERT(IsQueueAvailable()); ASSERT(!IsOptimizerThread()); - Barrier_AtomicIncrement(&queue_length_, static_cast<Atomic32>(1)); - if (optimizing_compiler->info()->osr_ast_id().IsNone()) { - optimizing_compiler->info()->closure()->MarkInRecompileQueue(); - } else { - LockGuard<Mutex> access_osr_lists(&osr_list_mutex_); - osr_candidates_.Add(optimizing_compiler); + CompilationInfo* info = job->info(); + if (info->is_osr()) { + if (FLAG_trace_concurrent_recompilation) { + PrintF(" ** Queueing "); + info->closure()->PrintName(); + PrintF(" for concurrent on-stack replacement.\n"); + } osr_attempts_++; + BackEdgeTable::AddStackCheck(info); + AddToOsrBuffer(job); + // Add job to the front of the input queue. + LockGuard<Mutex> access_input_queue(&input_queue_mutex_); + ASSERT_LT(input_queue_length_, input_queue_capacity_); + // Move shift_ back by one. + input_queue_shift_ = InputQueueIndex(input_queue_capacity_ - 1); + input_queue_[InputQueueIndex(0)] = job; + input_queue_length_++; + } else { + info->closure()->MarkInRecompileQueue(); + // Add job to the back of the input queue.
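  // [Editor's aside, not part of the patch] Worked example of the circular
  // indexing used here, with capacity 4 and shift 0: logical slots map to
  // physical slots via InputQueueIndex(i) == (i + shift) % capacity. With
  // jobs [a, b] queued, the OSR branch above sets input_queue_shift_ to
  // InputQueueIndex(3) == 3 and stores at the new InputQueueIndex(0) == 3,
  // yielding logical order [osr, a, b]; the branch below appends a regular
  // job at InputQueueIndex(length) instead.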
+ LockGuard<Mutex> access_input_queue(&input_queue_mutex_); + ASSERT_LT(input_queue_length_, input_queue_capacity_); + input_queue_[InputQueueIndex(input_queue_length_)] = job; + input_queue_length_++; + } + if (FLAG_block_concurrent_recompilation) { + blocked_jobs_++; + } else { + input_queue_semaphore_.Signal(); + } +} + + +void OptimizingCompilerThread::Unblock() { + ASSERT(!IsOptimizerThread()); + while (blocked_jobs_ > 0) { + input_queue_semaphore_.Signal(); + blocked_jobs_--; } - input_queue_.Enqueue(optimizing_compiler); - input_queue_semaphore_.Signal(); } -OptimizingCompiler* OptimizingCompilerThread::FindReadyOSRCandidate( +RecompileJob* OptimizingCompilerThread::FindReadyOSRCandidate( Handle<JSFunction> function, uint32_t osr_pc_offset) { ASSERT(!IsOptimizerThread()); - OptimizingCompiler* result = NULL; - { LockGuard<Mutex> access_osr_lists(&osr_list_mutex_); - for (int i = 0; i < ready_for_osr_.length(); i++) { - if (ready_for_osr_[i]->info()->HasSameOsrEntry(function, osr_pc_offset)) { - osr_hits_++; - result = ready_for_osr_.Remove(i); - break; - } + for (int i = 0; i < osr_buffer_capacity_; i++) { + RecompileJob* current = osr_buffer_[i]; + if (current != NULL && + current->IsWaitingForInstall() && + current->info()->HasSameOsrEntry(function, osr_pc_offset)) { + osr_hits_++; + osr_buffer_[i] = NULL; + return current; } } - RemoveStaleOSRCandidates(); - return result; + return NULL; } bool OptimizingCompilerThread::IsQueuedForOSR(Handle<JSFunction> function, uint32_t osr_pc_offset) { ASSERT(!IsOptimizerThread()); - LockGuard<Mutex> access_osr_lists(&osr_list_mutex_); - for (int i = 0; i < osr_candidates_.length(); i++) { - if (osr_candidates_[i]->info()->HasSameOsrEntry(function, osr_pc_offset)) { - return true; + for (int i = 0; i < osr_buffer_capacity_; i++) { + RecompileJob* current = osr_buffer_[i]; + if (current != NULL && + current->info()->HasSameOsrEntry(function, osr_pc_offset)) { + return !current->IsWaitingForInstall(); } } return false; @@ -258,30 +333,39 @@ bool OptimizingCompilerThread::IsQueuedForOSR(Handle<JSFunction> function, bool OptimizingCompilerThread::IsQueuedForOSR(JSFunction* function) { ASSERT(!IsOptimizerThread()); - LockGuard<Mutex> access_osr_lists(&osr_list_mutex_); - for (int i = 0; i < osr_candidates_.length(); i++) { - if (*osr_candidates_[i]->info()->closure() == function) { - return true; + for (int i = 0; i < osr_buffer_capacity_; i++) { + RecompileJob* current = osr_buffer_[i]; + if (current != NULL && *current->info()->closure() == function) { + return !current->IsWaitingForInstall(); } } return false; } -void OptimizingCompilerThread::RemoveStaleOSRCandidates(int limit) { +void OptimizingCompilerThread::AddToOsrBuffer(RecompileJob* job) { ASSERT(!IsOptimizerThread()); - LockGuard<Mutex> access_osr_lists(&osr_list_mutex_); - while (ready_for_osr_.length() > limit) { - OptimizingCompiler* compiler = ready_for_osr_.Remove(0); - CompilationInfo* throw_away = compiler->info(); + // Find the next slot that is empty or has a stale job. + while (true) { + RecompileJob* stale = osr_buffer_[osr_buffer_cursor_]; + if (stale == NULL || stale->IsWaitingForInstall()) break; + osr_buffer_cursor_ = (osr_buffer_cursor_ + 1) % osr_buffer_capacity_; + } + + // Add to found slot and dispose the evicted job. 
+ RecompileJob* evicted = osr_buffer_[osr_buffer_cursor_]; + if (evicted != NULL) { + ASSERT(evicted->IsWaitingForInstall()); + CompilationInfo* info = evicted->info(); if (FLAG_trace_osr) { PrintF("[COSR - Discarded "); - throw_away->closure()->PrintName(); - PrintF(", AST id %d]\n", - throw_away->osr_ast_id().ToInt()); + info->closure()->PrintName(); + PrintF(", AST id %d]\n", info->osr_ast_id().ToInt()); } - delete throw_away; + DisposeRecompileJob(evicted, false); } + osr_buffer_[osr_buffer_cursor_] = job; + osr_buffer_cursor_ = (osr_buffer_cursor_ + 1) % osr_buffer_capacity_; } diff --git a/deps/v8/src/optimizing-compiler-thread.h b/deps/v8/src/optimizing-compiler-thread.h index d1ed6a2c59..754aecebf5 100644 --- a/deps/v8/src/optimizing-compiler-thread.h +++ b/deps/v8/src/optimizing-compiler-thread.h @@ -40,7 +40,7 @@ namespace v8 { namespace internal { class HOptimizedGraphBuilder; -class OptimizingCompiler; +class RecompileJob; class SharedFunctionInfo; class OptimizingCompilerThread : public Thread { @@ -53,38 +53,47 @@ class OptimizingCompilerThread : public Thread { isolate_(isolate), stop_semaphore_(0), input_queue_semaphore_(0), - osr_candidates_(2), - ready_for_osr_(2), + input_queue_capacity_(FLAG_concurrent_recompilation_queue_length), + input_queue_length_(0), + input_queue_shift_(0), + osr_buffer_capacity_(FLAG_concurrent_recompilation_queue_length + 4), + osr_buffer_cursor_(0), osr_hits_(0), - osr_attempts_(0) { + osr_attempts_(0), + blocked_jobs_(0) { NoBarrier_Store(&stop_thread_, static_cast<AtomicWord>(CONTINUE)); - NoBarrier_Store(&queue_length_, static_cast<AtomicWord>(0)); + input_queue_ = NewArray<RecompileJob*>(input_queue_capacity_); + if (FLAG_concurrent_osr) { + // Allocate and mark OSR buffer slots as empty. + osr_buffer_ = NewArray<RecompileJob*>(osr_buffer_capacity_); + for (int i = 0; i < osr_buffer_capacity_; i++) osr_buffer_[i] = NULL; + } } - ~OptimizingCompilerThread() {} + + ~OptimizingCompilerThread(); void Run(); void Stop(); void Flush(); - void QueueForOptimization(OptimizingCompiler* optimizing_compiler); + void QueueForOptimization(RecompileJob* optimizing_compiler); + void Unblock(); void InstallOptimizedFunctions(); - OptimizingCompiler* FindReadyOSRCandidate(Handle<JSFunction> function, - uint32_t osr_pc_offset); + RecompileJob* FindReadyOSRCandidate(Handle<JSFunction> function, + uint32_t osr_pc_offset); bool IsQueuedForOSR(Handle<JSFunction> function, uint32_t osr_pc_offset); bool IsQueuedForOSR(JSFunction* function); inline bool IsQueueAvailable() { - // We don't need a barrier since we have a data dependency right - // after. - Atomic32 current_length = NoBarrier_Load(&queue_length_); - - // This can be queried only from the execution thread. - ASSERT(!IsOptimizerThread()); - // Since only the execution thread increments queue_length_ and - // only one thread can run inside an Isolate at one time, a direct - // read doesn't introduce a race -- queue_length_ may have decreased in the - // meantime, but not increased. - return (current_length < FLAG_concurrent_recompilation_queue_length); + LockGuard<Mutex> access_input_queue(&input_queue_mutex_); + return input_queue_length_ < input_queue_capacity_; + } + + inline void AgeBufferedOsrJobs() { + // Advance cursor of the cyclic buffer to next empty slot or stale OSR job. + // Dispose said OSR job in the latter case. Calling this on every GC + // should make sure that we do not hold onto stale jobs indefinitely.
+ AddToOsrBuffer(NULL); } #ifdef DEBUG @@ -94,13 +103,22 @@ class OptimizingCompilerThread : public Thread { private: enum StopFlag { CONTINUE, STOP, FLUSH }; - // Remove the oldest OSR candidates that are ready so that we - // only have |limit| left waiting. - void RemoveStaleOSRCandidates(int limit = kReadyForOSRLimit); - void FlushInputQueue(bool restore_function_code); void FlushOutputQueue(bool restore_function_code); + void FlushOsrBuffer(bool restore_function_code); void CompileNext(); + RecompileJob* NextInput(); + + // Add a recompilation task for OSR to the cyclic buffer, awaiting OSR entry. + // Tasks evicted from the cyclic buffer are discarded. + void AddToOsrBuffer(RecompileJob* compiler); + + inline int InputQueueIndex(int i) { + int result = (i + input_queue_shift_) % input_queue_capacity_; + ASSERT_LE(0, result); + ASSERT_LT(result, input_queue_capacity_); + return result; + } #ifdef DEBUG int thread_id_; @@ -111,25 +129,29 @@ class OptimizingCompilerThread : public Thread { Semaphore stop_semaphore_; Semaphore input_queue_semaphore_; - // Queue of incoming recompilation tasks (including OSR). - UnboundQueue<OptimizingCompiler*> input_queue_; + // Circular queue of incoming recompilation tasks (including OSR). + RecompileJob** input_queue_; + int input_queue_capacity_; + int input_queue_length_; + int input_queue_shift_; + Mutex input_queue_mutex_; + // Queue of recompilation tasks ready to be installed (excluding OSR). - UnboundQueue<OptimizingCompiler*> output_queue_; - // List of all OSR related recompilation tasks (both incoming and ready ones). - List<OptimizingCompiler*> osr_candidates_; - // List of recompilation tasks ready for OSR. - List<OptimizingCompiler*> ready_for_osr_; + UnboundQueue<RecompileJob*> output_queue_; + + // Cyclic buffer of recompilation tasks for OSR. + RecompileJob** osr_buffer_; + int osr_buffer_capacity_; + int osr_buffer_cursor_; volatile AtomicWord stop_thread_; - volatile Atomic32 queue_length_; TimeDelta time_spent_compiling_; TimeDelta time_spent_total_; - Mutex osr_list_mutex_; int osr_hits_; int osr_attempts_; - static const int kReadyForOSRLimit = 4; + int blocked_jobs_; }; } } // namespace v8::internal diff --git a/deps/v8/src/parser.cc b/deps/v8/src/parser.cc index 05ae11e429..d84649d86b 100644 --- a/deps/v8/src/parser.cc +++ b/deps/v8/src/parser.cc @@ -536,7 +536,8 @@ Parser::FunctionState::~FunctionState() { // Implementation of Parser Parser::Parser(CompilationInfo* info) - : isolate_(info->isolate()), + : ParserBase(&scanner_, info->isolate()->stack_guard()->real_climit()), + isolate_(info->isolate()), symbol_cache_(0, info->zone()), script_(info->script()), scanner_(isolate_->unicode_cache()), @@ -548,11 +549,6 @@ Parser::Parser(CompilationInfo* info) extension_(info->extension()), pre_parse_data_(NULL), fni_(NULL), - allow_natives_syntax_(false), - allow_lazy_(false), - allow_generators_(false), - allow_for_of_(false), - stack_overflow_(false), parenthesized_function_(false), zone_(info->zone()), info_(info) { @@ -569,7 +565,9 @@ Parser::Parser(CompilationInfo* info) FunctionLiteral* Parser::ParseProgram() { - HistogramTimerScope timer_scope(isolate()->counters()->parse()); + // TODO(bmeurer): We temporarily need to pass allow_nesting = true here, + // see comment for HistogramTimerScope class. 
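The optimizing-compiler-thread changes above replace the lock-free UnboundQueue and the two OSR lists with two fixed-size rings: a circular input queue indexed through input_queue_shift_, and a cyclic OSR buffer whose cursor evicts stale jobs as it advances (AgeBufferedOsrJobs simply inserts NULL to age one slot per GC). Below is a minimal standalone sketch of that arithmetic; Job and RingSketch are invented stand-ins for RecompileJob and the V8 plumbing, and the real code sizes the OSR buffer at queue length + 4 so the eviction scan always terminates.

#include <cassert>
#include <cstdio>

struct Job { bool waiting_for_install; };

class RingSketch {
 public:
  explicit RingSketch(int capacity)
      : capacity_(capacity), length_(0), shift_(0), cursor_(0) {
    queue_ = new Job*[capacity_];
    buffer_ = new Job*[capacity_];
    for (int i = 0; i < capacity_; i++) buffer_[i] = nullptr;
  }
  ~RingSketch() { delete[] queue_; delete[] buffer_; }

  // Mirrors InputQueueIndex(): logical index i -> physical slot.
  int Index(int i) const { return (i + shift_) % capacity_; }

  void Enqueue(Job* job) {  // cf. QueueForOptimization
    assert(length_ < capacity_);
    queue_[Index(length_++)] = job;
  }

  Job* Dequeue() {  // cf. NextInput: pop the head by advancing the shift
    if (length_ == 0) return nullptr;
    Job* job = queue_[Index(0)];
    shift_ = Index(1);
    length_--;
    return job;
  }

  // Mirrors AddToOsrBuffer(): scan for an empty or stale slot, evict, store.
  // Terminates because the buffer is sized so such a slot always exists.
  void AddToBuffer(Job* job) {
    while (buffer_[cursor_] != nullptr &&
           !buffer_[cursor_]->waiting_for_install) {
      cursor_ = (cursor_ + 1) % capacity_;
    }
    delete buffer_[cursor_];  // dispose the evicted stale job, if any
    buffer_[cursor_] = job;
    cursor_ = (cursor_ + 1) % capacity_;
  }

  // Mirrors AgeBufferedOsrJobs(): inserting NULL frees one stale slot.
  void AgeOsrJobs() { AddToBuffer(nullptr); }

 private:
  int capacity_, length_, shift_, cursor_;
  Job** queue_;
  Job** buffer_;
};

int main() {
  RingSketch ring(4);
  ring.Enqueue(new Job{false});
  delete ring.Dequeue();
  ring.AddToBuffer(new Job{true});                // a job awaiting install
  for (int i = 0; i < 4; i++) ring.AgeOsrJobs();  // wraps around; disposes it
  std::printf("ok\n");
  return 0;
}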
+ HistogramTimerScope timer_scope(isolate()->counters()->parse(), true); Handle<String> source(String::cast(script_->source())); isolate()->counters()->total_parse_size()->Increment(source->length()); ElapsedTimer timer; @@ -652,10 +650,10 @@ FunctionLiteral* Parser::DoParseProgram(CompilationInfo* info, top_scope_->SetLanguageMode(info->language_mode()); ZoneList<Statement*>* body = new(zone()) ZoneList<Statement*>(16, zone()); bool ok = true; - int beg_loc = scanner().location().beg_pos; + int beg_pos = scanner().location().beg_pos; ParseSourceElements(body, Token::EOS, info->is_eval(), true, &ok); if (ok && !top_scope_->is_classic_mode()) { - CheckOctalLiteral(beg_loc, scanner().location().end_pos, &ok); + CheckOctalLiteral(beg_pos, scanner().location().end_pos, &ok); } if (ok && is_extended_mode()) { @@ -685,11 +683,12 @@ FunctionLiteral* Parser::DoParseProgram(CompilationInfo* info, FunctionLiteral::ANONYMOUS_EXPRESSION, FunctionLiteral::kGlobalOrEval, FunctionLiteral::kNotParenthesized, - FunctionLiteral::kNotGenerator); + FunctionLiteral::kNotGenerator, + 0); result->set_ast_properties(factory()->visitor()->ast_properties()); result->set_dont_optimize_reason( factory()->visitor()->dont_optimize_reason()); - } else if (stack_overflow_) { + } else if (stack_overflow()) { isolate()->StackOverflow(); } } @@ -786,7 +785,7 @@ FunctionLiteral* Parser::ParseLazy(Utf16CharacterStream* source) { ASSERT(target_stack_ == NULL); if (result == NULL) { - if (stack_overflow_) isolate()->StackOverflow(); + if (stack_overflow()) isolate()->StackOverflow(); } else { Handle<String> inferred_name(shared_info->inferred_name()); result->set_inferred_name(inferred_name); @@ -984,6 +983,7 @@ Statement* Parser::ParseModuleDeclaration(ZoneStringList* names, bool* ok) { // ModuleDeclaration: // 'module' Identifier Module + int pos = peek_position(); Handle<String> name = ParseIdentifier(CHECK_OK); #ifdef DEBUG @@ -994,7 +994,7 @@ Statement* Parser::ParseModuleDeclaration(ZoneStringList* names, bool* ok) { Module* module = ParseModule(CHECK_OK); VariableProxy* proxy = NewUnresolved(name, MODULE, module->interface()); Declaration* declaration = - factory()->NewModuleDeclaration(proxy, module, top_scope_); + factory()->NewModuleDeclaration(proxy, module, top_scope_, pos); Declare(declaration, true, CHECK_OK); #ifdef DEBUG @@ -1009,9 +1009,9 @@ Statement* Parser::ParseModuleDeclaration(ZoneStringList* names, bool* ok) { if (names) names->Add(name, zone()); if (module->body() == NULL) - return factory()->NewEmptyStatement(); + return factory()->NewEmptyStatement(pos); else - return factory()->NewModuleStatement(proxy, module->body()); + return factory()->NewModuleStatement(proxy, module->body(), pos); } @@ -1046,8 +1046,9 @@ Module* Parser::ParseModuleLiteral(bool* ok) { // Module: // '{' ModuleElement '}' + int pos = peek_position(); // Construct block expecting 16 statements. - Block* body = factory()->NewBlock(NULL, 16, false); + Block* body = factory()->NewBlock(NULL, 16, false, RelocInfo::kNoPosition); #ifdef DEBUG if (FLAG_print_interface_details) PrintF("# Literal "); #endif @@ -1092,7 +1093,7 @@ Module* Parser::ParseModuleLiteral(bool* ok) { ASSERT(*ok); interface->Freeze(ok); ASSERT(*ok); - return factory()->NewModuleLiteral(body, interface); + return factory()->NewModuleLiteral(body, interface, pos); } @@ -1101,6 +1102,7 @@ Module* Parser::ParseModulePath(bool* ok) { // Identifier // ModulePath '.' 
Identifier + int pos = peek_position(); Module* result = ParseModuleVariable(CHECK_OK); while (Check(Token::PERIOD)) { Handle<String> name = ParseIdentifierName(CHECK_OK); @@ -1108,7 +1110,7 @@ Module* Parser::ParseModulePath(bool* ok) { if (FLAG_print_interface_details) PrintF("# Path .%s ", name->ToAsciiArray()); #endif - Module* member = factory()->NewModulePath(result, name); + Module* member = factory()->NewModulePath(result, name, pos); result->interface()->Add(name, member->interface(), zone(), ok); if (!*ok) { #ifdef DEBUG @@ -1134,6 +1136,7 @@ Module* Parser::ParseModuleVariable(bool* ok) { // ModulePath: // Identifier + int pos = peek_position(); Handle<String> name = ParseIdentifier(CHECK_OK); #ifdef DEBUG if (FLAG_print_interface_details) @@ -1143,7 +1146,7 @@ Module* Parser::ParseModuleVariable(bool* ok) { factory(), name, Interface::NewModule(zone()), scanner().location().beg_pos); - return factory()->NewModuleVariable(proxy); + return factory()->NewModuleVariable(proxy, pos); } @@ -1151,6 +1154,7 @@ Module* Parser::ParseModuleUrl(bool* ok) { // Module: // String + int pos = peek_position(); Expect(Token::STRING, CHECK_OK); Handle<String> symbol = GetSymbol(); @@ -1163,10 +1167,10 @@ Module* Parser::ParseModuleUrl(bool* ok) { // Create an empty literal as long as the feature isn't finished. USE(symbol); Scope* scope = NewScope(top_scope_, MODULE_SCOPE); - Block* body = factory()->NewBlock(NULL, 1, false); + Block* body = factory()->NewBlock(NULL, 1, false, RelocInfo::kNoPosition); body->set_scope(scope); Interface* interface = scope->interface(); - Module* result = factory()->NewModuleLiteral(body, interface); + Module* result = factory()->NewModuleLiteral(body, interface, pos); interface->Freeze(ok); ASSERT(*ok); interface->Unify(scope->interface(), zone(), ok); @@ -1194,6 +1198,7 @@ Block* Parser::ParseImportDeclaration(bool* ok) { // // TODO(ES6): implement destructuring ImportSpecifiers + int pos = peek_position(); Expect(Token::IMPORT, CHECK_OK); ZoneStringList names(1, zone()); @@ -1211,7 +1216,7 @@ Block* Parser::ParseImportDeclaration(bool* ok) { // Generate a separate declaration for each identifier. // TODO(ES6): once we implement destructuring, make that one declaration. - Block* block = factory()->NewBlock(NULL, 1, true); + Block* block = factory()->NewBlock(NULL, 1, true, RelocInfo::kNoPosition); for (int i = 0; i < names.length(); ++i) { #ifdef DEBUG if (FLAG_print_interface_details) @@ -1232,7 +1237,7 @@ Block* Parser::ParseImportDeclaration(bool* ok) { } VariableProxy* proxy = NewUnresolved(names[i], LET, interface); Declaration* declaration = - factory()->NewImportDeclaration(proxy, module, top_scope_); + factory()->NewImportDeclaration(proxy, module, top_scope_, pos); Declare(declaration, true, CHECK_OK); } @@ -1256,6 +1261,7 @@ Statement* Parser::ParseExportDeclaration(bool* ok) { ZoneStringList names(1, zone()); switch (peek()) { case Token::IDENTIFIER: { + int pos = position(); Handle<String> name = ParseIdentifier(CHECK_OK); // Handle 'module' as a context-sensitive keyword. 
if (!name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("module"))) { @@ -1266,7 +1272,7 @@ Statement* Parser::ParseExportDeclaration(bool* ok) { names.Add(name, zone()); } ExpectSemicolon(CHECK_OK); - result = factory()->NewEmptyStatement(); + result = factory()->NewEmptyStatement(pos); } else { result = ParseModuleDeclaration(&names, CHECK_OK); } @@ -1305,7 +1311,7 @@ Statement* Parser::ParseExportDeclaration(bool* ok) { // TODO(rossberg): Rethink whether we actually need to store export // declarations (for compilation?). // ExportDeclaration* declaration = - // factory()->NewExportDeclaration(proxy, top_scope_); + // factory()->NewExportDeclaration(proxy, top_scope_, position); // top_scope_->AddDeclaration(declaration); } @@ -1363,10 +1369,6 @@ Statement* Parser::ParseStatement(ZoneStringList* labels, bool* ok) { // labels can be simply ignored in all other cases; except for // trivial labeled break statements 'label: break label' which is // parsed into an empty statement. - - // Keep the source position of the statement - int statement_pos = scanner().peek_location().beg_pos; - Statement* stmt = NULL; switch (peek()) { case Token::LBRACE: return ParseBlock(labels, ok); @@ -1374,52 +1376,41 @@ Statement* Parser::ParseStatement(ZoneStringList* labels, bool* ok) { case Token::CONST: // fall through case Token::LET: case Token::VAR: - stmt = ParseVariableStatement(kStatement, NULL, ok); - break; + return ParseVariableStatement(kStatement, NULL, ok); case Token::SEMICOLON: Next(); - return factory()->NewEmptyStatement(); + return factory()->NewEmptyStatement(RelocInfo::kNoPosition); case Token::IF: - stmt = ParseIfStatement(labels, ok); - break; + return ParseIfStatement(labels, ok); case Token::DO: - stmt = ParseDoWhileStatement(labels, ok); - break; + return ParseDoWhileStatement(labels, ok); case Token::WHILE: - stmt = ParseWhileStatement(labels, ok); - break; + return ParseWhileStatement(labels, ok); case Token::FOR: - stmt = ParseForStatement(labels, ok); - break; + return ParseForStatement(labels, ok); case Token::CONTINUE: - stmt = ParseContinueStatement(ok); - break; + return ParseContinueStatement(ok); case Token::BREAK: - stmt = ParseBreakStatement(labels, ok); - break; + return ParseBreakStatement(labels, ok); case Token::RETURN: - stmt = ParseReturnStatement(ok); - break; + return ParseReturnStatement(ok); case Token::WITH: - stmt = ParseWithStatement(labels, ok); - break; + return ParseWithStatement(labels, ok); case Token::SWITCH: - stmt = ParseSwitchStatement(labels, ok); - break; + return ParseSwitchStatement(labels, ok); case Token::THROW: - stmt = ParseThrowStatement(ok); - break; + return ParseThrowStatement(ok); case Token::TRY: { // NOTE: It is somewhat complicated to have labels on @@ -1427,12 +1418,10 @@ Statement* Parser::ParseStatement(ZoneStringList* labels, bool* ok) { // one must take great care not to treat it as a // fall-through. 
It is much easier just to wrap the entire // try-statement in a statement block and put the labels there - Block* result = factory()->NewBlock(labels, 1, false); + Block* result = + factory()->NewBlock(labels, 1, false, RelocInfo::kNoPosition); Target target(&this->target_stack_, result); TryStatement* statement = ParseTryStatement(CHECK_OK); - if (statement) { - statement->set_statement_pos(statement_pos); - } if (result) result->AddStatement(statement, zone()); return result; } @@ -1459,16 +1448,11 @@ Statement* Parser::ParseStatement(ZoneStringList* labels, bool* ok) { } case Token::DEBUGGER: - stmt = ParseDebuggerStatement(ok); - break; + return ParseDebuggerStatement(ok); default: - stmt = ParseExpressionOrLabelledStatement(labels, ok); + return ParseExpressionOrLabelledStatement(labels, ok); } - - // Store the source position of the statement - if (stmt != NULL) stmt->set_statement_pos(statement_pos); - return stmt; } @@ -1480,7 +1464,7 @@ VariableProxy* Parser::NewUnresolved( // Let/const variables in harmony mode are always added to the immediately // enclosing scope. return DeclarationScope(mode)->NewUnresolved( - factory(), name, interface, scanner().location().beg_pos); + factory(), name, interface, position()); } @@ -1647,6 +1631,7 @@ void Parser::Declare(Declaration* declaration, bool resolve, bool* ok) { // declaration is resolved by looking up the function through a // callback provided by the extension. Statement* Parser::ParseNativeDeclaration(bool* ok) { + int pos = peek_position(); Expect(Token::FUNCTION, CHECK_OK); Handle<String> name = ParseIdentifier(CHECK_OK); Expect(Token::LPAREN, CHECK_OK); @@ -1667,39 +1652,19 @@ Statement* Parser::ParseNativeDeclaration(bool* ok) { // because of lazy compilation. DeclarationScope(VAR)->ForceEagerCompilation(); - // Compute the function template for the native function. - v8::Handle<v8::FunctionTemplate> fun_template = - extension_->GetNativeFunction(v8::Utils::ToLocal(name)); - ASSERT(!fun_template.IsEmpty()); - - // Instantiate the function and create a shared function info from it. - Handle<JSFunction> fun = Utils::OpenHandle(*fun_template->GetFunction()); - const int literals = fun->NumberOfLiterals(); - Handle<Code> code = Handle<Code>(fun->shared()->code()); - Handle<Code> construct_stub = Handle<Code>(fun->shared()->construct_stub()); - bool is_generator = false; - Handle<SharedFunctionInfo> shared = - isolate()->factory()->NewSharedFunctionInfo(name, literals, is_generator, - code, Handle<ScopeInfo>(fun->shared()->scope_info())); - shared->set_construct_stub(*construct_stub); - - // Copy the function data to the shared function info. - shared->set_function_data(fun->shared()->function_data()); - int parameters = fun->shared()->formal_parameter_count(); - shared->set_formal_parameter_count(parameters); - // TODO(1240846): It's weird that native function declarations are // introduced dynamically when we meet their declarations, whereas // other functions are set up when entering the surrounding scope. 
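The ParseNativeDeclaration hunk above stops instantiating the extension's function template at parse time; the parser now emits a NativeFunctionLiteral node and leaves instantiation to later compilation. A toy sketch of that deferral follows; every name in it (Extension, NativeFunctionLiteral, Instantiate, NativeFn) is an invented stand-in, not the V8 API.

#include <cstdio>
#include <functional>
#include <map>
#include <string>

using NativeFn = std::function<void()>;

struct Extension {
  std::map<std::string, NativeFn> fns;
  NativeFn GetNativeFunction(const std::string& name) { return fns.at(name); }
};

// What the parser now records: just the name and the providing extension.
struct NativeFunctionLiteral {
  std::string name;
  Extension* extension;
};

// What used to happen during parsing now happens at compile time.
NativeFn Instantiate(const NativeFunctionLiteral& lit) {
  return lit.extension->GetNativeFunction(lit.name);
}

int main() {
  Extension ext;
  ext.fns["gc"] = [] { std::printf("native gc called\n"); };
  NativeFunctionLiteral lit{"gc", &ext};  // emitted at parse time
  Instantiate(lit)();                     // resolved and called later
  return 0;
}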
VariableProxy* proxy = NewUnresolved(name, VAR, Interface::NewValue()); Declaration* declaration = - factory()->NewVariableDeclaration(proxy, VAR, top_scope_); + factory()->NewVariableDeclaration(proxy, VAR, top_scope_, pos); Declare(declaration, true, CHECK_OK); - SharedFunctionInfoLiteral* lit = - factory()->NewSharedFunctionInfoLiteral(shared); + NativeFunctionLiteral* lit = factory()->NewNativeFunctionLiteral( + name, extension_, RelocInfo::kNoPosition); return factory()->NewExpressionStatement( factory()->NewAssignment( - Token::INIT_VAR, proxy, lit, RelocInfo::kNoPosition)); + Token::INIT_VAR, proxy, lit, RelocInfo::kNoPosition), + pos); } @@ -1710,7 +1675,7 @@ Statement* Parser::ParseFunctionDeclaration(ZoneStringList* names, bool* ok) { // 'function' '*' Identifier '(' FormalParameterListopt ')' // '{' FunctionBody '}' Expect(Token::FUNCTION, CHECK_OK); - int function_token_position = scanner().location().beg_pos; + int pos = position(); bool is_generator = allow_generators() && Check(Token::MUL); bool is_strict_reserved = false; Handle<String> name = ParseIdentifierOrStrictReservedWord( @@ -1718,7 +1683,7 @@ Statement* Parser::ParseFunctionDeclaration(ZoneStringList* names, bool* ok) { FunctionLiteral* fun = ParseFunctionLiteral(name, is_strict_reserved, is_generator, - function_token_position, + pos, FunctionLiteral::DECLARATION, CHECK_OK); // Even if we're not at the top-level of the global or a function @@ -1730,10 +1695,10 @@ Statement* Parser::ParseFunctionDeclaration(ZoneStringList* names, bool* ok) { is_extended_mode() && !top_scope_->is_global_scope() ? LET : VAR; VariableProxy* proxy = NewUnresolved(name, mode, Interface::NewValue()); Declaration* declaration = - factory()->NewFunctionDeclaration(proxy, mode, fun, top_scope_); + factory()->NewFunctionDeclaration(proxy, mode, fun, top_scope_, pos); Declare(declaration, true, CHECK_OK); if (names) names->Add(name, zone()); - return factory()->NewEmptyStatement(); + return factory()->NewEmptyStatement(RelocInfo::kNoPosition); } @@ -1747,7 +1712,8 @@ Block* Parser::ParseBlock(ZoneStringList* labels, bool* ok) { // (ECMA-262, 3rd, 12.2) // // Construct block expecting 16 statements. - Block* result = factory()->NewBlock(labels, 16, false); + Block* result = + factory()->NewBlock(labels, 16, false, RelocInfo::kNoPosition); Target target(&this->target_stack_, result); Expect(Token::LBRACE, CHECK_OK); while (peek() != Token::RBRACE) { @@ -1768,7 +1734,8 @@ Block* Parser::ParseScopedBlock(ZoneStringList* labels, bool* ok) { // '{' BlockElement* '}' // Construct block expecting 16 statements. - Block* body = factory()->NewBlock(labels, 16, false); + Block* body = + factory()->NewBlock(labels, 16, false, RelocInfo::kNoPosition); Scope* block_scope = NewScope(top_scope_, BLOCK_SCOPE); // Parse the statements and collect escaping labels. @@ -1838,6 +1805,8 @@ Block* Parser::ParseVariableDeclarations( // TODO(ES6): // ConstBinding :: // BindingPattern '=' AssignmentExpression + + int pos = peek_position(); VariableMode mode = VAR; // True if the binding needs initialization. 'let' and 'const' declared // bindings are created uninitialized by their declaration nodes and @@ -1923,7 +1892,7 @@ Block* Parser::ParseVariableDeclarations( // is inside an initializer block, it is ignored. // // Create new block with one expected declaration. 
- Block* block = factory()->NewBlock(NULL, 1, true); + Block* block = factory()->NewBlock(NULL, 1, true, pos); int nvars = 0; // the number of variables declared Handle<String> name; do { @@ -1960,7 +1929,7 @@ Block* Parser::ParseVariableDeclarations( is_const ? Interface::NewConst() : Interface::NewValue(); VariableProxy* proxy = NewUnresolved(name, mode, interface); Declaration* declaration = - factory()->NewVariableDeclaration(proxy, mode, top_scope_); + factory()->NewVariableDeclaration(proxy, mode, top_scope_, pos); Declare(declaration, mode != VAR, CHECK_OK); nvars++; if (declaration_scope->num_var_or_const() > kMaxNumFunctionLocals) { @@ -2000,11 +1969,11 @@ Block* Parser::ParseVariableDeclarations( Scope* initialization_scope = is_const ? declaration_scope : top_scope_; Expression* value = NULL; - int position = -1; + int pos = -1; // Harmony consts have non-optional initializers. if (peek() == Token::ASSIGN || mode == CONST_HARMONY) { Expect(Token::ASSIGN, CHECK_OK); - position = scanner().location().beg_pos; + pos = position(); value = ParseAssignmentExpression(var_context != kForStatement, CHECK_OK); // Don't infer if it is "a = function(){...}();"-like expression. if (fni_ != NULL && @@ -2019,12 +1988,12 @@ Block* Parser::ParseVariableDeclarations( // Record the end position of the initializer. if (proxy->var() != NULL) { - proxy->var()->set_initializer_position(scanner().location().end_pos); + proxy->var()->set_initializer_position(position()); } // Make sure that 'const x' and 'let x' initialize 'x' to undefined. if (value == NULL && needs_init) { - value = GetLiteralUndefined(); + value = GetLiteralUndefined(position()); } // Global variable declarations must be compiled in a specific @@ -2052,7 +2021,7 @@ Block* Parser::ParseVariableDeclarations( ZoneList<Expression*>* arguments = new(zone()) ZoneList<Expression*>(3, zone()); // We have at least 1 parameter. - arguments->Add(factory()->NewLiteral(name), zone()); + arguments->Add(factory()->NewLiteral(name, pos), zone()); CallRuntime* initialize; if (is_const) { @@ -2066,12 +2035,12 @@ Block* Parser::ParseVariableDeclarations( initialize = factory()->NewCallRuntime( isolate()->factory()->InitializeConstGlobal_string(), Runtime::FunctionForId(Runtime::kInitializeConstGlobal), - arguments); + arguments, pos); } else { // Add strict mode. // We may want to pass singleton to avoid Literal allocations. LanguageMode language_mode = initialization_scope->language_mode(); - arguments->Add(factory()->NewNumberLiteral(language_mode), zone()); + arguments->Add(factory()->NewNumberLiteral(language_mode, pos), zone()); // Be careful not to assign a value to the global variable if // we're in a with. The initialization value should not @@ -2089,11 +2058,12 @@ Block* Parser::ParseVariableDeclarations( initialize = factory()->NewCallRuntime( isolate()->factory()->InitializeVarGlobal_string(), Runtime::FunctionForId(Runtime::kInitializeVarGlobal), - arguments); + arguments, pos); } - block->AddStatement(factory()->NewExpressionStatement(initialize), - zone()); + block->AddStatement( + factory()->NewExpressionStatement(initialize, RelocInfo::kNoPosition), + zone()); } else if (needs_init) { // Constant initializations always assign to the declared constant which // is always at the function scope level. 
This is only relevant for @@ -2106,9 +2076,10 @@ Block* Parser::ParseVariableDeclarations( ASSERT(proxy->var() != NULL); ASSERT(value != NULL); Assignment* assignment = - factory()->NewAssignment(init_op, proxy, value, position); - block->AddStatement(factory()->NewExpressionStatement(assignment), - zone()); + factory()->NewAssignment(init_op, proxy, value, pos); + block->AddStatement( + factory()->NewExpressionStatement(assignment, RelocInfo::kNoPosition), + zone()); value = NULL; } @@ -2122,9 +2093,10 @@ Block* Parser::ParseVariableDeclarations( VariableProxy* proxy = initialization_scope->NewUnresolved(factory(), name, interface); Assignment* assignment = - factory()->NewAssignment(init_op, proxy, value, position); - block->AddStatement(factory()->NewExpressionStatement(assignment), - zone()); + factory()->NewAssignment(init_op, proxy, value, pos); + block->AddStatement( + factory()->NewExpressionStatement(assignment, RelocInfo::kNoPosition), + zone()); } if (fni_ != NULL) fni_->Leave(); @@ -2156,6 +2128,7 @@ Statement* Parser::ParseExpressionOrLabelledStatement(ZoneStringList* labels, // ExpressionStatement | LabelledStatement :: // Expression ';' // Identifier ':' Statement + int pos = peek_position(); bool starts_with_idenfifier = peek_any_identifier(); Expression* expr = ParseExpression(true, CHECK_OK); if (peek() == Token::COLON && starts_with_idenfifier && expr != NULL && @@ -2215,7 +2188,7 @@ Statement* Parser::ParseExpressionOrLabelledStatement(ZoneStringList* labels, scanner().literal_contains_escapes()) { ExpectSemicolon(CHECK_OK); } - return factory()->NewExpressionStatement(expr); + return factory()->NewExpressionStatement(expr, pos); } @@ -2223,6 +2196,7 @@ IfStatement* Parser::ParseIfStatement(ZoneStringList* labels, bool* ok) { // IfStatement :: // 'if' '(' Expression ')' Statement ('else' Statement)? + int pos = peek_position(); Expect(Token::IF, CHECK_OK); Expect(Token::LPAREN, CHECK_OK); Expression* condition = ParseExpression(true, CHECK_OK); @@ -2233,9 +2207,10 @@ IfStatement* Parser::ParseIfStatement(ZoneStringList* labels, bool* ok) { Next(); else_statement = ParseStatement(labels, CHECK_OK); } else { - else_statement = factory()->NewEmptyStatement(); + else_statement = factory()->NewEmptyStatement(RelocInfo::kNoPosition); } - return factory()->NewIfStatement(condition, then_statement, else_statement); + return factory()->NewIfStatement( + condition, then_statement, else_statement, pos); } @@ -2243,6 +2218,7 @@ Statement* Parser::ParseContinueStatement(bool* ok) { // ContinueStatement :: // 'continue' Identifier? ';' + int pos = peek_position(); Expect(Token::CONTINUE, CHECK_OK); Handle<String> label = Handle<String>::null(); Token::Value tok = peek(); @@ -2265,7 +2241,7 @@ Statement* Parser::ParseContinueStatement(bool* ok) { return NULL; } ExpectSemicolon(CHECK_OK); - return factory()->NewContinueStatement(target); + return factory()->NewContinueStatement(target, pos); } @@ -2273,6 +2249,7 @@ Statement* Parser::ParseBreakStatement(ZoneStringList* labels, bool* ok) { // BreakStatement :: // 'break' Identifier? ';' + int pos = peek_position(); Expect(Token::BREAK, CHECK_OK); Handle<String> label; Token::Value tok = peek(); @@ -2284,7 +2261,7 @@ Statement* Parser::ParseBreakStatement(ZoneStringList* labels, bool* ok) { // empty statements, e.g. 
'l1: l2: l3: break l2;' if (!label.is_null() && ContainsLabel(labels, label)) { ExpectSemicolon(CHECK_OK); - return factory()->NewEmptyStatement(); + return factory()->NewEmptyStatement(pos); } BreakableStatement* target = NULL; target = LookupBreakTarget(label, CHECK_OK); @@ -2301,7 +2278,7 @@ Statement* Parser::ParseBreakStatement(ZoneStringList* labels, bool* ok) { return NULL; } ExpectSemicolon(CHECK_OK); - return factory()->NewBreakStatement(target); + return factory()->NewBreakStatement(target, pos); } @@ -2309,10 +2286,11 @@ Statement* Parser::ParseReturnStatement(bool* ok) { // ReturnStatement :: // 'return' Expression? ';' - // Consume the return token. It is necessary to do the before + // Consume the return token. It is necessary to do that before // reporting any errors on it, because of the way errors are // reported (underlining). Expect(Token::RETURN, CHECK_OK); + int pos = position(); Token::Value tok = peek(); Statement* result; @@ -2321,7 +2299,7 @@ Statement* Parser::ParseReturnStatement(bool* ok) { tok == Token::SEMICOLON || tok == Token::RBRACE || tok == Token::EOS) { - return_value = GetLiteralUndefined(); + return_value = GetLiteralUndefined(position()); } else { return_value = ParseExpression(true, CHECK_OK); } @@ -2330,10 +2308,10 @@ Statement* Parser::ParseReturnStatement(bool* ok) { Expression* generator = factory()->NewVariableProxy( current_function_state_->generator_object_variable()); Expression* yield = factory()->NewYield( - generator, return_value, Yield::FINAL, RelocInfo::kNoPosition); - result = factory()->NewExpressionStatement(yield); + generator, return_value, Yield::FINAL, pos); + result = factory()->NewExpressionStatement(yield, pos); } else { - result = factory()->NewReturnStatement(return_value); + result = factory()->NewReturnStatement(return_value, pos); } // An ECMAScript program is considered syntactically incorrect if it @@ -2347,7 +2325,7 @@ Statement* Parser::ParseReturnStatement(bool* ok) { Handle<String> message = isolate()->factory()->illegal_return_string(); Expression* throw_error = NewThrowSyntaxError(message, Handle<Object>::null()); - return factory()->NewExpressionStatement(throw_error); + return factory()->NewExpressionStatement(throw_error, pos); } return result; } @@ -2358,6 +2336,7 @@ Statement* Parser::ParseWithStatement(ZoneStringList* labels, bool* ok) { // 'with' '(' Expression ')' Statement Expect(Token::WITH, CHECK_OK); + int pos = position(); if (!top_scope_->is_classic_mode()) { ReportMessage("strict_mode_with", Vector<const char*>::empty()); @@ -2377,7 +2356,7 @@ Statement* Parser::ParseWithStatement(ZoneStringList* labels, bool* ok) { stmt = ParseStatement(labels, CHECK_OK); with_scope->set_end_position(scanner().location().end_pos); } - return factory()->NewWithStatement(with_scope, expr, stmt); + return factory()->NewWithStatement(with_scope, expr, stmt, pos); } @@ -2401,7 +2380,7 @@ CaseClause* Parser::ParseCaseClause(bool* default_seen_ptr, bool* ok) { *default_seen_ptr = true; } Expect(Token::COLON, CHECK_OK); - int pos = scanner().location().beg_pos; + int pos = position(); ZoneList<Statement*>* statements = new(zone()) ZoneList<Statement*>(5, zone()); while (peek() != Token::CASE && @@ -2411,7 +2390,7 @@ CaseClause* Parser::ParseCaseClause(bool* default_seen_ptr, bool* ok) { statements->Add(stat, zone()); } - return new(zone()) CaseClause(isolate(), label, statements, pos); + return factory()->NewCaseClause(label, statements, pos); } @@ -2420,7 +2399,8 @@ SwitchStatement* 
Parser::ParseSwitchStatement(ZoneStringList* labels, // SwitchStatement :: // 'switch' '(' Expression ')' '{' CaseClause* '}' - SwitchStatement* statement = factory()->NewSwitchStatement(labels); + SwitchStatement* statement = + factory()->NewSwitchStatement(labels, peek_position()); Target target(&this->target_stack_, statement); Expect(Token::SWITCH, CHECK_OK); @@ -2447,7 +2427,7 @@ Statement* Parser::ParseThrowStatement(bool* ok) { // 'throw' Expression ';' Expect(Token::THROW, CHECK_OK); - int pos = scanner().location().beg_pos; + int pos = position(); if (scanner().HasAnyLineTerminatorBeforeNext()) { ReportMessage("newline_after_throw", Vector<const char*>::empty()); *ok = false; @@ -2456,7 +2436,8 @@ Statement* Parser::ParseThrowStatement(bool* ok) { Expression* exception = ParseExpression(true, CHECK_OK); ExpectSemicolon(CHECK_OK); - return factory()->NewExpressionStatement(factory()->NewThrow(exception, pos)); + return factory()->NewExpressionStatement( + factory()->NewThrow(exception, pos), pos); } @@ -2473,6 +2454,7 @@ TryStatement* Parser::ParseTryStatement(bool* ok) { // 'finally' Block Expect(Token::TRY, CHECK_OK); + int pos = position(); TargetCollector try_collector(zone()); Block* try_block; @@ -2544,9 +2526,10 @@ TryStatement* Parser::ParseTryStatement(bool* ok) { ASSERT(catch_scope != NULL && catch_variable != NULL); int index = current_function_state_->NextHandlerIndex(); TryCatchStatement* statement = factory()->NewTryCatchStatement( - index, try_block, catch_scope, catch_variable, catch_block); + index, try_block, catch_scope, catch_variable, catch_block, + RelocInfo::kNoPosition); statement->set_escaping_targets(try_collector.targets()); - try_block = factory()->NewBlock(NULL, 1, false); + try_block = factory()->NewBlock(NULL, 1, false, RelocInfo::kNoPosition); try_block->AddStatement(statement, zone()); catch_block = NULL; // Clear to indicate it's been handled. } @@ -2557,11 +2540,12 @@ TryStatement* Parser::ParseTryStatement(bool* ok) { ASSERT(catch_scope != NULL && catch_variable != NULL); int index = current_function_state_->NextHandlerIndex(); result = factory()->NewTryCatchStatement( - index, try_block, catch_scope, catch_variable, catch_block); + index, try_block, catch_scope, catch_variable, catch_block, pos); } else { ASSERT(finally_block != NULL); int index = current_function_state_->NextHandlerIndex(); - result = factory()->NewTryFinallyStatement(index, try_block, finally_block); + result = factory()->NewTryFinallyStatement( + index, try_block, finally_block, pos); // Combine the jump targets of the try block and the possible catch block. 
try_collector.targets()->AddAll(*catch_collector.targets(), zone()); } @@ -2576,7 +2560,8 @@ DoWhileStatement* Parser::ParseDoWhileStatement(ZoneStringList* labels, // DoStatement :: // 'do' Statement 'while' '(' Expression ')' ';' - DoWhileStatement* loop = factory()->NewDoWhileStatement(labels); + DoWhileStatement* loop = + factory()->NewDoWhileStatement(labels, peek_position()); Target target(&this->target_stack_, loop); Expect(Token::DO, CHECK_OK); @@ -2584,11 +2569,6 @@ DoWhileStatement* Parser::ParseDoWhileStatement(ZoneStringList* labels, Expect(Token::WHILE, CHECK_OK); Expect(Token::LPAREN, CHECK_OK); - if (loop != NULL) { - int position = scanner().location().beg_pos; - loop->set_condition_position(position); - } - Expression* cond = ParseExpression(true, CHECK_OK); Expect(Token::RPAREN, CHECK_OK); @@ -2607,7 +2587,7 @@ WhileStatement* Parser::ParseWhileStatement(ZoneStringList* labels, bool* ok) { // WhileStatement :: // 'while' '(' Expression ')' Statement - WhileStatement* loop = factory()->NewWhileStatement(labels); + WhileStatement* loop = factory()->NewWhileStatement(labels, peek_position()); Target target(&this->target_stack_, loop); Expect(Token::WHILE, CHECK_OK); @@ -2666,8 +2646,8 @@ void Parser::InitializeForEachStatement(ForEachStatement* stmt, // var result = iterator.next(); { Expression* iterator_proxy = factory()->NewVariableProxy(iterator); - Expression* next_literal = - factory()->NewLiteral(heap_factory->next_string()); + Expression* next_literal = factory()->NewLiteral( + heap_factory->next_string(), RelocInfo::kNoPosition); Expression* next_property = factory()->NewProperty( iterator_proxy, next_literal, RelocInfo::kNoPosition); ZoneList<Expression*>* next_arguments = @@ -2681,8 +2661,8 @@ void Parser::InitializeForEachStatement(ForEachStatement* stmt, // result.done { - Expression* done_literal = - factory()->NewLiteral(heap_factory->done_string()); + Expression* done_literal = factory()->NewLiteral( + heap_factory->done_string(), RelocInfo::kNoPosition); Expression* result_proxy = factory()->NewVariableProxy(result); result_done = factory()->NewProperty( result_proxy, done_literal, RelocInfo::kNoPosition); @@ -2690,8 +2670,8 @@ void Parser::InitializeForEachStatement(ForEachStatement* stmt, // each = result.value { - Expression* value_literal = - factory()->NewLiteral(heap_factory->value_string()); + Expression* value_literal = factory()->NewLiteral( + heap_factory->value_string(), RelocInfo::kNoPosition); Expression* result_proxy = factory()->NewVariableProxy(result); Expression* result_value = factory()->NewProperty( result_proxy, value_literal, RelocInfo::kNoPosition); @@ -2711,6 +2691,7 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) { // ForStatement :: // 'for' '(' Expression? ';' Expression? ';' Expression? ')' Statement + int pos = peek_position(); Statement* init = NULL; // Create an in-between scope for let-bound iteration variables. @@ -2735,7 +2716,8 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) { if (!name.is_null() && CheckInOrOf(accept_OF, &mode)) { Interface* interface = is_const ? 
Interface::NewConst() : Interface::NewValue(); - ForEachStatement* loop = factory()->NewForEachStatement(mode, labels); + ForEachStatement* loop = + factory()->NewForEachStatement(mode, labels, pos); Target target(&this->target_stack_, loop); Expression* enumerable = ParseExpression(true, CHECK_OK); @@ -2745,7 +2727,8 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) { top_scope_->NewUnresolved(factory(), name, interface); Statement* body = ParseStatement(NULL, CHECK_OK); InitializeForEachStatement(loop, each, enumerable, body); - Block* result = factory()->NewBlock(NULL, 2, false); + Block* result = + factory()->NewBlock(NULL, 2, false, RelocInfo::kNoPosition); result->AddStatement(variable_statement, zone()); result->AddStatement(loop, zone()); top_scope_ = saved_scope; @@ -2789,7 +2772,8 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) { Handle<String> tempname = heap_factory->InternalizeString(tempstr); Variable* temp = top_scope_->DeclarationScope()->NewTemporary(tempname); VariableProxy* temp_proxy = factory()->NewVariableProxy(temp); - ForEachStatement* loop = factory()->NewForEachStatement(mode, labels); + ForEachStatement* loop = + factory()->NewForEachStatement(mode, labels, pos); Target target(&this->target_stack_, loop); // The expression does not see the loop variable. @@ -2801,11 +2785,12 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) { VariableProxy* each = top_scope_->NewUnresolved(factory(), name, Interface::NewValue()); Statement* body = ParseStatement(NULL, CHECK_OK); - Block* body_block = factory()->NewBlock(NULL, 3, false); + Block* body_block = + factory()->NewBlock(NULL, 3, false, RelocInfo::kNoPosition); Assignment* assignment = factory()->NewAssignment( Token::ASSIGN, each, temp_proxy, RelocInfo::kNoPosition); - Statement* assignment_statement = - factory()->NewExpressionStatement(assignment); + Statement* assignment_statement = factory()->NewExpressionStatement( + assignment, RelocInfo::kNoPosition); body_block->AddStatement(variable_statement, zone()); body_block->AddStatement(assignment_statement, zone()); body_block->AddStatement(body, zone()); @@ -2835,7 +2820,8 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) { isolate()->factory()->invalid_lhs_in_for_in_string(); expression = NewThrowReferenceError(message); } - ForEachStatement* loop = factory()->NewForEachStatement(mode, labels); + ForEachStatement* loop = + factory()->NewForEachStatement(mode, labels, pos); Target target(&this->target_stack_, loop); Expression* enumerable = ParseExpression(true, CHECK_OK); @@ -2851,13 +2837,14 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) { return loop; } else { - init = factory()->NewExpressionStatement(expression); + init = factory()->NewExpressionStatement( + expression, RelocInfo::kNoPosition); } } } // Standard 'for' loop - ForStatement* loop = factory()->NewForStatement(labels); + ForStatement* loop = factory()->NewForStatement(labels, pos); Target target(&this->target_stack_, loop); // Parsed initializer at this point. 
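A recurring pattern across these parser hunks: the explicit set_statement_pos() stamping is gone, and each production instead captures its source position up front, via peek_position() before consuming a token or position() just after, and hands it straight to the AST factory. A toy illustration with invented types:

#include <cstdio>
#include <string>
#include <vector>

struct Token { std::string text; int pos; };
struct Node { int pos; std::string kind; };

class ToyParser {
 public:
  explicit ToyParser(std::vector<Token> tokens) : tokens_(tokens), i_(0) {}

  // peek_position(): position of the token we are about to consume.
  int peek_position() const { return tokens_[i_].pos; }
  // position(): position of the token we just consumed.
  int position() const { return tokens_[i_ - 1].pos; }
  const Token& Next() { return tokens_[i_++]; }

  Node ParseIf() {
    int pos = peek_position();  // capture before consuming 'if'
    Next();                     // consume 'if'
    // ... parse condition and branches ...
    return Node{pos, "IfStatement"};  // factory gets the position up front
  }

 private:
  std::vector<Token> tokens_;
  size_t i_;
};

int main() {
  ToyParser p({{"if", 10}, {"(", 13}});
  Node n = p.ParseIf();
  std::printf("%s at %d\n", n.kind.c_str(), n.pos);  // IfStatement at 10
  return 0;
}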
@@ -2872,7 +2859,7 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) { Statement* next = NULL; if (peek() != Token::RPAREN) { Expression* exp = ParseExpression(true, CHECK_OK); - next = factory()->NewExpressionStatement(exp); + next = factory()->NewExpressionStatement(exp, RelocInfo::kNoPosition); } Expect(Token::RPAREN, CHECK_OK); @@ -2892,7 +2879,7 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) { // for (; c; n) b // } ASSERT(init != NULL); - Block* result = factory()->NewBlock(NULL, 2, false); + Block* result = factory()->NewBlock(NULL, 2, false, RelocInfo::kNoPosition); result->AddStatement(init, zone()); result->AddStatement(loop, zone()); result->set_scope(for_scope); @@ -2914,10 +2901,9 @@ Expression* Parser::ParseExpression(bool accept_IN, bool* ok) { Expression* result = ParseAssignmentExpression(accept_IN, CHECK_OK); while (peek() == Token::COMMA) { Expect(Token::COMMA, CHECK_OK); - int position = scanner().location().beg_pos; + int pos = position(); Expression* right = ParseAssignmentExpression(accept_IN, CHECK_OK); - result = - factory()->NewBinaryOperation(Token::COMMA, result, right, position); + result = factory()->NewBinaryOperation(Token::COMMA, result, right, pos); } return result; } @@ -2961,7 +2947,7 @@ Expression* Parser::ParseAssignmentExpression(bool accept_IN, bool* ok) { MarkAsLValue(expression); Token::Value op = Next(); // Get assignment operator. - int pos = scanner().location().beg_pos; + int pos = position(); Expression* right = ParseAssignmentExpression(accept_IN, CHECK_OK); // TODO(1231235): We try to estimate the set of properties set by @@ -3005,15 +2991,14 @@ Expression* Parser::ParseAssignmentExpression(bool accept_IN, bool* ok) { Expression* Parser::ParseYieldExpression(bool* ok) { // YieldExpression :: // 'yield' '*'? AssignmentExpression - int position = scanner().peek_location().beg_pos; + int pos = peek_position(); Expect(Token::YIELD, CHECK_OK); Yield::Kind kind = Check(Token::MUL) ? Yield::DELEGATING : Yield::SUSPEND; Expression* generator_object = factory()->NewVariableProxy( current_function_state_->generator_object_variable()); Expression* expression = ParseAssignmentExpression(false, CHECK_OK); - Yield* yield = - factory()->NewYield(generator_object, expression, kind, position); + Yield* yield = factory()->NewYield(generator_object, expression, kind, pos); if (kind == Yield::DELEGATING) { yield->set_index(current_function_state_->NextHandlerIndex()); } @@ -3027,6 +3012,7 @@ Expression* Parser::ParseConditionalExpression(bool accept_IN, bool* ok) { // LogicalOrExpression // LogicalOrExpression '?' AssignmentExpression ':' AssignmentExpression + int pos = peek_position(); // We start using the binary expression parser for prec >= 4 only! Expression* expression = ParseBinaryExpression(4, accept_IN, CHECK_OK); if (peek() != Token::CONDITIONAL) return expression; @@ -3034,17 +3020,14 @@ Expression* Parser::ParseConditionalExpression(bool accept_IN, bool* ok) { // In parsing the first assignment expression in conditional // expressions we always accept the 'in' keyword; see ECMA-262, // section 11.12, page 58. 
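The ParseBinaryExpression hunks that follow keep V8's precedence-climbing loop and its on-the-spot folding of number-literal operands, now threading pos into each folded literal. Here is a self-contained sketch of that parsing shape, reduced to four operators on plain doubles; the names are illustrative, not V8's.

#include <cstdio>
#include <vector>

// Tokens are either numbers or one of + - * / with the usual precedences.
struct Tok { char op; double num; };  // op == 0 means a number token

static int Precedence(char op) {
  switch (op) {
    case '+': case '-': return 1;
    case '*': case '/': return 2;
    default: return 0;  // 0 terminates binary-expression parsing
  }
}

class Folder {
 public:
  explicit Folder(std::vector<Tok> toks) : toks_(toks), i_(0) {}

  // Precedence climbing as in ParseBinaryExpression: consume all operators
  // of precedence >= prec, folding literal operands on the spot.
  double ParseBinary(int prec) {
    double x = Primary();
    for (int prec1 = Precedence(Peek()); prec1 >= prec; prec1--) {
      while (Precedence(Peek()) == prec1) {
        char op = Next().op;
        double y = ParseBinary(prec1 + 1);
        switch (op) {  // the folding V8 does when both operands are literals
          case '+': x = x + y; break;
          case '-': x = x - y; break;
          case '*': x = x * y; break;
          case '/': x = x / y; break;
        }
      }
    }
    return x;
  }

 private:
  char Peek() const { return i_ < toks_.size() ? toks_[i_].op : 0; }
  const Tok& Next() { return toks_[i_++]; }
  double Primary() { return Next().num; }

  std::vector<Tok> toks_;
  size_t i_;
};

int main() {
  // 1 + 2 * 3 - 4  =>  3
  Folder f({{0, 1}, {'+', 0}, {0, 2}, {'*', 0}, {0, 3}, {'-', 0}, {0, 4}});
  std::printf("%g\n", f.ParseBinary(1));
  return 0;
}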
- int left_position = scanner().peek_location().beg_pos; Expression* left = ParseAssignmentExpression(true, CHECK_OK); Expect(Token::COLON, CHECK_OK); - int right_position = scanner().peek_location().beg_pos; Expression* right = ParseAssignmentExpression(accept_IN, CHECK_OK); - return factory()->NewConditional( - expression, left, right, left_position, right_position); + return factory()->NewConditional(expression, left, right, pos); } -static int Precedence(Token::Value tok, bool accept_IN) { +int ParserBase::Precedence(Token::Value tok, bool accept_IN) { if (tok == Token::IN && !accept_IN) return 0; // 0 precedence will terminate binary expression parsing @@ -3060,7 +3043,7 @@ Expression* Parser::ParseBinaryExpression(int prec, bool accept_IN, bool* ok) { // prec1 >= 4 while (Precedence(peek(), accept_IN) == prec1) { Token::Value op = Next(); - int position = scanner().location().beg_pos; + int pos = position(); Expression* y = ParseBinaryExpression(prec1 + 1, accept_IN, CHECK_OK); // Compute some expressions involving only number literals. @@ -3071,47 +3054,47 @@ Expression* Parser::ParseBinaryExpression(int prec, bool accept_IN, bool* ok) { switch (op) { case Token::ADD: - x = factory()->NewNumberLiteral(x_val + y_val); + x = factory()->NewNumberLiteral(x_val + y_val, pos); continue; case Token::SUB: - x = factory()->NewNumberLiteral(x_val - y_val); + x = factory()->NewNumberLiteral(x_val - y_val, pos); continue; case Token::MUL: - x = factory()->NewNumberLiteral(x_val * y_val); + x = factory()->NewNumberLiteral(x_val * y_val, pos); continue; case Token::DIV: - x = factory()->NewNumberLiteral(x_val / y_val); + x = factory()->NewNumberLiteral(x_val / y_val, pos); continue; case Token::BIT_OR: { int value = DoubleToInt32(x_val) | DoubleToInt32(y_val); - x = factory()->NewNumberLiteral(value); + x = factory()->NewNumberLiteral(value, pos); continue; } case Token::BIT_AND: { int value = DoubleToInt32(x_val) & DoubleToInt32(y_val); - x = factory()->NewNumberLiteral(value); + x = factory()->NewNumberLiteral(value, pos); continue; } case Token::BIT_XOR: { int value = DoubleToInt32(x_val) ^ DoubleToInt32(y_val); - x = factory()->NewNumberLiteral(value); + x = factory()->NewNumberLiteral(value, pos); continue; } case Token::SHL: { int value = DoubleToInt32(x_val) << (DoubleToInt32(y_val) & 0x1f); - x = factory()->NewNumberLiteral(value); + x = factory()->NewNumberLiteral(value, pos); continue; } case Token::SHR: { uint32_t shift = DoubleToInt32(y_val) & 0x1f; uint32_t value = DoubleToUint32(x_val) >> shift; - x = factory()->NewNumberLiteral(value); + x = factory()->NewNumberLiteral(value, pos); continue; } case Token::SAR: { uint32_t shift = DoubleToInt32(y_val) & 0x1f; int value = ArithmeticShiftRight(DoubleToInt32(x_val), shift); - x = factory()->NewNumberLiteral(value); + x = factory()->NewNumberLiteral(value, pos); continue; } default: @@ -3130,15 +3113,15 @@ Expression* Parser::ParseBinaryExpression(int prec, bool accept_IN, bool* ok) { case Token::NE_STRICT: cmp = Token::EQ_STRICT; break; default: break; } - x = factory()->NewCompareOperation(cmp, x, y, position); + x = factory()->NewCompareOperation(cmp, x, y, pos); if (cmp != op) { // The comparison was negated - add a NOT. - x = factory()->NewUnaryOperation(Token::NOT, x, position); + x = factory()->NewUnaryOperation(Token::NOT, x, pos); } } else { // We have a "normal" binary operation. 
- x = factory()->NewBinaryOperation(op, x, y, position); + x = factory()->NewBinaryOperation(op, x, y, pos); } } } @@ -3162,7 +3145,7 @@ Expression* Parser::ParseUnaryExpression(bool* ok) { Token::Value op = peek(); if (Token::IsUnaryOp(op)) { op = Next(); - int position = scanner().location().beg_pos; + int pos = position(); Expression* expression = ParseUnaryExpression(CHECK_OK); if (expression != NULL && (expression->AsLiteral() != NULL)) { @@ -3170,9 +3153,8 @@ Expression* Parser::ParseUnaryExpression(bool* ok) { if (op == Token::NOT) { // Convert the literal to a boolean condition and negate it. bool condition = literal->BooleanValue(); - Handle<Object> result(isolate()->heap()->ToBoolean(!condition), - isolate()); - return factory()->NewLiteral(result); + Handle<Object> result = isolate()->factory()->ToBoolean(!condition); + return factory()->NewLiteral(result, pos); } else if (literal->IsNumber()) { // Compute some expressions involving only number literals. double value = literal->Number(); @@ -3180,9 +3162,9 @@ Expression* Parser::ParseUnaryExpression(bool* ok) { case Token::ADD: return expression; case Token::SUB: - return factory()->NewNumberLiteral(-value); + return factory()->NewNumberLiteral(-value, pos); case Token::BIT_NOT: - return factory()->NewNumberLiteral(~DoubleToInt32(value)); + return factory()->NewNumberLiteral(~DoubleToInt32(value), pos); default: break; } @@ -3205,25 +3187,25 @@ Expression* Parser::ParseUnaryExpression(bool* ok) { if (op == Token::ADD) { return factory()->NewBinaryOperation(Token::MUL, expression, - factory()->NewNumberLiteral(1), - position); + factory()->NewNumberLiteral(1, pos), + pos); } // The same idea for '-foo' => 'foo*(-1)'. if (op == Token::SUB) { return factory()->NewBinaryOperation(Token::MUL, expression, - factory()->NewNumberLiteral(-1), - position); + factory()->NewNumberLiteral(-1, pos), + pos); } // ...and one more time for '~foo' => 'foo^(~0)'. if (op == Token::BIT_NOT) { return factory()->NewBinaryOperation(Token::BIT_XOR, expression, - factory()->NewNumberLiteral(~0), - position); + factory()->NewNumberLiteral(~0, pos), + pos); } - return factory()->NewUnaryOperation(op, expression, position); + return factory()->NewUnaryOperation(op, expression, pos); } else if (Token::IsCountOp(op)) { op = Next(); @@ -3244,11 +3226,10 @@ Expression* Parser::ParseUnaryExpression(bool* ok) { } MarkAsLValue(expression); - int position = scanner().location().beg_pos; return factory()->NewCountOperation(op, true /* prefix */, expression, - position); + position()); } else { return ParsePostfixExpression(ok); @@ -3280,12 +3261,11 @@ Expression* Parser::ParsePostfixExpression(bool* ok) { MarkAsLValue(expression); Token::Value next = Next(); - int position = scanner().location().beg_pos; expression = factory()->NewCountOperation(next, false /* postfix */, expression, - position); + position()); } return expression; } @@ -3306,7 +3286,7 @@ Expression* Parser::ParseLeftHandSideExpression(bool* ok) { switch (peek()) { case Token::LBRACK: { Consume(Token::LBRACK); - int pos = scanner().location().beg_pos; + int pos = position(); Expression* index = ParseExpression(true, CHECK_OK); result = factory()->NewProperty(result, index, pos); Expect(Token::RBRACK, CHECK_OK); @@ -3318,14 +3298,14 @@ Expression* Parser::ParseLeftHandSideExpression(bool* ok) { if (scanner().current_token() == Token::IDENTIFIER) { // For call of an identifier we want to report position of // the identifier as position of the call in the stack trace. 
- pos = scanner().location().beg_pos; + pos = position(); } else { // For other kinds of calls we record position of the parenthesis as // position of the call. Note that this is extremely important for // expressions of the form function(){...}() for which call position // should not point to the closing brace otherwise it will intersect // with positions recorded for function literal and confuse debugger. - pos = scanner().peek_location().beg_pos; + pos = peek_position(); // Also the trailing parenthesis are a hint that the function will // be called immediately. If we happen to have parsed a preceding // function literal eagerly, we can also compile it eagerly. @@ -3354,10 +3334,10 @@ Expression* Parser::ParseLeftHandSideExpression(bool* ok) { case Token::PERIOD: { Consume(Token::PERIOD); - int pos = scanner().location().beg_pos; + int pos = position(); Handle<String> name = ParseIdentifierName(CHECK_OK); - result = - factory()->NewProperty(result, factory()->NewLiteral(name), pos); + result = factory()->NewProperty( + result, factory()->NewLiteral(name, pos), pos); if (fni_ != NULL) fni_->PushLiteralName(name); break; } @@ -3382,7 +3362,7 @@ Expression* Parser::ParseNewPrefix(PositionStack* stack, bool* ok) { // member expression parser, which is only allowed to match argument // lists as long as it has 'new' prefixes left Expect(Token::NEW, CHECK_OK); - PositionStack::Element pos(stack, scanner().location().beg_pos); + PositionStack::Element pos(stack, position()); Expression* result; if (peek() == Token::NEW) { @@ -3421,7 +3401,7 @@ Expression* Parser::ParseMemberWithNewPrefixesExpression(PositionStack* stack, Expression* result = NULL; if (peek() == Token::FUNCTION) { Expect(Token::FUNCTION, CHECK_OK); - int function_token_position = scanner().location().beg_pos; + int function_token_position = position(); bool is_generator = allow_generators() && Check(Token::MUL); Handle<String> name; bool is_strict_reserved_name = false; @@ -3446,7 +3426,7 @@ Expression* Parser::ParseMemberWithNewPrefixesExpression(PositionStack* stack, switch (peek()) { case Token::LBRACK: { Consume(Token::LBRACK); - int pos = scanner().location().beg_pos; + int pos = position(); Expression* index = ParseExpression(true, CHECK_OK); result = factory()->NewProperty(result, index, pos); if (fni_ != NULL) { @@ -3462,10 +3442,10 @@ Expression* Parser::ParseMemberWithNewPrefixesExpression(PositionStack* stack, } case Token::PERIOD: { Consume(Token::PERIOD); - int pos = scanner().location().beg_pos; + int pos = position(); Handle<String> name = ParseIdentifierName(CHECK_OK); - result = - factory()->NewProperty(result, factory()->NewLiteral(name), pos); + result = factory()->NewProperty( + result, factory()->NewLiteral(name, pos), pos); if (fni_ != NULL) fni_->PushLiteralName(name); break; } @@ -3473,8 +3453,8 @@ Expression* Parser::ParseMemberWithNewPrefixesExpression(PositionStack* stack, if ((stack == NULL) || stack->is_empty()) return result; // Consume one of the new prefixes (already parsed). 
ZoneList<Expression*>* args = ParseArguments(CHECK_OK); - int last = stack->pop(); - result = factory()->NewCallNew(result, args, last); + int pos = stack->pop(); + result = factory()->NewCallNew(result, args, pos); break; } default: @@ -3491,9 +3471,10 @@ DebuggerStatement* Parser::ParseDebuggerStatement(bool* ok) { // DebuggerStatement :: // 'debugger' ';' + int pos = peek_position(); Expect(Token::DEBUGGER, CHECK_OK); ExpectSemicolon(CHECK_OK); - return factory()->NewDebuggerStatement(); + return factory()->NewDebuggerStatement(pos); } @@ -3501,7 +3482,7 @@ void Parser::ReportUnexpectedToken(Token::Value token) { // We don't report stack overflows here, to avoid increasing the // stack depth even further. Instead we report it after parsing is // over, in ParseProgram/ParseJson. - if (token == Token::ILLEGAL && stack_overflow_) return; + if (token == Token::ILLEGAL && stack_overflow()) return; // Four of the tokens are treated specially switch (token) { case Token::EOS: @@ -3555,6 +3536,7 @@ Expression* Parser::ParsePrimaryExpression(bool* ok) { // RegExpLiteral // '(' Expression ')' + int pos = peek_position(); Expression* result = NULL; switch (peek()) { case Token::THIS: { @@ -3565,17 +3547,17 @@ Expression* Parser::ParsePrimaryExpression(bool* ok) { case Token::NULL_LITERAL: Consume(Token::NULL_LITERAL); - result = factory()->NewLiteral(isolate()->factory()->null_value()); + result = factory()->NewLiteral(isolate()->factory()->null_value(), pos); break; case Token::TRUE_LITERAL: Consume(Token::TRUE_LITERAL); - result = factory()->NewLiteral(isolate()->factory()->true_value()); + result = factory()->NewLiteral(isolate()->factory()->true_value(), pos); break; case Token::FALSE_LITERAL: Consume(Token::FALSE_LITERAL); - result = factory()->NewLiteral(isolate()->factory()->false_value()); + result = factory()->NewLiteral(isolate()->factory()->false_value(), pos); break; case Token::IDENTIFIER: @@ -3589,8 +3571,7 @@ Expression* Parser::ParsePrimaryExpression(bool* ok) { PrintF("# Variable %s ", name->ToAsciiArray()); #endif Interface* interface = Interface::NewUnknown(zone()); - result = top_scope_->NewUnresolved( - factory(), name, interface, scanner().location().beg_pos); + result = top_scope_->NewUnresolved(factory(), name, interface, pos); break; } @@ -3601,14 +3582,14 @@ Expression* Parser::ParsePrimaryExpression(bool* ok) { scanner().literal_ascii_string(), ALLOW_HEX | ALLOW_OCTAL | ALLOW_IMPLICIT_OCTAL | ALLOW_BINARY); - result = factory()->NewNumberLiteral(value); + result = factory()->NewNumberLiteral(value, pos); break; } case Token::STRING: { Consume(Token::STRING); Handle<String> symbol = GetSymbol(); - result = factory()->NewLiteral(symbol); + result = factory()->NewLiteral(symbol, pos); if (fni_ != NULL) fni_->PushLiteralName(symbol); break; } @@ -3662,12 +3643,13 @@ Expression* Parser::ParseArrayLiteral(bool* ok) { // ArrayLiteral :: // '[' Expression? 
(',' Expression?)* ']' + int pos = peek_position(); ZoneList<Expression*>* values = new(zone()) ZoneList<Expression*>(4, zone()); Expect(Token::LBRACK, CHECK_OK); while (peek() != Token::RBRACK) { Expression* elem; if (peek() == Token::COMMA) { - elem = GetLiteralTheHole(); + elem = GetLiteralTheHole(peek_position()); } else { elem = ParseAssignmentExpression(true, CHECK_OK); } @@ -3729,7 +3711,7 @@ Expression* Parser::ParseArrayLiteral(bool* ok) { literals->set(1, *element_values); return factory()->NewArrayLiteral( - literals, values, literal_index, is_simple, depth); + literals, values, literal_index, is_simple, depth, pos); } @@ -3793,84 +3775,6 @@ Handle<Object> Parser::GetBoilerplateValue(Expression* expression) { } -// Validation per 11.1.5 Object Initialiser -class ObjectLiteralPropertyChecker { - public: - ObjectLiteralPropertyChecker(Parser* parser, LanguageMode language_mode) : - props_(Literal::Match), - parser_(parser), - language_mode_(language_mode) { - } - - void CheckProperty( - ObjectLiteral::Property* property, - Scanner::Location loc, - bool* ok); - - private: - enum PropertyKind { - kGetAccessor = 0x01, - kSetAccessor = 0x02, - kAccessor = kGetAccessor | kSetAccessor, - kData = 0x04 - }; - - static intptr_t GetPropertyKind(ObjectLiteral::Property* property) { - switch (property->kind()) { - case ObjectLiteral::Property::GETTER: - return kGetAccessor; - case ObjectLiteral::Property::SETTER: - return kSetAccessor; - default: - return kData; - } - } - - HashMap props_; - Parser* parser_; - LanguageMode language_mode_; -}; - - -void ObjectLiteralPropertyChecker::CheckProperty( - ObjectLiteral::Property* property, - Scanner::Location loc, - bool* ok) { - ASSERT(property != NULL); - Literal* literal = property->key(); - HashMap::Entry* entry = props_.Lookup(literal, literal->Hash(), true); - intptr_t prev = reinterpret_cast<intptr_t> (entry->value); - intptr_t curr = GetPropertyKind(property); - - // Duplicate data properties are illegal in strict or extended mode. - if (language_mode_ != CLASSIC_MODE && (curr & prev & kData) != 0) { - parser_->ReportMessageAt(loc, "strict_duplicate_property", - Vector<const char*>::empty()); - *ok = false; - return; - } - // Data property conflicting with an accessor. - if (((curr & kData) && (prev & kAccessor)) || - ((prev & kData) && (curr & kAccessor))) { - parser_->ReportMessageAt(loc, "accessor_data_property", - Vector<const char*>::empty()); - *ok = false; - return; - } - // Two accessors of the same type conflicting - if ((curr & prev & kAccessor) != 0) { - parser_->ReportMessageAt(loc, "accessor_get_set", - Vector<const char*>::empty()); - *ok = false; - return; - } - - // Update map - entry->value = reinterpret_cast<void*> (prev | curr); - *ok = true; -} - - void Parser::BuildObjectLiteralConstantProperties( ZoneList<ObjectLiteral::Property*>* properties, Handle<FixedArray> constant_properties, @@ -3943,41 +3847,6 @@ void Parser::BuildObjectLiteralConstantProperties( } -ObjectLiteral::Property* Parser::ParseObjectLiteralGetSet(bool is_getter, - bool* ok) { - // Special handling of getter and setter syntax: - // { ... , get foo() { ... }, ... , set foo(v) { ... v ... } , ... } - // We have already read the "get" or "set" keyword. 
- Token::Value next = Next(); - bool is_keyword = Token::IsKeyword(next); - if (next == Token::IDENTIFIER || next == Token::NUMBER || - next == Token::FUTURE_RESERVED_WORD || - next == Token::FUTURE_STRICT_RESERVED_WORD || - next == Token::STRING || is_keyword) { - Handle<String> name; - if (is_keyword) { - name = isolate_->factory()->InternalizeUtf8String(Token::String(next)); - } else { - name = GetSymbol(); - } - FunctionLiteral* value = - ParseFunctionLiteral(name, - false, // reserved words are allowed here - false, // not a generator - RelocInfo::kNoPosition, - FunctionLiteral::ANONYMOUS_EXPRESSION, - CHECK_OK); - // Allow any number of parameters for compatibilty with JSC. - // Specification only allows zero parameters for get and one for set. - return factory()->NewObjectLiteralProperty(is_getter, value); - } else { - ReportUnexpectedToken(next); - *ok = false; - return NULL; - } -} - - Expression* Parser::ParseObjectLiteral(bool* ok) { // ObjectLiteral :: // '{' ( @@ -3985,12 +3854,13 @@ Expression* Parser::ParseObjectLiteral(bool* ok) { // | (('get' | 'set') (IdentifierName | String | Number) FunctionLiteral) // )*[','] '}' + int pos = peek_position(); ZoneList<ObjectLiteral::Property*>* properties = new(zone()) ZoneList<ObjectLiteral::Property*>(4, zone()); int number_of_boilerplate_properties = 0; bool has_function = false; - ObjectLiteralPropertyChecker checker(this, top_scope_->language_mode()); + ObjectLiteralChecker checker(this, top_scope_->language_mode()); Expect(Token::LBRACE, CHECK_OK); @@ -3999,9 +3869,7 @@ Expression* Parser::ParseObjectLiteral(bool* ok) { Literal* key = NULL; Token::Value next = peek(); - - // Location of the property name token - Scanner::Location loc = scanner().peek_location(); + int next_pos = peek_position(); switch (next) { case Token::FUTURE_RESERVED_WORD: @@ -4014,27 +3882,54 @@ Expression* Parser::ParseObjectLiteral(bool* ok) { if (fni_ != NULL) fni_->PushLiteralName(id); if ((is_getter || is_setter) && peek() != Token::COLON) { - // Update loc to point to the identifier - loc = scanner().peek_location(); - ObjectLiteral::Property* property = - ParseObjectLiteralGetSet(is_getter, CHECK_OK); - if (IsBoilerplateProperty(property)) { - number_of_boilerplate_properties++; - } - // Validate the property. - checker.CheckProperty(property, loc, CHECK_OK); - properties->Add(property, zone()); - if (peek() != Token::RBRACE) Expect(Token::COMMA, CHECK_OK); - - if (fni_ != NULL) { - fni_->Infer(); - fni_->Leave(); - } - continue; // restart the while + // Special handling of getter and setter syntax: + // { ... , get foo() { ... }, ... , set foo(v) { ... v ... } , ... } + // We have already read the "get" or "set" keyword. + Token::Value next = Next(); + bool is_keyword = Token::IsKeyword(next); + if (next != i::Token::IDENTIFIER && + next != i::Token::FUTURE_RESERVED_WORD && + next != i::Token::FUTURE_STRICT_RESERVED_WORD && + next != i::Token::NUMBER && + next != i::Token::STRING && + !is_keyword) { + // Unexpected token. + ReportUnexpectedToken(next); + *ok = false; + return NULL; + } + // Validate the property. + PropertyKind type = is_getter ? kGetterProperty : kSetterProperty; + checker.CheckProperty(next, type, CHECK_OK); + Handle<String> name = is_keyword + ? 
isolate_->factory()->InternalizeUtf8String(Token::String(next)) + : GetSymbol(); + FunctionLiteral* value = + ParseFunctionLiteral(name, + false, // reserved words are allowed here + false, // not a generator + RelocInfo::kNoPosition, + FunctionLiteral::ANONYMOUS_EXPRESSION, + CHECK_OK); + // Allow any number of parameters for compatibilty with JSC. + // Specification only allows zero parameters for get and one for set. + ObjectLiteral::Property* property = + factory()->NewObjectLiteralProperty(is_getter, value, next_pos); + if (IsBoilerplateProperty(property)) { + number_of_boilerplate_properties++; + } + properties->Add(property, zone()); + if (peek() != Token::RBRACE) Expect(Token::COMMA, CHECK_OK); + + if (fni_ != NULL) { + fni_->Infer(); + fni_->Leave(); + } + continue; // restart the while } // Failed to parse as get/set property, so it's just a property // called "get" or "set". - key = factory()->NewLiteral(id); + key = factory()->NewLiteral(id, next_pos); break; } case Token::STRING: { @@ -4043,10 +3938,10 @@ Expression* Parser::ParseObjectLiteral(bool* ok) { if (fni_ != NULL) fni_->PushLiteralName(string); uint32_t index; if (!string.is_null() && string->AsArrayIndex(&index)) { - key = factory()->NewNumberLiteral(index); + key = factory()->NewNumberLiteral(index, next_pos); break; } - key = factory()->NewLiteral(string); + key = factory()->NewLiteral(string, next_pos); break; } case Token::NUMBER: { @@ -4056,14 +3951,14 @@ Expression* Parser::ParseObjectLiteral(bool* ok) { scanner().literal_ascii_string(), ALLOW_HEX | ALLOW_OCTAL | ALLOW_IMPLICIT_OCTAL | ALLOW_BINARY); - key = factory()->NewNumberLiteral(value); + key = factory()->NewNumberLiteral(value, next_pos); break; } default: if (Token::IsKeyword(next)) { Consume(next); Handle<String> string = GetSymbol(); - key = factory()->NewLiteral(string); + key = factory()->NewLiteral(string, next_pos); } else { // Unexpected token. Token::Value next = Next(); @@ -4073,6 +3968,9 @@ Expression* Parser::ParseObjectLiteral(bool* ok) { } } + // Validate the property + checker.CheckProperty(next, kValueProperty, CHECK_OK); + Expect(Token::COLON, CHECK_OK); Expression* value = ParseAssignmentExpression(true, CHECK_OK); @@ -4090,8 +3988,6 @@ Expression* Parser::ParseObjectLiteral(bool* ok) { // Count CONSTANT or COMPUTED properties to maintain the enumeration order. if (IsBoilerplateProperty(property)) number_of_boilerplate_properties++; - // Validate the property - checker.CheckProperty(property, loc, CHECK_OK); properties->Add(property, zone()); // TODO(1240767): Consider allowing trailing comma. 
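// Hedged sketch (editorial, not part of the patch): the hunks above replace
// the post-hoc ObjectLiteralPropertyChecker with an ObjectLiteralChecker that
// validates each property as soon as its key is parsed, classifying it as
// kValueProperty, kGetterProperty or kSetterProperty. The standalone class
// below illustrates that bitmask scheme under those assumptions; every name
// in it is ours, not V8's, though the error strings match the ones the diff
// reports.
#include <map>
#include <string>

enum PropertyKindSketch {
  kGetterProperty = 1 << 0,
  kSetterProperty = 1 << 1,
  kValueProperty  = 1 << 2
};

class ObjectLiteralCheckerSketch {
 public:
  explicit ObjectLiteralCheckerSketch(bool strict_mode)
      : strict_mode_(strict_mode) {}

  // Returns false and names the error on an illegal property combination.
  bool CheckProperty(const std::string& key, PropertyKindSketch kind,
                     const char** error) {
    int prev = seen_[key];  // 0 if this key has not been seen yet
    int curr = kind;
    int accessor = kGetterProperty | kSetterProperty;
    if (strict_mode_ && (prev & curr & kValueProperty) != 0) {
      *error = "strict_duplicate_property";  // duplicate data property
      return false;
    }
    if (((curr & kValueProperty) && (prev & accessor)) ||
        ((prev & kValueProperty) && (curr & accessor))) {
      *error = "accessor_data_property";  // data vs. accessor conflict
      return false;
    }
    if ((prev & curr & accessor) != 0) {
      *error = "accessor_get_set";  // two getters or two setters
      return false;
    }
    seen_[key] = prev | curr;
    return true;
  }

 private:
  bool strict_mode_;
  std::map<std::string, int> seen_;
};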
@@ -4127,11 +4023,13 @@ Expression* Parser::ParseObjectLiteral(bool* ok) { fast_elements, depth, may_store_doubles, - has_function); + has_function, + pos); } Expression* Parser::ParseRegExpLiteral(bool seen_equal, bool* ok) { + int pos = peek_position(); if (!scanner().ScanRegExpPattern(seen_equal)) { Next(); ReportMessage("unterminated_regexp", Vector<const char*>::empty()); @@ -4146,7 +4044,7 @@ Expression* Parser::ParseRegExpLiteral(bool seen_equal, bool* ok) { Handle<String> js_flags = NextLiteralString(TENURED); Next(); - return factory()->NewRegExpLiteral(js_pattern, js_flags, literal_index); + return factory()->NewRegExpLiteral(js_pattern, js_flags, literal_index, pos); } @@ -4271,12 +4169,15 @@ FunctionLiteral* Parser::ParseFunctionLiteral( Handle<String> function_name, bool name_is_strict_reserved, bool is_generator, - int function_token_position, + int function_token_pos, FunctionLiteral::FunctionType function_type, bool* ok) { // Function :: // '(' FormalParameterList? ')' '{' FunctionBody '}' + int pos = function_token_pos == RelocInfo::kNoPosition + ? peek_position() : function_token_pos; + // Anonymous functions were passed either the empty symbol or a null // handle as the function name. Remember if we were passed a non-empty // handle to decide whether to invoke function name inference. @@ -4414,8 +4315,8 @@ FunctionLiteral* Parser::ParseFunctionLiteral( function_name, fvar_mode, true /* is valid LHS */, Variable::NORMAL, kCreatedInitialized, Interface::NewConst()); VariableProxy* proxy = factory()->NewVariableProxy(fvar); - VariableDeclaration* fvar_declaration = - factory()->NewVariableDeclaration(proxy, fvar_mode, top_scope_); + VariableDeclaration* fvar_declaration = factory()->NewVariableDeclaration( + proxy, fvar_mode, top_scope_, RelocInfo::kNoPosition); top_scope_->DeclareFunctionVar(fvar_declaration); } @@ -4436,7 +4337,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral( parenthesized_function_ = false; // The bit was set for this function only. if (is_lazily_compiled) { - int function_block_pos = scanner().location().beg_pos; + int function_block_pos = position(); FunctionEntry entry; if (pre_parse_data_ != NULL) { // If we have pre_parse_data_, we use it to skip parsing the function @@ -4466,11 +4367,10 @@ FunctionLiteral* Parser::ParseFunctionLiteral( // building an AST. This gathers the data needed to build a lazy // function. SingletonLogger logger; - preparser::PreParser::PreParseResult result = - LazyParseFunctionLiteral(&logger); - if (result == preparser::PreParser::kPreParseStackOverflow) { + PreParser::PreParseResult result = LazyParseFunctionLiteral(&logger); + if (result == PreParser::kPreParseStackOverflow) { // Propagate stack overflow. - stack_overflow_ = true; + set_stack_overflow(); *ok = false; return NULL; } @@ -4505,9 +4405,9 @@ FunctionLiteral* Parser::ParseFunctionLiteral( body->Add(factory()->NewExpressionStatement( factory()->NewAssignment(fvar_init_op, fproxy, - factory()->NewThisFunction(), - RelocInfo::kNoPosition)), - zone()); + factory()->NewThisFunction(pos), + RelocInfo::kNoPosition), + RelocInfo::kNoPosition), zone()); } // For generators, allocate and yield an iterator on function entry. 
@@ -4517,7 +4417,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral( CallRuntime* allocation = factory()->NewCallRuntime( isolate()->factory()->empty_string(), Runtime::FunctionForId(Runtime::kCreateJSGeneratorObject), - arguments); + arguments, pos); VariableProxy* init_proxy = factory()->NewVariableProxy( current_function_state_->generator_object_variable()); Assignment* assignment = factory()->NewAssignment( @@ -4526,7 +4426,8 @@ FunctionLiteral* Parser::ParseFunctionLiteral( current_function_state_->generator_object_variable()); Yield* yield = factory()->NewYield( get_proxy, assignment, Yield::INITIAL, RelocInfo::kNoPosition); - body->Add(factory()->NewExpressionStatement(yield), zone()); + body->Add(factory()->NewExpressionStatement( + yield, RelocInfo::kNoPosition), zone()); } ParseSourceElements(body, Token::RBRACE, false, false, CHECK_OK); @@ -4535,10 +4436,11 @@ FunctionLiteral* Parser::ParseFunctionLiteral( VariableProxy* get_proxy = factory()->NewVariableProxy( current_function_state_->generator_object_variable()); Expression *undefined = factory()->NewLiteral( - isolate()->factory()->undefined_value()); + isolate()->factory()->undefined_value(), RelocInfo::kNoPosition); Yield* yield = factory()->NewYield( get_proxy, undefined, Yield::FINAL, RelocInfo::kNoPosition); - body->Add(factory()->NewExpressionStatement(yield), zone()); + body->Add(factory()->NewExpressionStatement( + yield, RelocInfo::kNoPosition), zone()); } materialized_literal_count = function_state.materialized_literal_count(); @@ -4553,9 +4455,8 @@ FunctionLiteral* Parser::ParseFunctionLiteral( if (!top_scope_->is_classic_mode()) { if (IsEvalOrArguments(function_name)) { int start_pos = scope->start_position(); - int position = function_token_position != RelocInfo::kNoPosition - ? function_token_position - : (start_pos > 0 ? start_pos - 1 : start_pos); + int position = function_token_pos != RelocInfo::kNoPosition + ? function_token_pos : (start_pos > 0 ? start_pos - 1 : start_pos); Scanner::Location location = Scanner::Location(position, start_pos); ReportMessageAt(location, "strict_function_name", Vector<const char*>::empty()); @@ -4576,9 +4477,8 @@ FunctionLiteral* Parser::ParseFunctionLiteral( } if (name_is_strict_reserved) { int start_pos = scope->start_position(); - int position = function_token_position != RelocInfo::kNoPosition - ? function_token_position - : (start_pos > 0 ? start_pos - 1 : start_pos); + int position = function_token_pos != RelocInfo::kNoPosition + ? function_token_pos : (start_pos > 0 ? 
start_pos - 1 : start_pos); Scanner::Location location = Scanner::Location(position, start_pos); ReportMessageAt(location, "strict_reserved_word", Vector<const char*>::empty()); @@ -4615,8 +4515,9 @@ FunctionLiteral* Parser::ParseFunctionLiteral( function_type, FunctionLiteral::kIsFunction, parenthesized, - generator); - function_literal->set_function_token_position(function_token_position); + generator, + pos); + function_literal->set_function_token_position(function_token_pos); function_literal->set_ast_properties(&ast_properties); function_literal->set_dont_optimize_reason(dont_optimize_reason); @@ -4625,16 +4526,14 @@ FunctionLiteral* Parser::ParseFunctionLiteral( } -preparser::PreParser::PreParseResult Parser::LazyParseFunctionLiteral( +PreParser::PreParseResult Parser::LazyParseFunctionLiteral( SingletonLogger* logger) { HistogramTimerScope preparse_scope(isolate()->counters()->pre_parse()); ASSERT_EQ(Token::LBRACE, scanner().current_token()); if (reusable_preparser_ == NULL) { intptr_t stack_limit = isolate()->stack_guard()->real_climit(); - reusable_preparser_ = new preparser::PreParser(&scanner_, - NULL, - stack_limit); + reusable_preparser_ = new PreParser(&scanner_, NULL, stack_limit); reusable_preparser_->set_allow_harmony_scoping(allow_harmony_scoping()); reusable_preparser_->set_allow_modules(allow_modules()); reusable_preparser_->set_allow_natives_syntax(allow_natives_syntax()); @@ -4644,7 +4543,7 @@ preparser::PreParser::PreParseResult Parser::LazyParseFunctionLiteral( reusable_preparser_->set_allow_harmony_numeric_literals( allow_harmony_numeric_literals()); } - preparser::PreParser::PreParseResult result = + PreParser::PreParseResult result = reusable_preparser_->PreParseLazyFunction(top_scope_->language_mode(), is_generator(), logger); @@ -4656,6 +4555,7 @@ Expression* Parser::ParseV8Intrinsic(bool* ok) { // CallRuntime :: // '%' Identifier Arguments + int pos = peek_position(); Expect(Token::MOD, CHECK_OK); Handle<String> name = ParseIdentifier(CHECK_OK); ZoneList<Expression*>* args = ParseArguments(CHECK_OK); @@ -4701,11 +4601,11 @@ Expression* Parser::ParseV8Intrinsic(bool* ok) { } // We have a valid intrinsics call or a call to a builtin. 
- return factory()->NewCallRuntime(name, function, args); + return factory()->NewCallRuntime(name, function, args, pos); } -bool Parser::peek_any_identifier() { +bool ParserBase::peek_any_identifier() { Token::Value next = peek(); return next == Token::IDENTIFIER || next == Token::FUTURE_RESERVED_WORD || @@ -4714,35 +4614,9 @@ bool Parser::peek_any_identifier() { } -void Parser::Consume(Token::Value token) { - Token::Value next = Next(); - USE(next); - USE(token); - ASSERT(next == token); -} - - -void Parser::Expect(Token::Value token, bool* ok) { - Token::Value next = Next(); - if (next == token) return; - ReportUnexpectedToken(next); - *ok = false; -} - - -bool Parser::Check(Token::Value token) { - Token::Value next = peek(); - if (next == token) { - Consume(next); - return true; - } - return false; -} - - -bool Parser::CheckContextualKeyword(Vector<const char> keyword) { +bool ParserBase::CheckContextualKeyword(Vector<const char> keyword) { if (peek() == Token::IDENTIFIER && - scanner().is_next_contextual_keyword(keyword)) { + scanner()->is_next_contextual_keyword(keyword)) { Consume(Token::IDENTIFIER); return true; } @@ -4750,7 +4624,7 @@ bool Parser::CheckContextualKeyword(Vector<const char> keyword) { } -void Parser::ExpectSemicolon(bool* ok) { +void ParserBase::ExpectSemicolon(bool* ok) { // Check for automatic semicolon insertion according to // the rules given in ECMA-262, section 7.9, page 21. Token::Value tok = peek(); @@ -4758,7 +4632,7 @@ void Parser::ExpectSemicolon(bool* ok) { Next(); return; } - if (scanner().HasAnyLineTerminatorBeforeNext() || + if (scanner()->HasAnyLineTerminatorBeforeNext() || tok == Token::RBRACE || tok == Token::EOS) { return; @@ -4767,23 +4641,25 @@ void Parser::ExpectSemicolon(bool* ok) { } -void Parser::ExpectContextualKeyword(Vector<const char> keyword, bool* ok) { +void ParserBase::ExpectContextualKeyword(Vector<const char> keyword, bool* ok) { Expect(Token::IDENTIFIER, ok); if (!*ok) return; - if (!scanner().is_literal_contextual_keyword(keyword)) { + if (!scanner()->is_literal_contextual_keyword(keyword)) { + ReportUnexpectedToken(scanner()->current_token()); *ok = false; - ReportUnexpectedToken(scanner().current_token()); } } -Literal* Parser::GetLiteralUndefined() { - return factory()->NewLiteral(isolate()->factory()->undefined_value()); +Literal* Parser::GetLiteralUndefined(int position) { + return factory()->NewLiteral( + isolate()->factory()->undefined_value(), position); } -Literal* Parser::GetLiteralTheHole() { - return factory()->NewLiteral(isolate()->factory()->the_hole_value()); +Literal* Parser::GetLiteralTheHole(int position) { + return factory()->NewLiteral( + isolate()->factory()->the_hole_value(), RelocInfo::kNoPosition); } @@ -4865,14 +4741,11 @@ void Parser::CheckStrictModeLValue(Expression* expression, // Checks whether an octal literal was last seen between beg_pos and end_pos. // If so, reports an error. Only called for strict mode. 
-void Parser::CheckOctalLiteral(int beg_pos, int end_pos, bool* ok) { - Scanner::Location octal = scanner().octal_position(); - if (octal.IsValid() && - beg_pos <= octal.beg_pos && - octal.end_pos <= end_pos) { - ReportMessageAt(octal, "strict_octal_literal", - Vector<const char*>::empty()); - scanner().clear_octal_position(); +void ParserBase::CheckOctalLiteral(int beg_pos, int end_pos, bool* ok) { + Scanner::Location octal = scanner()->octal_position(); + if (octal.IsValid() && beg_pos <= octal.beg_pos && octal.end_pos <= end_pos) { + ReportMessageAt(octal, "strict_octal_literal"); + scanner()->clear_octal_position(); *ok = false; } } @@ -5012,12 +4885,13 @@ Expression* Parser::NewThrowError(Handle<String> constructor, Handle<JSArray> array = isolate()->factory()->NewJSArrayWithElements( elements, FAST_ELEMENTS, TENURED); + int pos = position(); ZoneList<Expression*>* args = new(zone()) ZoneList<Expression*>(2, zone()); - args->Add(factory()->NewLiteral(message), zone()); - args->Add(factory()->NewLiteral(array), zone()); + args->Add(factory()->NewLiteral(message, pos), zone()); + args->Add(factory()->NewLiteral(array, pos), zone()); CallRuntime* call_constructor = - factory()->NewCallRuntime(constructor, NULL, args); - return factory()->NewThrow(call_constructor, scanner().location().beg_pos); + factory()->NewCallRuntime(constructor, NULL, args, pos); + return factory()->NewThrow(call_constructor, pos); } @@ -5907,15 +5781,15 @@ ScriptDataImpl* PreParserApi::PreParse(Isolate* isolate, HistogramTimerScope timer(isolate->counters()->pre_parse()); Scanner scanner(isolate->unicode_cache()); intptr_t stack_limit = isolate->stack_guard()->real_climit(); - preparser::PreParser preparser(&scanner, &recorder, stack_limit); + PreParser preparser(&scanner, &recorder, stack_limit); preparser.set_allow_lazy(true); preparser.set_allow_generators(FLAG_harmony_generators); preparser.set_allow_for_of(FLAG_harmony_iteration); preparser.set_allow_harmony_scoping(FLAG_harmony_scoping); preparser.set_allow_harmony_numeric_literals(FLAG_harmony_numeric_literals); scanner.Initialize(source); - preparser::PreParser::PreParseResult result = preparser.PreParseProgram(); - if (result == preparser::PreParser::kPreParseStackOverflow) { + PreParser::PreParseResult result = preparser.PreParseProgram(); + if (result == PreParser::kPreParseStackOverflow) { isolate->StackOverflow(); return NULL; } diff --git a/deps/v8/src/parser.h b/deps/v8/src/parser.h index 783626ad19..79ce68b615 100644 --- a/deps/v8/src/parser.h +++ b/deps/v8/src/parser.h @@ -425,7 +425,7 @@ class RegExpParser BASE_EMBEDDED { // Forward declaration. 
class SingletonLogger; -class Parser BASE_EMBEDDED { +class Parser : public ParserBase { public: explicit Parser(CompilationInfo* info); ~Parser() { @@ -433,44 +433,12 @@ class Parser BASE_EMBEDDED { reusable_preparser_ = NULL; } - bool allow_natives_syntax() const { return allow_natives_syntax_; } - bool allow_lazy() const { return allow_lazy_; } - bool allow_modules() { return scanner().HarmonyModules(); } - bool allow_harmony_scoping() { return scanner().HarmonyScoping(); } - bool allow_generators() const { return allow_generators_; } - bool allow_for_of() const { return allow_for_of_; } - bool allow_harmony_numeric_literals() { - return scanner().HarmonyNumericLiterals(); - } - - void set_allow_natives_syntax(bool allow) { allow_natives_syntax_ = allow; } - void set_allow_lazy(bool allow) { allow_lazy_ = allow; } - void set_allow_modules(bool allow) { scanner().SetHarmonyModules(allow); } - void set_allow_harmony_scoping(bool allow) { - scanner().SetHarmonyScoping(allow); - } - void set_allow_generators(bool allow) { allow_generators_ = allow; } - void set_allow_for_of(bool allow) { allow_for_of_ = allow; } - void set_allow_harmony_numeric_literals(bool allow) { - scanner().SetHarmonyNumericLiterals(allow); - } - // Parses the source code represented by the compilation info and sets its // function literal. Returns false (and deallocates any allocated AST // nodes) if parsing failed. static bool Parse(CompilationInfo* info) { return Parser(info).Parse(); } bool Parse(); - // Returns NULL if parsing failed. - FunctionLiteral* ParseProgram(); - - void ReportMessageAt(Scanner::Location loc, - const char* message, - Vector<const char*> args); - void ReportMessageAt(Scanner::Location loc, - const char* message, - Vector<Handle<String> > args); - private: static const int kMaxNumFunctionLocals = 131071; // 2^17-1 @@ -568,6 +536,9 @@ class Parser BASE_EMBEDDED { Mode old_mode_; }; + // Returns NULL if parsing failed. + FunctionLiteral* ParseProgram(); + FunctionLiteral* ParseLazy(); FunctionLiteral* ParseLazy(Utf16CharacterStream* source); @@ -584,6 +555,15 @@ class Parser BASE_EMBEDDED { void ReportInvalidPreparseData(Handle<String> name, bool* ok); void ReportMessage(const char* message, Vector<const char*> args); void ReportMessage(const char* message, Vector<Handle<String> > args); + void ReportMessageAt(Scanner::Location location, const char* type) { + ReportMessageAt(location, type, Vector<const char*>::empty()); + } + void ReportMessageAt(Scanner::Location loc, + const char* message, + Vector<const char*> args); + void ReportMessageAt(Scanner::Location loc, + const char* message, + Vector<Handle<String> > args); void set_pre_parse_data(ScriptDataImpl *data) { pre_parse_data_ = data; @@ -671,7 +651,6 @@ class Parser BASE_EMBEDDED { Expression* ParsePrimaryExpression(bool* ok); Expression* ParseArrayLiteral(bool* ok); Expression* ParseObjectLiteral(bool* ok); - ObjectLiteral::Property* ParseObjectLiteralGetSet(bool is_getter, bool* ok); Expression* ParseRegExpLiteral(bool seen_equal, bool* ok); // Populate the constant properties fixed array for a materialized object @@ -711,40 +690,10 @@ class Parser BASE_EMBEDDED { // Magical syntax support. Expression* ParseV8Intrinsic(bool* ok); - INLINE(Token::Value peek()) { - if (stack_overflow_) return Token::ILLEGAL; - return scanner().peek(); - } - - INLINE(Token::Value Next()) { - // BUG 1215673: Find a thread safe way to set a stack limit in - // pre-parse mode. Otherwise, we cannot safely pre-parse from other - // threads. 
- if (stack_overflow_) { - return Token::ILLEGAL; - } - if (StackLimitCheck(isolate()).HasOverflowed()) { - // Any further calls to Next or peek will return the illegal token. - // The current call must return the next token, which might already - // have been peek'ed. - stack_overflow_ = true; - } - return scanner().Next(); - } - bool is_generator() const { return current_function_state_->is_generator(); } bool CheckInOrOf(bool accept_OF, ForEachStatement::VisitMode* visit_mode); - bool peek_any_identifier(); - - INLINE(void Consume(Token::Value token)); - void Expect(Token::Value token, bool* ok); - bool Check(Token::Value token); - void ExpectSemicolon(bool* ok); - bool CheckContextualKeyword(Vector<const char> keyword); - void ExpectContextualKeyword(Vector<const char> keyword, bool* ok); - Handle<String> LiteralString(PretenureFlag tenured) { if (scanner().is_literal_ascii()) { return isolate_->factory()->NewStringFromAscii( @@ -768,8 +717,8 @@ class Parser BASE_EMBEDDED { Handle<String> GetSymbol(); // Get odd-ball literals. - Literal* GetLiteralUndefined(); - Literal* GetLiteralTheHole(); + Literal* GetLiteralUndefined(int position); + Literal* GetLiteralTheHole(int position); Handle<String> ParseIdentifier(bool* ok); Handle<String> ParseIdentifierOrStrictReservedWord( @@ -789,9 +738,6 @@ class Parser BASE_EMBEDDED { const char* error, bool* ok); - // Strict mode octal literal validation. - void CheckOctalLiteral(int beg_pos, int end_pos, bool* ok); - // For harmony block scoping mode: Check if the scope has conflicting var/let // declarations from different scopes. It covers for example // @@ -842,7 +788,7 @@ class Parser BASE_EMBEDDED { Handle<String> type, Vector< Handle<Object> > arguments); - preparser::PreParser::PreParseResult LazyParseFunctionLiteral( + PreParser::PreParseResult LazyParseFunctionLiteral( SingletonLogger* logger); AstNodeFactory<AstConstructionVisitor>* factory() { @@ -854,7 +800,7 @@ class Parser BASE_EMBEDDED { Handle<Script> script_; Scanner scanner_; - preparser::PreParser* reusable_preparser_; + PreParser* reusable_preparser_; Scope* top_scope_; Scope* original_scope_; // for ES5 function declarations in sloppy eval FunctionState* current_function_state_; @@ -864,11 +810,6 @@ class Parser BASE_EMBEDDED { FuncNameInferrer* fni_; Mode mode_; - bool allow_natives_syntax_; - bool allow_lazy_; - bool allow_generators_; - bool allow_for_of_; - bool stack_overflow_; // If true, the next (and immediately following) function literal is // preceded by a parenthesis. // Heuristically that means that the function will be called immediately, diff --git a/deps/v8/src/platform-cygwin.cc b/deps/v8/src/platform-cygwin.cc index 4d3b1e313e..0076d567f8 100644 --- a/deps/v8/src/platform-cygwin.cc +++ b/deps/v8/src/platform-cygwin.cc @@ -41,7 +41,6 @@ #include "v8.h" -#include "platform-posix.h" #include "platform.h" #include "simulator.h" #include "v8threads.h" @@ -88,11 +87,6 @@ void* OS::Allocate(const size_t requested, } -void OS::DumpBacktrace() { - // Currently unsupported. -} - - class PosixMemoryMappedFile : public OS::MemoryMappedFile { public: PosixMemoryMappedFile(FILE* file, void* memory, int size) @@ -205,12 +199,6 @@ void OS::SignalCodeMovingGC() { } -int OS::StackWalk(Vector<OS::StackFrame> frames) { - // Not supported on Cygwin. - return 0; -} - - // The VirtualMemory implementation is taken from platform-win32.cc. 
// The mmap-based virtual memory implementation as it is used on most posix // platforms does not work well because Cygwin does not support MAP_FIXED. diff --git a/deps/v8/src/platform-freebsd.cc b/deps/v8/src/platform-freebsd.cc index d81827805a..103fd6ce05 100644 --- a/deps/v8/src/platform-freebsd.cc +++ b/deps/v8/src/platform-freebsd.cc @@ -43,7 +43,6 @@ #include <sys/fcntl.h> // open #include <unistd.h> // getpagesize // If you don't have execinfo.h then you need devel/libexecinfo from ports. -#include <execinfo.h> // backtrace, backtrace_symbols #include <strings.h> // index #include <errno.h> #include <stdarg.h> @@ -54,7 +53,6 @@ #include "v8.h" #include "v8threads.h" -#include "platform-posix.h" #include "platform.h" #include "vm-state-inl.h" @@ -97,11 +95,6 @@ void* OS::Allocate(const size_t requested, } -void OS::DumpBacktrace() { - POSIXBacktraceHelper<backtrace, backtrace_symbols>::DumpBacktrace(); -} - - class PosixMemoryMappedFile : public OS::MemoryMappedFile { public: PosixMemoryMappedFile(FILE* file, void* memory, int size) @@ -199,10 +192,6 @@ void OS::SignalCodeMovingGC() { } -int OS::StackWalk(Vector<OS::StackFrame> frames) { - return POSIXBacktraceHelper<backtrace, backtrace_symbols>::StackWalk(frames); -} - // Constants used for mmap. static const int kMmapFd = -1; diff --git a/deps/v8/src/platform-linux.cc b/deps/v8/src/platform-linux.cc index b8b96025e1..eb2d10b3f9 100644 --- a/deps/v8/src/platform-linux.cc +++ b/deps/v8/src/platform-linux.cc @@ -38,11 +38,6 @@ #include <sys/types.h> #include <stdlib.h> -#if defined(__GLIBC__) && !defined(__UCLIBC__) -#include <execinfo.h> -#include <cxxabi.h> -#endif - // Ubuntu Dapper requires memory pages to be marked as // executable. Otherwise, OS raises an exception when executing code // in that page. @@ -66,7 +61,6 @@ #include "v8.h" -#include "platform-posix.h" #include "platform.h" #include "v8threads.h" #include "vm-state-inl.h" @@ -154,14 +148,6 @@ void* OS::Allocate(const size_t requested, } -void OS::DumpBacktrace() { - // backtrace is a glibc extension. -#if defined(__GLIBC__) && !defined(__UCLIBC__) - POSIXBacktraceHelper<backtrace, backtrace_symbols>::DumpBacktrace(); -#endif -} - - class PosixMemoryMappedFile : public OS::MemoryMappedFile { public: PosixMemoryMappedFile(FILE* file, void* memory, int size) @@ -313,16 +299,6 @@ void OS::SignalCodeMovingGC() { } -int OS::StackWalk(Vector<OS::StackFrame> frames) { - // backtrace is a glibc extension. -#if defined(__GLIBC__) && !defined(__UCLIBC__) - return POSIXBacktraceHelper<backtrace, backtrace_symbols>::StackWalk(frames); -#else - return 0; -#endif -} - - // Constants used for mmap. static const int kMmapFd = -1; static const int kMmapFdOffset = 0; diff --git a/deps/v8/src/platform-macos.cc b/deps/v8/src/platform-macos.cc index 67cc96f937..5ffc3fc54c 100644 --- a/deps/v8/src/platform-macos.cc +++ b/deps/v8/src/platform-macos.cc @@ -53,27 +53,15 @@ #include <stdlib.h> #include <string.h> #include <errno.h> -#include <cxxabi.h> #undef MAP_TYPE #include "v8.h" -#include "platform-posix.h" #include "platform.h" #include "simulator.h" #include "vm-state-inl.h" -// Manually define these here as weak imports, rather than including execinfo.h. -// This lets us launch on 10.4 which does not have these calls. 
-extern "C" { - extern int backtrace(void**, int) __attribute__((weak_import)); - extern char** backtrace_symbols(void* const*, int) - __attribute__((weak_import)); - extern void backtrace_symbols_fd(void* const*, int, int) - __attribute__((weak_import)); -} - namespace v8 { namespace internal { @@ -107,14 +95,6 @@ void* OS::Allocate(const size_t requested, } -void OS::DumpBacktrace() { - // If weak link to execinfo lib has failed, ie because we are on 10.4, abort. - if (backtrace == NULL) return; - - POSIXBacktraceHelper<backtrace, backtrace_symbols>::DumpBacktrace(); -} - - class PosixMemoryMappedFile : public OS::MemoryMappedFile { public: PosixMemoryMappedFile(FILE* file, void* memory, int size) @@ -220,14 +200,6 @@ double OS::LocalTimeOffset() { } -int OS::StackWalk(Vector<StackFrame> frames) { - // If weak link to execinfo lib has failed, ie because we are on 10.4, abort. - if (backtrace == NULL) return 0; - - return POSIXBacktraceHelper<backtrace, backtrace_symbols>::StackWalk(frames); -} - - VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { } diff --git a/deps/v8/src/platform-openbsd.cc b/deps/v8/src/platform-openbsd.cc index 30a484f4b3..710c3904af 100644 --- a/deps/v8/src/platform-openbsd.cc +++ b/deps/v8/src/platform-openbsd.cc @@ -42,7 +42,6 @@ #include <sys/stat.h> // open #include <fcntl.h> // open #include <unistd.h> // sysconf -#include <execinfo.h> // backtrace, backtrace_symbols #include <strings.h> // index #include <errno.h> #include <stdarg.h> @@ -51,7 +50,6 @@ #include "v8.h" -#include "platform-posix.h" #include "platform.h" #include "v8threads.h" #include "vm-state-inl.h" @@ -96,11 +94,6 @@ void* OS::Allocate(const size_t requested, } -void OS::DumpBacktrace() { - // Currently unsupported. -} - - class PosixMemoryMappedFile : public OS::MemoryMappedFile { public: PosixMemoryMappedFile(FILE* file, void* memory, int size) @@ -231,34 +224,6 @@ void OS::SignalCodeMovingGC() { } -int OS::StackWalk(Vector<OS::StackFrame> frames) { - // backtrace is a glibc extension. - int frames_size = frames.length(); - ScopedVector<void*> addresses(frames_size); - - int frames_count = backtrace(addresses.start(), frames_size); - - char** symbols = backtrace_symbols(addresses.start(), frames_count); - if (symbols == NULL) { - return kStackWalkError; - } - - for (int i = 0; i < frames_count; i++) { - frames[i].address = addresses[i]; - // Format a text representation of the frame based on the information - // available. - SNPrintF(MutableCStrVector(frames[i].text, kStackWalkMaxTextLen), - "%s", - symbols[i]); - // Make sure line termination is in place. - frames[i].text[kStackWalkMaxTextLen - 1] = '\0'; - } - - free(symbols); - - return frames_count; -} - // Constants used for mmap. static const int kMmapFd = -1; diff --git a/deps/v8/src/platform-posix.cc b/deps/v8/src/platform-posix.cc index fe27eaf71f..797557d76f 100644 --- a/deps/v8/src/platform-posix.cc +++ b/deps/v8/src/platform-posix.cc @@ -29,8 +29,6 @@ // own but contains the parts which are the same across POSIX platforms Linux, // Mac OS, FreeBSD and OpenBSD. 
-#include "platform-posix.h" - #include <dlfcn.h> #include <pthread.h> #if defined(__DragonFly__) || defined(__FreeBSD__) || defined(__OpenBSD__) @@ -102,6 +100,48 @@ intptr_t OS::MaxVirtualMemory() { } +uint64_t OS::TotalPhysicalMemory() { +#if V8_OS_MACOSX + int mib[2]; + mib[0] = CTL_HW; + mib[1] = HW_MEMSIZE; + int64_t size = 0; + size_t len = sizeof(size); + if (sysctl(mib, 2, &size, &len, NULL, 0) != 0) { + UNREACHABLE(); + return 0; + } + return static_cast<uint64_t>(size); +#elif V8_OS_FREEBSD + int pages, page_size; + size_t size = sizeof(pages); + sysctlbyname("vm.stats.vm.v_page_count", &pages, &size, NULL, 0); + sysctlbyname("vm.stats.vm.v_page_size", &page_size, &size, NULL, 0); + if (pages == -1 || page_size == -1) { + UNREACHABLE(); + return 0; + } + return static_cast<uint64_t>(pages) * page_size; +#elif V8_OS_CYGWIN + MEMORYSTATUS memory_info; + memory_info.dwLength = sizeof(memory_info); + if (!GlobalMemoryStatus(&memory_info)) { + UNREACHABLE(); + return 0; + } + return static_cast<uint64_t>(memory_info.dwTotalPhys); +#else + intptr_t pages = sysconf(_SC_PHYS_PAGES); + intptr_t page_size = sysconf(_SC_PAGESIZE); + if (pages == -1 || page_size == -1) { + UNREACHABLE(); + return 0; + } + return static_cast<uint64_t>(pages) * page_size; +#endif +} + + int OS::ActivationFrameAlignment() { #if V8_TARGET_ARCH_ARM // On EABI ARM targets this is required for fp correctness in the diff --git a/deps/v8/src/platform-posix.h b/deps/v8/src/platform-posix.h deleted file mode 100644 index 6b73387cd7..0000000000 --- a/deps/v8/src/platform-posix.h +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#ifndef V8_PLATFORM_POSIX_H_ -#define V8_PLATFORM_POSIX_H_ - -#if !defined(ANDROID) -#include <cxxabi.h> -#endif -#include <stdio.h> - -#include "platform.h" - -namespace v8 { -namespace internal { - -// Used by platform implementation files during OS::DumpBacktrace() -// and OS::StackWalk(). 
-template<int (*backtrace)(void**, int), - char** (*backtrace_symbols)(void* const*, int)> -struct POSIXBacktraceHelper { - static void DumpBacktrace() { - void* trace[100]; - int size = backtrace(trace, ARRAY_SIZE(trace)); - char** symbols = backtrace_symbols(trace, size); - fprintf(stderr, "\n==== C stack trace ===============================\n\n"); - if (size == 0) { - fprintf(stderr, "(empty)\n"); - } else if (symbols == NULL) { - fprintf(stderr, "(no symbols)\n"); - } else { - for (int i = 1; i < size; ++i) { - fprintf(stderr, "%2d: ", i); - char mangled[201]; - if (sscanf(symbols[i], "%*[^(]%*[(]%200[^)+]", mangled) == 1) {// NOLINT - char* demangled = NULL; -#if !defined(ANDROID) - int status; - size_t length; - demangled = abi::__cxa_demangle(mangled, NULL, &length, &status); -#endif - fprintf(stderr, "%s\n", demangled != NULL ? demangled : mangled); - free(demangled); - } else { - fprintf(stderr, "??\n"); - } - } - } - fflush(stderr); - free(symbols); - } - - static int StackWalk(Vector<OS::StackFrame> frames) { - int frames_size = frames.length(); - ScopedVector<void*> addresses(frames_size); - - int frames_count = backtrace(addresses.start(), frames_size); - - char** symbols = backtrace_symbols(addresses.start(), frames_count); - if (symbols == NULL) { - return OS::kStackWalkError; - } - - for (int i = 0; i < frames_count; i++) { - frames[i].address = addresses[i]; - // Format a text representation of the frame based on the information - // available. - OS::SNPrintF(MutableCStrVector(frames[i].text, OS::kStackWalkMaxTextLen), - "%s", symbols[i]); - // Make sure line termination is in place. - frames[i].text[OS::kStackWalkMaxTextLen - 1] = '\0'; - } - - free(symbols); - - return frames_count; - } -}; - -} } // namespace v8::internal - -#endif // V8_PLATFORM_POSIX_H_ diff --git a/deps/v8/src/platform-solaris.cc b/deps/v8/src/platform-solaris.cc index f082af1254..a0590cbecb 100644 --- a/deps/v8/src/platform-solaris.cc +++ b/deps/v8/src/platform-solaris.cc @@ -51,7 +51,6 @@ #include "v8.h" -#include "platform-posix.h" #include "platform.h" #include "v8threads.h" #include "vm-state-inl.h" @@ -112,11 +111,6 @@ void* OS::Allocate(const size_t requested, } -void OS::DumpBacktrace() { - // Currently unsupported. -} - - class PosixMemoryMappedFile : public OS::MemoryMappedFile { public: PosixMemoryMappedFile(FILE* file, void* memory, int size) @@ -211,20 +205,6 @@ static int StackWalkCallback(uintptr_t pc, int signo, void* data) { } -int OS::StackWalk(Vector<OS::StackFrame> frames) { - ucontext_t ctx; - struct StackWalker walker = { frames, 0 }; - - if (getcontext(&ctx) < 0) return kStackWalkError; - - if (!walkcontext(&ctx, StackWalkCallback, &walker)) { - return kStackWalkError; - } - - return walker.index; -} - - // Constants used for mmap. static const int kMmapFd = -1; static const int kMmapFdOffset = 0; diff --git a/deps/v8/src/platform-win32.cc b/deps/v8/src/platform-win32.cc index ea4f7ea11f..35411bfdad 100644 --- a/deps/v8/src/platform-win32.cc +++ b/deps/v8/src/platform-win32.cc @@ -240,12 +240,16 @@ void MathSetup() { class Win32Time { public: // Constructors. + Win32Time(); explicit Win32Time(double jstime); Win32Time(int year, int mon, int day, int hour, int min, int sec); // Convert timestamp to JavaScript representation. double ToJSTime(); + // Set timestamp to current time. + void SetToCurrentTime(); + // Returns the local timezone offset in milliseconds east of UTC. This is // the number of milliseconds you must add to UTC to get local time, i.e. 
// LocalOffset(CET) = 3600000 and LocalOffset(PST) = -28800000. This @@ -314,6 +318,12 @@ char Win32Time::std_tz_name_[kTzNameSize]; char Win32Time::dst_tz_name_[kTzNameSize]; +// Initialize timestamp to start of epoc. +Win32Time::Win32Time() { + t() = 0; +} + + // Initialize timestamp from a JavaScript timestamp. Win32Time::Win32Time(double jstime) { t() = static_cast<int64_t>(jstime) * kTimeScaler + kTimeEpoc; @@ -340,6 +350,62 @@ double Win32Time::ToJSTime() { } +// Set timestamp to current time. +void Win32Time::SetToCurrentTime() { + // The default GetSystemTimeAsFileTime has a ~15.5ms resolution. + // Because we're fast, we like fast timers which have at least a + // 1ms resolution. + // + // timeGetTime() provides 1ms granularity when combined with + // timeBeginPeriod(). If the host application for v8 wants fast + // timers, it can use timeBeginPeriod to increase the resolution. + // + // Using timeGetTime() has a drawback because it is a 32bit value + // and hence rolls-over every ~49days. + // + // To use the clock, we use GetSystemTimeAsFileTime as our base; + // and then use timeGetTime to extrapolate current time from the + // start time. To deal with rollovers, we resync the clock + // any time when more than kMaxClockElapsedTime has passed or + // whenever timeGetTime creates a rollover. + + static bool initialized = false; + static TimeStamp init_time; + static DWORD init_ticks; + static const int64_t kHundredNanosecondsPerSecond = 10000000; + static const int64_t kMaxClockElapsedTime = + 60*kHundredNanosecondsPerSecond; // 1 minute + + // If we are uninitialized, we need to resync the clock. + bool needs_resync = !initialized; + + // Get the current time. + TimeStamp time_now; + GetSystemTimeAsFileTime(&time_now.ft_); + DWORD ticks_now = timeGetTime(); + + // Check if we need to resync due to clock rollover. + needs_resync |= ticks_now < init_ticks; + + // Check if we need to resync due to elapsed time. + needs_resync |= (time_now.t_ - init_time.t_) > kMaxClockElapsedTime; + + // Check if we need to resync due to backwards time change. + needs_resync |= time_now.t_ < init_time.t_; + + // Resync the clock if necessary. + if (needs_resync) { + GetSystemTimeAsFileTime(&init_time.ft_); + init_ticks = ticks_now = timeGetTime(); + initialized = true; + } + + // Finally, compute the actual time. Why is this so hard. + DWORD elapsed = ticks_now - init_ticks; + this->time_.t_ = init_time.t_ + (static_cast<int64_t>(elapsed) * 10000); +} + + // Guess the name of the timezone from the bias. // The guess is very biased towards the northern hemisphere. const char* Win32Time::GuessTimezoneNameFromBias(int bias) { @@ -891,11 +957,6 @@ void OS::DebugBreak() { } -void OS::DumpBacktrace() { - // Currently unsupported. -} - - class Win32MemoryMappedFile : public OS::MemoryMappedFile { public: Win32MemoryMappedFile(HANDLE file, @@ -1208,133 +1269,21 @@ void OS::SignalCodeMovingGC() { } -// Walk the stack using the facilities in dbghelp.dll and tlhelp32.dll - -// Switch off warning 4748 (/GS can not protect parameters and local variables -// from local buffer overrun because optimizations are disabled in function) as -// it is triggered by the use of inline assembler. -#pragma warning(push) -#pragma warning(disable : 4748) -int OS::StackWalk(Vector<OS::StackFrame> frames) { - BOOL ok; - - // Load the required functions from DLL's. - if (!LoadDbgHelpAndTlHelp32()) return kStackWalkError; - - // Get the process and thread handles. 
- HANDLE process_handle = GetCurrentProcess(); - HANDLE thread_handle = GetCurrentThread(); - - // Read the symbols. - if (!LoadSymbols(Isolate::Current(), process_handle)) return kStackWalkError; - - // Capture current context. - CONTEXT context; - RtlCaptureContext(&context); - - // Initialize the stack walking - STACKFRAME64 stack_frame; - memset(&stack_frame, 0, sizeof(stack_frame)); -#ifdef _WIN64 - stack_frame.AddrPC.Offset = context.Rip; - stack_frame.AddrFrame.Offset = context.Rbp; - stack_frame.AddrStack.Offset = context.Rsp; -#else - stack_frame.AddrPC.Offset = context.Eip; - stack_frame.AddrFrame.Offset = context.Ebp; - stack_frame.AddrStack.Offset = context.Esp; -#endif - stack_frame.AddrPC.Mode = AddrModeFlat; - stack_frame.AddrFrame.Mode = AddrModeFlat; - stack_frame.AddrStack.Mode = AddrModeFlat; - int frames_count = 0; - - // Collect stack frames. - int frames_size = frames.length(); - while (frames_count < frames_size) { - ok = _StackWalk64( - IMAGE_FILE_MACHINE_I386, // MachineType - process_handle, // hProcess - thread_handle, // hThread - &stack_frame, // StackFrame - &context, // ContextRecord - NULL, // ReadMemoryRoutine - _SymFunctionTableAccess64, // FunctionTableAccessRoutine - _SymGetModuleBase64, // GetModuleBaseRoutine - NULL); // TranslateAddress - if (!ok) break; - - // Store the address. - ASSERT((stack_frame.AddrPC.Offset >> 32) == 0); // 32-bit address. - frames[frames_count].address = - reinterpret_cast<void*>(stack_frame.AddrPC.Offset); - - // Try to locate a symbol for this frame. - DWORD64 symbol_displacement; - SmartArrayPointer<IMAGEHLP_SYMBOL64> symbol( - NewArray<IMAGEHLP_SYMBOL64>(kStackWalkMaxNameLen)); - if (symbol.is_empty()) return kStackWalkError; // Out of memory. - memset(*symbol, 0, sizeof(IMAGEHLP_SYMBOL64) + kStackWalkMaxNameLen); - (*symbol)->SizeOfStruct = sizeof(IMAGEHLP_SYMBOL64); - (*symbol)->MaxNameLength = kStackWalkMaxNameLen; - ok = _SymGetSymFromAddr64(process_handle, // hProcess - stack_frame.AddrPC.Offset, // Address - &symbol_displacement, // Displacement - *symbol); // Symbol - if (ok) { - // Try to locate more source information for the symbol. - IMAGEHLP_LINE64 Line; - memset(&Line, 0, sizeof(Line)); - Line.SizeOfStruct = sizeof(Line); - DWORD line_displacement; - ok = _SymGetLineFromAddr64( - process_handle, // hProcess - stack_frame.AddrPC.Offset, // dwAddr - &line_displacement, // pdwDisplacement - &Line); // Line - // Format a text representation of the frame based on the information - // available. - if (ok) { - SNPrintF(MutableCStrVector(frames[frames_count].text, - kStackWalkMaxTextLen), - "%s %s:%d:%d", - (*symbol)->Name, Line.FileName, Line.LineNumber, - line_displacement); - } else { - SNPrintF(MutableCStrVector(frames[frames_count].text, - kStackWalkMaxTextLen), - "%s", - (*symbol)->Name); - } - // Make sure line termination is in place. - frames[frames_count].text[kStackWalkMaxTextLen - 1] = '\0'; - } else { - // No text representation of this frame - frames[frames_count].text[0] = '\0'; - - // Continue if we are just missing a module (for non C/C++ frames a - // module will never be found). - int err = GetLastError(); - if (err != ERROR_MOD_NOT_FOUND) { - break; - } - } - - frames_count++; +uint64_t OS::TotalPhysicalMemory() { + MEMORYSTATUSEX memory_info; + memory_info.dwLength = sizeof(memory_info); + if (!GlobalMemoryStatusEx(&memory_info)) { + UNREACHABLE(); + return 0; } - // Return the number of frames filled in. 
- return frames_count; + return static_cast<uint64_t>(memory_info.ullTotalPhys); } -// Restore warnings to previous settings. -#pragma warning(pop) - #else // __MINGW32__ void OS::LogSharedLibraryAddresses(Isolate* isolate) { } void OS::SignalCodeMovingGC() { } -int OS::StackWalk(Vector<OS::StackFrame> frames) { return 0; } #endif // __MINGW32__ diff --git a/deps/v8/src/platform.h b/deps/v8/src/platform.h index ee8fb92910..8e524aeaf0 100644 --- a/deps/v8/src/platform.h +++ b/deps/v8/src/platform.h @@ -67,6 +67,8 @@ int signbit(double x); int strncasecmp(const char* s1, const char* s2, int n); +// Visual C++ 2013 and higher implement this function. +#if (_MSC_VER < 1800) inline int lrint(double flt) { int intgr; #if V8_TARGET_ARCH_IA32 @@ -84,6 +86,8 @@ inline int lrint(double flt) { return intgr; } +#endif // _MSC_VER < 1800 + #endif // V8_CC_MSVC namespace v8 { @@ -252,9 +256,6 @@ class OS { // Debug break. static void DebugBreak(); - // Dump C++ current stack trace (only functional on Linux). - static void DumpBacktrace(); - // Walk the stack. static const int kStackWalkError = -1; static const int kStackWalkMaxNameLen = 256; @@ -264,8 +265,6 @@ class OS { char text[kStackWalkMaxTextLen]; }; - static int StackWalk(Vector<StackFrame> frames); - class MemoryMappedFile { public: static MemoryMappedFile* open(const char* name); @@ -303,6 +302,9 @@ class OS { // positions indicated by the members of the CpuFeature enum from globals.h static uint64_t CpuFeaturesImpliedByPlatform(); + // The total amount of physical memory available on the current system. + static uint64_t TotalPhysicalMemory(); + // Maximum size of the virtual memory. 0 means there is no artificial // limit. static intptr_t MaxVirtualMemory(); diff --git a/deps/v8/src/platform/elapsed-timer.h b/deps/v8/src/platform/elapsed-timer.h index 2311db2f52..b61b007605 100644 --- a/deps/v8/src/platform/elapsed-timer.h +++ b/deps/v8/src/platform/elapsed-timer.h @@ -28,8 +28,8 @@ #ifndef V8_PLATFORM_ELAPSED_TIMER_H_ #define V8_PLATFORM_ELAPSED_TIMER_H_ -#include "checks.h" -#include "platform/time.h" +#include "../checks.h" +#include "time.h" namespace v8 { namespace internal { @@ -104,7 +104,7 @@ class ElapsedTimer V8_FINAL BASE_EMBEDDED { private: static V8_INLINE TimeTicks Now() { - TimeTicks now = TimeTicks::HighResNow(); + TimeTicks now = TimeTicks::HighResolutionNow(); ASSERT(!now.IsNull()); return now; } diff --git a/deps/v8/src/platform/mutex.h b/deps/v8/src/platform/mutex.h index 0f899ca597..125e9d4860 100644 --- a/deps/v8/src/platform/mutex.h +++ b/deps/v8/src/platform/mutex.h @@ -28,9 +28,9 @@ #ifndef V8_PLATFORM_MUTEX_H_ #define V8_PLATFORM_MUTEX_H_ -#include "lazy-instance.h" +#include "../lazy-instance.h" #if V8_OS_WIN -#include "win32-headers.h" +#include "../win32-headers.h" #endif #if V8_OS_POSIX diff --git a/deps/v8/src/platform/semaphore.h b/deps/v8/src/platform/semaphore.h index 2cfa142111..0babe5fd65 100644 --- a/deps/v8/src/platform/semaphore.h +++ b/deps/v8/src/platform/semaphore.h @@ -28,9 +28,9 @@ #ifndef V8_PLATFORM_SEMAPHORE_H_ #define V8_PLATFORM_SEMAPHORE_H_ -#include "lazy-instance.h" +#include "../lazy-instance.h" #if V8_OS_WIN -#include "win32-headers.h" +#include "../win32-headers.h" #endif #if V8_OS_MACOSX diff --git a/deps/v8/src/platform/time.cc b/deps/v8/src/platform/time.cc index ea6dd2c0ba..de0ca16473 100644 --- a/deps/v8/src/platform/time.cc +++ b/deps/v8/src/platform/time.cc @@ -43,13 +43,6 @@ #include "win32-headers.h" #endif -#if V8_OS_WIN -// Prototype for GetTickCount64() procedure. 
-extern "C" { -typedef ULONGLONG (WINAPI *GETTICKCOUNT64PROC)(void); -} -#endif - namespace v8 { namespace internal { @@ -175,43 +168,43 @@ struct timespec TimeDelta::ToTimespec() const { // periodically resync the internal clock to the system clock. class Clock V8_FINAL { public: - Clock() : initial_time_(CurrentWallclockTime()), - initial_ticks_(TimeTicks::Now()) {} + Clock() : initial_ticks_(GetSystemTicks()), initial_time_(GetSystemTime()) {} Time Now() { - // This must be executed under lock. - LockGuard<Mutex> lock_guard(&mutex_); + // Time between resampling the un-granular clock for this API (1 minute). + const TimeDelta kMaxElapsedTime = TimeDelta::FromMinutes(1); - // Calculate the time elapsed since we started our timer. - TimeDelta elapsed = TimeTicks::Now() - initial_ticks_; + LockGuard<Mutex> lock_guard(&mutex_); - // Check if we don't need to synchronize with the wallclock yet. - if (elapsed.InMicroseconds() <= kMaxMicrosecondsToAvoidDrift) { - return initial_time_ + elapsed; + // Determine current time and ticks. + TimeTicks ticks = GetSystemTicks(); + Time time = GetSystemTime(); + + // Check if we need to synchronize with the system clock due to a backwards + // time change or the amount of time elapsed. + TimeDelta elapsed = ticks - initial_ticks_; + if (time < initial_time_ || elapsed > kMaxElapsedTime) { + initial_ticks_ = ticks; + initial_time_ = time; + return time; } - // Resynchronize with the wallclock. - initial_ticks_ = TimeTicks::Now(); - initial_time_ = CurrentWallclockTime(); - return initial_time_; + return initial_time_ + elapsed; } Time NowFromSystemTime() { - // This must be executed under lock. LockGuard<Mutex> lock_guard(&mutex_); - - // Resynchronize with the wallclock. - initial_ticks_ = TimeTicks::Now(); - initial_time_ = CurrentWallclockTime(); + initial_ticks_ = GetSystemTicks(); + initial_time_ = GetSystemTime(); return initial_time_; } private: - // Time between resampling the un-granular clock for this API (1 minute). - static const int64_t kMaxMicrosecondsToAvoidDrift = - Time::kMicrosecondsPerMinute; + static TimeTicks GetSystemTicks() { + return TimeTicks::Now(); + } - static Time CurrentWallclockTime() { + static Time GetSystemTime() { FILETIME ft; ::GetSystemTimeAsFileTime(&ft); return Time::FromFiletime(ft); @@ -223,9 +216,9 @@ class Clock V8_FINAL { }; -static LazyDynamicInstance<Clock, - DefaultCreateTrait<Clock>, - ThreadSafeInitOnceTrait>::type clock = LAZY_DYNAMIC_INSTANCE_INITIALIZER; +static LazyStaticInstance<Clock, + DefaultConstructTrait<Clock>, + ThreadSafeInitOnceTrait>::type clock = LAZY_STATIC_INSTANCE_INITIALIZER; Time Time::Now() { @@ -388,6 +381,7 @@ class TickClock { public: virtual ~TickClock() {} virtual int64_t Now() = 0; + virtual bool IsHighResolution() = 0; }; @@ -440,42 +434,24 @@ class HighResolutionTickClock V8_FINAL : public TickClock { int64_t ticks = (whole_seconds * Time::kMicrosecondsPerSecond) + ((leftover_ticks * Time::kMicrosecondsPerSecond) / ticks_per_second_); - // Make sure we never return 0 here, so that TimeTicks::HighResNow() + // Make sure we never return 0 here, so that TimeTicks::HighResolutionNow() // will never return 0. return ticks + 1; } - private: - int64_t ticks_per_second_; -}; - - -// The GetTickCount64() API is what we actually want for the regular tick -// clock, but this is only available starting with Windows Vista. 
-class WindowsVistaTickClock V8_FINAL : public TickClock { - public: - explicit WindowsVistaTickClock(GETTICKCOUNT64PROC func) : func_(func) { - ASSERT(func_ != NULL); - } - virtual ~WindowsVistaTickClock() {} - - virtual int64_t Now() V8_OVERRIDE { - // Query the current ticks (in ms). - ULONGLONG tick_count_ms = (*func_)(); - - // Convert to microseconds (make sure to never return 0 here). - return (tick_count_ms * Time::kMicrosecondsPerMillisecond) + 1; + virtual bool IsHighResolution() V8_OVERRIDE { + return true; } private: - GETTICKCOUNT64PROC func_; + int64_t ticks_per_second_; }; class RolloverProtectedTickClock V8_FINAL : public TickClock { public: // We initialize rollover_ms_ to 1 to ensure that we will never - // return 0 from TimeTicks::HighResNow() and TimeTicks::Now() below. + // return 0 from TimeTicks::HighResolutionNow() and TimeTicks::Now() below. RolloverProtectedTickClock() : last_seen_now_(0), rollover_ms_(1) {} virtual ~RolloverProtectedTickClock() {} @@ -487,6 +463,9 @@ class RolloverProtectedTickClock V8_FINAL : public TickClock { // Note that we do not use GetTickCount() here, since timeGetTime() gives // more predictable delta values, as described here: // http://blogs.msdn.com/b/larryosterman/archive/2009/09/02/what-s-the-difference-between-gettickcount-and-timegettime.aspx + // timeGetTime() provides 1ms granularity when combined with + // timeBeginPeriod(). If the host application for V8 wants fast timers, it + // can use timeBeginPeriod() to increase the resolution. DWORD now = timeGetTime(); if (now < last_seen_now_) { rollover_ms_ += V8_INT64_C(0x100000000); // ~49.7 days. @@ -495,6 +474,10 @@ class RolloverProtectedTickClock V8_FINAL : public TickClock { return (now + rollover_ms_) * Time::kMicrosecondsPerMillisecond; } + virtual bool IsHighResolution() V8_OVERRIDE { + return false; + } + private: Mutex mutex_; DWORD last_seen_now_; @@ -502,27 +485,10 @@ class RolloverProtectedTickClock V8_FINAL : public TickClock { }; -struct CreateTickClockTrait { - static TickClock* Create() { - // Try to load GetTickCount64() from kernel32.dll (available since Vista). - HMODULE kernel32 = ::GetModuleHandleA("kernel32.dll"); - ASSERT(kernel32 != NULL); - FARPROC proc = ::GetProcAddress(kernel32, "GetTickCount64"); - if (proc != NULL) { - return new WindowsVistaTickClock( - reinterpret_cast<GETTICKCOUNT64PROC>(proc)); - } - - // Fallback to the rollover protected tick clock. - return new RolloverProtectedTickClock; - } -}; - - -static LazyDynamicInstance<TickClock, - CreateTickClockTrait, +static LazyStaticInstance<RolloverProtectedTickClock, + DefaultConstructTrait<RolloverProtectedTickClock>, ThreadSafeInitOnceTrait>::type tick_clock = - LAZY_DYNAMIC_INSTANCE_INITIALIZER; + LAZY_STATIC_INSTANCE_INITIALIZER; struct CreateHighResTickClockTrait { @@ -560,21 +526,27 @@ TimeTicks TimeTicks::Now() { } -TimeTicks TimeTicks::HighResNow() { +TimeTicks TimeTicks::HighResolutionNow() { // Make sure we never return 0 here. 
TimeTicks ticks(high_res_tick_clock.Pointer()->Now()); ASSERT(!ticks.IsNull()); return ticks; } + +// static +bool TimeTicks::IsHighResolutionClockWorking() { + return high_res_tick_clock.Pointer()->IsHighResolution(); +} + #else // V8_OS_WIN TimeTicks TimeTicks::Now() { - return HighResNow(); + return HighResolutionNow(); } -TimeTicks TimeTicks::HighResNow() { +TimeTicks TimeTicks::HighResolutionNow() { int64_t ticks; #if V8_OS_MACOSX static struct mach_timebase_info info; @@ -608,6 +580,12 @@ TimeTicks TimeTicks::HighResNow() { return TimeTicks(ticks + 1); } + +// static +bool TimeTicks::IsHighResolutionClockWorking() { + return true; +} + #endif // V8_OS_WIN } } // namespace v8::internal diff --git a/deps/v8/src/platform/time.h b/deps/v8/src/platform/time.h index 2ce6cdd3e9..877e0203bb 100644 --- a/deps/v8/src/platform/time.h +++ b/deps/v8/src/platform/time.h @@ -31,7 +31,7 @@ #include <ctime> #include <limits> -#include "allocation.h" +#include "../allocation.h" // Forward declarations. extern "C" { @@ -333,7 +333,10 @@ class TimeTicks V8_FINAL BASE_EMBEDDED { // resolution. THIS CALL IS GENERALLY MUCH MORE EXPENSIVE THAN Now() AND // SHOULD ONLY BE USED WHEN IT IS REALLY NEEDED. // This method never returns a null TimeTicks. - static TimeTicks HighResNow(); + static TimeTicks HighResolutionNow(); + + // Returns true if the high-resolution clock is working on this system. + static bool IsHighResolutionClockWorking(); // Returns true if this object has not been initialized. bool IsNull() const { return ticks_ == 0; } diff --git a/deps/v8/src/preparser-api.cc b/deps/v8/src/preparser-api.cc deleted file mode 100644 index 462dfe2290..0000000000 --- a/deps/v8/src/preparser-api.cc +++ /dev/null @@ -1,196 +0,0 @@ -// Copyright 2011 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
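// Hedged sketch (editorial, not part of the patch): the platform/time.cc
// hunks above drop the Vista-only GetTickCount64() path and keep
// RolloverProtectedTickClock as the sole low-resolution tick source. Win32
// timeGetTime() returns a 32-bit millisecond count that wraps roughly every
// 49.7 days; the clock detects a wrap when the value goes backwards and
// accumulates 2^32 ms per wrap. Below is a standalone rendering of that
// idiom; get_ticks_ms32 stands in for timeGetTime(), and the real class
// additionally serializes Now() behind a Mutex.
#include <stdint.h>

class RolloverProtectedClockSketch {
 public:
  // rollover_ms_ starts at 1 so that NowMicroseconds() can never return 0,
  // mirroring the comment in the original class.
  explicit RolloverProtectedClockSketch(uint32_t (*get_ticks_ms32)())
      : get_ticks_ms32_(get_ticks_ms32), last_seen_now_(0), rollover_ms_(1) {}

  int64_t NowMicroseconds() {
    uint32_t now = get_ticks_ms32_();
    if (now < last_seen_now_) {
      rollover_ms_ += 0x100000000LL;  // one 32-bit wrap, ~49.7 days
    }
    last_seen_now_ = now;
    return (now + rollover_ms_) * 1000;  // milliseconds -> microseconds
  }

 private:
  uint32_t (*get_ticks_ms32_)();
  uint32_t last_seen_now_;
  int64_t rollover_ms_;
};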
- -#ifdef _MSC_VER -#define V8_WIN32_LEAN_AND_MEAN -#include "win32-headers.h" -#endif - -#include "../include/v8-preparser.h" - -#include "globals.h" -#include "checks.h" -#include "allocation.h" -#include "utils.h" -#include "list.h" -#include "hashmap.h" -#include "preparse-data-format.h" -#include "preparse-data.h" -#include "preparser.h" - -namespace v8 { -namespace internal { - -// UTF16Buffer based on a v8::UnicodeInputStream. -class InputStreamUtf16Buffer : public Utf16CharacterStream { - public: - /* The InputStreamUtf16Buffer maintains an internal buffer - * that is filled in chunks from the Utf16CharacterStream. - * It also maintains unlimited pushback capability, but optimized - * for small pushbacks. - * The pushback_buffer_ pointer points to the limit of pushbacks - * in the current buffer. There is room for a few pushback'ed chars before - * the buffer containing the most recently read chunk. If this is overflowed, - * an external buffer is allocated/reused to hold further pushbacks, and - * pushback_buffer_ and buffer_cursor_/buffer_end_ now points to the - * new buffer. When this buffer is read to the end again, the cursor is - * switched back to the internal buffer - */ - explicit InputStreamUtf16Buffer(v8::UnicodeInputStream* stream) - : Utf16CharacterStream(), - stream_(stream), - pushback_buffer_(buffer_), - pushback_buffer_end_cache_(NULL), - pushback_buffer_backing_(NULL), - pushback_buffer_backing_size_(0) { - buffer_cursor_ = buffer_end_ = buffer_ + kPushBackSize; - } - - virtual ~InputStreamUtf16Buffer() { - if (pushback_buffer_backing_ != NULL) { - DeleteArray(pushback_buffer_backing_); - } - } - - virtual void PushBack(uc32 ch) { - ASSERT(pos_ > 0); - if (ch == kEndOfInput) { - pos_--; - return; - } - if (buffer_cursor_ <= pushback_buffer_) { - // No more room in the current buffer to do pushbacks. - if (pushback_buffer_end_cache_ == NULL) { - // We have overflowed the pushback space at the beginning of buffer_. - // Switch to using a separate allocated pushback buffer. - if (pushback_buffer_backing_ == NULL) { - // Allocate a buffer the first time we need it. - pushback_buffer_backing_ = NewArray<uc16>(kPushBackSize); - pushback_buffer_backing_size_ = kPushBackSize; - } - pushback_buffer_ = pushback_buffer_backing_; - pushback_buffer_end_cache_ = buffer_end_; - buffer_end_ = pushback_buffer_backing_ + pushback_buffer_backing_size_; - buffer_cursor_ = buffer_end_ - 1; - } else { - // Hit the bottom of the allocated pushback buffer. - // Double the buffer and continue. - uc16* new_buffer = NewArray<uc16>(pushback_buffer_backing_size_ * 2); - OS::MemCopy(new_buffer + pushback_buffer_backing_size_, - pushback_buffer_backing_, - pushback_buffer_backing_size_); - DeleteArray(pushback_buffer_backing_); - buffer_cursor_ = new_buffer + pushback_buffer_backing_size_; - pushback_buffer_backing_ = pushback_buffer_ = new_buffer; - buffer_end_ = pushback_buffer_backing_ + pushback_buffer_backing_size_; - } - } - pushback_buffer_[buffer_cursor_ - pushback_buffer_- 1] = - static_cast<uc16>(ch); - pos_--; - } - - protected: - virtual bool ReadBlock() { - if (pushback_buffer_end_cache_ != NULL) { - buffer_cursor_ = buffer_; - buffer_end_ = pushback_buffer_end_cache_; - pushback_buffer_end_cache_ = NULL; - return buffer_end_ > buffer_cursor_; - } - // Copy the top of the buffer into the pushback area. 
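The InputStreamUtf16Buffer being removed here provides unlimited pushback optimized for small pushbacks: a few slots reserved in front of the read buffer absorb the common case, and a separately allocated backing array that doubles when exhausted takes the overflow. The same idea in miniature, with std::vector standing in for the hand-rolled NewArray/MemCopy doubling (illustrative sketch, not V8 API):

    #include <cstdint>
    #include <vector>

    // Character source with unlimited pushback. The vector plays the role
    // of pushback_buffer_backing_ and grows geometrically on demand.
    class PushbackSource {
     public:
      PushbackSource(const uint16_t* data, size_t size)
          : data_(data), size_(size), pos_(0) {}

      // Returns the next code unit, or -1 at end of input.
      int32_t Next() {
        if (!pushback_.empty()) {
          uint16_t c = pushback_.back();
          pushback_.pop_back();
          return c;
        }
        return pos_ < size_ ? data_[pos_++] : -1;
      }

      void PushBack(uint16_t c) { pushback_.push_back(c); }

     private:
      const uint16_t* data_;
      size_t size_;
      size_t pos_;
      std::vector<uint16_t> pushback_;
    };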
- int32_t value; - uc16* buffer_start = buffer_ + kPushBackSize; - buffer_cursor_ = buffer_end_ = buffer_start; - while ((value = stream_->Next()) >= 0) { - if (value > - static_cast<int32_t>(unibrow::Utf16::kMaxNonSurrogateCharCode)) { - buffer_start[buffer_end_++ - buffer_start] = - unibrow::Utf16::LeadSurrogate(value); - buffer_start[buffer_end_++ - buffer_start] = - unibrow::Utf16::TrailSurrogate(value); - } else { - // buffer_end_ is a const pointer, but buffer_ is writable. - buffer_start[buffer_end_++ - buffer_start] = static_cast<uc16>(value); - } - // Stop one before the end of the buffer in case we get a surrogate pair. - if (buffer_end_ <= buffer_ + 1 + kPushBackSize + kBufferSize) break; - } - return buffer_end_ > buffer_start; - } - - virtual unsigned SlowSeekForward(unsigned pos) { - // Seeking in the input is not used by preparsing. - // It's only used by the real parser based on preparser data. - UNIMPLEMENTED(); - return 0; - } - - private: - static const unsigned kBufferSize = 512; - static const unsigned kPushBackSize = 16; - v8::UnicodeInputStream* const stream_; - // Buffer holding first kPushBackSize characters of pushback buffer, - // then kBufferSize chars of read-ahead. - // The pushback buffer is only used if pushing back characters past - // the start of a block. - uc16 buffer_[kPushBackSize + kBufferSize]; - // Limit of pushbacks before new allocation is necessary. - uc16* pushback_buffer_; - // Only if that pushback buffer at the start of buffer_ isn't sufficient - // is the following used. - const uc16* pushback_buffer_end_cache_; - uc16* pushback_buffer_backing_; - unsigned pushback_buffer_backing_size_; -}; - -} // namespace internal. - - -UnicodeInputStream::~UnicodeInputStream() { } - - -PreParserData Preparse(UnicodeInputStream* input, size_t max_stack) { - internal::InputStreamUtf16Buffer buffer(input); - uintptr_t stack_limit = reinterpret_cast<uintptr_t>(&buffer) - max_stack; - internal::UnicodeCache unicode_cache; - internal::Scanner scanner(&unicode_cache); - scanner.Initialize(&buffer); - internal::CompleteParserRecorder recorder; - preparser::PreParser preparser(&scanner, &recorder, stack_limit); - preparser.set_allow_lazy(true); - preparser::PreParser::PreParseResult result = preparser.PreParseProgram(); - if (result == preparser::PreParser::kPreParseStackOverflow) { - return PreParserData::StackOverflow(); - } - internal::Vector<unsigned> pre_data = recorder.ExtractData(); - size_t size = pre_data.length() * sizeof(pre_data[0]); - unsigned char* data = reinterpret_cast<unsigned char*>(pre_data.start()); - return PreParserData(size, data); -} - -} // namespace v8. diff --git a/deps/v8/src/preparser.cc b/deps/v8/src/preparser.cc index 36a94a3315..a87c434558 100644 --- a/deps/v8/src/preparser.cc +++ b/deps/v8/src/preparser.cc @@ -42,10 +42,10 @@ #include "unicode.h" #include "utils.h" -#ifdef _MSC_VER +#if V8_CC_MSVC && (_MSC_VER < 1800) namespace std { -// Usually defined in math.h, but not in MSVC. +// Usually defined in math.h, but not in MSVC until VS2013+. // Abstracted to work int isfinite(double value); @@ -53,28 +53,27 @@ int isfinite(double value); #endif namespace v8 { - -namespace preparser { +namespace internal { PreParser::PreParseResult PreParser::PreParseLazyFunction( - i::LanguageMode mode, bool is_generator, i::ParserRecorder* log) { + LanguageMode mode, bool is_generator, ParserRecorder* log) { log_ = log; // Lazy functions always have trivial outer scopes (no with/catch scopes). 
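Note how the ReadBlock() above splits every code point beyond unibrow::Utf16::kMaxNonSurrogateCharCode into two uc16 units, and why its fill loop stops one slot early: a surrogate pair must not straddle the end of the buffer. The arithmetic behind LeadSurrogate()/TrailSurrogate() is standard UTF-16; a self-contained sketch:

    #include <cstdint>

    // Encode a supplementary code point (U+10000..U+10FFFF) as a UTF-16
    // surrogate pair, the same split ReadBlock() performs above.
    void EncodeSurrogatePair(uint32_t code_point,
                             uint16_t* lead, uint16_t* trail) {
      uint32_t v = code_point - 0x10000;                     // 20 payload bits.
      *lead  = static_cast<uint16_t>(0xD800 + (v >> 10));    // High 10 bits.
      *trail = static_cast<uint16_t>(0xDC00 + (v & 0x3FF));  // Low 10 bits.
    }

    // Example: U+1F600 encodes as 0xD83D 0xDE00.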
Scope top_scope(&scope_, kTopLevelScope); set_language_mode(mode); Scope function_scope(&scope_, kFunctionScope); function_scope.set_is_generator(is_generator); - ASSERT_EQ(i::Token::LBRACE, scanner_->current_token()); + ASSERT_EQ(Token::LBRACE, scanner()->current_token()); bool ok = true; - int start_position = scanner_->peek_location().beg_pos; + int start_position = peek_position(); ParseLazyFunctionLiteralBody(&ok); - if (stack_overflow_) return kPreParseStackOverflow; + if (stack_overflow()) return kPreParseStackOverflow; if (!ok) { - ReportUnexpectedToken(scanner_->current_token()); + ReportUnexpectedToken(scanner()->current_token()); } else { - ASSERT_EQ(i::Token::RBRACE, scanner_->peek()); + ASSERT_EQ(Token::RBRACE, scanner()->peek()); if (!is_classic_mode()) { - int end_pos = scanner_->location().end_pos; + int end_pos = scanner()->location().end_pos; CheckOctalLiteral(start_position, end_pos, &ok); if (ok) { CheckDelayedStrictModeViolation(start_position, end_pos, &ok); @@ -98,50 +97,38 @@ PreParser::PreParseResult PreParser::PreParseLazyFunction( // That means that contextual checks (like a label being declared where // it is used) are generally omitted. -void PreParser::ReportUnexpectedToken(i::Token::Value token) { +void PreParser::ReportUnexpectedToken(Token::Value token) { // We don't report stack overflows here, to avoid increasing the // stack depth even further. Instead we report it after parsing is // over, in ParseProgram. - if (token == i::Token::ILLEGAL && stack_overflow_) { + if (token == Token::ILLEGAL && stack_overflow()) { return; } - i::Scanner::Location source_location = scanner_->location(); + Scanner::Location source_location = scanner()->location(); // Six of the tokens are treated specially switch (token) { - case i::Token::EOS: + case Token::EOS: return ReportMessageAt(source_location, "unexpected_eos", NULL); - case i::Token::NUMBER: + case Token::NUMBER: return ReportMessageAt(source_location, "unexpected_token_number", NULL); - case i::Token::STRING: + case Token::STRING: return ReportMessageAt(source_location, "unexpected_token_string", NULL); - case i::Token::IDENTIFIER: + case Token::IDENTIFIER: return ReportMessageAt(source_location, "unexpected_token_identifier", NULL); - case i::Token::FUTURE_RESERVED_WORD: + case Token::FUTURE_RESERVED_WORD: return ReportMessageAt(source_location, "unexpected_reserved", NULL); - case i::Token::FUTURE_STRICT_RESERVED_WORD: + case Token::FUTURE_STRICT_RESERVED_WORD: return ReportMessageAt(source_location, "unexpected_strict_reserved", NULL); default: - const char* name = i::Token::String(token); + const char* name = Token::String(token); ReportMessageAt(source_location, "unexpected_token", name); } } -// Checks whether octal literal last seen is between beg_pos and end_pos. -// If so, reports an error.
-void PreParser::CheckOctalLiteral(int beg_pos, int end_pos, bool* ok) { - i::Scanner::Location octal = scanner_->octal_position(); - if (beg_pos <= octal.beg_pos && octal.end_pos <= end_pos) { - ReportMessageAt(octal, "strict_octal_literal", NULL); - scanner_->clear_octal_position(); - *ok = false; - } -} - - #define CHECK_OK ok); \ if (!*ok) return kUnknownSourceElements; \ ((void)0 @@ -162,10 +149,10 @@ PreParser::Statement PreParser::ParseSourceElement(bool* ok) { // GeneratorDeclaration switch (peek()) { - case i::Token::FUNCTION: + case Token::FUNCTION: return ParseFunctionDeclaration(ok); - case i::Token::LET: - case i::Token::CONST: + case Token::LET: + case Token::CONST: return ParseVariableStatement(kSourceElement, ok); default: return ParseStatement(ok); @@ -184,7 +171,7 @@ PreParser::SourceElements PreParser::ParseSourceElements(int end_token, if (allow_directive_prologue) { if (statement.IsUseStrictLiteral()) { set_language_mode(allow_harmony_scoping() ? - i::EXTENDED_MODE : i::STRICT_MODE); + EXTENDED_MODE : STRICT_MODE); } else if (!statement.IsStringLiteral()) { allow_directive_prologue = false; } @@ -229,55 +216,55 @@ PreParser::Statement PreParser::ParseStatement(bool* ok) { // Keep the source position of the statement switch (peek()) { - case i::Token::LBRACE: + case Token::LBRACE: return ParseBlock(ok); - case i::Token::CONST: - case i::Token::LET: - case i::Token::VAR: + case Token::CONST: + case Token::LET: + case Token::VAR: return ParseVariableStatement(kStatement, ok); - case i::Token::SEMICOLON: + case Token::SEMICOLON: Next(); return Statement::Default(); - case i::Token::IF: + case Token::IF: return ParseIfStatement(ok); - case i::Token::DO: + case Token::DO: return ParseDoWhileStatement(ok); - case i::Token::WHILE: + case Token::WHILE: return ParseWhileStatement(ok); - case i::Token::FOR: + case Token::FOR: return ParseForStatement(ok); - case i::Token::CONTINUE: + case Token::CONTINUE: return ParseContinueStatement(ok); - case i::Token::BREAK: + case Token::BREAK: return ParseBreakStatement(ok); - case i::Token::RETURN: + case Token::RETURN: return ParseReturnStatement(ok); - case i::Token::WITH: + case Token::WITH: return ParseWithStatement(ok); - case i::Token::SWITCH: + case Token::SWITCH: return ParseSwitchStatement(ok); - case i::Token::THROW: + case Token::THROW: return ParseThrowStatement(ok); - case i::Token::TRY: + case Token::TRY: return ParseTryStatement(ok); - case i::Token::FUNCTION: { - i::Scanner::Location start_location = scanner_->peek_location(); + case Token::FUNCTION: { + Scanner::Location start_location = scanner()->peek_location(); Statement statement = ParseFunctionDeclaration(CHECK_OK); - i::Scanner::Location end_location = scanner_->location(); + Scanner::Location end_location = scanner()->location(); if (!is_classic_mode()) { ReportMessageAt(start_location.beg_pos, end_location.end_pos, "strict_function", NULL); @@ -288,7 +275,7 @@ PreParser::Statement PreParser::ParseStatement(bool* ok) { } } - case i::Token::DEBUGGER: + case Token::DEBUGGER: return ParseDebuggerStatement(ok); default: @@ -303,11 +290,11 @@ PreParser::Statement PreParser::ParseFunctionDeclaration(bool* ok) { // GeneratorDeclaration :: // 'function' '*' Identifier '(' FormalParameterListopt ')' // '{' FunctionBody '}' - Expect(i::Token::FUNCTION, CHECK_OK); + Expect(Token::FUNCTION, CHECK_OK); - bool is_generator = allow_generators_ && Check(i::Token::MUL); + bool is_generator = allow_generators() && Check(Token::MUL); Identifier identifier = 
ParseIdentifier(CHECK_OK); - i::Scanner::Location location = scanner_->location(); + Scanner::Location location = scanner()->location(); Expression function_value = ParseFunctionLiteral(is_generator, CHECK_OK); @@ -333,15 +320,15 @@ PreParser::Statement PreParser::ParseBlock(bool* ok) { // Note that a Block does not introduce a new execution scope! // (ECMA-262, 3rd, 12.2) // - Expect(i::Token::LBRACE, CHECK_OK); - while (peek() != i::Token::RBRACE) { + Expect(Token::LBRACE, CHECK_OK); + while (peek() != Token::RBRACE) { if (is_extended_mode()) { ParseSourceElement(CHECK_OK); } else { ParseStatement(CHECK_OK); } } - Expect(i::Token::RBRACE, ok); + Expect(Token::RBRACE, ok); return Statement::Default(); } @@ -385,9 +372,9 @@ PreParser::Statement PreParser::ParseVariableDeclarations( // ConstBinding :: // BindingPattern '=' AssignmentExpression bool require_initializer = false; - if (peek() == i::Token::VAR) { - Consume(i::Token::VAR); - } else if (peek() == i::Token::CONST) { + if (peek() == Token::VAR) { + Consume(Token::VAR); + } else if (peek() == Token::CONST) { // TODO(ES6): The ES6 Draft Rev4 section 12.2.2 reads: // // ConstDeclaration : const ConstBinding (',' ConstBinding)* ';' @@ -398,20 +385,20 @@ PreParser::Statement PreParser::ParseVariableDeclarations( // However disallowing const in classic mode will break compatibility with // existing pages. Therefore we keep allowing const with the old // non-harmony semantics in classic mode. - Consume(i::Token::CONST); + Consume(Token::CONST); switch (language_mode()) { - case i::CLASSIC_MODE: + case CLASSIC_MODE: break; - case i::STRICT_MODE: { - i::Scanner::Location location = scanner_->peek_location(); + case STRICT_MODE: { + Scanner::Location location = scanner()->peek_location(); ReportMessageAt(location, "strict_const", NULL); *ok = false; return Statement::Default(); } - case i::EXTENDED_MODE: + case EXTENDED_MODE: if (var_context != kSourceElement && var_context != kForStatement) { - i::Scanner::Location location = scanner_->peek_location(); + Scanner::Location location = scanner()->peek_location(); ReportMessageAt(location.beg_pos, location.end_pos, "unprotected_const", NULL); *ok = false; @@ -420,7 +407,7 @@ PreParser::Statement PreParser::ParseVariableDeclarations( require_initializer = true; break; } - } else if (peek() == i::Token::LET) { + } else if (peek() == Token::LET) { // ES6 Draft Rev4 section 12.2.1: // // LetDeclaration : let LetBindingList ; @@ -428,16 +415,16 @@ PreParser::Statement PreParser::ParseVariableDeclarations( // * It is a Syntax Error if the code that matches this production is not // contained in extended code. if (!is_extended_mode()) { - i::Scanner::Location location = scanner_->peek_location(); + Scanner::Location location = scanner()->peek_location(); ReportMessageAt(location.beg_pos, location.end_pos, "illegal_let", NULL); *ok = false; return Statement::Default(); } - Consume(i::Token::LET); + Consume(Token::LET); if (var_context != kSourceElement && var_context != kForStatement) { - i::Scanner::Location location = scanner_->peek_location(); + Scanner::Location location = scanner()->peek_location(); ReportMessageAt(location.beg_pos, location.end_pos, "unprotected_let", NULL); *ok = false; @@ -455,22 +442,22 @@ PreParser::Statement PreParser::ParseVariableDeclarations( int nvars = 0; // the number of variables declared do { // Parse variable name. 
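A note on the CHECK_OK appearing as a call argument throughout these methods: the macro defined above ParseSourceElement is deliberately left unbalanced, so the call site's own ')' and ';' complete the expansion, turning every parse call into a call plus an early error return. A toy demonstration of the trick (names and the failure value are illustrative, not V8 code):

    #include <cstdio>

    // Same shape as PreParser's CHECK_OK: the macro supplies 'ok)' plus an
    // early return; the call site's ');' closes the trailing ((void)0.
    #define CHECK_OK ok);                 \
      if (!*ok) return -1;                \
      ((void)0

    static int ReadDigit(bool* ok) { *ok = false; return 0; }  // Always fails.

    static int ReadNumber(bool* ok) {
      int digit = ReadDigit(CHECK_OK);
      // Expanded: int digit = ReadDigit(ok);
      //           if (!*ok) return -1;
      //           ((void)0);
      return digit;
    }

    int main() {
      bool ok = true;
      std::printf("%d\n", ReadNumber(&ok));  // Prints -1: early return fired.
      return 0;
    }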
- if (nvars > 0) Consume(i::Token::COMMA); + if (nvars > 0) Consume(Token::COMMA); Identifier identifier = ParseIdentifier(CHECK_OK); if (!is_classic_mode() && !identifier.IsValidStrictVariable()) { - StrictModeIdentifierViolation(scanner_->location(), + StrictModeIdentifierViolation(scanner()->location(), "strict_var_name", identifier, ok); return Statement::Default(); } nvars++; - if (peek() == i::Token::ASSIGN || require_initializer) { - Expect(i::Token::ASSIGN, CHECK_OK); + if (peek() == Token::ASSIGN || require_initializer) { + Expect(Token::ASSIGN, CHECK_OK); ParseAssignmentExpression(var_context != kForStatement, CHECK_OK); if (decl_props != NULL) *decl_props = kHasInitializers; } - } while (peek() == i::Token::COMMA); + } while (peek() == Token::COMMA); if (num_decl != NULL) *num_decl = nvars; return Statement::Default(); @@ -488,8 +475,8 @@ PreParser::Statement PreParser::ParseExpressionOrLabelledStatement(bool* ok) { ASSERT(is_classic_mode() || (!expr.AsIdentifier().IsFutureStrictReserved() && !expr.AsIdentifier().IsYield())); - if (peek() == i::Token::COLON) { - Consume(i::Token::COLON); + if (peek() == Token::COLON) { + Consume(Token::COLON); return ParseStatement(ok); } // Preparsing is disabled for extensions (because the extension details @@ -506,12 +493,12 @@ PreParser::Statement PreParser::ParseIfStatement(bool* ok) { // IfStatement :: // 'if' '(' Expression ')' Statement ('else' Statement)? - Expect(i::Token::IF, CHECK_OK); - Expect(i::Token::LPAREN, CHECK_OK); + Expect(Token::IF, CHECK_OK); + Expect(Token::LPAREN, CHECK_OK); ParseExpression(true, CHECK_OK); - Expect(i::Token::RPAREN, CHECK_OK); + Expect(Token::RPAREN, CHECK_OK); ParseStatement(CHECK_OK); - if (peek() == i::Token::ELSE) { + if (peek() == Token::ELSE) { Next(); ParseStatement(CHECK_OK); } @@ -523,12 +510,12 @@ PreParser::Statement PreParser::ParseContinueStatement(bool* ok) { // ContinueStatement :: // 'continue' [no line terminator] Identifier? ';' - Expect(i::Token::CONTINUE, CHECK_OK); - i::Token::Value tok = peek(); - if (!scanner_->HasAnyLineTerminatorBeforeNext() && - tok != i::Token::SEMICOLON && - tok != i::Token::RBRACE && - tok != i::Token::EOS) { + Expect(Token::CONTINUE, CHECK_OK); + Token::Value tok = peek(); + if (!scanner()->HasAnyLineTerminatorBeforeNext() && + tok != Token::SEMICOLON && + tok != Token::RBRACE && + tok != Token::EOS) { ParseIdentifier(CHECK_OK); } ExpectSemicolon(CHECK_OK); @@ -540,12 +527,12 @@ PreParser::Statement PreParser::ParseBreakStatement(bool* ok) { // BreakStatement :: // 'break' [no line terminator] Identifier? ';' - Expect(i::Token::BREAK, CHECK_OK); - i::Token::Value tok = peek(); - if (!scanner_->HasAnyLineTerminatorBeforeNext() && - tok != i::Token::SEMICOLON && - tok != i::Token::RBRACE && - tok != i::Token::EOS) { + Expect(Token::BREAK, CHECK_OK); + Token::Value tok = peek(); + if (!scanner()->HasAnyLineTerminatorBeforeNext() && + tok != Token::SEMICOLON && + tok != Token::RBRACE && + tok != Token::EOS) { ParseIdentifier(CHECK_OK); } ExpectSemicolon(CHECK_OK); @@ -560,18 +547,18 @@ PreParser::Statement PreParser::ParseReturnStatement(bool* ok) { // Consume the return token. It is necessary to do this before // reporting any errors on it, because of the way errors are // reported (underlining). - Expect(i::Token::RETURN, CHECK_OK); + Expect(Token::RETURN, CHECK_OK); // An ECMAScript program is considered syntactically incorrect if it // contains a return statement that is not within the body of a // function. See ECMA-262, section 12.9, page 67.
// This is not handled during preparsing. - i::Token::Value tok = peek(); - if (!scanner_->HasAnyLineTerminatorBeforeNext() && - tok != i::Token::SEMICOLON && - tok != i::Token::RBRACE && - tok != i::Token::EOS) { + Token::Value tok = peek(); + if (!scanner()->HasAnyLineTerminatorBeforeNext() && + tok != Token::SEMICOLON && + tok != Token::RBRACE && + tok != Token::EOS) { ParseExpression(true, CHECK_OK); } ExpectSemicolon(CHECK_OK); @@ -582,16 +569,16 @@ PreParser::Statement PreParser::ParseReturnStatement(bool* ok) { PreParser::Statement PreParser::ParseWithStatement(bool* ok) { // WithStatement :: // 'with' '(' Expression ')' Statement - Expect(i::Token::WITH, CHECK_OK); + Expect(Token::WITH, CHECK_OK); if (!is_classic_mode()) { - i::Scanner::Location location = scanner_->location(); + Scanner::Location location = scanner()->location(); ReportMessageAt(location, "strict_mode_with", NULL); *ok = false; return Statement::Default(); } - Expect(i::Token::LPAREN, CHECK_OK); + Expect(Token::LPAREN, CHECK_OK); ParseExpression(true, CHECK_OK); - Expect(i::Token::RPAREN, CHECK_OK); + Expect(Token::RPAREN, CHECK_OK); Scope::InsideWith iw(scope_); ParseStatement(CHECK_OK); @@ -603,30 +590,30 @@ PreParser::Statement PreParser::ParseSwitchStatement(bool* ok) { // SwitchStatement :: // 'switch' '(' Expression ')' '{' CaseClause* '}' - Expect(i::Token::SWITCH, CHECK_OK); - Expect(i::Token::LPAREN, CHECK_OK); + Expect(Token::SWITCH, CHECK_OK); + Expect(Token::LPAREN, CHECK_OK); ParseExpression(true, CHECK_OK); - Expect(i::Token::RPAREN, CHECK_OK); + Expect(Token::RPAREN, CHECK_OK); - Expect(i::Token::LBRACE, CHECK_OK); - i::Token::Value token = peek(); - while (token != i::Token::RBRACE) { - if (token == i::Token::CASE) { - Expect(i::Token::CASE, CHECK_OK); + Expect(Token::LBRACE, CHECK_OK); + Token::Value token = peek(); + while (token != Token::RBRACE) { + if (token == Token::CASE) { + Expect(Token::CASE, CHECK_OK); ParseExpression(true, CHECK_OK); } else { - Expect(i::Token::DEFAULT, CHECK_OK); + Expect(Token::DEFAULT, CHECK_OK); } - Expect(i::Token::COLON, CHECK_OK); + Expect(Token::COLON, CHECK_OK); token = peek(); - while (token != i::Token::CASE && - token != i::Token::DEFAULT && - token != i::Token::RBRACE) { + while (token != Token::CASE && + token != Token::DEFAULT && + token != Token::RBRACE) { ParseStatement(CHECK_OK); token = peek(); } } - Expect(i::Token::RBRACE, ok); + Expect(Token::RBRACE, ok); return Statement::Default(); } @@ -635,13 +622,13 @@ PreParser::Statement PreParser::ParseDoWhileStatement(bool* ok) { // DoStatement :: // 'do' Statement 'while' '(' Expression ')' ';' - Expect(i::Token::DO, CHECK_OK); + Expect(Token::DO, CHECK_OK); ParseStatement(CHECK_OK); - Expect(i::Token::WHILE, CHECK_OK); - Expect(i::Token::LPAREN, CHECK_OK); + Expect(Token::WHILE, CHECK_OK); + Expect(Token::LPAREN, CHECK_OK); ParseExpression(true, CHECK_OK); - Expect(i::Token::RPAREN, ok); - if (peek() == i::Token::SEMICOLON) Consume(i::Token::SEMICOLON); + Expect(Token::RPAREN, ok); + if (peek() == Token::SEMICOLON) Consume(Token::SEMICOLON); return Statement::Default(); } @@ -650,20 +637,19 @@ PreParser::Statement PreParser::ParseWhileStatement(bool* ok) { // WhileStatement :: // 'while' '(' Expression ')' Statement - Expect(i::Token::WHILE, CHECK_OK); - Expect(i::Token::LPAREN, CHECK_OK); + Expect(Token::WHILE, CHECK_OK); + Expect(Token::LPAREN, CHECK_OK); ParseExpression(true, CHECK_OK); - Expect(i::Token::RPAREN, CHECK_OK); + Expect(Token::RPAREN, CHECK_OK); ParseStatement(ok); return 
Statement::Default(); } bool PreParser::CheckInOrOf(bool accept_OF) { - if (peek() == i::Token::IN || - (allow_for_of() && accept_OF && peek() == i::Token::IDENTIFIER && - scanner_->is_next_contextual_keyword(v8::internal::CStrVector("of")))) { - Next(); + if (Check(Token::IN) || + (allow_for_of() && accept_OF && + CheckContextualKeyword(CStrVector("of")))) { return true; } return false; @@ -674,12 +660,12 @@ PreParser::Statement PreParser::ParseForStatement(bool* ok) { // ForStatement :: // 'for' '(' Expression? ';' Expression? ';' Expression? ')' Statement - Expect(i::Token::FOR, CHECK_OK); - Expect(i::Token::LPAREN, CHECK_OK); - if (peek() != i::Token::SEMICOLON) { - if (peek() == i::Token::VAR || peek() == i::Token::CONST || - peek() == i::Token::LET) { - bool is_let = peek() == i::Token::LET; + Expect(Token::FOR, CHECK_OK); + Expect(Token::LPAREN, CHECK_OK); + if (peek() != Token::SEMICOLON) { + if (peek() == Token::VAR || peek() == Token::CONST || + peek() == Token::LET) { + bool is_let = peek() == Token::LET; int decl_count; VariableDeclarationProperties decl_props = kHasNoInitializers; ParseVariableDeclarations( @@ -689,7 +675,7 @@ PreParser::Statement PreParser::ParseForStatement(bool* ok) { bool accept_OF = !has_initializers; if (accept_IN && CheckInOrOf(accept_OF)) { ParseExpression(true, CHECK_OK); - Expect(i::Token::RPAREN, CHECK_OK); + Expect(Token::RPAREN, CHECK_OK); ParseStatement(CHECK_OK); return Statement::Default(); @@ -698,7 +684,7 @@ PreParser::Statement PreParser::ParseForStatement(bool* ok) { Expression lhs = ParseExpression(false, CHECK_OK); if (CheckInOrOf(lhs.IsIdentifier())) { ParseExpression(true, CHECK_OK); - Expect(i::Token::RPAREN, CHECK_OK); + Expect(Token::RPAREN, CHECK_OK); ParseStatement(CHECK_OK); return Statement::Default(); @@ -707,17 +693,17 @@ PreParser::Statement PreParser::ParseForStatement(bool* ok) { } // Parsed initializer at this point. - Expect(i::Token::SEMICOLON, CHECK_OK); + Expect(Token::SEMICOLON, CHECK_OK); - if (peek() != i::Token::SEMICOLON) { + if (peek() != Token::SEMICOLON) { ParseExpression(true, CHECK_OK); } - Expect(i::Token::SEMICOLON, CHECK_OK); + Expect(Token::SEMICOLON, CHECK_OK); - if (peek() != i::Token::RPAREN) { + if (peek() != Token::RPAREN) { ParseExpression(true, CHECK_OK); } - Expect(i::Token::RPAREN, CHECK_OK); + Expect(Token::RPAREN, CHECK_OK); ParseStatement(ok); return Statement::Default(); @@ -728,9 +714,9 @@ PreParser::Statement PreParser::ParseThrowStatement(bool* ok) { // ThrowStatement :: // 'throw' [no line terminator] Expression ';' - Expect(i::Token::THROW, CHECK_OK); - if (scanner_->HasAnyLineTerminatorBeforeNext()) { - i::Scanner::Location pos = scanner_->location(); + Expect(Token::THROW, CHECK_OK); + if (scanner()->HasAnyLineTerminatorBeforeNext()) { + Scanner::Location pos = scanner()->location(); ReportMessageAt(pos, "newline_after_throw", NULL); *ok = false; return Statement::Default(); @@ -756,30 +742,30 @@ PreParser::Statement PreParser::ParseTryStatement(bool* ok) { // In preparsing, allow any number of catch/finally blocks, including zero // of both. 
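The rewritten CheckInOrOf above showcases the new ParserBase helpers: Check(Token::IN) consumes the token only if it is next, and CheckContextualKeyword(CStrVector("of")) does the same for an identifier spelled "of", since for-of introduces no reserved token of its own. The helper's shape is presumably the identifier-plus-spelling test the old inline code performed; a sketch under that assumption:

    // Assumed shape of the contextual-keyword check; the removed inline code
    // in CheckInOrOf used exactly this identifier-plus-spelling test.
    bool ParserBase::CheckContextualKeyword(Vector<const char> keyword) {
      if (peek() == Token::IDENTIFIER &&
          scanner()->is_next_contextual_keyword(keyword)) {
        Consume(Token::IDENTIFIER);
        return true;
      }
      return false;
    }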
- Expect(i::Token::TRY, CHECK_OK); + Expect(Token::TRY, CHECK_OK); ParseBlock(CHECK_OK); bool catch_or_finally_seen = false; - if (peek() == i::Token::CATCH) { - Consume(i::Token::CATCH); - Expect(i::Token::LPAREN, CHECK_OK); + if (peek() == Token::CATCH) { + Consume(Token::CATCH); + Expect(Token::LPAREN, CHECK_OK); Identifier id = ParseIdentifier(CHECK_OK); if (!is_classic_mode() && !id.IsValidStrictVariable()) { - StrictModeIdentifierViolation(scanner_->location(), + StrictModeIdentifierViolation(scanner()->location(), "strict_catch_variable", id, ok); return Statement::Default(); } - Expect(i::Token::RPAREN, CHECK_OK); + Expect(Token::RPAREN, CHECK_OK); { Scope::InsideWith iw(scope_); ParseBlock(CHECK_OK); } catch_or_finally_seen = true; } - if (peek() == i::Token::FINALLY) { - Consume(i::Token::FINALLY); + if (peek() == Token::FINALLY) { + Consume(Token::FINALLY); ParseBlock(CHECK_OK); catch_or_finally_seen = true; } @@ -797,7 +783,7 @@ PreParser::Statement PreParser::ParseDebuggerStatement(bool* ok) { // DebuggerStatement :: // 'debugger' ';' - Expect(i::Token::DEBUGGER, CHECK_OK); + Expect(Token::DEBUGGER, CHECK_OK); ExpectSemicolon(ok); return Statement::Default(); } @@ -818,8 +804,8 @@ PreParser::Expression PreParser::ParseExpression(bool accept_IN, bool* ok) { // Expression ',' AssignmentExpression Expression result = ParseAssignmentExpression(accept_IN, CHECK_OK); - while (peek() == i::Token::COMMA) { - Expect(i::Token::COMMA, CHECK_OK); + while (peek() == Token::COMMA) { + Expect(Token::COMMA, CHECK_OK); ParseAssignmentExpression(accept_IN, CHECK_OK); result = Expression::Default(); } @@ -835,14 +821,14 @@ PreParser::Expression PreParser::ParseAssignmentExpression(bool accept_IN, // YieldExpression // LeftHandSideExpression AssignmentOperator AssignmentExpression - if (scope_->is_generator() && peek() == i::Token::YIELD) { + if (scope_->is_generator() && peek() == Token::YIELD) { return ParseYieldExpression(ok); } - i::Scanner::Location before = scanner_->peek_location(); + Scanner::Location before = scanner()->peek_location(); Expression expression = ParseConditionalExpression(accept_IN, CHECK_OK); - if (!i::Token::IsAssignmentOp(peek())) { + if (!Token::IsAssignmentOp(peek())) { // Parsed conditional expression only (no assignment). return expression; } @@ -850,17 +836,17 @@ PreParser::Expression PreParser::ParseAssignmentExpression(bool accept_IN, if (!is_classic_mode() && expression.IsIdentifier() && expression.AsIdentifier().IsEvalOrArguments()) { - i::Scanner::Location after = scanner_->location(); + Scanner::Location after = scanner()->location(); ReportMessageAt(before.beg_pos, after.end_pos, "strict_lhs_assignment", NULL); *ok = false; return Expression::Default(); } - i::Token::Value op = Next(); // Get assignment operator. + Token::Value op = Next(); // Get assignment operator. ParseAssignmentExpression(accept_IN, CHECK_OK); - if ((op == i::Token::ASSIGN) && expression.IsThisProperty()) { + if ((op == Token::ASSIGN) && expression.IsThisProperty()) { scope_->AddProperty(); } @@ -872,8 +858,8 @@ PreParser::Expression PreParser::ParseAssignmentExpression(bool accept_IN, PreParser::Expression PreParser::ParseYieldExpression(bool* ok) { // YieldExpression :: // 'yield' '*'? 
AssignmentExpression - Consume(i::Token::YIELD); - Check(i::Token::MUL); + Consume(Token::YIELD); + Check(Token::MUL); ParseAssignmentExpression(false, CHECK_OK); @@ -890,26 +876,18 @@ PreParser::Expression PreParser::ParseConditionalExpression(bool accept_IN, // We start using the binary expression parser for prec >= 4 only! Expression expression = ParseBinaryExpression(4, accept_IN, CHECK_OK); - if (peek() != i::Token::CONDITIONAL) return expression; - Consume(i::Token::CONDITIONAL); + if (peek() != Token::CONDITIONAL) return expression; + Consume(Token::CONDITIONAL); // In parsing the first assignment expression in conditional // expressions we always accept the 'in' keyword; see ECMA-262, // section 11.12, page 58. ParseAssignmentExpression(true, CHECK_OK); - Expect(i::Token::COLON, CHECK_OK); + Expect(Token::COLON, CHECK_OK); ParseAssignmentExpression(accept_IN, CHECK_OK); return Expression::Default(); } -int PreParser::Precedence(i::Token::Value tok, bool accept_IN) { - if (tok == i::Token::IN && !accept_IN) - return 0; // 0 precedence will terminate binary expression parsing - - return i::Token::Precedence(tok); -} - - // Precedence >= 4 PreParser::Expression PreParser::ParseBinaryExpression(int prec, bool accept_IN, @@ -940,19 +918,19 @@ PreParser::Expression PreParser::ParseUnaryExpression(bool* ok) { // '~' UnaryExpression // '!' UnaryExpression - i::Token::Value op = peek(); - if (i::Token::IsUnaryOp(op)) { + Token::Value op = peek(); + if (Token::IsUnaryOp(op)) { op = Next(); ParseUnaryExpression(ok); return Expression::Default(); - } else if (i::Token::IsCountOp(op)) { + } else if (Token::IsCountOp(op)) { op = Next(); - i::Scanner::Location before = scanner_->peek_location(); + Scanner::Location before = scanner()->peek_location(); Expression expression = ParseUnaryExpression(CHECK_OK); if (!is_classic_mode() && expression.IsIdentifier() && expression.AsIdentifier().IsEvalOrArguments()) { - i::Scanner::Location after = scanner_->location(); + Scanner::Location after = scanner()->location(); ReportMessageAt(before.beg_pos, after.end_pos, "strict_lhs_prefix", NULL); *ok = false; @@ -968,14 +946,14 @@ PreParser::Expression PreParser::ParsePostfixExpression(bool* ok) { // PostfixExpression :: // LeftHandSideExpression ('++' | '--')? - i::Scanner::Location before = scanner_->peek_location(); + Scanner::Location before = scanner()->peek_location(); Expression expression = ParseLeftHandSideExpression(CHECK_OK); - if (!scanner_->HasAnyLineTerminatorBeforeNext() && - i::Token::IsCountOp(peek())) { + if (!scanner()->HasAnyLineTerminatorBeforeNext() && + Token::IsCountOp(peek())) { if (!is_classic_mode() && expression.IsIdentifier() && expression.AsIdentifier().IsEvalOrArguments()) { - i::Scanner::Location after = scanner_->location(); + Scanner::Location after = scanner()->location(); ReportMessageAt(before.beg_pos, after.end_pos, "strict_lhs_postfix", NULL); *ok = false; @@ -993,7 +971,7 @@ PreParser::Expression PreParser::ParseLeftHandSideExpression(bool* ok) { // (NewExpression | MemberExpression) ... 
Expression result = Expression::Default(); - if (peek() == i::Token::NEW) { + if (peek() == Token::NEW) { result = ParseNewExpression(CHECK_OK); } else { result = ParseMemberExpression(CHECK_OK); @@ -1001,10 +979,10 @@ PreParser::Expression PreParser::ParseLeftHandSideExpression(bool* ok) { while (true) { switch (peek()) { - case i::Token::LBRACK: { - Consume(i::Token::LBRACK); + case Token::LBRACK: { + Consume(Token::LBRACK); ParseExpression(true, CHECK_OK); - Expect(i::Token::RBRACK, CHECK_OK); + Expect(Token::RBRACK, CHECK_OK); if (result.IsThis()) { result = Expression::ThisProperty(); } else { @@ -1013,14 +991,14 @@ PreParser::Expression PreParser::ParseLeftHandSideExpression(bool* ok) { break; } - case i::Token::LPAREN: { + case Token::LPAREN: { ParseArguments(CHECK_OK); result = Expression::Default(); break; } - case i::Token::PERIOD: { - Consume(i::Token::PERIOD); + case Token::PERIOD: { + Consume(Token::PERIOD); ParseIdentifierName(CHECK_OK); if (result.IsThis()) { result = Expression::ThisProperty(); @@ -1051,9 +1029,9 @@ PreParser::Expression PreParser::ParseNewExpression(bool* ok) { // lists as long as it has 'new' prefixes left unsigned new_count = 0; do { - Consume(i::Token::NEW); + Consume(Token::NEW); new_count++; - } while (peek() == i::Token::NEW); + } while (peek() == Token::NEW); return ParseMemberWithNewPrefixesExpression(new_count, ok); } @@ -1072,17 +1050,17 @@ PreParser::Expression PreParser::ParseMemberWithNewPrefixesExpression( // Parse the initial primary or function expression. Expression result = Expression::Default(); - if (peek() == i::Token::FUNCTION) { - Consume(i::Token::FUNCTION); + if (peek() == Token::FUNCTION) { + Consume(Token::FUNCTION); - bool is_generator = allow_generators_ && Check(i::Token::MUL); + bool is_generator = allow_generators() && Check(Token::MUL); Identifier identifier = Identifier::Default(); if (peek_any_identifier()) { identifier = ParseIdentifier(CHECK_OK); } result = ParseFunctionLiteral(is_generator, CHECK_OK); if (result.IsStrictFunction() && !identifier.IsValidStrictVariable()) { - StrictModeIdentifierViolation(scanner_->location(), + StrictModeIdentifierViolation(scanner()->location(), "strict_function_name", identifier, ok); @@ -1094,10 +1072,10 @@ PreParser::Expression PreParser::ParseMemberWithNewPrefixesExpression( while (true) { switch (peek()) { - case i::Token::LBRACK: { - Consume(i::Token::LBRACK); + case Token::LBRACK: { + Consume(Token::LBRACK); ParseExpression(true, CHECK_OK); - Expect(i::Token::RBRACK, CHECK_OK); + Expect(Token::RBRACK, CHECK_OK); if (result.IsThis()) { result = Expression::ThisProperty(); } else { @@ -1105,8 +1083,8 @@ PreParser::Expression PreParser::ParseMemberWithNewPrefixesExpression( } break; } - case i::Token::PERIOD: { - Consume(i::Token::PERIOD); + case Token::PERIOD: { + Consume(Token::PERIOD); ParseIdentifierName(CHECK_OK); if (result.IsThis()) { result = Expression::ThisProperty(); @@ -1115,7 +1093,7 @@ PreParser::Expression PreParser::ParseMemberWithNewPrefixesExpression( } break; } - case i::Token::LPAREN: { + case Token::LPAREN: { if (new_count == 0) return result; // Consume one of the new prefixes (already parsed). 
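The new_count threading above is what pairs each 'new' prefix with at most one argument list, mirroring the NewExpression/MemberExpression split in the grammar. A worked trace for `new new f()()` (the LPAREN case continues just below):

    // ParseNewExpression's do/while consumes both 'new' tokens: new_count == 2.
    //   f            -> primary expression
    //   first  '()'  -> LPAREN case, new_count 2 -> 1 (arguments of 'new f()')
    //   second '()'  -> LPAREN case, new_count 1 -> 0 (outer 'new (new f())()')
    // Any further '()' hits the "if (new_count == 0) return result" early exit
    // and is parsed as a plain call by ParseLeftHandSideExpression instead,
    // since every 'new' prefix has been consumed.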
ParseArguments(CHECK_OK); @@ -1146,59 +1124,59 @@ PreParser::Expression PreParser::ParsePrimaryExpression(bool* ok) { Expression result = Expression::Default(); switch (peek()) { - case i::Token::THIS: { + case Token::THIS: { Next(); result = Expression::This(); break; } - case i::Token::FUTURE_RESERVED_WORD: - case i::Token::FUTURE_STRICT_RESERVED_WORD: - case i::Token::YIELD: - case i::Token::IDENTIFIER: { + case Token::FUTURE_RESERVED_WORD: + case Token::FUTURE_STRICT_RESERVED_WORD: + case Token::YIELD: + case Token::IDENTIFIER: { Identifier id = ParseIdentifier(CHECK_OK); result = Expression::FromIdentifier(id); break; } - case i::Token::NULL_LITERAL: - case i::Token::TRUE_LITERAL: - case i::Token::FALSE_LITERAL: - case i::Token::NUMBER: { + case Token::NULL_LITERAL: + case Token::TRUE_LITERAL: + case Token::FALSE_LITERAL: + case Token::NUMBER: { Next(); break; } - case i::Token::STRING: { + case Token::STRING: { Next(); result = GetStringSymbol(); break; } - case i::Token::ASSIGN_DIV: + case Token::ASSIGN_DIV: result = ParseRegExpLiteral(true, CHECK_OK); break; - case i::Token::DIV: + case Token::DIV: result = ParseRegExpLiteral(false, CHECK_OK); break; - case i::Token::LBRACK: + case Token::LBRACK: result = ParseArrayLiteral(CHECK_OK); break; - case i::Token::LBRACE: + case Token::LBRACE: result = ParseObjectLiteral(CHECK_OK); break; - case i::Token::LPAREN: - Consume(i::Token::LPAREN); - parenthesized_function_ = (peek() == i::Token::FUNCTION); + case Token::LPAREN: + Consume(Token::LPAREN); + parenthesized_function_ = (peek() == Token::FUNCTION); result = ParseExpression(true, CHECK_OK); - Expect(i::Token::RPAREN, CHECK_OK); + Expect(Token::RPAREN, CHECK_OK); result = result.Parenthesize(); break; - case i::Token::MOD: + case Token::MOD: result = ParseV8Intrinsic(CHECK_OK); break; @@ -1216,54 +1194,21 @@ PreParser::Expression PreParser::ParsePrimaryExpression(bool* ok) { PreParser::Expression PreParser::ParseArrayLiteral(bool* ok) { // ArrayLiteral :: // '[' Expression? (',' Expression?)* ']' - Expect(i::Token::LBRACK, CHECK_OK); - while (peek() != i::Token::RBRACK) { - if (peek() != i::Token::COMMA) { + Expect(Token::LBRACK, CHECK_OK); + while (peek() != Token::RBRACK) { + if (peek() != Token::COMMA) { ParseAssignmentExpression(true, CHECK_OK); } - if (peek() != i::Token::RBRACK) { - Expect(i::Token::COMMA, CHECK_OK); + if (peek() != Token::RBRACK) { + Expect(Token::COMMA, CHECK_OK); } } - Expect(i::Token::RBRACK, CHECK_OK); + Expect(Token::RBRACK, CHECK_OK); scope_->NextMaterializedLiteralIndex(); return Expression::Default(); } -void PreParser::CheckDuplicate(DuplicateFinder* finder, - i::Token::Value property, - int type, - bool* ok) { - int old_type; - if (property == i::Token::NUMBER) { - old_type = finder->AddNumber(scanner_->literal_ascii_string(), type); - } else if (scanner_->is_literal_ascii()) { - old_type = finder->AddAsciiSymbol(scanner_->literal_ascii_string(), - type); - } else { - old_type = finder->AddUtf16Symbol(scanner_->literal_utf16_string(), type); - } - if (HasConflict(old_type, type)) { - if (IsDataDataConflict(old_type, type)) { - // Both are data properties. - if (is_classic_mode()) return; - ReportMessageAt(scanner_->location(), - "strict_duplicate_property", NULL); - } else if (IsDataAccessorConflict(old_type, type)) { - // Both a data and an accessor property with the same name. 
- ReportMessageAt(scanner_->location(), - "accessor_data_property", NULL); - } else { - ASSERT(IsAccessorAccessorConflict(old_type, type)); - // Both accessors of the same type. - ReportMessageAt(scanner_->location(), - "accessor_get_set", NULL); - } - *ok = false; - } -} - PreParser::Expression PreParser::ParseObjectLiteral(bool* ok) { // ObjectLiteral :: @@ -1272,25 +1217,26 @@ PreParser::Expression PreParser::ParseObjectLiteral(bool* ok) { // | (('get' | 'set') (IdentifierName | String | Number) FunctionLiteral) // )*[','] '}' - Expect(i::Token::LBRACE, CHECK_OK); - DuplicateFinder duplicate_finder(scanner_->unicode_cache()); - while (peek() != i::Token::RBRACE) { - i::Token::Value next = peek(); + ObjectLiteralChecker checker(this, language_mode()); + + Expect(Token::LBRACE, CHECK_OK); + while (peek() != Token::RBRACE) { + Token::Value next = peek(); switch (next) { - case i::Token::IDENTIFIER: - case i::Token::FUTURE_RESERVED_WORD: - case i::Token::FUTURE_STRICT_RESERVED_WORD: { + case Token::IDENTIFIER: + case Token::FUTURE_RESERVED_WORD: + case Token::FUTURE_STRICT_RESERVED_WORD: { bool is_getter = false; bool is_setter = false; ParseIdentifierNameOrGetOrSet(&is_getter, &is_setter, CHECK_OK); - if ((is_getter || is_setter) && peek() != i::Token::COLON) { - i::Token::Value name = Next(); - bool is_keyword = i::Token::IsKeyword(name); - if (name != i::Token::IDENTIFIER && - name != i::Token::FUTURE_RESERVED_WORD && - name != i::Token::FUTURE_STRICT_RESERVED_WORD && - name != i::Token::NUMBER && - name != i::Token::STRING && + if ((is_getter || is_setter) && peek() != Token::COLON) { + Token::Value name = Next(); + bool is_keyword = Token::IsKeyword(name); + if (name != Token::IDENTIFIER && + name != Token::FUTURE_RESERVED_WORD && + name != Token::FUTURE_STRICT_RESERVED_WORD && + name != Token::NUMBER && + name != Token::STRING && !is_keyword) { *ok = false; return Expression::Default(); @@ -1298,30 +1244,30 @@ PreParser::Expression PreParser::ParseObjectLiteral(bool* ok) { if (!is_keyword) { LogSymbol(); } - PropertyType type = is_getter ? kGetterProperty : kSetterProperty; - CheckDuplicate(&duplicate_finder, name, type, CHECK_OK); + PropertyKind type = is_getter ? kGetterProperty : kSetterProperty; + checker.CheckProperty(name, type, CHECK_OK); ParseFunctionLiteral(false, CHECK_OK); - if (peek() != i::Token::RBRACE) { - Expect(i::Token::COMMA, CHECK_OK); + if (peek() != Token::RBRACE) { + Expect(Token::COMMA, CHECK_OK); } continue; // restart the while } - CheckDuplicate(&duplicate_finder, next, kValueProperty, CHECK_OK); + checker.CheckProperty(next, kValueProperty, CHECK_OK); break; } - case i::Token::STRING: + case Token::STRING: Consume(next); - CheckDuplicate(&duplicate_finder, next, kValueProperty, CHECK_OK); + checker.CheckProperty(next, kValueProperty, CHECK_OK); GetStringSymbol(); break; - case i::Token::NUMBER: + case Token::NUMBER: Consume(next); - CheckDuplicate(&duplicate_finder, next, kValueProperty, CHECK_OK); + checker.CheckProperty(next, kValueProperty, CHECK_OK); break; default: - if (i::Token::IsKeyword(next)) { + if (Token::IsKeyword(next)) { Consume(next); - CheckDuplicate(&duplicate_finder, next, kValueProperty, CHECK_OK); + checker.CheckProperty(next, kValueProperty, CHECK_OK); } else { // Unexpected token. 
*ok = false; @@ -1329,13 +1275,13 @@ PreParser::Expression PreParser::ParseObjectLiteral(bool* ok) { } } - Expect(i::Token::COLON, CHECK_OK); + Expect(Token::COLON, CHECK_OK); ParseAssignmentExpression(true, CHECK_OK); // TODO(1240767): Consider allowing trailing comma. - if (peek() != i::Token::RBRACE) Expect(i::Token::COMMA, CHECK_OK); + if (peek() != Token::RBRACE) Expect(Token::COMMA, CHECK_OK); } - Expect(i::Token::RBRACE, CHECK_OK); + Expect(Token::RBRACE, CHECK_OK); scope_->NextMaterializedLiteralIndex(); return Expression::Default(); @@ -1344,18 +1290,18 @@ PreParser::Expression PreParser::ParseObjectLiteral(bool* ok) { PreParser::Expression PreParser::ParseRegExpLiteral(bool seen_equal, bool* ok) { - if (!scanner_->ScanRegExpPattern(seen_equal)) { + if (!scanner()->ScanRegExpPattern(seen_equal)) { Next(); - ReportMessageAt(scanner_->location(), "unterminated_regexp", NULL); + ReportMessageAt(scanner()->location(), "unterminated_regexp", NULL); *ok = false; return Expression::Default(); } scope_->NextMaterializedLiteralIndex(); - if (!scanner_->ScanRegExpFlags()) { + if (!scanner()->ScanRegExpFlags()) { Next(); - ReportMessageAt(scanner_->location(), "invalid_regexp_flags", NULL); + ReportMessageAt(scanner()->location(), "invalid_regexp_flags", NULL); *ok = false; return Expression::Default(); } @@ -1368,21 +1314,21 @@ PreParser::Arguments PreParser::ParseArguments(bool* ok) { // Arguments :: // '(' (AssignmentExpression)*[','] ')' - Expect(i::Token::LPAREN, ok); + Expect(Token::LPAREN, ok); if (!*ok) return -1; - bool done = (peek() == i::Token::RPAREN); + bool done = (peek() == Token::RPAREN); int argc = 0; while (!done) { ParseAssignmentExpression(true, ok); if (!*ok) return -1; argc++; - done = (peek() == i::Token::RPAREN); + done = (peek() == Token::RPAREN); if (!done) { - Expect(i::Token::COMMA, ok); + Expect(Token::COMMA, ok); if (!*ok) return -1; } } - Expect(i::Token::RPAREN, ok); + Expect(Token::RPAREN, ok); return argc; } @@ -1399,57 +1345,57 @@ PreParser::Expression PreParser::ParseFunctionLiteral(bool is_generator, function_scope.set_is_generator(is_generator); // FormalParameterList :: // '(' (Identifier)*[','] ')' - Expect(i::Token::LPAREN, CHECK_OK); - int start_position = scanner_->location().beg_pos; - bool done = (peek() == i::Token::RPAREN); - DuplicateFinder duplicate_finder(scanner_->unicode_cache()); + Expect(Token::LPAREN, CHECK_OK); + int start_position = position(); + bool done = (peek() == Token::RPAREN); + DuplicateFinder duplicate_finder(scanner()->unicode_cache()); while (!done) { Identifier id = ParseIdentifier(CHECK_OK); if (!id.IsValidStrictVariable()) { - StrictModeIdentifierViolation(scanner_->location(), + StrictModeIdentifierViolation(scanner()->location(), "strict_param_name", id, CHECK_OK); } int prev_value; - if (scanner_->is_literal_ascii()) { + if (scanner()->is_literal_ascii()) { prev_value = - duplicate_finder.AddAsciiSymbol(scanner_->literal_ascii_string(), 1); + duplicate_finder.AddAsciiSymbol(scanner()->literal_ascii_string(), 1); } else { prev_value = - duplicate_finder.AddUtf16Symbol(scanner_->literal_utf16_string(), 1); + duplicate_finder.AddUtf16Symbol(scanner()->literal_utf16_string(), 1); } if (prev_value != 0) { - SetStrictModeViolation(scanner_->location(), + SetStrictModeViolation(scanner()->location(), "strict_param_dupe", CHECK_OK); } - done = (peek() == i::Token::RPAREN); + done = (peek() == Token::RPAREN); if (!done) { - Expect(i::Token::COMMA, CHECK_OK); + Expect(Token::COMMA, CHECK_OK); } } - Expect(i::Token::RPAREN, 
CHECK_OK); + Expect(Token::RPAREN, CHECK_OK); // Determine if the function will be lazily compiled. // Currently only happens to top-level functions. // Optimistically assume that all top-level functions are lazily compiled. bool is_lazily_compiled = (outer_scope_type == kTopLevelScope && - !inside_with && allow_lazy_ && + !inside_with && allow_lazy() && !parenthesized_function_); parenthesized_function_ = false; - Expect(i::Token::LBRACE, CHECK_OK); + Expect(Token::LBRACE, CHECK_OK); if (is_lazily_compiled) { ParseLazyFunctionLiteralBody(CHECK_OK); } else { - ParseSourceElements(i::Token::RBRACE, ok); + ParseSourceElements(Token::RBRACE, ok); } - Expect(i::Token::RBRACE, CHECK_OK); + Expect(Token::RBRACE, CHECK_OK); if (!is_classic_mode()) { - int end_position = scanner_->location().end_pos; + int end_position = scanner()->location().end_pos; CheckOctalLiteral(start_position, end_position, CHECK_OK); CheckDelayedStrictModeViolation(start_position, end_position, CHECK_OK); return Expression::StrictFunction(); @@ -1460,15 +1406,15 @@ PreParser::Expression PreParser::ParseFunctionLiteral(bool is_generator, void PreParser::ParseLazyFunctionLiteralBody(bool* ok) { - int body_start = scanner_->location().beg_pos; + int body_start = position(); log_->PauseRecording(); - ParseSourceElements(i::Token::RBRACE, ok); + ParseSourceElements(Token::RBRACE, ok); log_->ResumeRecording(); if (!*ok) return; // Position right after terminal '}'. - ASSERT_EQ(i::Token::RBRACE, scanner_->peek()); - int body_end = scanner_->peek_location().end_pos; + ASSERT_EQ(Token::RBRACE, scanner()->peek()); + int body_end = scanner()->peek_location().end_pos; log_->LogFunction(body_start, body_end, scope_->materialized_literal_count(), scope_->expected_properties(), @@ -1479,8 +1425,8 @@ void PreParser::ParseLazyFunctionLiteralBody(bool* ok) { PreParser::Expression PreParser::ParseV8Intrinsic(bool* ok) { // CallRuntime :: // '%' Identifier Arguments - Expect(i::Token::MOD, CHECK_OK); - if (!allow_natives_syntax_) { + Expect(Token::MOD, CHECK_OK); + if (!allow_natives_syntax()) { *ok = false; return Expression::Default(); } @@ -1493,29 +1439,12 @@ PreParser::Expression PreParser::ParseV8Intrinsic(bool* ok) { #undef CHECK_OK -void PreParser::ExpectSemicolon(bool* ok) { - // Check for automatic semicolon insertion according to - // the rules given in ECMA-262, section 7.9, page 21. 
- i::Token::Value tok = peek(); - if (tok == i::Token::SEMICOLON) { - Next(); - return; - } - if (scanner_->HasAnyLineTerminatorBeforeNext() || - tok == i::Token::RBRACE || - tok == i::Token::EOS) { - return; - } - Expect(i::Token::SEMICOLON, ok); -} - - void PreParser::LogSymbol() { - int identifier_pos = scanner_->location().beg_pos; - if (scanner_->is_literal_ascii()) { - log_->LogAsciiSymbol(identifier_pos, scanner_->literal_ascii_string()); + int identifier_pos = position(); + if (scanner()->is_literal_ascii()) { + log_->LogAsciiSymbol(identifier_pos, scanner()->literal_ascii_string()); } else { - log_->LogUtf16Symbol(identifier_pos, scanner_->literal_utf16_string()); + log_->LogUtf16Symbol(identifier_pos, scanner()->literal_utf16_string()); } } @@ -1524,10 +1453,10 @@ PreParser::Expression PreParser::GetStringSymbol() { const int kUseStrictLength = 10; const char* kUseStrictChars = "use strict"; LogSymbol(); - if (scanner_->is_literal_ascii() && - scanner_->literal_length() == kUseStrictLength && - !scanner_->literal_contains_escapes() && - !strncmp(scanner_->literal_ascii_string().start(), kUseStrictChars, + if (scanner()->is_literal_ascii() && + scanner()->literal_length() == kUseStrictLength && + !scanner()->literal_contains_escapes() && + !strncmp(scanner()->literal_ascii_string().start(), kUseStrictChars, kUseStrictLength)) { return Expression::UseStrictStringLiteral(); } @@ -1537,22 +1466,22 @@ PreParser::Expression PreParser::GetStringSymbol() { PreParser::Identifier PreParser::GetIdentifierSymbol() { LogSymbol(); - if (scanner_->current_token() == i::Token::FUTURE_RESERVED_WORD) { + if (scanner()->current_token() == Token::FUTURE_RESERVED_WORD) { return Identifier::FutureReserved(); - } else if (scanner_->current_token() == - i::Token::FUTURE_STRICT_RESERVED_WORD) { + } else if (scanner()->current_token() == + Token::FUTURE_STRICT_RESERVED_WORD) { return Identifier::FutureStrictReserved(); - } else if (scanner_->current_token() == i::Token::YIELD) { + } else if (scanner()->current_token() == Token::YIELD) { return Identifier::Yield(); } - if (scanner_->is_literal_ascii()) { + if (scanner()->is_literal_ascii()) { // Detect strict-mode poison words. - if (scanner_->literal_length() == 4 && - !strncmp(scanner_->literal_ascii_string().start(), "eval", 4)) { + if (scanner()->literal_length() == 4 && + !strncmp(scanner()->literal_ascii_string().start(), "eval", 4)) { return Identifier::Eval(); } - if (scanner_->literal_length() == 9 && - !strncmp(scanner_->literal_ascii_string().start(), "arguments", 9)) { + if (scanner()->literal_length() == 9 && + !strncmp(scanner()->literal_ascii_string().start(), "arguments", 9)) { return Identifier::Arguments(); } } @@ -1561,32 +1490,32 @@ PreParser::Identifier PreParser::GetIdentifierSymbol() { PreParser::Identifier PreParser::ParseIdentifier(bool* ok) { - i::Token::Value next = Next(); + Token::Value next = Next(); switch (next) { - case i::Token::FUTURE_RESERVED_WORD: { - i::Scanner::Location location = scanner_->location(); + case Token::FUTURE_RESERVED_WORD: { + Scanner::Location location = scanner()->location(); ReportMessageAt(location.beg_pos, location.end_pos, "reserved_word", NULL); *ok = false; return GetIdentifierSymbol(); } - case i::Token::YIELD: + case Token::YIELD: if (scope_->is_generator()) { // 'yield' in a generator is only valid as part of a YieldExpression. 
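The ExpectSemicolon removed just above (now declared on ParserBase in preparser.h further down) encodes ECMA-262's automatic semicolon insertion. Restated as the preparser applies it:

    // ExpectSemicolon's ASI logic, per the deleted body above:
    //   1. If the next token is ';', consume it: done.
    //   2. Otherwise, if a line terminator precedes the next token, or the
    //      next token is '}' or EOS, treat the ';' as inserted: done.
    //   3. Otherwise Expect(Token::SEMICOLON, ok) fails and reports the token.
    // So `return<newline>42` preparses as `return; 42;`, which is why the
    // restricted productions earlier (return/break/continue) consult
    // HasAnyLineTerminatorBeforeNext before reading an operand, and why
    // ParseThrowStatement instead rejects a newline with "newline_after_throw".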
- ReportMessageAt(scanner_->location(), "unexpected_token", "yield"); + ReportMessageAt(scanner()->location(), "unexpected_token", "yield"); *ok = false; return Identifier::Yield(); } // FALLTHROUGH - case i::Token::FUTURE_STRICT_RESERVED_WORD: + case Token::FUTURE_STRICT_RESERVED_WORD: if (!is_classic_mode()) { - i::Scanner::Location location = scanner_->location(); + Scanner::Location location = scanner()->location(); ReportMessageAt(location.beg_pos, location.end_pos, "strict_reserved_word", NULL); *ok = false; } // FALLTHROUGH - case i::Token::IDENTIFIER: + case Token::IDENTIFIER: return GetIdentifierSymbol(); default: *ok = false; @@ -1595,7 +1524,7 @@ PreParser::Identifier PreParser::ParseIdentifier(bool* ok) { } -void PreParser::SetStrictModeViolation(i::Scanner::Location location, +void PreParser::SetStrictModeViolation(Scanner::Location location, const char* type, bool* ok) { if (!is_classic_mode()) { @@ -1619,7 +1548,7 @@ void PreParser::SetStrictModeViolation(i::Scanner::Location location, void PreParser::CheckDelayedStrictModeViolation(int beg_pos, int end_pos, bool* ok) { - i::Scanner::Location location = strict_mode_violation_location_; + Scanner::Location location = strict_mode_violation_location_; if (location.IsValid() && location.beg_pos > beg_pos && location.end_pos < end_pos) { ReportMessageAt(location, strict_mode_violation_type_, NULL); @@ -1628,7 +1557,7 @@ void PreParser::CheckDelayedStrictModeViolation(int beg_pos, } -void PreParser::StrictModeIdentifierViolation(i::Scanner::Location location, +void PreParser::StrictModeIdentifierViolation(Scanner::Location location, const char* eval_args_type, Identifier identifier, bool* ok) { @@ -1649,17 +1578,16 @@ void PreParser::StrictModeIdentifierViolation(i::Scanner::Location location, PreParser::Identifier PreParser::ParseIdentifierName(bool* ok) { - i::Token::Value next = Next(); - if (i::Token::IsKeyword(next)) { - int pos = scanner_->location().beg_pos; - const char* keyword = i::Token::String(next); - log_->LogAsciiSymbol(pos, i::Vector<const char>(keyword, - i::StrLength(keyword))); + Token::Value next = Next(); + if (Token::IsKeyword(next)) { + int pos = position(); + const char* keyword = Token::String(next); + log_->LogAsciiSymbol(pos, Vector<const char>(keyword, StrLength(keyword))); return Identifier::Default(); } - if (next == i::Token::IDENTIFIER || - next == i::Token::FUTURE_RESERVED_WORD || - next == i::Token::FUTURE_STRICT_RESERVED_WORD) { + if (next == Token::IDENTIFIER || + next == Token::FUTURE_RESERVED_WORD || + next == Token::FUTURE_STRICT_RESERVED_WORD) { return GetIdentifierSymbol(); } *ok = false; @@ -1676,9 +1604,9 @@ PreParser::Identifier PreParser::ParseIdentifierNameOrGetOrSet(bool* is_get, bool* ok) { Identifier result = ParseIdentifierName(ok); if (!*ok) return Identifier::Default(); - if (scanner_->is_literal_ascii() && - scanner_->literal_length() == 3) { - const char* token = scanner_->literal_ascii_string().start(); + if (scanner()->is_literal_ascii() && + scanner()->literal_length() == 3) { + const char* token = scanner()->literal_ascii_string().start(); *is_get = strncmp(token, "get", 3) == 0; *is_set = !*is_get && strncmp(token, "set", 3) == 0; } @@ -1686,147 +1614,36 @@ PreParser::Identifier PreParser::ParseIdentifierNameOrGetOrSet(bool* is_get, } -bool PreParser::peek_any_identifier() { - i::Token::Value next = peek(); - return next == i::Token::IDENTIFIER || - next == i::Token::FUTURE_RESERVED_WORD || - next == i::Token::FUTURE_STRICT_RESERVED_WORD || - next == i::Token::YIELD; -} - 
- -int DuplicateFinder::AddAsciiSymbol(i::Vector<const char> key, int value) { - return AddSymbol(i::Vector<const byte>::cast(key), true, value); -} - - -int DuplicateFinder::AddUtf16Symbol(i::Vector<const uint16_t> key, int value) { - return AddSymbol(i::Vector<const byte>::cast(key), false, value); -} - -int DuplicateFinder::AddSymbol(i::Vector<const byte> key, - bool is_ascii, - int value) { - uint32_t hash = Hash(key, is_ascii); - byte* encoding = BackupKey(key, is_ascii); - i::HashMap::Entry* entry = map_.Lookup(encoding, hash, true); - int old_value = static_cast<int>(reinterpret_cast<intptr_t>(entry->value)); - entry->value = - reinterpret_cast<void*>(static_cast<intptr_t>(value | old_value)); - return old_value; -} - - -int DuplicateFinder::AddNumber(i::Vector<const char> key, int value) { - ASSERT(key.length() > 0); - // Quick check for already being in canonical form. - if (IsNumberCanonical(key)) { - return AddAsciiSymbol(key, value); - } - - int flags = i::ALLOW_HEX | i::ALLOW_OCTAL | i::ALLOW_IMPLICIT_OCTAL | - i::ALLOW_BINARY; - double double_value = StringToDouble(unicode_constants_, key, flags, 0.0); - int length; - const char* string; - if (!std::isfinite(double_value)) { - string = "Infinity"; - length = 8; // strlen("Infinity"); - } else { - string = DoubleToCString(double_value, - i::Vector<char>(number_buffer_, kBufferSize)); - length = i::StrLength(string); - } - return AddSymbol(i::Vector<const byte>(reinterpret_cast<const byte*>(string), - length), true, value); -} - - -bool DuplicateFinder::IsNumberCanonical(i::Vector<const char> number) { - // Test for a safe approximation of number literals that are already - // in canonical form: max 15 digits, no leading zeroes, except an - // integer part that is a single zero, and no trailing zeros below - // the decimal point. - int pos = 0; - int length = number.length(); - if (number.length() > 15) return false; - if (number[pos] == '0') { - pos++; +void PreParser::ObjectLiteralChecker::CheckProperty(Token::Value property, + PropertyKind type, + bool* ok) { + int old; + if (property == Token::NUMBER) { + old = finder_.AddNumber(scanner()->literal_ascii_string(), type); + } else if (scanner()->is_literal_ascii()) { + old = finder_.AddAsciiSymbol(scanner()->literal_ascii_string(), type); } else { - while (pos < length && - static_cast<unsigned>(number[pos] - '0') <= ('9' - '0')) pos++; - } - if (length == pos) return true; - if (number[pos] != '.') return false; - pos++; - bool invalid_last_digit = true; - while (pos < length) { - byte digit = number[pos] - '0'; - if (digit > '9' - '0') return false; - invalid_last_digit = (digit == 0); - pos++; + old = finder_.AddUtf16Symbol(scanner()->literal_utf16_string(), type); } - return !invalid_last_digit; -} - - -uint32_t DuplicateFinder::Hash(i::Vector<const byte> key, bool is_ascii) { - // Primitive hash function, almost identical to the one used - // for strings (except that it's seeded by the length and ASCII-ness). - int length = key.length(); - uint32_t hash = (length << 1) | (is_ascii ? 1 : 0) ; - for (int i = 0; i < length; i++) { - uint32_t c = key[i]; - hash = (hash + c) * 1025; - hash ^= (hash >> 6); - } - return hash; -} - - -bool DuplicateFinder::Match(void* first, void* second) { - // Decode lengths. - // Length + ASCII-bit is encoded as base 128, most significant heptet first, - // with a 8th bit being non-zero while there are more heptets. - // The value encodes the number of bytes following, and whether the original - // was ASCII. 
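The Match() whose body follows decodes the key header that BackupKey writes: (length << 1 | is_ascii) in base-128, most significant 7-bit group first, with bit 0x80 flagging that more bytes follow. Both directions as a self-contained sketch (illustrative names, mirroring the deleted code):

    #include <cstdint>
    #include <vector>

    // Encode (length << 1 | is_ascii) base-128, MSB group first; 0x80 is set
    // on every byte except the last, as BackupKey does.
    std::vector<uint8_t> EncodeKeyHeader(uint32_t length, bool is_ascii) {
      uint32_t v = (length << 1) | (is_ascii ? 1 : 0);
      std::vector<uint8_t> out;
      int shift = 28;
      while (shift > 0 && (v >> shift) == 0) shift -= 7;  // Skip empty groups.
      for (; shift > 0; shift -= 7) {
        out.push_back(static_cast<uint8_t>(((v >> shift) & 0x7F) | 0x80));
      }
      out.push_back(static_cast<uint8_t>(v & 0x7F));
      return out;
    }

    // Decode the header, as Match's do/while does; returns the key length in
    // bytes and reports how many header bytes were consumed.
    uint32_t DecodeKeyHeader(const uint8_t* p, bool* is_ascii, size_t* used) {
      uint32_t v = 0;
      size_t n = 0;
      uint8_t c;
      do {
        c = p[n++];
        v = (v << 7) | (c & 0x7F);
      } while ((c & 0x80) != 0);
      *is_ascii = (v & 1) != 0;
      *used = n;
      return v >> 1;
    }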
- byte* s1 = reinterpret_cast<byte*>(first); - byte* s2 = reinterpret_cast<byte*>(second); - uint32_t length_ascii_field = 0; - byte c1; - do { - c1 = *s1; - if (c1 != *s2) return false; - length_ascii_field = (length_ascii_field << 7) | (c1 & 0x7f); - s1++; - s2++; - } while ((c1 & 0x80) != 0); - int length = static_cast<int>(length_ascii_field >> 1); - return memcmp(s1, s2, length) == 0; -} - - -byte* DuplicateFinder::BackupKey(i::Vector<const byte> bytes, - bool is_ascii) { - uint32_t ascii_length = (bytes.length() << 1) | (is_ascii ? 1 : 0); - backing_store_.StartSequence(); - // Emit ascii_length as base-128 encoded number, with the 7th bit set - // on the byte of every heptet except the last, least significant, one. - if (ascii_length >= (1 << 7)) { - if (ascii_length >= (1 << 14)) { - if (ascii_length >= (1 << 21)) { - if (ascii_length >= (1 << 28)) { - backing_store_.Add(static_cast<byte>((ascii_length >> 28) | 0x80)); - } - backing_store_.Add(static_cast<byte>((ascii_length >> 21) | 0x80u)); - } - backing_store_.Add(static_cast<byte>((ascii_length >> 14) | 0x80u)); + PropertyKind old_type = static_cast<PropertyKind>(old); + if (HasConflict(old_type, type)) { + if (IsDataDataConflict(old_type, type)) { + // Both are data properties. + if (language_mode_ == CLASSIC_MODE) return; + parser()->ReportMessageAt(scanner()->location(), + "strict_duplicate_property"); + } else if (IsDataAccessorConflict(old_type, type)) { + // Both a data and an accessor property with the same name. + parser()->ReportMessageAt(scanner()->location(), + "accessor_data_property"); + } else { + ASSERT(IsAccessorAccessorConflict(old_type, type)); + // Both accessors of the same type. + parser()->ReportMessageAt(scanner()->location(), + "accessor_get_set"); } - backing_store_.Add(static_cast<byte>((ascii_length >> 7) | 0x80u)); + *ok = false; } - backing_store_.Add(static_cast<byte>(ascii_length & 0x7f)); - - backing_store_.AddBlock(bytes); - return backing_store_.EndSequence().start(); } -} } // v8::preparser + +} } // v8::internal diff --git a/deps/v8/src/preparser.h b/deps/v8/src/preparser.h index 9358d6bd18..e99b4b0a18 100644 --- a/deps/v8/src/preparser.h +++ b/deps/v8/src/preparser.h @@ -33,14 +33,178 @@ #include "scanner.h" namespace v8 { - namespace internal { -class UnicodeCache; -} -namespace preparser { +// Common base class shared between parser and pre-parser. +class ParserBase { + public: + ParserBase(Scanner* scanner, uintptr_t stack_limit) + : scanner_(scanner), + stack_limit_(stack_limit), + stack_overflow_(false), + allow_lazy_(false), + allow_natives_syntax_(false), + allow_generators_(false), + allow_for_of_(false) { } + // TODO(mstarzinger): Only virtual until message reporting has been unified. + virtual ~ParserBase() { } + + // Getters that indicate whether certain syntactical constructs are + // allowed to be parsed by this instance of the parser. + bool allow_lazy() const { return allow_lazy_; } + bool allow_natives_syntax() const { return allow_natives_syntax_; } + bool allow_generators() const { return allow_generators_; } + bool allow_for_of() const { return allow_for_of_; } + bool allow_modules() const { return scanner()->HarmonyModules(); } + bool allow_harmony_scoping() const { return scanner()->HarmonyScoping(); } + bool allow_harmony_numeric_literals() const { + return scanner()->HarmonyNumericLiterals(); + } + + // Setters that determine whether certain syntactical constructs are + // allowed to be parsed by this instance of the parser. 
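The Match and BackupKey bodies in the hunks above implement the length prefix their comments describe: (length << 1 | is_ascii) stored base 128, most significant heptet first, with the high bit set on every byte except the last. A self-contained sketch of that encoding, with hypothetical helper names:

    #include <stdint.h>
    #include <vector>

    // Emit (length << 1 | is_ascii) base 128, most significant heptet first;
    // every byte but the last carries a continuation bit in 0x80.
    static void EncodeLengthAscii(uint32_t length, bool is_ascii,
                                  std::vector<uint8_t>* out) {
      uint32_t value = (length << 1) | (is_ascii ? 1u : 0u);
      int shift = 28;
      while (shift > 0 && (value >> shift) == 0) shift -= 7;
      for (; shift > 0; shift -= 7) {
        out->push_back(static_cast<uint8_t>(((value >> shift) & 0x7f) | 0x80));
      }
      out->push_back(static_cast<uint8_t>(value & 0x7f));
    }

    // Decode the prefix and advance *p past it, as DuplicateFinder::Match does.
    static uint32_t DecodeLengthAscii(const uint8_t** p, bool* is_ascii) {
      uint32_t field = 0;
      uint8_t b;
      do {
        b = *(*p)++;
        field = (field << 7) | (b & 0x7f);
      } while ((b & 0x80) != 0);
      *is_ascii = (field & 1) != 0;
      return field >> 1;
    }

Folding the ASCII bit into the length means two keys with identical bytes but different encodings can never compare equal, which is exactly what Match relies on before it falls through to memcmp.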
+ void set_allow_lazy(bool allow) { allow_lazy_ = allow; }
+ void set_allow_natives_syntax(bool allow) { allow_natives_syntax_ = allow; }
+ void set_allow_generators(bool allow) { allow_generators_ = allow; }
+ void set_allow_for_of(bool allow) { allow_for_of_ = allow; }
+ void set_allow_modules(bool allow) { scanner()->SetHarmonyModules(allow); }
+ void set_allow_harmony_scoping(bool allow) {
+ scanner()->SetHarmonyScoping(allow);
+ }
+ void set_allow_harmony_numeric_literals(bool allow) {
+ scanner()->SetHarmonyNumericLiterals(allow);
+ }
+
+ protected:
+ Scanner* scanner() const { return scanner_; }
+ int position() { return scanner_->location().beg_pos; }
+ int peek_position() { return scanner_->peek_location().beg_pos; }
+ bool stack_overflow() const { return stack_overflow_; }
+ void set_stack_overflow() { stack_overflow_ = true; }
+
+ INLINE(Token::Value peek()) {
+ if (stack_overflow_) return Token::ILLEGAL;
+ return scanner()->peek();
+ }
+
+ INLINE(Token::Value Next()) {
+ if (stack_overflow_) return Token::ILLEGAL;
+ {
+ int marker;
+ if (reinterpret_cast<uintptr_t>(&marker) < stack_limit_) {
+ // Any further calls to Next or peek will return the illegal token.
+ // The current call must return the next token, which might already
+ // have been peek'ed.
+ stack_overflow_ = true;
+ }
+ }
+ return scanner()->Next();
+ }
+
+ void Consume(Token::Value token) {
+ Token::Value next = Next();
+ USE(next);
+ USE(token);
+ ASSERT(next == token);
+ }
+
+ bool Check(Token::Value token) {
+ Token::Value next = peek();
+ if (next == token) {
+ Consume(next);
+ return true;
+ }
+ return false;
+ }
+
+ void Expect(Token::Value token, bool* ok) {
+ Token::Value next = Next();
+ if (next != token) {
+ ReportUnexpectedToken(next);
+ *ok = false;
+ }
+ }
+
+ bool peek_any_identifier();
+ void ExpectSemicolon(bool* ok);
+ bool CheckContextualKeyword(Vector<const char> keyword);
+ void ExpectContextualKeyword(Vector<const char> keyword, bool* ok);
+
+ // Strict mode octal literal validation.
+ void CheckOctalLiteral(int beg_pos, int end_pos, bool* ok);
+
+ // Determine precedence of given token.
+ static int Precedence(Token::Value token, bool accept_IN);
+
+ // Report syntax errors.
+ virtual void ReportUnexpectedToken(Token::Value token) = 0;
+ virtual void ReportMessageAt(Scanner::Location loc, const char* type) = 0;
+
+ // Used to detect duplicates in object literals. Each of the values
+ // kGetterProperty, kSetterProperty and kValueProperty represents
+ // a type of object literal property. When parsing a property, its
+ // type value is stored in the DuplicateFinder for the property name.
+ // Values are chosen so that having intersection bits means that there is
+ // an incompatibility.
+ // I.e., you can add a getter to a property that already has a setter, since
+ // kGetterProperty and kSetterProperty don't intersect, but not if it
+ // already has a getter or a value. Adding the getter to an existing
+ // setter will store the value (kGetterProperty | kSetterProperty), which
+ // is incompatible with adding any further properties.
+ enum PropertyKind {
+ kNone = 0,
+ // Bit patterns representing different object literal property types.
+ kGetterProperty = 1,
+ kSetterProperty = 2,
+ kValueProperty = 7,
+ // Helper constants.
+ kValueFlag = 4
+ };
+
+ // Validation per ECMA 262 - 11.1.5 "Object Initialiser".
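The comment above compresses a small amount of bit algebra; here is a worked, compilable illustration of why those particular values make the conflict predicates work. The predicates mirror the ObjectLiteralChecker methods defined in the class that follows (the toy main() is not part of the patch):

    #include <cassert>

    enum PropertyKind {
      kNone = 0,
      kGetterProperty = 1,
      kSetterProperty = 2,
      kValueProperty = 7,  // contains kValueFlag, so it conflicts with anything
      kValueFlag = 4
    };

    static bool HasConflict(int a, int b) { return (a & b) != 0; }
    static bool IsDataDataConflict(int a, int b) {
      return ((a & b) & kValueFlag) != 0;
    }
    static bool IsDataAccessorConflict(int a, int b) {
      return ((a ^ b) & kValueFlag) != 0;
    }
    static bool IsAccessorAccessorConflict(int a, int b) {
      return ((a | b) & kValueFlag) == 0;
    }

    int main() {
      // A getter may join an existing setter: 1 & 2 == 0, no conflict.
      assert(!HasConflict(kSetterProperty, kGetterProperty));
      // The finder then stores 1 | 2 == 3; a second getter now conflicts,
      // and since neither operand has kValueFlag it is accessor/accessor.
      int stored = kGetterProperty | kSetterProperty;
      assert(HasConflict(stored, kGetterProperty));
      assert(IsAccessorAccessorConflict(stored, kGetterProperty));
      // Two plain data properties share kValueFlag: a data/data conflict.
      assert(IsDataDataConflict(kValueProperty, kValueProperty));
      // Data against accessor differ in kValueFlag: data/accessor conflict.
      assert(IsDataAccessorConflict(kValueProperty, kGetterProperty));
      return 0;
    }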
+ class ObjectLiteralChecker { + public: + ObjectLiteralChecker(ParserBase* parser, LanguageMode mode) + : parser_(parser), + finder_(scanner()->unicode_cache()), + language_mode_(mode) { } + + void CheckProperty(Token::Value property, PropertyKind type, bool* ok); + + private: + ParserBase* parser() const { return parser_; } + Scanner* scanner() const { return parser_->scanner(); } + + // Checks the type of conflict based on values coming from PropertyType. + bool HasConflict(PropertyKind type1, PropertyKind type2) { + return (type1 & type2) != 0; + } + bool IsDataDataConflict(PropertyKind type1, PropertyKind type2) { + return ((type1 & type2) & kValueFlag) != 0; + } + bool IsDataAccessorConflict(PropertyKind type1, PropertyKind type2) { + return ((type1 ^ type2) & kValueFlag) != 0; + } + bool IsAccessorAccessorConflict(PropertyKind type1, PropertyKind type2) { + return ((type1 | type2) & kValueFlag) == 0; + } + + ParserBase* parser_; + DuplicateFinder finder_; + LanguageMode language_mode_; + }; + + private: + Scanner* scanner_; + uintptr_t stack_limit_; + bool stack_overflow_; + + bool allow_lazy_; + bool allow_natives_syntax_; + bool allow_generators_; + bool allow_for_of_; +}; -typedef uint8_t byte; // Preparsing checks a JavaScript program and emits preparse-data that helps // a later parsing to be faster. @@ -54,104 +218,25 @@ typedef uint8_t byte; // rather it is to speed up properly written and correct programs. // That means that contextual checks (like a label being declared where // it is used) are generally omitted. - -namespace i = v8::internal; - -class DuplicateFinder { - public: - explicit DuplicateFinder(i::UnicodeCache* constants) - : unicode_constants_(constants), - backing_store_(16), - map_(&Match) { } - - int AddAsciiSymbol(i::Vector<const char> key, int value); - int AddUtf16Symbol(i::Vector<const uint16_t> key, int value); - // Add a a number literal by converting it (if necessary) - // to the string that ToString(ToNumber(literal)) would generate. - // and then adding that string with AddAsciiSymbol. - // This string is the actual value used as key in an object literal, - // and the one that must be different from the other keys. - int AddNumber(i::Vector<const char> key, int value); - - private: - int AddSymbol(i::Vector<const byte> key, bool is_ascii, int value); - // Backs up the key and its length in the backing store. - // The backup is stored with a base 127 encoding of the - // length (plus a bit saying whether the string is ASCII), - // followed by the bytes of the key. - byte* BackupKey(i::Vector<const byte> key, bool is_ascii); - - // Compare two encoded keys (both pointing into the backing store) - // for having the same base-127 encoded lengths and ASCII-ness, - // and then having the same 'length' bytes following. - static bool Match(void* first, void* second); - // Creates a hash from a sequence of bytes. - static uint32_t Hash(i::Vector<const byte> key, bool is_ascii); - // Checks whether a string containing a JS number is its canonical - // form. - static bool IsNumberCanonical(i::Vector<const char> key); - - // Size of buffer. Sufficient for using it to call DoubleToCString in - // from conversions.h. - static const int kBufferSize = 100; - - i::UnicodeCache* unicode_constants_; - // Backing store used to store strings used as hashmap keys. - i::SequenceCollector<unsigned char> backing_store_; - i::HashMap map_; - // Buffer used for string->number->canonical string conversions. 
- char number_buffer_[kBufferSize]; -}; - - -class PreParser { +class PreParser : public ParserBase { public: enum PreParseResult { kPreParseStackOverflow, kPreParseSuccess }; - - PreParser(i::Scanner* scanner, - i::ParserRecorder* log, + PreParser(Scanner* scanner, + ParserRecorder* log, uintptr_t stack_limit) - : scanner_(scanner), + : ParserBase(scanner, stack_limit), log_(log), scope_(NULL), - stack_limit_(stack_limit), - strict_mode_violation_location_(i::Scanner::Location::invalid()), + strict_mode_violation_location_(Scanner::Location::invalid()), strict_mode_violation_type_(NULL), - stack_overflow_(false), - allow_lazy_(false), - allow_natives_syntax_(false), - allow_generators_(false), - allow_for_of_(false), parenthesized_function_(false) { } ~PreParser() {} - bool allow_natives_syntax() const { return allow_natives_syntax_; } - bool allow_lazy() const { return allow_lazy_; } - bool allow_modules() const { return scanner_->HarmonyModules(); } - bool allow_harmony_scoping() const { return scanner_->HarmonyScoping(); } - bool allow_generators() const { return allow_generators_; } - bool allow_for_of() const { return allow_for_of_; } - bool allow_harmony_numeric_literals() const { - return scanner_->HarmonyNumericLiterals(); - } - - void set_allow_natives_syntax(bool allow) { allow_natives_syntax_ = allow; } - void set_allow_lazy(bool allow) { allow_lazy_ = allow; } - void set_allow_modules(bool allow) { scanner_->SetHarmonyModules(allow); } - void set_allow_harmony_scoping(bool allow) { - scanner_->SetHarmonyScoping(allow); - } - void set_allow_generators(bool allow) { allow_generators_ = allow; } - void set_allow_for_of(bool allow) { allow_for_of_ = allow; } - void set_allow_harmony_numeric_literals(bool allow) { - scanner_->SetHarmonyNumericLiterals(allow); - } - // Pre-parse the program from the character stream; returns true on // success (even if parsing failed, the pre-parse data successfully // captured the syntax error), and false if a stack-overflow happened @@ -159,13 +244,13 @@ class PreParser { PreParseResult PreParseProgram() { Scope top_scope(&scope_, kTopLevelScope); bool ok = true; - int start_position = scanner_->peek_location().beg_pos; - ParseSourceElements(i::Token::EOS, &ok); - if (stack_overflow_) return kPreParseStackOverflow; + int start_position = scanner()->peek_location().beg_pos; + ParseSourceElements(Token::EOS, &ok); + if (stack_overflow()) return kPreParseStackOverflow; if (!ok) { - ReportUnexpectedToken(scanner_->current_token()); + ReportUnexpectedToken(scanner()->current_token()); } else if (!scope_->is_classic_mode()) { - CheckOctalLiteral(start_position, scanner_->location().end_pos, &ok); + CheckOctalLiteral(start_position, scanner()->location().end_pos, &ok); } return kPreParseSuccess; } @@ -178,50 +263,11 @@ class PreParser { // keyword and parameters, and have consumed the initial '{'. // At return, unless an error occurred, the scanner is positioned before the // the final '}'. - PreParseResult PreParseLazyFunction(i::LanguageMode mode, + PreParseResult PreParseLazyFunction(LanguageMode mode, bool is_generator, - i::ParserRecorder* log); + ParserRecorder* log); private: - // Used to detect duplicates in object literals. Each of the values - // kGetterProperty, kSetterProperty and kValueProperty represents - // a type of object literal property. When parsing a property, its - // type value is stored in the DuplicateFinder for the property name. - // Values are chosen so that having intersection bits means the there is - // an incompatibility. 
- // I.e., you can add a getter to a property that already has a setter, since - // kGetterProperty and kSetterProperty doesn't intersect, but not if it - // already has a getter or a value. Adding the getter to an existing - // setter will store the value (kGetterProperty | kSetterProperty), which - // is incompatible with adding any further properties. - enum PropertyType { - kNone = 0, - // Bit patterns representing different object literal property types. - kGetterProperty = 1, - kSetterProperty = 2, - kValueProperty = 7, - // Helper constants. - kValueFlag = 4 - }; - - // Checks the type of conflict based on values coming from PropertyType. - bool HasConflict(int type1, int type2) { return (type1 & type2) != 0; } - bool IsDataDataConflict(int type1, int type2) { - return ((type1 & type2) & kValueFlag) != 0; - } - bool IsDataAccessorConflict(int type1, int type2) { - return ((type1 ^ type2) & kValueFlag) != 0; - } - bool IsAccessorAccessorConflict(int type1, int type2) { - return ((type1 | type2) & kValueFlag) == 0; - } - - - void CheckDuplicate(DuplicateFinder* finder, - i::Token::Value property, - int type, - bool* ok); - // These types form an algebra over syntactic categories that is just // rich enough to let us recognize and propagate the constructs that // are either being counted in the preparser data, or is important @@ -441,7 +487,7 @@ class PreParser { } bool IsStringLiteral() { - return code_ != kUnknownStatement; + return code_ == kStringLiteralExpressionStatement; } bool IsUseStrictLiteral() { @@ -480,7 +526,7 @@ class PreParser { expected_properties_(0), with_nesting_count_(0), language_mode_( - (prev_ != NULL) ? prev_->language_mode() : i::CLASSIC_MODE), + (prev_ != NULL) ? prev_->language_mode() : CLASSIC_MODE), is_generator_(false) { *variable = this; } @@ -494,12 +540,12 @@ class PreParser { bool is_generator() { return is_generator_; } void set_is_generator(bool is_generator) { is_generator_ = is_generator; } bool is_classic_mode() { - return language_mode_ == i::CLASSIC_MODE; + return language_mode_ == CLASSIC_MODE; } - i::LanguageMode language_mode() { + LanguageMode language_mode() { return language_mode_; } - void set_language_mode(i::LanguageMode language_mode) { + void set_language_mode(LanguageMode language_mode) { language_mode_ = language_mode; } @@ -523,13 +569,16 @@ class PreParser { int materialized_literal_count_; int expected_properties_; int with_nesting_count_; - i::LanguageMode language_mode_; + LanguageMode language_mode_; bool is_generator_; }; // Report syntax error - void ReportUnexpectedToken(i::Token::Value token); - void ReportMessageAt(i::Scanner::Location location, + void ReportUnexpectedToken(Token::Value token); + void ReportMessageAt(Scanner::Location location, const char* type) { + ReportMessageAt(location, type, NULL); + } + void ReportMessageAt(Scanner::Location location, const char* type, const char* name_opt) { log_->LogMessage(location.beg_pos, location.end_pos, type, name_opt); @@ -541,8 +590,6 @@ class PreParser { log_->LogMessage(start_pos, end_pos, type, name_opt); } - void CheckOctalLiteral(int beg_pos, int end_pos, bool* ok); - // All ParseXXX functions take as the last argument an *ok parameter // which is set to false if parsing failed; it is unchanged otherwise. // By making the 'exception handling' explicit, we are forced to check @@ -606,87 +653,40 @@ class PreParser { // Log the currently parsed string literal. 
Expression GetStringSymbol(); - i::Token::Value peek() { - if (stack_overflow_) return i::Token::ILLEGAL; - return scanner_->peek(); - } - - i::Token::Value Next() { - if (stack_overflow_) return i::Token::ILLEGAL; - { - int marker; - if (reinterpret_cast<uintptr_t>(&marker) < stack_limit_) { - // Further calls to peek/Next will return illegal token. - // The current one will still be returned. It might already - // have been seen using peek. - stack_overflow_ = true; - } - } - return scanner_->Next(); - } - - bool peek_any_identifier(); - - void set_language_mode(i::LanguageMode language_mode) { + void set_language_mode(LanguageMode language_mode) { scope_->set_language_mode(language_mode); } bool is_classic_mode() { - return scope_->language_mode() == i::CLASSIC_MODE; + return scope_->language_mode() == CLASSIC_MODE; } bool is_extended_mode() { - return scope_->language_mode() == i::EXTENDED_MODE; + return scope_->language_mode() == EXTENDED_MODE; } - i::LanguageMode language_mode() { return scope_->language_mode(); } - - void Consume(i::Token::Value token) { Next(); } - - void Expect(i::Token::Value token, bool* ok) { - if (Next() != token) { - *ok = false; - } - } - - bool Check(i::Token::Value token) { - i::Token::Value next = peek(); - if (next == token) { - Consume(next); - return true; - } - return false; - } - void ExpectSemicolon(bool* ok); + LanguageMode language_mode() { return scope_->language_mode(); } bool CheckInOrOf(bool accept_OF); - static int Precedence(i::Token::Value tok, bool accept_IN); - - void SetStrictModeViolation(i::Scanner::Location, + void SetStrictModeViolation(Scanner::Location, const char* type, bool* ok); void CheckDelayedStrictModeViolation(int beg_pos, int end_pos, bool* ok); - void StrictModeIdentifierViolation(i::Scanner::Location, + void StrictModeIdentifierViolation(Scanner::Location, const char* eval_args_type, Identifier identifier, bool* ok); - i::Scanner* scanner_; - i::ParserRecorder* log_; + ParserRecorder* log_; Scope* scope_; - uintptr_t stack_limit_; - i::Scanner::Location strict_mode_violation_location_; + Scanner::Location strict_mode_violation_location_; const char* strict_mode_violation_type_; - bool stack_overflow_; - bool allow_lazy_; - bool allow_natives_syntax_; - bool allow_generators_; - bool allow_for_of_; bool parenthesized_function_; }; -} } // v8::preparser + +} } // v8::internal #endif // V8_PREPARSER_H diff --git a/deps/v8/src/prettyprinter.cc b/deps/v8/src/prettyprinter.cc index b1bac4cd4a..4b441b9ae4 100644 --- a/deps/v8/src/prettyprinter.cc +++ b/deps/v8/src/prettyprinter.cc @@ -200,11 +200,25 @@ void PrettyPrinter::VisitSwitchStatement(SwitchStatement* node) { Print(") { "); ZoneList<CaseClause*>* cases = node->cases(); for (int i = 0; i < cases->length(); i++) - PrintCaseClause(cases->at(i)); + Visit(cases->at(i)); Print("}"); } +void PrettyPrinter::VisitCaseClause(CaseClause* clause) { + if (clause->is_default()) { + Print("default"); + } else { + Print("case "); + Visit(clause->label()); + } + Print(": "); + PrintStatements(clause->statements()); + if (clause->statements()->length() > 0) + Print(" "); +} + + void PrettyPrinter::VisitDoWhileStatement(DoWhileStatement* node) { PrintLabels(node->labels()); Print("do "); @@ -297,10 +311,9 @@ void PrettyPrinter::VisitFunctionLiteral(FunctionLiteral* node) { } -void PrettyPrinter::VisitSharedFunctionInfoLiteral( - SharedFunctionInfoLiteral* node) { +void PrettyPrinter::VisitNativeFunctionLiteral(NativeFunctionLiteral* node) { Print("("); - 
PrintLiteral(node->shared_function_info(), true); + PrintLiteral(node->name(), false); Print(")"); } @@ -621,20 +634,6 @@ void PrettyPrinter::PrintFunctionLiteral(FunctionLiteral* function) { } -void PrettyPrinter::PrintCaseClause(CaseClause* clause) { - if (clause->is_default()) { - Print("default"); - } else { - Print("case "); - Visit(clause->label()); - } - Print(": "); - PrintStatements(clause->statements()); - if (clause->statements()->length() > 0) - Print(" "); -} - - //----------------------------------------------------------------------------- class IndentedScope BASE_EMBEDDED { @@ -762,18 +761,6 @@ void AstPrinter::PrintArguments(ZoneList<Expression*>* arguments) { } -void AstPrinter::PrintCaseClause(CaseClause* clause) { - if (clause->is_default()) { - IndentedScope indent(this, "DEFAULT"); - PrintStatements(clause->statements()); - } else { - IndentedScope indent(this, "CASE"); - Visit(clause->label()); - PrintStatements(clause->statements()); - } -} - - void AstPrinter::VisitBlock(Block* node) { const char* block_txt = node->is_initializer_block() ? "BLOCK INIT" : "BLOCK"; IndentedScope indent(this, block_txt); @@ -901,7 +888,19 @@ void AstPrinter::VisitSwitchStatement(SwitchStatement* node) { PrintLabelsIndented(node->labels()); PrintIndentedVisit("TAG", node->tag()); for (int i = 0; i < node->cases()->length(); i++) { - PrintCaseClause(node->cases()->at(i)); + Visit(node->cases()->at(i)); + } +} + + +void AstPrinter::VisitCaseClause(CaseClause* clause) { + if (clause->is_default()) { + IndentedScope indent(this, "DEFAULT"); + PrintStatements(clause->statements()); + } else { + IndentedScope indent(this, "CASE"); + Visit(clause->label()); + PrintStatements(clause->statements()); } } @@ -982,10 +981,9 @@ void AstPrinter::VisitFunctionLiteral(FunctionLiteral* node) { } -void AstPrinter::VisitSharedFunctionInfoLiteral( - SharedFunctionInfoLiteral* node) { - IndentedScope indent(this, "FUNC LITERAL"); - PrintLiteralIndented("SHARED INFO", node->shared_function_info(), true); +void AstPrinter::VisitNativeFunctionLiteral(NativeFunctionLiteral* node) { + IndentedScope indent(this, "NATIVE FUNC LITERAL"); + PrintLiteralIndented("NAME", node->name(), false); } diff --git a/deps/v8/src/profile-generator-inl.h b/deps/v8/src/profile-generator-inl.h index f2feb73fc9..e363f67761 100644 --- a/deps/v8/src/profile-generator-inl.h +++ b/deps/v8/src/profile-generator-inl.h @@ -33,27 +33,19 @@ namespace v8 { namespace internal { -const char* StringsStorage::GetFunctionName(Name* name) { - return GetFunctionName(GetName(name)); -} - - -const char* StringsStorage::GetFunctionName(const char* name) { - return strlen(name) > 0 ? name : ProfileGenerator::kAnonymousFunctionName; -} - - CodeEntry::CodeEntry(Logger::LogEventsAndTags tag, const char* name, const char* name_prefix, const char* resource_name, - int line_number) + int line_number, + int column_number) : tag_(tag), builtin_id_(Builtins::builtin_count), name_prefix_(name_prefix), name_(name), resource_name_(resource_name), line_number_(line_number), + column_number_(column_number), shared_id_(0), script_id_(v8::Script::kNoScriptId), no_frame_ranges_(NULL), @@ -77,25 +69,6 @@ ProfileNode::ProfileNode(ProfileTree* tree, CodeEntry* entry) children_(CodeEntriesMatch), id_(tree->next_node_id()) { } - -CodeEntry* ProfileGenerator::EntryForVMState(StateTag tag) { - switch (tag) { - case GC: - return gc_entry_; - case JS: - case COMPILER: - // DOM events handlers are reported as OTHER / EXTERNAL entries. 
- // To avoid confusing people, let's put all these entries into - // one bucket. - case OTHER: - case EXTERNAL: - return program_entry_; - case IDLE: - return idle_entry_; - default: return NULL; - } -} - } } // namespace v8::internal #endif // V8_PROFILE_GENERATOR_INL_H_ diff --git a/deps/v8/src/profile-generator.cc b/deps/v8/src/profile-generator.cc index 38c1f785d9..acf54da1c7 100644 --- a/deps/v8/src/profile-generator.cc +++ b/deps/v8/src/profile-generator.cc @@ -41,6 +41,12 @@ namespace v8 { namespace internal { +bool StringsStorage::StringsMatch(void* key1, void* key2) { + return strcmp(reinterpret_cast<char*>(key1), + reinterpret_cast<char*>(key2)) == 0; +} + + StringsStorage::StringsStorage(Heap* heap) : hash_seed_(heap->HashSeed()), names_(StringsMatch) { } @@ -57,12 +63,15 @@ StringsStorage::~StringsStorage() { const char* StringsStorage::GetCopy(const char* src) { int len = static_cast<int>(strlen(src)); - Vector<char> dst = Vector<char>::New(len + 1); - OS::StrNCpy(dst, src, len); - dst[len] = '\0'; - uint32_t hash = - StringHasher::HashSequentialString(dst.start(), len, hash_seed_); - return AddOrDisposeString(dst.start(), hash); + HashMap::Entry* entry = GetEntry(src, len); + if (entry->value == NULL) { + Vector<char> dst = Vector<char>::New(len + 1); + OS::StrNCpy(dst, src, len); + dst[len] = '\0'; + entry->key = dst.start(); + entry->value = entry->key; + } + return reinterpret_cast<const char*>(entry->value); } @@ -75,15 +84,16 @@ const char* StringsStorage::GetFormatted(const char* format, ...) { } -const char* StringsStorage::AddOrDisposeString(char* str, uint32_t hash) { - HashMap::Entry* cache_entry = names_.Lookup(str, hash, true); - if (cache_entry->value == NULL) { +const char* StringsStorage::AddOrDisposeString(char* str, int len) { + HashMap::Entry* entry = GetEntry(str, len); + if (entry->value == NULL) { // New entry added. 
- cache_entry->value = str; + entry->key = str; + entry->value = str; } else { DeleteArray(str); } - return reinterpret_cast<const char*>(cache_entry->value); + return reinterpret_cast<const char*>(entry->value); } @@ -92,11 +102,9 @@ const char* StringsStorage::GetVFormatted(const char* format, va_list args) { int len = OS::VSNPrintF(str, format, args); if (len == -1) { DeleteArray(str.start()); - return format; + return GetCopy(format); } - uint32_t hash = StringHasher::HashSequentialString( - str.start(), len, hash_seed_); - return AddOrDisposeString(str.start(), hash); + return AddOrDisposeString(str.start(), len); } @@ -104,11 +112,11 @@ const char* StringsStorage::GetName(Name* name) { if (name->IsString()) { String* str = String::cast(name); int length = Min(kMaxNameSize, str->length()); + int actual_length = 0; SmartArrayPointer<char> data = - str->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL, 0, length); - uint32_t hash = StringHasher::HashSequentialString( - *data, length, name->GetHeap()->HashSeed()); - return AddOrDisposeString(data.Detach(), hash); + str->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL, 0, length, + &actual_length); + return AddOrDisposeString(data.Detach(), actual_length); } else if (name->IsSymbol()) { return "<symbol>"; } @@ -121,6 +129,21 @@ const char* StringsStorage::GetName(int index) { } +const char* StringsStorage::GetFunctionName(Name* name) { + return BeautifyFunctionName(GetName(name)); +} + + +const char* StringsStorage::GetFunctionName(const char* name) { + return BeautifyFunctionName(GetCopy(name)); +} + + +const char* StringsStorage::BeautifyFunctionName(const char* name) { + return (*name == 0) ? ProfileGenerator::kAnonymousFunctionName : name; +} + + size_t StringsStorage::GetUsedMemorySize() const { size_t size = sizeof(*this); size += sizeof(HashMap::Entry) * names_.capacity(); @@ -131,6 +154,12 @@ size_t StringsStorage::GetUsedMemorySize() const { } +HashMap::Entry* StringsStorage::GetEntry(const char* str, int len) { + uint32_t hash = StringHasher::HashSequentialString(str, len, hash_seed_); + return names_.Lookup(const_cast<char*>(str), hash, true); +} + + const char* const CodeEntry::kEmptyNamePrefix = ""; const char* const CodeEntry::kEmptyResourceName = ""; const char* const CodeEntry::kEmptyBailoutReason = ""; @@ -141,15 +170,6 @@ CodeEntry::~CodeEntry() { } -void CodeEntry::CopyData(const CodeEntry& source) { - tag_ = source.tag_; - name_prefix_ = source.name_prefix_; - name_ = source.name_; - resource_name_ = source.resource_name_; - line_number_ = source.line_number_; -} - - uint32_t CodeEntry::GetCallUid() const { uint32_t hash = ComputeIntegerHash(tag_, v8::internal::kZeroHashSeed); if (shared_id_ != 0) { @@ -546,12 +566,14 @@ CodeEntry* CpuProfilesCollection::NewCodeEntry( const char* name, const char* name_prefix, const char* resource_name, - int line_number) { + int line_number, + int column_number) { CodeEntry* code_entry = new CodeEntry(tag, name, name_prefix, resource_name, - line_number); + line_number, + column_number); code_entries_.Add(code_entry); return code_entry; } @@ -660,4 +682,22 @@ void ProfileGenerator::RecordTickSample(const TickSample& sample) { } +CodeEntry* ProfileGenerator::EntryForVMState(StateTag tag) { + switch (tag) { + case GC: + return gc_entry_; + case JS: + case COMPILER: + // DOM events handlers are reported as OTHER / EXTERNAL entries. + // To avoid confusing people, let's put all these entries into + // one bucket. 
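The StringsStorage rework in the hunks above replaces the hash-then-maybe-dispose dance with a single lookup-or-insert through GetEntry, with the map entry's key doubling as the canonical copy. The same interning pattern reduced to standard containers (illustrative only; V8 uses its own HashMap and allocation):

    #include <string>
    #include <unordered_set>

    // Lookup-or-insert interning: the set owns one canonical copy of each
    // distinct string, and equal inputs always yield the same pointer.
    class StringInterner {
     public:
      const char* GetCopy(const char* src) {
        return interned_.insert(std::string(src)).first->c_str();
      }

     private:
      std::unordered_set<std::string> interned_;
    };

Element addresses in an unordered container stay stable across rehashing, which is what makes handing out the c_str() pointer safe here; the GetVFormatted fix above follows the same principle by interning the format string on failure instead of returning a pointer the storage does not own.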
+ case OTHER: + case EXTERNAL: + return program_entry_; + case IDLE: + return idle_entry_; + default: return NULL; + } +} + } } // namespace v8::internal diff --git a/deps/v8/src/profile-generator.h b/deps/v8/src/profile-generator.h index 0a4502cc1b..6e4758bece 100644 --- a/deps/v8/src/profile-generator.h +++ b/deps/v8/src/profile-generator.h @@ -49,20 +49,18 @@ class StringsStorage { const char* GetVFormatted(const char* format, va_list args); const char* GetName(Name* name); const char* GetName(int index); - inline const char* GetFunctionName(Name* name); - inline const char* GetFunctionName(const char* name); + const char* GetFunctionName(Name* name); + const char* GetFunctionName(const char* name); size_t GetUsedMemorySize() const; private: static const int kMaxNameSize = 1024; - INLINE(static bool StringsMatch(void* key1, void* key2)) { - return strcmp(reinterpret_cast<char*>(key1), - reinterpret_cast<char*>(key2)) == 0; - } - const char* AddOrDisposeString(char* str, uint32_t hash); + static bool StringsMatch(void* key1, void* key2); + const char* BeautifyFunctionName(const char* name); + const char* AddOrDisposeString(char* str, int len); + HashMap::Entry* GetEntry(const char* str, int len); - // Mapping of strings by String::Hash to const char* strings. uint32_t hash_seed_; HashMap names_; @@ -73,28 +71,30 @@ class StringsStorage { class CodeEntry { public: // CodeEntry doesn't own name strings, just references them. - INLINE(CodeEntry(Logger::LogEventsAndTags tag, + inline CodeEntry(Logger::LogEventsAndTags tag, const char* name, const char* name_prefix = CodeEntry::kEmptyNamePrefix, const char* resource_name = CodeEntry::kEmptyResourceName, - int line_number = v8::CpuProfileNode::kNoLineNumberInfo)); + int line_number = v8::CpuProfileNode::kNoLineNumberInfo, + int column_number = v8::CpuProfileNode::kNoColumnNumberInfo); ~CodeEntry(); - INLINE(bool is_js_function() const) { return is_js_function_tag(tag_); } - INLINE(const char* name_prefix() const) { return name_prefix_; } - INLINE(bool has_name_prefix() const) { return name_prefix_[0] != '\0'; } - INLINE(const char* name() const) { return name_; } - INLINE(const char* resource_name() const) { return resource_name_; } - INLINE(int line_number() const) { return line_number_; } - INLINE(void set_shared_id(int shared_id)) { shared_id_ = shared_id; } - INLINE(int script_id() const) { return script_id_; } - INLINE(void set_script_id(int script_id)) { script_id_ = script_id; } - INLINE(void set_bailout_reason(const char* bailout_reason)) { + bool is_js_function() const { return is_js_function_tag(tag_); } + const char* name_prefix() const { return name_prefix_; } + bool has_name_prefix() const { return name_prefix_[0] != '\0'; } + const char* name() const { return name_; } + const char* resource_name() const { return resource_name_; } + int line_number() const { return line_number_; } + int column_number() const { return column_number_; } + void set_shared_id(int shared_id) { shared_id_ = shared_id; } + int script_id() const { return script_id_; } + void set_script_id(int script_id) { script_id_ = script_id; } + void set_bailout_reason(const char* bailout_reason) { bailout_reason_ = bailout_reason; } - INLINE(const char* bailout_reason() const) { return bailout_reason_; } + const char* bailout_reason() const { return bailout_reason_; } - INLINE(static bool is_js_function_tag(Logger::LogEventsAndTags tag)); + static inline bool is_js_function_tag(Logger::LogEventsAndTags tag); List<OffsetRange>* no_frame_ranges() const { return 
no_frame_ranges_; } void set_no_frame_ranges(List<OffsetRange>* ranges) { @@ -104,7 +104,6 @@ class CodeEntry { void SetBuiltinId(Builtins::Name id); Builtins::Name builtin_id() const { return builtin_id_; } - void CopyData(const CodeEntry& source); uint32_t GetCallUid() const; bool IsSameAs(CodeEntry* entry) const; @@ -119,6 +118,7 @@ class CodeEntry { const char* name_; const char* resource_name_; int line_number_; + int column_number_; int shared_id_; int script_id_; List<OffsetRange>* no_frame_ranges_; @@ -132,27 +132,27 @@ class ProfileTree; class ProfileNode { public: - INLINE(ProfileNode(ProfileTree* tree, CodeEntry* entry)); + inline ProfileNode(ProfileTree* tree, CodeEntry* entry); ProfileNode* FindChild(CodeEntry* entry); ProfileNode* FindOrAddChild(CodeEntry* entry); - INLINE(void IncrementSelfTicks()) { ++self_ticks_; } - INLINE(void IncreaseSelfTicks(unsigned amount)) { self_ticks_ += amount; } + void IncrementSelfTicks() { ++self_ticks_; } + void IncreaseSelfTicks(unsigned amount) { self_ticks_ += amount; } - INLINE(CodeEntry* entry() const) { return entry_; } - INLINE(unsigned self_ticks() const) { return self_ticks_; } - INLINE(const List<ProfileNode*>* children() const) { return &children_list_; } + CodeEntry* entry() const { return entry_; } + unsigned self_ticks() const { return self_ticks_; } + const List<ProfileNode*>* children() const { return &children_list_; } unsigned id() const { return id_; } void Print(int indent); private: - INLINE(static bool CodeEntriesMatch(void* entry1, void* entry2)) { + static bool CodeEntriesMatch(void* entry1, void* entry2) { return reinterpret_cast<CodeEntry*>(entry1)->IsSameAs( reinterpret_cast<CodeEntry*>(entry2)); } - INLINE(static uint32_t CodeEntryHash(CodeEntry* entry)) { + static uint32_t CodeEntryHash(CodeEntry* entry) { return entry->GetCallUid(); } @@ -304,7 +304,8 @@ class CpuProfilesCollection { const char* name, const char* name_prefix = CodeEntry::kEmptyNamePrefix, const char* resource_name = CodeEntry::kEmptyResourceName, - int line_number = v8::CpuProfileNode::kNoLineNumberInfo); + int line_number = v8::CpuProfileNode::kNoLineNumberInfo, + int column_number = v8::CpuProfileNode::kNoColumnNumberInfo); // Called from profile generator thread. 
void AddPathToCurrentProfiles(const Vector<CodeEntry*>& path); @@ -331,7 +332,7 @@ class ProfileGenerator { void RecordTickSample(const TickSample& sample); - INLINE(CodeMap* code_map()) { return &code_map_; } + CodeMap* code_map() { return &code_map_; } static const char* const kAnonymousFunctionName; static const char* const kProgramEntryName; @@ -342,7 +343,7 @@ class ProfileGenerator { static const char* const kUnresolvedFunctionName; private: - INLINE(CodeEntry* EntryForVMState(StateTag tag)); + CodeEntry* EntryForVMState(StateTag tag); CpuProfilesCollection* profiles_; CodeMap code_map_; diff --git a/deps/v8/src/property-details.h b/deps/v8/src/property-details.h index 7f44b79277..659fbd1da6 100644 --- a/deps/v8/src/property-details.h +++ b/deps/v8/src/property-details.h @@ -82,6 +82,7 @@ class Representation { public: enum Kind { kNone, + kByte, kSmi, kInteger32, kDouble, @@ -95,6 +96,7 @@ class Representation { static Representation None() { return Representation(kNone); } static Representation Tagged() { return Representation(kTagged); } + static Representation Byte() { return Representation(kByte); } static Representation Smi() { return Representation(kSmi); } static Representation Integer32() { return Representation(kInteger32); } static Representation Double() { return Representation(kDouble); } @@ -139,6 +141,7 @@ class Representation { Kind kind() const { return static_cast<Kind>(kind_); } bool IsNone() const { return kind_ == kNone; } + bool IsByte() const { return kind_ == kByte; } bool IsTagged() const { return kind_ == kTagged; } bool IsSmi() const { return kind_ == kSmi; } bool IsSmiOrTagged() const { return IsSmi() || IsTagged(); } @@ -148,7 +151,7 @@ class Representation { bool IsHeapObject() const { return kind_ == kHeapObject; } bool IsExternal() const { return kind_ == kExternal; } bool IsSpecialization() const { - return kind_ == kInteger32 || kind_ == kDouble || kind_ == kSmi; + return IsByte() || IsSmi() || IsInteger32() || IsDouble(); } const char* Mnemonic() const; diff --git a/deps/v8/src/proxy.js b/deps/v8/src/proxy.js index de9be50ddc..4c03f21538 100644 --- a/deps/v8/src/proxy.js +++ b/deps/v8/src/proxy.js @@ -40,7 +40,7 @@ function ProxyCreate(handler, proto) { throw MakeTypeError("handler_non_object", ["create"]) if (IS_UNDEFINED(proto)) proto = null - else if (!(IS_SPEC_OBJECT(proto) || proto === null)) + else if (!(IS_SPEC_OBJECT(proto) || IS_NULL(proto))) throw MakeTypeError("proto_non_object", ["create"]) return %CreateJSProxy(handler, proto) } @@ -56,7 +56,7 @@ function ProxyCreateFunction(handler, callTrap, constructTrap) { // Make sure the trap receives 'undefined' as this. var construct = constructTrap constructTrap = function() { - return %Apply(construct, void 0, arguments, 0, %_ArgumentsLength()); + return %Apply(construct, UNDEFINED, arguments, 0, %_ArgumentsLength()); } } else { throw MakeTypeError("trap_function_expected", diff --git a/deps/v8/src/regexp.js b/deps/v8/src/regexp.js index cb11ad107c..22b08775b3 100644 --- a/deps/v8/src/regexp.js +++ b/deps/v8/src/regexp.js @@ -189,7 +189,7 @@ function RegExpExec(string) { // matchIndices is either null or the lastMatchInfo array. var matchIndices = %_RegExpExec(this, string, i, lastMatchInfo); - if (matchIndices === null) { + if (IS_NULL(matchIndices)) { this.lastIndex = 0; return null; } @@ -232,7 +232,7 @@ function RegExpTest(string) { %_Log('regexp', 'regexp-exec,%0r,%1S,%2i', [this, string, lastIndex]); // matchIndices is either null or the lastMatchInfo array. 
var matchIndices = %_RegExpExec(this, string, i, lastMatchInfo); - if (matchIndices === null) { + if (IS_NULL(matchIndices)) { this.lastIndex = 0; return false; } @@ -253,7 +253,7 @@ function RegExpTest(string) { %_Log('regexp', 'regexp-exec,%0r,%1S,%2i', [regexp, string, lastIndex]); // matchIndices is either null or the lastMatchInfo array. var matchIndices = %_RegExpExec(regexp, string, 0, lastMatchInfo); - if (matchIndices === null) { + if (IS_NULL(matchIndices)) { this.lastIndex = 0; return false; } @@ -384,7 +384,7 @@ function RegExpMakeCaptureGetter(n) { var lastMatchInfo = new InternalPackedArray( 2, // REGEXP_NUMBER_OF_CAPTURES "", // Last subject. - void 0, // Last input - settable with RegExpSetInput. + UNDEFINED, // Last input - settable with RegExpSetInput. 0, // REGEXP_FIRST_CAPTURE + 0 0 // REGEXP_FIRST_CAPTURE + 1 ); diff --git a/deps/v8/src/rewriter.cc b/deps/v8/src/rewriter.cc index 06335a80c7..70b362fd7d 100644 --- a/deps/v8/src/rewriter.cc +++ b/deps/v8/src/rewriter.cc @@ -207,6 +207,11 @@ void Processor::VisitSwitchStatement(SwitchStatement* node) { } +void Processor::VisitCaseClause(CaseClause* clause) { + UNREACHABLE(); +} + + void Processor::VisitContinueStatement(ContinueStatement* node) { is_set_ = false; } @@ -271,13 +276,12 @@ bool Rewriter::Rewrite(CompilationInfo* info) { // eval('with ({x:1}) x = 1'); // the end position of the function generated for executing the eval code // coincides with the end of the with scope which is the position of '1'. - int position = function->end_position(); + int pos = function->end_position(); VariableProxy* result_proxy = processor.factory()->NewVariableProxy( - result->name(), false, result->interface(), position); + result->name(), false, result->interface(), pos); result_proxy->BindTo(result); Statement* result_statement = - processor.factory()->NewReturnStatement(result_proxy); - result_statement->set_statement_pos(position); + processor.factory()->NewReturnStatement(result_proxy, pos); body->Add(result_statement, info->zone()); } } diff --git a/deps/v8/src/runtime-profiler.cc b/deps/v8/src/runtime-profiler.cc index 95dcc4f983..7c900b37d9 100644 --- a/deps/v8/src/runtime-profiler.cc +++ b/deps/v8/src/runtime-profiler.cc @@ -33,7 +33,6 @@ #include "bootstrapper.h" #include "code-stubs.h" #include "compilation-cache.h" -#include "deoptimizer.h" #include "execution.h" #include "full-codegen.h" #include "global-handles.h" @@ -185,7 +184,7 @@ void RuntimeProfiler::AttemptOnStackReplacement(JSFunction* function) { PrintF("]\n"); } - Deoptimizer::PatchInterruptCode(isolate_, shared->code()); + BackEdgeTable::Patch(isolate_, shared->code()); } diff --git a/deps/v8/src/runtime.cc b/deps/v8/src/runtime.cc index c09fb1d499..15cfc854bf 100644 --- a/deps/v8/src/runtime.cc +++ b/deps/v8/src/runtime.cc @@ -31,6 +31,7 @@ #include "v8.h" #include "accessors.h" +#include "allocation-site-scopes.h" #include "api.h" #include "arguments.h" #include "bootstrapper.h" @@ -348,10 +349,8 @@ MaybeObject* TransitionElements(Handle<Object> object, ElementsKind from_kind = Handle<JSObject>::cast(object)->map()->elements_kind(); if (Map::IsValidElementsTransition(from_kind, to_kind)) { - Handle<Object> result = JSObject::TransitionElementsKind( - Handle<JSObject>::cast(object), to_kind); - if (result.is_null()) return isolate->ThrowIllegalOperation(); - return *result; + JSObject::TransitionElementsKind(Handle<JSObject>::cast(object), to_kind); + return *object; } return isolate->ThrowIllegalOperation(); } @@ -488,44 +487,39 @@ 
RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateObjectLiteral) { bool has_function_literal = (flags & ObjectLiteral::kHasFunction) != 0; // Check if boilerplate exists. If not, create it first. - Handle<Object> boilerplate(literals->get(literals_index), isolate); - if (*boilerplate == isolate->heap()->undefined_value()) { - boilerplate = CreateObjectLiteralBoilerplate(isolate, - literals, - constant_properties, - should_have_fast_elements, - has_function_literal); - RETURN_IF_EMPTY_HANDLE(isolate, boilerplate); - // Update the functions literal and return the boilerplate. - literals->set(literals_index, *boilerplate); - } - return JSObject::cast(*boilerplate)->DeepCopy(isolate); -} - - -RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateObjectLiteralShallow) { - HandleScope scope(isolate); - ASSERT(args.length() == 4); - CONVERT_ARG_HANDLE_CHECKED(FixedArray, literals, 0); - CONVERT_SMI_ARG_CHECKED(literals_index, 1); - CONVERT_ARG_HANDLE_CHECKED(FixedArray, constant_properties, 2); - CONVERT_SMI_ARG_CHECKED(flags, 3); - bool should_have_fast_elements = (flags & ObjectLiteral::kFastElements) != 0; - bool has_function_literal = (flags & ObjectLiteral::kHasFunction) != 0; + Handle<Object> literal_site(literals->get(literals_index), isolate); + Handle<AllocationSite> site; + Handle<JSObject> boilerplate; + if (*literal_site == isolate->heap()->undefined_value()) { + Handle<Object> raw_boilerplate = CreateObjectLiteralBoilerplate( + isolate, + literals, + constant_properties, + should_have_fast_elements, + has_function_literal); + RETURN_IF_EMPTY_HANDLE(isolate, raw_boilerplate); + boilerplate = Handle<JSObject>::cast(raw_boilerplate); + + AllocationSiteCreationContext creation_context(isolate); + site = creation_context.EnterNewScope(); + RETURN_IF_EMPTY_HANDLE(isolate, + JSObject::DeepWalk(boilerplate, &creation_context)); + creation_context.ExitScope(site, boilerplate); - // Check if boilerplate exists. If not, create it first. - Handle<Object> boilerplate(literals->get(literals_index), isolate); - if (*boilerplate == isolate->heap()->undefined_value()) { - boilerplate = CreateObjectLiteralBoilerplate(isolate, - literals, - constant_properties, - should_have_fast_elements, - has_function_literal); - RETURN_IF_EMPTY_HANDLE(isolate, boilerplate); // Update the functions literal and return the boilerplate. 
- literals->set(literals_index, *boilerplate); + literals->set(literals_index, *site); + } else { + site = Handle<AllocationSite>::cast(literal_site); + boilerplate = Handle<JSObject>(JSObject::cast(site->transition_info()), + isolate); } - return isolate->heap()->CopyJSObject(JSObject::cast(*boilerplate)); + + AllocationSiteUsageContext usage_context(isolate, site, true); + usage_context.EnterNewScope(); + Handle<Object> copy = JSObject::DeepCopy(boilerplate, &usage_context); + usage_context.ExitScope(site, boilerplate); + RETURN_IF_EMPTY_HANDLE(isolate, copy); + return *copy; } @@ -541,9 +535,16 @@ static Handle<AllocationSite> GetLiteralAllocationSite( ASSERT(*elements != isolate->heap()->empty_fixed_array()); Handle<Object> boilerplate = Runtime::CreateArrayLiteralBoilerplate(isolate, literals, elements); - if (boilerplate.is_null()) return site; - site = isolate->factory()->NewAllocationSite(); - site->set_transition_info(*boilerplate); + if (boilerplate.is_null()) return Handle<AllocationSite>::null(); + + AllocationSiteCreationContext creation_context(isolate); + site = creation_context.EnterNewScope(); + if (JSObject::DeepWalk(Handle<JSObject>::cast(boilerplate), + &creation_context).is_null()) { + return Handle<AllocationSite>::null(); + } + creation_context.ExitScope(site, Handle<JSObject>::cast(boilerplate)); + literals->set(literals_index, *site); } else { site = Handle<AllocationSite>::cast(literal_site); @@ -564,8 +565,13 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateArrayLiteral) { literals_index, elements); RETURN_IF_EMPTY_HANDLE(isolate, site); - JSObject* boilerplate = JSObject::cast(site->transition_info()); - return boilerplate->DeepCopy(isolate); + Handle<JSObject> boilerplate(JSObject::cast(site->transition_info())); + AllocationSiteUsageContext usage_context(isolate, site, true); + usage_context.EnterNewScope(); + Handle<JSObject> copy = JSObject::DeepCopy(boilerplate, &usage_context); + usage_context.ExitScope(site, boilerplate); + RETURN_IF_EMPTY_HANDLE(isolate, copy); + return *copy; } @@ -586,11 +592,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateArrayLiteralShallow) { isolate->counters()->cow_arrays_created_runtime()->Increment(); } - AllocationSiteMode mode = AllocationSite::GetMode( - boilerplate->GetElementsKind()); - if (mode == TRACK_ALLOCATION_SITE) { - return isolate->heap()->CopyJSObjectWithAllocationSite( - boilerplate, *site); + if (AllocationSite::GetMode(boilerplate->GetElementsKind()) == + TRACK_ALLOCATION_SITE) { + return isolate->heap()->CopyJSObject(boilerplate, *site); } return isolate->heap()->CopyJSObject(boilerplate); @@ -822,6 +826,16 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ArrayBufferSliceImpl) { } +RUNTIME_FUNCTION(MaybeObject*, Runtime_ArrayBufferIsView) { + HandleScope scope(isolate); + ASSERT(args.length() == 1); + CONVERT_ARG_CHECKED(Object, object, 0); + return object->IsJSArrayBufferView() + ? isolate->heap()->true_value() + : isolate->heap()->false_value(); +} + + enum TypedArrayId { // arrayIds below should be synchromized with typedarray.js natives. ARRAY_ID_UINT8 = 1, @@ -954,17 +968,24 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_TypedArrayInitializeFromArrayLike) { HandleVector<Object>(NULL, 0))); } + // NOTE: not initializing backing store. // We assume that the caller of this function will initialize holder // with the loop // for(i = 0; i < length; i++) { holder[i] = source[i]; } + // We assume that the caller of this function is always a typed array + // constructor. 
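Returning to the literal runtimes above: Runtime_CreateObjectLiteral and Runtime_CreateArrayLiteral now cache a boilerplate (behind an AllocationSite) in the function's literals array on first evaluation and hand out deep copies afterwards. The caching shape, stripped of V8's site tracking (toy types, not the real API):

    #include <memory>
    #include <vector>

    struct Boilerplate {
      std::vector<int> elements;
    };

    // One slot per literal in the source: build the boilerplate on first
    // evaluation, then return a fresh deep copy each time so callers can
    // never mutate the cached original.
    class LiteralSlot {
     public:
      std::unique_ptr<Boilerplate> Evaluate() {
        if (!boilerplate_) {
          boilerplate_.reset(new Boilerplate{{1, 2, 3}});  // build once
        }
        return std::unique_ptr<Boilerplate>(new Boilerplate(*boilerplate_));
      }

     private:
      std::unique_ptr<Boilerplate> boilerplate_;
    };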
// If source is a typed array, this loop will always run to completion, // so we are sure that the backing store will be initialized. - // Otherwise, we do not know (the indexing operation might throw). - // Hence we require zero initialization unless our source is a typed array. - bool should_zero_initialize = !source->IsJSTypedArray(); + // Otherwise, the indexing operation might throw, so the loop will not + // run to completion and the typed array might remain partly initialized. + // However we further assume that the caller of this function is a typed array + // constructor, and the exception will propagate out of the constructor, + // therefore uninitialized memory will not be accessible by a user program. + // + // TODO(dslomov): revise this once we support subclassing. if (!Runtime::SetupArrayBufferAllocatingData( - isolate, buffer, byte_length, should_zero_initialize)) { + isolate, buffer, byte_length, false)) { return isolate->Throw(*isolate->factory()-> NewRangeError("invalid_array_buffer_length", HandleVector<Object>(NULL, 0))); @@ -1578,24 +1599,24 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ClassOf) { RUNTIME_FUNCTION(MaybeObject*, Runtime_GetPrototype) { - SealHandleScope shs(isolate); + HandleScope scope(isolate); ASSERT(args.length() == 1); - CONVERT_ARG_CHECKED(Object, obj, 0); + CONVERT_ARG_HANDLE_CHECKED(Object, obj, 0); // We don't expect access checks to be needed on JSProxy objects. ASSERT(!obj->IsAccessCheckNeeded() || obj->IsJSObject()); do { if (obj->IsAccessCheckNeeded() && - !isolate->MayNamedAccess(JSObject::cast(obj), - isolate->heap()->proto_string(), - v8::ACCESS_GET)) { - isolate->ReportFailedAccessCheck(JSObject::cast(obj), v8::ACCESS_GET); + !isolate->MayNamedAccessWrapper(Handle<JSObject>::cast(obj), + isolate->factory()->proto_string(), + v8::ACCESS_GET)) { + isolate->ReportFailedAccessCheck(JSObject::cast(*obj), v8::ACCESS_GET); RETURN_IF_SCHEDULED_EXCEPTION(isolate); return isolate->heap()->undefined_value(); } - obj = obj->GetPrototype(isolate); + obj = handle(obj->GetPrototype(isolate), isolate); } while (obj->IsJSObject() && - JSObject::cast(obj)->map()->is_hidden_prototype()); - return obj; + JSObject::cast(*obj)->map()->is_hidden_prototype()); + return *obj; } @@ -1654,6 +1675,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_IsInPrototypeChain) { static bool CheckAccessException(Object* callback, v8::AccessType access_type) { + DisallowHeapAllocation no_gc; if (callback->IsAccessorInfo()) { AccessorInfo* info = AccessorInfo::cast(callback); return @@ -1676,20 +1698,20 @@ static bool CheckAccessException(Object* callback, template<class Key> static bool CheckGenericAccess( - JSObject* receiver, - JSObject* holder, + Handle<JSObject> receiver, + Handle<JSObject> holder, Key key, v8::AccessType access_type, - bool (Isolate::*mayAccess)(JSObject*, Key, v8::AccessType)) { + bool (Isolate::*mayAccess)(Handle<JSObject>, Key, v8::AccessType)) { Isolate* isolate = receiver->GetIsolate(); - for (JSObject* current = receiver; + for (Handle<JSObject> current = receiver; true; - current = JSObject::cast(current->GetPrototype())) { + current = handle(JSObject::cast(current->GetPrototype()), isolate)) { if (current->IsAccessCheckNeeded() && !(isolate->*mayAccess)(current, key, access_type)) { return false; } - if (current == holder) break; + if (current.is_identical_to(holder)) break; } return true; } @@ -1702,28 +1724,29 @@ enum AccessCheckResult { }; -static AccessCheckResult CheckPropertyAccess( - JSObject* obj, - Name* name, - v8::AccessType access_type) { 
+static AccessCheckResult CheckPropertyAccess(Handle<JSObject> obj, + Handle<Name> name, + v8::AccessType access_type) { uint32_t index; if (name->AsArrayIndex(&index)) { // TODO(1095): we should traverse hidden prototype hierachy as well. if (CheckGenericAccess( - obj, obj, index, access_type, &Isolate::MayIndexedAccess)) { + obj, obj, index, access_type, &Isolate::MayIndexedAccessWrapper)) { return ACCESS_ALLOWED; } - obj->GetIsolate()->ReportFailedAccessCheck(obj, access_type); + obj->GetIsolate()->ReportFailedAccessCheck(*obj, access_type); return ACCESS_FORBIDDEN; } - LookupResult lookup(obj->GetIsolate()); - obj->LocalLookup(name, &lookup, true); + Isolate* isolate = obj->GetIsolate(); + LookupResult lookup(isolate); + obj->LocalLookup(*name, &lookup, true); if (!lookup.IsProperty()) return ACCESS_ABSENT; - if (CheckGenericAccess<Object*>( - obj, lookup.holder(), name, access_type, &Isolate::MayNamedAccess)) { + Handle<JSObject> holder(lookup.holder(), isolate); + if (CheckGenericAccess<Handle<Object> >( + obj, holder, name, access_type, &Isolate::MayNamedAccessWrapper)) { return ACCESS_ALLOWED; } @@ -1740,7 +1763,7 @@ static AccessCheckResult CheckPropertyAccess( case INTERCEPTOR: // If the object has an interceptor, try real named properties. // Overwrite the result to fetch the correct property later. - lookup.holder()->LookupRealNamedProperty(name, &lookup); + holder->LookupRealNamedProperty(*name, &lookup); if (lookup.IsProperty() && lookup.IsPropertyCallbacks()) { if (CheckAccessException(lookup.GetCallbackObject(), access_type)) { return ACCESS_ALLOWED; @@ -1751,7 +1774,7 @@ static AccessCheckResult CheckPropertyAccess( break; } - obj->GetIsolate()->ReportFailedAccessCheck(obj, access_type); + isolate->ReportFailedAccessCheck(*obj, access_type); return ACCESS_FORBIDDEN; } @@ -1769,30 +1792,30 @@ enum PropertyDescriptorIndices { }; -static MaybeObject* GetOwnProperty(Isolate* isolate, - Handle<JSObject> obj, - Handle<Name> name) { +static Handle<Object> GetOwnProperty(Isolate* isolate, + Handle<JSObject> obj, + Handle<Name> name) { Heap* heap = isolate->heap(); + Factory* factory = isolate->factory(); // Due to some WebKit tests, we want to make sure that we do not log // more than one access failure here. 
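The handlified CheckGenericAccess above walks from the receiver along the prototype chain, lets any access-checked object veto, and stops once the holder has been vetted. Its control flow in isolation (toy object type; assumes, as the V8 code does, that the holder lies on the receiver's chain):

    #include <functional>

    struct Obj {
      Obj* prototype;
      bool needs_check;
    };

    // Every access-checked object between the receiver and the holder,
    // inclusive, must pass the callback; the first failure vetoes access.
    static bool CheckChainAccess(Obj* receiver, Obj* holder,
                                 const std::function<bool(Obj*)>& may_access) {
      for (Obj* current = receiver; ; current = current->prototype) {
        if (current->needs_check && !may_access(current)) return false;
        if (current == holder) return true;
      }
    }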
AccessCheckResult access_check_result = - CheckPropertyAccess(*obj, *name, v8::ACCESS_HAS); - RETURN_IF_SCHEDULED_EXCEPTION(isolate); + CheckPropertyAccess(obj, name, v8::ACCESS_HAS); + RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object); switch (access_check_result) { - case ACCESS_FORBIDDEN: return heap->false_value(); + case ACCESS_FORBIDDEN: return factory->false_value(); case ACCESS_ALLOWED: break; - case ACCESS_ABSENT: return heap->undefined_value(); + case ACCESS_ABSENT: return factory->undefined_value(); } PropertyAttributes attrs = obj->GetLocalPropertyAttribute(*name); if (attrs == ABSENT) { - RETURN_IF_SCHEDULED_EXCEPTION(isolate); - return heap->undefined_value(); + RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object); + return factory->undefined_value(); } ASSERT(!isolate->has_scheduled_exception()); AccessorPair* raw_accessors = obj->GetLocalPropertyAccessorPair(*name); Handle<AccessorPair> accessors(raw_accessors, isolate); - Handle<FixedArray> elms = isolate->factory()->NewFixedArray(DESCRIPTOR_SIZE); elms->set(ENUMERABLE_INDEX, heap->ToBoolean((attrs & DONT_ENUM) == 0)); elms->set(CONFIGURABLE_INDEX, heap->ToBoolean((attrs & DONT_DELETE) == 0)); @@ -1802,28 +1825,30 @@ static MaybeObject* GetOwnProperty(Isolate* isolate, elms->set(WRITABLE_INDEX, heap->ToBoolean((attrs & READ_ONLY) == 0)); // GetProperty does access check. Handle<Object> value = GetProperty(isolate, obj, name); - RETURN_IF_EMPTY_HANDLE(isolate, value); + RETURN_IF_EMPTY_HANDLE_VALUE(isolate, value, Handle<Object>::null()); elms->set(VALUE_INDEX, *value); } else { // Access checks are performed for both accessors separately. // When they fail, the respective field is not set in the descriptor. - Object* getter = accessors->GetComponent(ACCESSOR_GETTER); - Object* setter = accessors->GetComponent(ACCESSOR_SETTER); - if (!getter->IsMap() && CheckPropertyAccess(*obj, *name, v8::ACCESS_GET)) { + Handle<Object> getter(accessors->GetComponent(ACCESSOR_GETTER), isolate); + Handle<Object> setter(accessors->GetComponent(ACCESSOR_SETTER), isolate); + + if (!getter->IsMap() && CheckPropertyAccess(obj, name, v8::ACCESS_GET)) { ASSERT(!isolate->has_scheduled_exception()); - elms->set(GETTER_INDEX, getter); + elms->set(GETTER_INDEX, *getter); } else { - RETURN_IF_SCHEDULED_EXCEPTION(isolate); + RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object); } - if (!setter->IsMap() && CheckPropertyAccess(*obj, *name, v8::ACCESS_SET)) { + + if (!setter->IsMap() && CheckPropertyAccess(obj, name, v8::ACCESS_SET)) { ASSERT(!isolate->has_scheduled_exception()); - elms->set(SETTER_INDEX, setter); + elms->set(SETTER_INDEX, *setter); } else { - RETURN_IF_SCHEDULED_EXCEPTION(isolate); + RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object); } } - return *isolate->factory()->NewJSArrayWithElements(elms); + return isolate->factory()->NewJSArrayWithElements(elms); } @@ -1839,15 +1864,19 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetOwnProperty) { ASSERT(args.length() == 2); CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0); CONVERT_ARG_HANDLE_CHECKED(Name, name, 1); - return GetOwnProperty(isolate, obj, name); + Handle<Object> result = GetOwnProperty(isolate, obj, name); + RETURN_IF_EMPTY_HANDLE(isolate, result); + return *result; } RUNTIME_FUNCTION(MaybeObject*, Runtime_PreventExtensions) { - SealHandleScope shs(isolate); + HandleScope scope(isolate); ASSERT(args.length() == 1); - CONVERT_ARG_CHECKED(JSObject, obj, 0); - return obj->PreventExtensions(); + CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0); + Handle<Object> result = 
JSObject::PreventExtensions(obj); + RETURN_IF_EMPTY_HANDLE(isolate, result); + return *result; } @@ -1871,8 +1900,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpCompile) { CONVERT_ARG_HANDLE_CHECKED(JSRegExp, re, 0); CONVERT_ARG_HANDLE_CHECKED(String, pattern, 1); CONVERT_ARG_HANDLE_CHECKED(String, flags, 2); - Handle<Object> result = - RegExpImpl::Compile(re, pattern, flags); + Handle<Object> result = RegExpImpl::Compile(re, pattern, flags); RETURN_IF_EMPTY_HANDLE(isolate, result); return *result; } @@ -2164,7 +2192,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareContextSlot) { // Declare the property by setting it to the initial value if provided, // or undefined, and use the correct mode (e.g. READ_ONLY attribute for // constant declarations). - ASSERT(!object->HasLocalProperty(*name)); + ASSERT(!JSReceiver::HasLocalProperty(object, name)); Handle<Object> value(isolate->heap()->undefined_value(), isolate); if (*initial_value != NULL) value = initial_value; // Declaring a const context slot is a conflicting declaration if @@ -2196,7 +2224,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareContextSlot) { RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeVarGlobal) { - SealHandleScope shs(isolate); + HandleScope scope(isolate); // args[0] == name // args[1] == language_mode // args[2] == value (optional) @@ -2207,7 +2235,6 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeVarGlobal) { bool assign = args.length() == 3; CONVERT_ARG_HANDLE_CHECKED(String, name, 0); - GlobalObject* global = isolate->context()->global_object(); RUNTIME_ASSERT(args[1]->IsSmi()); CONVERT_LANGUAGE_MODE_ARG(language_mode, 1); StrictModeFlag strict_mode_flag = (language_mode == CLASSIC_MODE) @@ -2224,28 +2251,33 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeVarGlobal) { // to assign to the property. // Note that objects can have hidden prototypes, so we need to traverse // the whole chain of hidden prototypes to do a 'local' lookup. - Object* object = global; LookupResult lookup(isolate); - JSObject::cast(object)->LocalLookup(*name, &lookup, true); + isolate->context()->global_object()->LocalLookup(*name, &lookup, true); if (lookup.IsInterceptor()) { - HandleScope handle_scope(isolate); PropertyAttributes intercepted = lookup.holder()->GetPropertyAttribute(*name); if (intercepted != ABSENT && (intercepted & READ_ONLY) == 0) { // Found an interceptor that's not read only. if (assign) { - return lookup.holder()->SetProperty( - &lookup, *name, args[2], attributes, strict_mode_flag); + CONVERT_ARG_HANDLE_CHECKED(Object, value, 2); + Handle<Object> result = JSObject::SetPropertyForResult( + handle(lookup.holder()), &lookup, name, value, attributes, + strict_mode_flag); + RETURN_IF_EMPTY_HANDLE(isolate, result); + return *result; } else { return isolate->heap()->undefined_value(); } } } - // Reload global in case the loop above performed a GC. 
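Nearly every conversion in these hunks follows the same handlification recipe: replace SealHandleScope with HandleScope, take arguments as handles, and treat an empty result handle as a scheduled exception that the caller must check before dereferencing. A standalone sketch of that convention, with std::optional standing in for Handle (names illustrative only):

#include <cstdio>
#include <optional>

// Callee: empty means "exception scheduled", a value means success.
static std::optional<int> SetProperty(bool ok) {
  if (!ok) return std::nullopt;
  return 42;
}

// Caller: the moral equivalent of RETURN_IF_EMPTY_HANDLE -- check before
// dereferencing so failures propagate unconditionally.
static std::optional<int> RuntimeEntry(bool ok) {
  std::optional<int> result = SetProperty(ok);
  if (!result) return std::nullopt;
  return *result;  // dereferenced only on the success path
}

int main() {
  printf("%d %d\n", RuntimeEntry(true).value_or(-1),
         RuntimeEntry(false).value_or(-1));  // 42 -1
}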
- global = isolate->context()->global_object(); if (assign) { - return global->SetProperty(*name, args[2], attributes, strict_mode_flag); + CONVERT_ARG_HANDLE_CHECKED(Object, value, 2); + Handle<GlobalObject> global(isolate->context()->global_object()); + Handle<Object> result = JSReceiver::SetProperty( + global, name, value, attributes, strict_mode_flag); + RETURN_IF_EMPTY_HANDLE(isolate, result); + return *result; } return isolate->heap()->undefined_value(); } @@ -2901,19 +2933,19 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetCode) { source_shared->set_dont_flush(true); // Set the code, scope info, formal parameter count, and the length - // of the target shared function info. Set the source code of the - // target function to undefined. SetCode is only used for built-in - // constructors like String, Array, and Object, and some web code - // doesn't like seeing source code for constructors. + // of the target shared function info. target_shared->ReplaceCode(source_shared->code()); target_shared->set_scope_info(source_shared->scope_info()); target_shared->set_length(source_shared->length()); target_shared->set_formal_parameter_count( source_shared->formal_parameter_count()); - target_shared->set_script(isolate->heap()->undefined_value()); - - // Since we don't store the source we should never optimize this. - target_shared->code()->set_optimizable(false); + target_shared->set_script(source_shared->script()); + target_shared->set_start_position_and_type( + source_shared->start_position_and_type()); + target_shared->set_end_position(source_shared->end_position()); + bool was_native = target_shared->native(); + target_shared->set_compiler_hints(source_shared->compiler_hints()); + target_shared->set_native(was_native); // Set the code of the target function. target->ReplaceCode(source_shared->code()); @@ -2945,10 +2977,24 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetCode) { RUNTIME_FUNCTION(MaybeObject*, Runtime_SetExpectedNumberOfProperties) { HandleScope scope(isolate); ASSERT(args.length() == 2); - CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0); + CONVERT_ARG_HANDLE_CHECKED(JSFunction, func, 0); CONVERT_SMI_ARG_CHECKED(num, 1); RUNTIME_ASSERT(num >= 0); - SetExpectedNofProperties(function, num); + // If objects constructed from this function exist then changing + // 'estimated_nof_properties' is dangerous since the previous value might + // have been compiled into the fast construct stub. Moreover, the inobject + // slack tracking logic might have adjusted the previous value, so even + // passing the same value is risky. 
+ if (!func->shared()->live_objects_may_exist()) { + func->shared()->set_expected_nof_properties(num); + if (func->has_initial_map()) { + Handle<Map> new_initial_map = + func->GetIsolate()->factory()->CopyMap( + Handle<Map>(func->initial_map())); + new_initial_map->set_unused_property_fields(num); + func->set_initial_map(*new_initial_map); + } + } return isolate->heap()->undefined_value(); } @@ -3090,10 +3136,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ThrowGeneratorStateError) { RUNTIME_FUNCTION(MaybeObject*, Runtime_ObjectFreeze) { - SealHandleScope shs(isolate); + HandleScope scope(isolate); ASSERT(args.length() == 1); - CONVERT_ARG_CHECKED(JSObject, object, 0); - return object->Freeze(isolate); + CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0); + Handle<Object> result = JSObject::Freeze(object); + RETURN_IF_EMPTY_HANDLE(isolate, result); + return *result; } @@ -4778,7 +4826,7 @@ MaybeObject* Runtime::HasObjectProperty(Isolate* isolate, // Check if the given key is an array index. uint32_t index; if (key->ToArrayIndex(&index)) { - return isolate->heap()->ToBoolean(object->HasElement(index)); + return isolate->heap()->ToBoolean(JSReceiver::HasElement(object, index)); } // Convert the key to a name - possibly by calling back into JavaScript. @@ -4793,7 +4841,7 @@ MaybeObject* Runtime::HasObjectProperty(Isolate* isolate, name = Handle<Name>::cast(converted); } - return isolate->heap()->ToBoolean(object->HasProperty(*name)); + return isolate->heap()->ToBoolean(JSReceiver::HasProperty(object, name)); } MaybeObject* Runtime::GetObjectPropertyOrFail( @@ -5028,11 +5076,15 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DefineOrRedefineDataProperty) { // TODO(mstarzinger): So far this only works if property attributes don't // change, this should be fixed once we cleanup the underlying code. if (callback->IsForeign() && result.GetAttributes() == attr) { - return js_object->SetPropertyWithCallback(callback, - *name, - *obj_value, - result.holder(), - kStrictMode); + Handle<Object> result_object = + JSObject::SetPropertyWithCallback(js_object, + handle(callback, isolate), + name, + obj_value, + handle(result.holder()), + kStrictMode); + RETURN_IF_EMPTY_HANDLE(isolate, result_object); + return *result_object; } } @@ -5128,11 +5180,14 @@ MaybeObject* Runtime::SetObjectProperty(Isolate* isolate, if (object->IsJSProxy()) { bool has_pending_exception = false; - Handle<Object> name = key->IsSymbol() + Handle<Object> name_object = key->IsSymbol() ? key : Execution::ToString(isolate, key, &has_pending_exception); if (has_pending_exception) return Failure::Exception(); - return JSProxy::cast(*object)->SetProperty( - Name::cast(*name), *value, attr, strict_mode); + Handle<Name> name = Handle<Name>::cast(name_object); + Handle<Object> result = JSReceiver::SetProperty( + Handle<JSProxy>::cast(object), name, value, attr, strict_mode); + RETURN_IF_EMPTY_HANDLE(isolate, result); + return *result; } // If the object isn't a JavaScript object, we ignore the store. 
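Both Runtime::HasObjectProperty and Runtime::SetObjectProperty start by classifying the key: a canonical uint32 array index selects the element path, anything else is converted to a name. A self-contained approximation of that classification; V8's real ToArrayIndex lives on the object model, so only the arithmetic is shown here:

#include <cctype>
#include <cstdint>
#include <cstdio>
#include <string>

// Returns true when |key| is a canonical array index: decimal digits,
// no leading zeros (except "0" itself), value at most 2^32 - 2.
static bool ToArrayIndex(const std::string& key, uint32_t* index) {
  if (key.empty() || key.size() > 10) return false;
  if (key.size() > 1 && key[0] == '0') return false;
  uint64_t value = 0;
  for (char c : key) {
    if (!isdigit(static_cast<unsigned char>(c))) return false;
    value = value * 10 + static_cast<uint64_t>(c - '0');
  }
  if (value > 0xFFFFFFFEull) return false;  // 4294967295 is not an index
  *index = static_cast<uint32_t>(value);
  return true;
}

int main() {
  uint32_t i;
  printf("%d %d %d\n",
         ToArrayIndex("42", &i),           // 1: element path
         ToArrayIndex("042", &i),          // 0: named path
         ToArrayIndex("4294967295", &i));  // 0: too large to be an index
}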
@@ -5172,7 +5227,6 @@ MaybeObject* Runtime::SetObjectProperty(Isolate* isolate, } if (key->IsName()) { - MaybeObject* result; Handle<Name> name = Handle<Name>::cast(key); if (name->AsArrayIndex(&index)) { if (js_object->HasExternalArrayElements()) { @@ -5184,13 +5238,15 @@ MaybeObject* Runtime::SetObjectProperty(Isolate* isolate, value = number; } } - result = js_object->SetElement( + MaybeObject* result = js_object->SetElement( index, *value, attr, strict_mode, true, set_mode); + if (result->IsFailure()) return result; } else { if (name->IsString()) Handle<String>::cast(name)->TryFlatten(); - result = js_object->SetProperty(*name, *value, attr, strict_mode); + Handle<Object> result = + JSReceiver::SetProperty(js_object, name, value, attr, strict_mode); + RETURN_IF_EMPTY_HANDLE(isolate, result); } - if (result->IsFailure()) return result; return *value; } @@ -5205,7 +5261,10 @@ MaybeObject* Runtime::SetObjectProperty(Isolate* isolate, return js_object->SetElement( index, *value, attr, strict_mode, true, set_mode); } else { - return js_object->SetProperty(*name, *value, attr, strict_mode); + Handle<Object> result = + JSReceiver::SetProperty(js_object, name, value, attr, strict_mode); + RETURN_IF_EMPTY_HANDLE(isolate, result); + return *result; } } @@ -5504,7 +5563,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeleteProperty) { static MaybeObject* HasLocalPropertyImplementation(Isolate* isolate, Handle<JSObject> object, Handle<Name> key) { - if (object->HasLocalProperty(*key)) return isolate->heap()->true_value(); + if (JSReceiver::HasLocalProperty(object, key)) { + return isolate->heap()->true_value(); + } // Handle hidden prototypes. If there's a hidden prototype above this thing // then we have to check it for properties, because they are supposed to // look like they are on this object. @@ -5521,40 +5582,39 @@ static MaybeObject* HasLocalPropertyImplementation(Isolate* isolate, RUNTIME_FUNCTION(MaybeObject*, Runtime_HasLocalProperty) { - SealHandleScope shs(isolate); + HandleScope scope(isolate); ASSERT(args.length() == 2); - CONVERT_ARG_CHECKED(Name, key, 1); + CONVERT_ARG_HANDLE_CHECKED(Name, key, 1); + Handle<Object> object = args.at<Object>(0); uint32_t index; const bool key_is_array_index = key->AsArrayIndex(&index); - Object* obj = args[0]; // Only JS objects can have properties. - if (obj->IsJSObject()) { - JSObject* object = JSObject::cast(obj); + if (object->IsJSObject()) { + Handle<JSObject> js_obj = Handle<JSObject>::cast(object); // Fast case: either the key is a real named property or it is not // an array index and there are no interceptors or hidden // prototypes. - if (object->HasRealNamedProperty(isolate, key)) { + if (JSObject::HasRealNamedProperty(js_obj, key)) { ASSERT(!isolate->has_scheduled_exception()); return isolate->heap()->true_value(); } else { RETURN_IF_SCHEDULED_EXCEPTION(isolate); } - Map* map = object->map(); + Map* map = js_obj->map(); if (!key_is_array_index && !map->has_named_interceptor() && !HeapObject::cast(map->prototype())->map()->is_hidden_prototype()) { return isolate->heap()->false_value(); } // Slow case. - HandleScope scope(isolate); return HasLocalPropertyImplementation(isolate, - Handle<JSObject>(object), + Handle<JSObject>(js_obj), Handle<Name>(key)); - } else if (obj->IsString() && key_is_array_index) { + } else if (object->IsString() && key_is_array_index) { // Well, there is one exception: Handle [] on strings. 
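Before the string subscript special case continues below, a note on HasLocalPropertyImplementation above: it has to make hidden prototypes transparent, so a miss on the receiver falls through to a hidden prototype but stops at an ordinary one. A toy model of that walk, with the hidden-prototype flag and layout invented for illustration:

#include <cstdio>
#include <set>
#include <string>

// Toy object: its own properties, plus an optional prototype that may be
// "hidden", i.e. its properties should look like they live on this object.
struct Obj {
  std::set<std::string> properties;
  const Obj* prototype = nullptr;
  bool prototype_is_hidden = false;
};

static bool HasLocalProperty(const Obj* obj, const std::string& key) {
  if (obj->properties.count(key)) return true;
  // Only hidden prototypes are searched; an ordinary prototype is not
  // "local", so the walk stops there.
  if (obj->prototype && obj->prototype_is_hidden) {
    return HasLocalProperty(obj->prototype, key);
  }
  return false;
}

int main() {
  Obj hidden;  hidden.properties.insert("x");
  Obj o;       o.prototype = &hidden;  o.prototype_is_hidden = true;
  printf("%d %d\n", HasLocalProperty(&o, "x"), HasLocalProperty(&o, "y"));
}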
- String* string = String::cast(obj); + Handle<String> string = Handle<String>::cast(object); if (index < static_cast<uint32_t>(string->length())) { return isolate->heap()->true_value(); } @@ -5564,12 +5624,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_HasLocalProperty) { RUNTIME_FUNCTION(MaybeObject*, Runtime_HasProperty) { - SealHandleScope shs(isolate); + HandleScope scope(isolate); ASSERT(args.length() == 2); - CONVERT_ARG_CHECKED(JSReceiver, receiver, 0); - CONVERT_ARG_CHECKED(Name, key, 1); + CONVERT_ARG_HANDLE_CHECKED(JSReceiver, receiver, 0); + CONVERT_ARG_HANDLE_CHECKED(Name, key, 1); - bool result = receiver->HasProperty(key); + bool result = JSReceiver::HasProperty(receiver, key); RETURN_IF_SCHEDULED_EXCEPTION(isolate); if (isolate->has_pending_exception()) return Failure::Exception(); return isolate->heap()->ToBoolean(result); @@ -5577,12 +5637,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_HasProperty) { RUNTIME_FUNCTION(MaybeObject*, Runtime_HasElement) { - SealHandleScope shs(isolate); + HandleScope scope(isolate); ASSERT(args.length() == 2); - CONVERT_ARG_CHECKED(JSReceiver, receiver, 0); + CONVERT_ARG_HANDLE_CHECKED(JSReceiver, receiver, 0); CONVERT_SMI_ARG_CHECKED(index, 1); - bool result = receiver->HasElement(index); + bool result = JSReceiver::HasElement(receiver, index); RETURN_IF_SCHEDULED_EXCEPTION(isolate); if (isolate->has_pending_exception()) return Failure::Exception(); return isolate->heap()->ToBoolean(result); @@ -5923,12 +5983,13 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetArgumentsProperty) { RUNTIME_FUNCTION(MaybeObject*, Runtime_ToFastProperties) { - SealHandleScope shs(isolate); + HandleScope scope(isolate); ASSERT(args.length() == 1); - Object* object = args[0]; - return (object->IsJSObject() && !object->IsGlobalObject()) - ? JSObject::cast(object)->TransformToFastProperties(0) - : object; + CONVERT_ARG_HANDLE_CHECKED(Object, object, 0); + if (object->IsJSObject() && !object->IsGlobalObject()) { + JSObject::TransformToFastProperties(Handle<JSObject>::cast(object), 0); + } + return *object; } @@ -7945,21 +8006,18 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NewStrictArgumentsFast) { // Allocate the elements if needed. if (length > 0) { // Allocate the fixed array. - Object* obj; - { MaybeObject* maybe_obj = isolate->heap()->AllocateRawFixedArray(length); - if (!maybe_obj->ToObject(&obj)) return maybe_obj; + FixedArray* array; + { MaybeObject* maybe_obj = + isolate->heap()->AllocateUninitializedFixedArray(length); + if (!maybe_obj->To(&array)) return maybe_obj; } DisallowHeapAllocation no_gc; - FixedArray* array = reinterpret_cast<FixedArray*>(obj); - array->set_map_no_write_barrier(isolate->heap()->fixed_array_map()); - array->set_length(length); - WriteBarrierMode mode = array->GetWriteBarrierMode(no_gc); for (int i = 0; i < length; i++) { array->set(i, *--parameters, mode); } - JSObject::cast(result)->set_elements(FixedArray::cast(obj)); + JSObject::cast(result)->set_elements(array); } return result; } @@ -8288,7 +8346,7 @@ bool AllowOptimization(Isolate* isolate, Handle<JSFunction> function) { // If the function is not optimizable or debugger is active continue using the // code from the full compiler. 
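Stepping back to Runtime_NewStrictArgumentsFast above: the rewrite allocates a typed FixedArray and fills it by walking the parameter pointer backwards, since arguments sit in reverse order on the stack. The core of that copy, modelled standalone (the stack layout here is assumed for illustration); the optimization-gating hunk resumes right below.

#include <cstdio>
#include <vector>

int main() {
  // Assumed stack layout: parameters[-1] is the first argument,
  // parameters[-2] the second, and so on.
  int slots[] = {30, 20, 10};   // third, second, first argument
  int* parameters = slots + 3;  // one past the newest slot
  int length = 3;

  std::vector<int> array(length);
  for (int i = 0; i < length; i++) {
    array[i] = *--parameters;   // visits 10, then 20, then 30
  }
  for (int v : array) printf("%d ", v);  // prints 10 20 30
  printf("\n");
}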
- if (!FLAG_crankshaft || + if (!isolate->use_crankshaft() || function->shared()->optimization_disabled() || isolate->DebuggerHasBreakPoints()) { if (FLAG_trace_opt) { @@ -8436,14 +8494,6 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NotifyDeoptimized) { } -RUNTIME_FUNCTION(MaybeObject*, Runtime_NotifyOSR) { - SealHandleScope shs(isolate); - Deoptimizer* deoptimizer = Deoptimizer::Grab(isolate); - delete deoptimizer; - return isolate->heap()->undefined_value(); -} - - RUNTIME_FUNCTION(MaybeObject*, Runtime_DeoptimizeFunction) { HandleScope scope(isolate); ASSERT(args.length() == 1); @@ -8501,8 +8551,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_OptimizeFunctionOnNextCall) { if (type->IsOneByteEqualTo(STATIC_ASCII_VECTOR("osr"))) { // Start patching from the currently patched loop nesting level. int current_level = unoptimized->allow_osr_at_loop_nesting_level(); - ASSERT(Deoptimizer::VerifyInterruptCode( - isolate, unoptimized, current_level)); + ASSERT(BackEdgeTable::Verify(isolate, unoptimized, current_level)); for (int i = current_level + 1; i <= Code::kMaxLoopNestingMarker; i++) { unoptimized->set_allow_osr_at_loop_nesting_level(i); isolate->runtime_profiler()->AttemptOnStackReplacement(*function); @@ -8560,6 +8609,13 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetOptimizationStatus) { } +RUNTIME_FUNCTION(MaybeObject*, Runtime_UnblockConcurrentRecompilation) { + RUNTIME_ASSERT(FLAG_block_concurrent_recompilation); + isolate->optimizing_compiler_thread()->Unblock(); + return isolate->heap()->undefined_value(); +} + + RUNTIME_FUNCTION(MaybeObject*, Runtime_GetOptimizationCount) { HandleScope scope(isolate); ASSERT(args.length() == 1); @@ -8572,7 +8628,7 @@ static bool IsSuitableForOnStackReplacement(Isolate* isolate, Handle<JSFunction> function, Handle<Code> unoptimized) { // Keep track of whether we've succeeded in optimizing. - if (!unoptimized->optimizable()) return false; + if (!isolate->use_crankshaft() || !unoptimized->optimizable()) return false; // If we are trying to do OSR when there are already optimized // activations of the function, it means (a) the function is directly or // indirectly recursive and (b) an optimized invocation has been @@ -8611,7 +8667,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CompileForOnStackReplacement) { Handle<Code> result = Handle<Code>::null(); BailoutId ast_id = BailoutId::None(); - if (FLAG_concurrent_recompilation && FLAG_concurrent_osr) { + if (FLAG_concurrent_osr) { if (isolate->optimizing_compiler_thread()-> IsQueuedForOSR(function, pc_offset)) { // Still waiting for the optimizing compiler thread to finish. Carry on. @@ -8623,25 +8679,25 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CompileForOnStackReplacement) { return NULL; } - OptimizingCompiler* compiler = isolate->optimizing_compiler_thread()-> + RecompileJob* job = isolate->optimizing_compiler_thread()-> FindReadyOSRCandidate(function, pc_offset); - if (compiler == NULL) { + if (job == NULL) { if (IsSuitableForOnStackReplacement(isolate, function, unoptimized) && Compiler::RecompileConcurrent(function, pc_offset)) { if (function->IsMarkedForLazyRecompilation() || function->IsMarkedForConcurrentRecompilation()) { // Prevent regular recompilation if we queue this for OSR. // TODO(yangguo): remove this as soon as OSR becomes one-shot. - function->ReplaceCode(function->shared()->code()); + function->ReplaceCode(*unoptimized); } return NULL; } // Fall through to the end in case of failure. } else { // TODO(titzer): don't install the OSR code into the function. 
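AllowOptimization and IsSuitableForOnStackReplacement now consult the per-isolate use_crankshaft() predicate instead of the global FLAG_crankshaft, combining it with the per-function and debugger conditions. A compact model of that gate, with the field names simplified from V8's accessors:

#include <cstdio>

struct Isolate {
  bool use_crankshaft;           // per-isolate, no longer a global flag
  bool debugger_has_breakpoints;
};

struct Function {
  bool optimization_disabled;    // e.g. bailed out too many times
};

// Returns true when optimized (re)compilation may proceed; any one of
// the three conditions keeps the function on full-codegen output.
static bool AllowOptimization(const Isolate& isolate, const Function& f) {
  if (!isolate.use_crankshaft) return false;
  if (f.optimization_disabled) return false;
  if (isolate.debugger_has_breakpoints) return false;
  return true;
}

int main() {
  Isolate i{true, false};
  Function f{false};
  printf("%d\n", AllowOptimization(i, f));  // 1
}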
- ast_id = compiler->info()->osr_ast_id(); - result = Compiler::InstallOptimizedCode(compiler); + ast_id = job->info()->osr_ast_id(); + result = Compiler::InstallOptimizedCode(job); } } else if (IsSuitableForOnStackReplacement(isolate, function, unoptimized)) { ast_id = unoptimized->TranslatePcOffsetToAstId(pc_offset); @@ -8655,8 +8711,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CompileForOnStackReplacement) { result = JSFunction::CompileOsr(function, ast_id, CLEAR_EXCEPTION); } - // Revert the patched interrupt now, regardless of whether OSR succeeds. - Deoptimizer::RevertInterruptCode(isolate, *unoptimized); + // Revert the patched back edge table, regardless of whether OSR succeeds. + BackEdgeTable::Revert(isolate, *unoptimized); // Check whether we ended up with usable optimized code. if (!result.is_null() && result->kind() == Code::OPTIMIZED_FUNCTION) { @@ -9193,7 +9249,7 @@ static ObjectPair LoadContextSlotHelper(Arguments args, // property from it. if (!holder.is_null()) { Handle<JSReceiver> object = Handle<JSReceiver>::cast(holder); - ASSERT(object->IsJSProxy() || object->HasProperty(*name)); + ASSERT(object->IsJSProxy() || JSReceiver::HasProperty(object, name)); // GetProperty below can cause GC. Handle<Object> receiver_handle( object->IsGlobalObject() @@ -10174,7 +10230,7 @@ static bool IterateElements(Isolate* isolate, Handle<Object> element_value(elements->get(j), isolate); if (!element_value->IsTheHole()) { visitor->visit(j, element_value); - } else if (receiver->HasElement(j)) { + } else if (JSReceiver::HasElement(receiver, j)) { // Call GetElement on receiver, not its prototype, or getters won't // have the correct receiver. element_value = Object::GetElement(isolate, receiver, j); @@ -10199,7 +10255,7 @@ static bool IterateElements(Isolate* isolate, Handle<Object> element_value = isolate->factory()->NewNumber(double_value); visitor->visit(j, element_value); - } else if (receiver->HasElement(j)) { + } else if (JSReceiver::HasElement(receiver, j)) { // Call GetElement on receiver, not its prototype, or getters won't // have the correct receiver. Handle<Object> element_value = @@ -10492,11 +10548,11 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GlobalPrint) { // property. // Returns the number of non-undefined elements collected. RUNTIME_FUNCTION(MaybeObject*, Runtime_RemoveArrayHoles) { - SealHandleScope shs(isolate); + HandleScope scope(isolate); ASSERT(args.length() == 2); - CONVERT_ARG_CHECKED(JSObject, object, 0); + CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0); CONVERT_NUMBER_CHECKED(uint32_t, limit, Uint32, args[1]); - return object->PrepareElementsForSort(limit); + return *JSObject::PrepareElementsForSort(object, limit); } @@ -10587,14 +10643,17 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetArrayKeys) { RUNTIME_FUNCTION(MaybeObject*, Runtime_LookupAccessor) { - SealHandleScope shs(isolate); + HandleScope scope(isolate); ASSERT(args.length() == 3); - CONVERT_ARG_CHECKED(JSReceiver, receiver, 0); - CONVERT_ARG_CHECKED(Name, name, 1); + CONVERT_ARG_HANDLE_CHECKED(JSReceiver, receiver, 0); + CONVERT_ARG_HANDLE_CHECKED(Name, name, 1); CONVERT_SMI_ARG_CHECKED(flag, 2); AccessorComponent component = flag == 0 ? 
ACCESSOR_GETTER : ACCESSOR_SETTER; if (!receiver->IsJSObject()) return isolate->heap()->undefined_value(); - return JSObject::cast(receiver)->LookupAccessor(name, component); + Handle<Object> result = + JSObject::GetAccessor(Handle<JSObject>::cast(receiver), name, component); + RETURN_IF_EMPTY_HANDLE(isolate, result); + return *result; } @@ -10674,19 +10733,20 @@ static MaybeObject* DebugLookupResultValue(Heap* heap, case CALLBACKS: { Object* structure = result->GetCallbackObject(); if (structure->IsForeign() || structure->IsAccessorInfo()) { - MaybeObject* maybe_value = result->holder()->GetPropertyWithCallback( - receiver, structure, name); - if (!maybe_value->ToObject(&value)) { - if (maybe_value->IsRetryAfterGC()) return maybe_value; - ASSERT(maybe_value->IsException()); - maybe_value = heap->isolate()->pending_exception(); + Isolate* isolate = heap->isolate(); + HandleScope scope(isolate); + Handle<Object> value = JSObject::GetPropertyWithCallback( + handle(result->holder(), isolate), + handle(receiver, isolate), + handle(structure, isolate), + handle(name, isolate)); + if (value.is_null()) { + MaybeObject* exception = heap->isolate()->pending_exception(); heap->isolate()->clear_pending_exception(); - if (caught_exception != NULL) { - *caught_exception = true; - } - return maybe_value; + if (caught_exception != NULL) *caught_exception = true; + return exception; } - return value; + return *value; } else { return heap->undefined_value(); } @@ -10874,7 +10934,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugNamedInterceptorPropertyValue) { CONVERT_ARG_HANDLE_CHECKED(Name, name, 1); PropertyAttributes attributes; - return obj->GetPropertyWithInterceptor(*obj, *name, &attributes); + Handle<Object> result = + JSObject::GetPropertyWithInterceptor(obj, obj, name, &attributes); + RETURN_IF_EMPTY_HANDLE(isolate, result); + return *result; } @@ -11391,8 +11454,8 @@ static Handle<JSObject> MaterializeLocalContext(Isolate* isolate, // Third fill all context locals. Handle<Context> frame_context(Context::cast(frame->context())); Handle<Context> function_context(frame_context->declaration_context()); - if (!scope_info->CopyContextLocalsToScopeObject( - isolate, function_context, target)) { + if (!ScopeInfo::CopyContextLocalsToScopeObject( + scope_info, function_context, target)) { return Handle<JSObject>(); } @@ -11515,7 +11578,7 @@ static bool SetLocalVariableValue(Isolate* isolate, !function_context->IsNativeContext()) { Handle<JSObject> ext(JSObject::cast(function_context->extension())); - if (ext->HasProperty(*variable_name)) { + if (JSReceiver::HasProperty(ext, variable_name)) { // We don't expect this to do anything except replacing // property value. SetProperty(isolate, @@ -11549,8 +11612,8 @@ static Handle<JSObject> MaterializeClosure(Isolate* isolate, isolate->factory()->NewJSObject(isolate->object_function()); // Fill all context locals to the context extension. - if (!scope_info->CopyContextLocalsToScopeObject( - isolate, context, closure_scope)) { + if (!ScopeInfo::CopyContextLocalsToScopeObject( + scope_info, context, closure_scope)) { return Handle<JSObject>(); } @@ -11603,7 +11666,7 @@ static bool SetClosureVariableValue(Isolate* isolate, // be variables introduced by eval. if (context->has_extension()) { Handle<JSObject> ext(JSObject::cast(context->extension())); - if (ext->HasProperty(*variable_name)) { + if (JSReceiver::HasProperty(ext, variable_name)) { // We don't expect this to do anything except replacing property value. 
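DebugLookupResultValue above now reads callback properties through the handlified getter: an empty result handle means a pending exception, which the debugger swallows and surfaces through the caught_exception out-parameter rather than propagating. A standalone model of that capture (global state stands in for the isolate):

#include <cstdio>
#include <optional>
#include <string>

static std::string g_pending_exception;  // stand-in for isolate state

static std::optional<int> GetPropertyWithCallback(bool throws) {
  if (throws) { g_pending_exception = "boom"; return std::nullopt; }
  return 7;
}

// Debugger-style lookup: the exception never escapes; it is captured,
// cleared, and reported via |caught_exception|.
static int DebugLookupValue(bool throws, bool* caught_exception) {
  std::optional<int> value = GetPropertyWithCallback(throws);
  if (!value) {
    g_pending_exception.clear();        // clear_pending_exception()
    if (caught_exception) *caught_exception = true;
    return -1;                          // really the exception object
  }
  return *value;
}

int main() {
  bool caught = false;
  int v = DebugLookupValue(true, &caught);
  printf("%d %d\n", v, caught);  // -1 1
}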
SetProperty(isolate, ext, @@ -11670,8 +11733,8 @@ static Handle<JSObject> MaterializeBlockScope( isolate->factory()->NewJSObject(isolate->object_function()); // Fill all context locals. - if (!scope_info->CopyContextLocalsToScopeObject( - isolate, context, block_scope)) { + if (!ScopeInfo::CopyContextLocalsToScopeObject( + scope_info, context, block_scope)) { return Handle<JSObject>(); } @@ -11693,8 +11756,8 @@ static Handle<JSObject> MaterializeModuleScope( isolate->factory()->NewJSObject(isolate->object_function()); // Fill all context locals. - if (!scope_info->CopyContextLocalsToScopeObject( - isolate, context, module_scope)) { + if (!ScopeInfo::CopyContextLocalsToScopeObject( + scope_info, context, module_scope)) { return Handle<JSObject>(); } @@ -12646,7 +12709,8 @@ static Handle<JSObject> MaterializeArgumentsObject( // Do not materialize the arguments object for eval or top-level code. // Skip if "arguments" is already taken. if (!function->shared()->is_function() || - target->HasLocalProperty(isolate->heap()->arguments_string())) { + JSReceiver::HasLocalProperty(target, + isolate->factory()->arguments_string())) { return target; } @@ -14533,22 +14597,23 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_IsObserved) { RUNTIME_FUNCTION(MaybeObject*, Runtime_SetIsObserved) { - SealHandleScope shs(isolate); + HandleScope scope(isolate); ASSERT(args.length() == 1); - CONVERT_ARG_CHECKED(JSReceiver, obj, 0); + CONVERT_ARG_HANDLE_CHECKED(JSReceiver, obj, 0); if (obj->IsJSGlobalProxy()) { Object* proto = obj->GetPrototype(); if (proto->IsNull()) return isolate->heap()->undefined_value(); ASSERT(proto->IsJSGlobalObject()); - obj = JSReceiver::cast(proto); + obj = handle(JSReceiver::cast(proto)); } if (obj->IsJSProxy()) return isolate->heap()->undefined_value(); ASSERT(!(obj->map()->is_observed() && obj->IsJSObject() && - JSObject::cast(obj)->HasFastElements())); + Handle<JSObject>::cast(obj)->HasFastElements())); ASSERT(obj->IsJSObject()); - return JSObject::cast(obj)->SetObserved(isolate); + JSObject::SetObserved(Handle<JSObject>::cast(obj)); + return isolate->heap()->undefined_value(); } @@ -14652,7 +14717,7 @@ static MaybeObject* ArrayConstructorCommon(Isolate* isolate, Handle<Cell> cell = Handle<Cell>::cast(type_info); Handle<AllocationSite> site = Handle<AllocationSite>( AllocationSite::cast(cell->value()), isolate); - ASSERT(!site->IsLiteralSite()); + ASSERT(!site->SitePointsToLiteral()); ElementsKind to_kind = site->GetElementsKind(); if (holey && !IsFastHoleyElementsKind(to_kind)) { to_kind = GetHoleyElementsKind(to_kind); @@ -14786,8 +14851,7 @@ const Runtime::Function* Runtime::FunctionForId(Runtime::FunctionId id) { } -void Runtime::PerformGC(Object* result) { - Isolate* isolate = Isolate::Current(); +void Runtime::PerformGC(Object* result, Isolate* isolate) { Failure* failure = Failure::cast(result); if (failure->IsRetryAfterGC()) { if (isolate->heap()->new_space()->AddFreshPage()) { diff --git a/deps/v8/src/runtime.h b/deps/v8/src/runtime.h index 60c6677116..1b7e32e7a1 100644 --- a/deps/v8/src/runtime.h +++ b/deps/v8/src/runtime.h @@ -91,7 +91,6 @@ namespace internal { F(TryInstallRecompiledCode, 1, 1) \ F(NotifyDeoptimized, 1, 1) \ F(NotifyStubFailure, 0, 1) \ - F(NotifyOSR, 0, 1) \ F(DeoptimizeFunction, 1, 1) \ F(ClearFunctionTypeFeedback, 1, 1) \ F(RunningInSimulator, 0, 1) \ @@ -100,6 +99,7 @@ namespace internal { F(NeverOptimizeFunction, 1, 1) \ F(GetOptimizationStatus, -1, 1) \ F(GetOptimizationCount, 1, 1) \ + F(UnblockConcurrentRecompilation, 0, 1) \ 
F(CompileForOnStackReplacement, 2, 1) \ F(SetAllocationTimeout, 2, 1) \ F(AllocateInNewSpace, 1, 1) \ @@ -299,7 +299,6 @@ namespace internal { /* Literals */ \ F(MaterializeRegExpLiteral, 4, 1)\ F(CreateObjectLiteral, 4, 1) \ - F(CreateObjectLiteralShallow, 4, 1) \ F(CreateArrayLiteral, 3, 1) \ F(CreateArrayLiteralShallow, 3, 1) \ \ @@ -364,6 +363,7 @@ namespace internal { F(ArrayBufferInitialize, 2, 1)\ F(ArrayBufferGetByteLength, 1, 1)\ F(ArrayBufferSliceImpl, 3, 1) \ + F(ArrayBufferIsView, 1, 1) \ \ F(TypedArrayInitialize, 5, 1) \ F(TypedArrayInitializeFromArrayLike, 4, 1) \ @@ -838,7 +838,7 @@ class Runtime : public AllStatic { JSArrayBuffer* phantom_array_buffer); // Helper functions used stubs. - static void PerformGC(Object* result); + static void PerformGC(Object* result, Isolate* isolate); // Used in runtime.cc and hydrogen's VisitArrayLiteral. static Handle<Object> CreateArrayLiteralBoilerplate( diff --git a/deps/v8/src/runtime.js b/deps/v8/src/runtime.js index 5339570ef6..ce11c37079 100644 --- a/deps/v8/src/runtime.js +++ b/deps/v8/src/runtime.js @@ -526,8 +526,8 @@ function ToNumber(x) { : %StringToNumber(x); } if (IS_BOOLEAN(x)) return x ? 1 : 0; - if (IS_UNDEFINED(x)) return $NaN; - if (IS_SYMBOL(x)) return $NaN; + if (IS_UNDEFINED(x)) return NAN; + if (IS_SYMBOL(x)) return NAN; return (IS_NULL(x)) ? 0 : ToNumber(%DefaultNumber(x)); } @@ -537,8 +537,8 @@ function NonNumberToNumber(x) { : %StringToNumber(x); } if (IS_BOOLEAN(x)) return x ? 1 : 0; - if (IS_UNDEFINED(x)) return $NaN; - if (IS_SYMBOL(x)) return $NaN; + if (IS_UNDEFINED(x)) return NAN; + if (IS_SYMBOL(x)) return NAN; return (IS_NULL(x)) ? 0 : ToNumber(%DefaultNumber(x)); } diff --git a/deps/v8/src/sampler.cc b/deps/v8/src/sampler.cc index 0aaa1e9b77..684ef486c7 100644 --- a/deps/v8/src/sampler.cc +++ b/deps/v8/src/sampler.cc @@ -216,11 +216,7 @@ class Sampler::PlatformData : public PlatformDataCommon { class SimulatorHelper { public: inline bool Init(Sampler* sampler, Isolate* isolate) { - ThreadId thread_id = sampler->platform_data()->profiled_thread_id(); - Isolate::PerIsolateThreadData* per_thread_data = isolate-> - FindPerThreadDataForThread(thread_id); - if (!per_thread_data) return false; - simulator_ = per_thread_data->simulator(); + simulator_ = isolate->thread_local_top()->simulator_; // Check if there is active simulator. return simulator_ != NULL; } diff --git a/deps/v8/src/scanner.cc b/deps/v8/src/scanner.cc index 8b7cb569bd..26f840b23a 100644 --- a/deps/v8/src/scanner.cc +++ b/deps/v8/src/scanner.cc @@ -27,10 +27,14 @@ // Features shared by parsing and pre-parsing scanners. 
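The runtime.js hunk above swaps the $NaN binding for a NAN constant without changing the conversion table: undefined and symbols become NaN, booleans 0 or 1, null 0. The same table in standalone C++ using the standard quiet NaN; string parsing and DefaultNumber are omitted from this sketch, and the tags are invented:

#include <cmath>
#include <cstdio>
#include <limits>

enum class Tag { kUndefined, kNull, kBoolean, kSymbol };

// Mirrors the ToNumber fallback cases for non-number inputs.
static double ToNumber(Tag tag, bool bool_value = false) {
  switch (tag) {
    case Tag::kBoolean:   return bool_value ? 1 : 0;
    case Tag::kUndefined: return std::numeric_limits<double>::quiet_NaN();
    case Tag::kSymbol:    return std::numeric_limits<double>::quiet_NaN();
    case Tag::kNull:      return 0;
  }
  return 0;  // unreachable
}

int main() {
  printf("%d %g\n", std::isnan(ToNumber(Tag::kUndefined)),
         ToNumber(Tag::kBoolean, true));  // 1 1
}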
+#include <cmath> + #include "scanner.h" #include "../include/v8stdint.h" #include "char-predicates-inl.h" +#include "conversions-inl.h" +#include "list-inl.h" namespace v8 { namespace internal { @@ -1108,4 +1112,140 @@ bool Scanner::ScanRegExpFlags() { return true; } + +int DuplicateFinder::AddAsciiSymbol(Vector<const char> key, int value) { + return AddSymbol(Vector<const byte>::cast(key), true, value); +} + + +int DuplicateFinder::AddUtf16Symbol(Vector<const uint16_t> key, int value) { + return AddSymbol(Vector<const byte>::cast(key), false, value); +} + + +int DuplicateFinder::AddSymbol(Vector<const byte> key, + bool is_ascii, + int value) { + uint32_t hash = Hash(key, is_ascii); + byte* encoding = BackupKey(key, is_ascii); + HashMap::Entry* entry = map_.Lookup(encoding, hash, true); + int old_value = static_cast<int>(reinterpret_cast<intptr_t>(entry->value)); + entry->value = + reinterpret_cast<void*>(static_cast<intptr_t>(value | old_value)); + return old_value; +} + + +int DuplicateFinder::AddNumber(Vector<const char> key, int value) { + ASSERT(key.length() > 0); + // Quick check for already being in canonical form. + if (IsNumberCanonical(key)) { + return AddAsciiSymbol(key, value); + } + + int flags = ALLOW_HEX | ALLOW_OCTAL | ALLOW_IMPLICIT_OCTAL | ALLOW_BINARY; + double double_value = StringToDouble(unicode_constants_, key, flags, 0.0); + int length; + const char* string; + if (!std::isfinite(double_value)) { + string = "Infinity"; + length = 8; // strlen("Infinity"); + } else { + string = DoubleToCString(double_value, + Vector<char>(number_buffer_, kBufferSize)); + length = StrLength(string); + } + return AddSymbol(Vector<const byte>(reinterpret_cast<const byte*>(string), + length), true, value); +} + + +bool DuplicateFinder::IsNumberCanonical(Vector<const char> number) { + // Test for a safe approximation of number literals that are already + // in canonical form: max 15 digits, no leading zeroes, except an + // integer part that is a single zero, and no trailing zeros below + // the decimal point. + int pos = 0; + int length = number.length(); + if (number.length() > 15) return false; + if (number[pos] == '0') { + pos++; + } else { + while (pos < length && + static_cast<unsigned>(number[pos] - '0') <= ('9' - '0')) pos++; + } + if (length == pos) return true; + if (number[pos] != '.') return false; + pos++; + bool invalid_last_digit = true; + while (pos < length) { + byte digit = number[pos] - '0'; + if (digit > '9' - '0') return false; + invalid_last_digit = (digit == 0); + pos++; + } + return !invalid_last_digit; +} + + +uint32_t DuplicateFinder::Hash(Vector<const byte> key, bool is_ascii) { + // Primitive hash function, almost identical to the one used + // for strings (except that it's seeded by the length and ASCII-ness). + int length = key.length(); + uint32_t hash = (length << 1) | (is_ascii ? 1 : 0) ; + for (int i = 0; i < length; i++) { + uint32_t c = key[i]; + hash = (hash + c) * 1025; + hash ^= (hash >> 6); + } + return hash; +} + + +bool DuplicateFinder::Match(void* first, void* second) { + // Decode lengths. + // Length + ASCII-bit is encoded as base 128, most significant heptet first, + // with a 8th bit being non-zero while there are more heptets. + // The value encodes the number of bytes following, and whether the original + // was ASCII. 
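The interesting part of AddSymbol below is its return value: the hash-map entry accumulates a bitwise OR of every value the key was registered with, and the previous bits come back to the caller, which can therefore tell both that a key repeated and in what role. A standalone model with std::unordered_map; the kind flags are illustrative, not the parser's actual encoding:

#include <cstdio>
#include <string>
#include <unordered_map>

// Toy DuplicateFinder: the stored value is an OR of all kinds a key was
// registered with; the return value is the set of kinds seen *before*.
class DuplicateFinder {
 public:
  int AddSymbol(const std::string& key, int value) {
    int old_value = map_[key];  // 0 when the key is new
    map_[key] = old_value | value;
    return old_value;
  }
 private:
  std::unordered_map<std::string, int> map_;
};

int main() {
  enum { kValue = 1, kGetter = 2, kSetter = 4 };  // illustrative kinds
  DuplicateFinder finder;
  printf("%d\n", finder.AddSymbol("p", kGetter));  // 0: first sighting
  printf("%d\n", finder.AddSymbol("p", kSetter));  // 2: was a getter
  printf("%d\n", finder.AddSymbol("p", kValue));   // 6: getter|setter
}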
+ byte* s1 = reinterpret_cast<byte*>(first); + byte* s2 = reinterpret_cast<byte*>(second); + uint32_t length_ascii_field = 0; + byte c1; + do { + c1 = *s1; + if (c1 != *s2) return false; + length_ascii_field = (length_ascii_field << 7) | (c1 & 0x7f); + s1++; + s2++; + } while ((c1 & 0x80) != 0); + int length = static_cast<int>(length_ascii_field >> 1); + return memcmp(s1, s2, length) == 0; +} + + +byte* DuplicateFinder::BackupKey(Vector<const byte> bytes, + bool is_ascii) { + uint32_t ascii_length = (bytes.length() << 1) | (is_ascii ? 1 : 0); + backing_store_.StartSequence(); + // Emit ascii_length as base-128 encoded number, with the 7th bit set + // on the byte of every heptet except the last, least significant, one. + if (ascii_length >= (1 << 7)) { + if (ascii_length >= (1 << 14)) { + if (ascii_length >= (1 << 21)) { + if (ascii_length >= (1 << 28)) { + backing_store_.Add(static_cast<byte>((ascii_length >> 28) | 0x80)); + } + backing_store_.Add(static_cast<byte>((ascii_length >> 21) | 0x80u)); + } + backing_store_.Add(static_cast<byte>((ascii_length >> 14) | 0x80u)); + } + backing_store_.Add(static_cast<byte>((ascii_length >> 7) | 0x80u)); + } + backing_store_.Add(static_cast<byte>(ascii_length & 0x7f)); + + backing_store_.AddBlock(bytes); + return backing_store_.EndSequence().start(); +} + } } // namespace v8::internal diff --git a/deps/v8/src/scanner.h b/deps/v8/src/scanner.h index d7328085b7..3cefc833ac 100644 --- a/deps/v8/src/scanner.h +++ b/deps/v8/src/scanner.h @@ -34,6 +34,8 @@ #include "char-predicates.h" #include "checks.h" #include "globals.h" +#include "hashmap.h" +#include "list.h" #include "token.h" #include "unicode-inl.h" #include "utils.h" @@ -121,9 +123,10 @@ class Utf16CharacterStream { }; -class UnicodeCache { // --------------------------------------------------------------------- // Caching predicates used by scanners. + +class UnicodeCache { public: UnicodeCache() {} typedef unibrow::Utf8Decoder<512> Utf8Decoder; @@ -148,6 +151,56 @@ class UnicodeCache { }; +// --------------------------------------------------------------------- +// DuplicateFinder discovers duplicate symbols. + +class DuplicateFinder { + public: + explicit DuplicateFinder(UnicodeCache* constants) + : unicode_constants_(constants), + backing_store_(16), + map_(&Match) { } + + int AddAsciiSymbol(Vector<const char> key, int value); + int AddUtf16Symbol(Vector<const uint16_t> key, int value); + // Add a a number literal by converting it (if necessary) + // to the string that ToString(ToNumber(literal)) would generate. + // and then adding that string with AddAsciiSymbol. + // This string is the actual value used as key in an object literal, + // and the one that must be different from the other keys. + int AddNumber(Vector<const char> key, int value); + + private: + int AddSymbol(Vector<const byte> key, bool is_ascii, int value); + // Backs up the key and its length in the backing store. + // The backup is stored with a base 127 encoding of the + // length (plus a bit saying whether the string is ASCII), + // followed by the bytes of the key. + byte* BackupKey(Vector<const byte> key, bool is_ascii); + + // Compare two encoded keys (both pointing into the backing store) + // for having the same base-127 encoded lengths and ASCII-ness, + // and then having the same 'length' bytes following. + static bool Match(void* first, void* second); + // Creates a hash from a sequence of bytes. 
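BackupKey and Match above are an encode/decode pair for the same variable-length integer: most-significant heptet first, with 0x80 as the continuation bit on every heptet except the last (the reverse ordering of LEB128). A self-contained version of the pair:

#include <cstdint>
#include <cstdio>
#include <vector>

typedef uint8_t byte;

// Encode most-significant heptet first; 0x80 marks "more heptets follow".
static void EncodeLength(uint32_t n, std::vector<byte>* out) {
  if (n >= (1u << 7)) {
    if (n >= (1u << 14)) {
      if (n >= (1u << 21)) {
        if (n >= (1u << 28)) out->push_back((n >> 28) | 0x80);
        out->push_back(((n >> 21) & 0x7f) | 0x80);
      }
      out->push_back(((n >> 14) & 0x7f) | 0x80);
    }
    out->push_back(((n >> 7) & 0x7f) | 0x80);
  }
  out->push_back(n & 0x7f);
}

// Decode by accumulating heptets until one arrives without the high bit.
static uint32_t DecodeLength(const byte* p) {
  uint32_t n = 0;
  byte c;
  do { c = *p++; n = (n << 7) | (c & 0x7f); } while (c & 0x80);
  return n;
}

int main() {
  std::vector<byte> buf;
  EncodeLength(300, &buf);
  printf("%u (%zu bytes)\n", DecodeLength(buf.data()), buf.size());
}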
+ static uint32_t Hash(Vector<const byte> key, bool is_ascii); + // Checks whether a string containing a JS number is its canonical + // form. + static bool IsNumberCanonical(Vector<const char> key); + + // Size of buffer. Sufficient for using it to call DoubleToCString in + // from conversions.h. + static const int kBufferSize = 100; + + UnicodeCache* unicode_constants_; + // Backing store used to store strings used as hashmap keys. + SequenceCollector<unsigned char> backing_store_; + HashMap map_; + // Buffer used for string->number->canonical string conversions. + char number_buffer_[kBufferSize]; +}; + + // ---------------------------------------------------------------------------- // LiteralBuffer - Collector of chars of literals. diff --git a/deps/v8/src/scopeinfo.cc b/deps/v8/src/scopeinfo.cc index ba138f2add..f1ae876ca3 100644 --- a/deps/v8/src/scopeinfo.cc +++ b/deps/v8/src/scopeinfo.cc @@ -363,14 +363,14 @@ int ScopeInfo::FunctionContextSlotIndex(String* name, VariableMode* mode) { } -bool ScopeInfo::CopyContextLocalsToScopeObject( - Isolate* isolate, - Handle<Context> context, - Handle<JSObject> scope_object) { - int local_count = ContextLocalCount(); +bool ScopeInfo::CopyContextLocalsToScopeObject(Handle<ScopeInfo> scope_info, + Handle<Context> context, + Handle<JSObject> scope_object) { + Isolate* isolate = scope_info->GetIsolate(); + int local_count = scope_info->ContextLocalCount(); if (local_count == 0) return true; // Fill all context locals to the context extension. - int start = ContextLocalNameEntriesIndex(); + int start = scope_info->ContextLocalNameEntriesIndex(); int end = start + local_count; for (int i = start; i < end; ++i) { int context_index = Context::MIN_CONTEXT_SLOTS + i - start; @@ -378,7 +378,7 @@ bool ScopeInfo::CopyContextLocalsToScopeObject( isolate, SetProperty(isolate, scope_object, - Handle<String>(String::cast(get(i))), + Handle<String>(String::cast(scope_info->get(i))), Handle<Object>(context->get(context_index), isolate), ::NONE, kNonStrictMode), diff --git a/deps/v8/src/scopes.cc b/deps/v8/src/scopes.cc index ce1741a623..ee327fb79f 100644 --- a/deps/v8/src/scopes.cc +++ b/deps/v8/src/scopes.cc @@ -437,8 +437,8 @@ Variable* Scope::LookupFunctionVar(Handle<String> name, this, name, mode, true /* is valid LHS */, Variable::NORMAL, kCreatedInitialized); VariableProxy* proxy = factory->NewVariableProxy(var); - VariableDeclaration* declaration = - factory->NewVariableDeclaration(proxy, mode, this); + VariableDeclaration* declaration = factory->NewVariableDeclaration( + proxy, mode, this, RelocInfo::kNoPosition); DeclareFunctionVar(declaration); var->AllocateTo(Variable::CONTEXT, index); return var; diff --git a/deps/v8/src/serialize.cc b/deps/v8/src/serialize.cc index d05dd26122..7ed36665e2 100644 --- a/deps/v8/src/serialize.cc +++ b/deps/v8/src/serialize.cc @@ -532,55 +532,59 @@ void ExternalReferenceTable::PopulateTable(Isolate* isolate) { UNCLASSIFIED, 52, "cpu_features"); - Add(ExternalReference::new_space_allocation_top_address(isolate).address(), - UNCLASSIFIED, - 53, - "Heap::NewSpaceAllocationTopAddress"); - Add(ExternalReference::new_space_allocation_limit_address(isolate).address(), - UNCLASSIFIED, - 54, - "Heap::NewSpaceAllocationLimitAddress"); Add(ExternalReference(Runtime::kAllocateInNewSpace, isolate).address(), UNCLASSIFIED, - 55, + 53, "Runtime::AllocateInNewSpace"); Add(ExternalReference::old_pointer_space_allocation_top_address( isolate).address(), UNCLASSIFIED, - 56, + 54, "Heap::OldPointerSpaceAllocationTopAddress"); 
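The long renumbering around this hunk exists because the external-reference table maps raw addresses to stable small ids for the snapshot, so inserting or removing an entry shifts every id after it. A minimal model of the two-way mapping; the real entries also carry a type class and a debug name, and the auto-incrementing id here is a simplification:

#include <cstdio>
#include <unordered_map>
#include <vector>

// Toy external-reference table: ids stay stable only if every Add()
// keeps its position, which is why the renumbering must cover the
// whole block consistently.
class ExternalReferenceTable {
 public:
  void Add(const void* address, const char* name) {
    ids_[address] = static_cast<int>(entries_.size());
    entries_.push_back(name);
  }
  int Encode(const void* address) const { return ids_.at(address); }
  const char* NameOfId(int id) const { return entries_[id]; }

 private:
  std::unordered_map<const void*, int> ids_;
  std::vector<const char*> entries_;
};

int main() {
  static int top, limit;  // stand-ins for allocation top/limit addresses
  ExternalReferenceTable table;
  table.Add(&top, "Heap::AllocationTopAddress");
  table.Add(&limit, "Heap::AllocationLimitAddress");
  printf("%d %s\n", table.Encode(&limit), table.NameOfId(1));
}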
Add(ExternalReference::old_pointer_space_allocation_limit_address( isolate).address(), UNCLASSIFIED, - 57, + 55, "Heap::OldPointerSpaceAllocationLimitAddress"); Add(ExternalReference(Runtime::kAllocateInOldPointerSpace, isolate).address(), UNCLASSIFIED, - 58, + 56, "Runtime::AllocateInOldPointerSpace"); Add(ExternalReference::old_data_space_allocation_top_address( isolate).address(), UNCLASSIFIED, - 59, + 57, "Heap::OldDataSpaceAllocationTopAddress"); Add(ExternalReference::old_data_space_allocation_limit_address( isolate).address(), UNCLASSIFIED, - 60, + 58, "Heap::OldDataSpaceAllocationLimitAddress"); Add(ExternalReference(Runtime::kAllocateInOldDataSpace, isolate).address(), UNCLASSIFIED, - 61, + 59, "Runtime::AllocateInOldDataSpace"); Add(ExternalReference::new_space_high_promotion_mode_active_address(isolate). address(), UNCLASSIFIED, - 62, + 60, "Heap::NewSpaceAllocationLimitAddress"); Add(ExternalReference::allocation_sites_list_address(isolate).address(), UNCLASSIFIED, - 63, + 61, "Heap::allocation_sites_list_address()"); + Add(ExternalReference::record_object_allocation_function(isolate).address(), + UNCLASSIFIED, + 62, + "HeapProfiler::RecordObjectAllocationFromMasm"); + Add(ExternalReference::address_of_uint32_bias().address(), + UNCLASSIFIED, + 63, + "uint32_bias"); + Add(ExternalReference::get_mark_code_as_executed_function(isolate).address(), + UNCLASSIFIED, + 64, + "Code::MarkCodeAsExecuted"); // Add a small set of deopt entry addresses to encoder without generating the // deopt table code, which isn't possible at deserialization time. @@ -835,6 +839,8 @@ void Deserializer::Deserialize(Isolate* isolate) { isolate_->heap()->undefined_value()); } + isolate_->heap()->InitializeWeakObjectToCodeTable(); + // Update data pointers to the external strings containing natives sources. for (int i = 0; i < Natives::GetBuiltinsCount(); i++) { Object* source = isolate_->heap()->natives_source_cache()->get(i); @@ -1284,7 +1290,6 @@ Serializer::Serializer(Isolate* isolate, SnapshotByteSink* sink) root_index_wave_front_(0) { // The serializer is meant to be used only to generate initial heap images // from a context in which there is only one isolate. 
- ASSERT(isolate_->IsDefaultIsolate()); for (int i = 0; i <= LAST_SPACE; i++) { fullness_[i] = 0; } @@ -1317,6 +1322,14 @@ void PartialSerializer::Serialize(Object** object) { } +bool Serializer::ShouldBeSkipped(Object** current) { + Object** roots = isolate()->heap()->roots_array_start(); + return current == &roots[Heap::kStoreBufferTopRootIndex] + || current == &roots[Heap::kStackLimitRootIndex] + || current == &roots[Heap::kRealStackLimitRootIndex]; +} + + void Serializer::VisitPointers(Object** start, Object** end) { Isolate* isolate = this->isolate();; @@ -1325,8 +1338,7 @@ void Serializer::VisitPointers(Object** start, Object** end) { root_index_wave_front_ = Max(root_index_wave_front_, static_cast<intptr_t>(current - start)); } - if (reinterpret_cast<Address>(current) == - isolate->heap()->store_buffer()->TopAddress()) { + if (ShouldBeSkipped(current)) { sink_->Put(kSkip, "Skip"); sink_->PutInt(kPointerSize, "SkipOneWord"); } else if ((*current)->IsSmi()) { @@ -1666,19 +1678,15 @@ void Serializer::ObjectSerializer::VisitEmbeddedPointer(RelocInfo* rinfo) { } -void Serializer::ObjectSerializer::VisitExternalReferences(Address* start, - Address* end) { - Address references_start = reinterpret_cast<Address>(start); +void Serializer::ObjectSerializer::VisitExternalReference(Address* p) { + Address references_start = reinterpret_cast<Address>(p); int skip = OutputRawData(references_start, kCanReturnSkipInsteadOfSkipping); - for (Address* current = start; current < end; current++) { - sink_->Put(kExternalReference + kPlain + kStartOfObject, "ExternalRef"); - sink_->PutInt(skip, "SkipB4ExternalRef"); - skip = 0; - int reference_id = serializer_->EncodeExternalReference(*current); - sink_->PutInt(reference_id, "reference id"); - } - bytes_processed_so_far_ += static_cast<int>((end - start) * kPointerSize); + sink_->Put(kExternalReference + kPlain + kStartOfObject, "ExternalRef"); + sink_->PutInt(skip, "SkipB4ExternalRef"); + int reference_id = serializer_->EncodeExternalReference(*p); + sink_->PutInt(reference_id, "reference id"); + bytes_processed_so_far_ += kPointerSize; } diff --git a/deps/v8/src/serialize.h b/deps/v8/src/serialize.h index 020a744fc0..47627ac2dd 100644 --- a/deps/v8/src/serialize.h +++ b/deps/v8/src/serialize.h @@ -339,10 +339,6 @@ class Deserializer: public SerializerDeserializer { private: virtual void VisitPointers(Object** start, Object** end); - virtual void VisitExternalReferences(Address* start, Address* end) { - UNREACHABLE(); - } - virtual void VisitRuntimeEntry(RelocInfo* rinfo) { UNREACHABLE(); } @@ -366,6 +362,10 @@ class Deserializer: public SerializerDeserializer { Address Allocate(int space_index, int size) { Address address = high_water_[space_index]; high_water_[space_index] = address + size; + HeapProfiler* profiler = isolate_->heap_profiler(); + if (profiler->is_tracking_allocations()) { + profiler->NewObjectEvent(address, size); + } return address; } @@ -517,7 +517,7 @@ class Serializer : public SerializerDeserializer { void Serialize(); void VisitPointers(Object** start, Object** end); void VisitEmbeddedPointer(RelocInfo* target); - void VisitExternalReferences(Address* start, Address* end); + void VisitExternalReference(Address* p); void VisitExternalReference(RelocInfo* rinfo); void VisitCodeTarget(RelocInfo* target); void VisitCodeEntry(Address entry_address); @@ -569,6 +569,10 @@ class Serializer : public SerializerDeserializer { int SpaceAreaSize(int space); + // Some roots should not be serialized, because their actual value depends on + // 
absolute addresses and they are reset after deserialization, anyway. + bool ShouldBeSkipped(Object** current); + Isolate* isolate_; // Keep track of the fullness of each space in order to generate // relative addresses for back references. diff --git a/deps/v8/src/snapshot-common.cc b/deps/v8/src/snapshot-common.cc index 96034e352b..4bdf63cedd 100644 --- a/deps/v8/src/snapshot-common.cc +++ b/deps/v8/src/snapshot-common.cc @@ -102,10 +102,19 @@ bool Snapshot::Initialize(const char* snapshot_file) { DeleteArray(str); return success; } else if (size_ > 0) { + ElapsedTimer timer; + if (FLAG_profile_deserialization) { + timer.Start(); + } SnapshotByteSource source(raw_data_, raw_size_); Deserializer deserializer(&source); ReserveSpaceForLinkedInSnapshot(&deserializer); - return V8::Initialize(&deserializer); + bool success = V8::Initialize(&deserializer); + if (FLAG_profile_deserialization) { + double ms = timer.Elapsed().InMillisecondsF(); + PrintF("[Snapshot loading and deserialization took %0.3f ms]\n", ms); + } + return success; } return false; } diff --git a/deps/v8/src/spaces-inl.h b/deps/v8/src/spaces-inl.h index be2ae2a57d..d5c114c5b0 100644 --- a/deps/v8/src/spaces-inl.h +++ b/deps/v8/src/spaces-inl.h @@ -28,6 +28,7 @@ #ifndef V8_SPACES_INL_H_ #define V8_SPACES_INL_H_ +#include "heap-profiler.h" #include "isolate.h" #include "spaces.h" #include "v8memory.h" @@ -263,22 +264,28 @@ void Page::set_prev_page(Page* page) { // allocation) so it can be used by all the allocation functions and for all // the paged spaces. HeapObject* PagedSpace::AllocateLinearly(int size_in_bytes) { - Address current_top = allocation_info_.top; + Address current_top = allocation_info_.top(); Address new_top = current_top + size_in_bytes; - if (new_top > allocation_info_.limit) return NULL; + if (new_top > allocation_info_.limit()) return NULL; - allocation_info_.top = new_top; + allocation_info_.set_top(new_top); return HeapObject::FromAddress(current_top); } // Raw allocation. -MaybeObject* PagedSpace::AllocateRaw(int size_in_bytes) { +MaybeObject* PagedSpace::AllocateRaw(int size_in_bytes, + AllocationType event) { + HeapProfiler* profiler = heap()->isolate()->heap_profiler(); + HeapObject* object = AllocateLinearly(size_in_bytes); if (object != NULL) { if (identity() == CODE_SPACE) { SkipList::Update(object->address(), size_in_bytes); } + if (event == NEW_OBJECT && profiler->is_tracking_allocations()) { + profiler->NewObjectEvent(object->address(), size_in_bytes); + } return object; } @@ -291,6 +298,9 @@ MaybeObject* PagedSpace::AllocateRaw(int size_in_bytes) { if (identity() == CODE_SPACE) { SkipList::Update(object->address(), size_in_bytes); } + if (event == NEW_OBJECT && profiler->is_tracking_allocations()) { + profiler->NewObjectEvent(object->address(), size_in_bytes); + } return object; } @@ -299,6 +309,9 @@ MaybeObject* PagedSpace::AllocateRaw(int size_in_bytes) { if (identity() == CODE_SPACE) { SkipList::Update(object->address(), size_in_bytes); } + if (event == NEW_OBJECT && profiler->is_tracking_allocations()) { + profiler->NewObjectEvent(object->address(), size_in_bytes); + } return object; } @@ -311,31 +324,36 @@ MaybeObject* PagedSpace::AllocateRaw(int size_in_bytes) { MaybeObject* NewSpace::AllocateRaw(int size_in_bytes) { - Address old_top = allocation_info_.top; + Address old_top = allocation_info_.top(); #ifdef DEBUG // If we are stressing compaction we waste some memory in new space // in order to get more frequent GCs. 
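The new FLAG_profile_deserialization path in snapshot-common.cc brackets snapshot loading with an ElapsedTimer and prints the elapsed milliseconds. The same bracket in portable C++, with std::chrono standing in for V8's timer and a sleep standing in for the measured work:

#include <chrono>
#include <cstdio>
#include <thread>

int main() {
  bool profile = true;  // stand-in for FLAG_profile_deserialization

  std::chrono::steady_clock::time_point start;
  if (profile) start = std::chrono::steady_clock::now();

  // ... the work being measured (deserialization above) ...
  std::this_thread::sleep_for(std::chrono::milliseconds(5));

  if (profile) {
    double ms = std::chrono::duration<double, std::milli>(
                    std::chrono::steady_clock::now() - start).count();
    printf("[Snapshot loading and deserialization took %0.3f ms]\n", ms);
  }
}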
if (FLAG_stress_compaction && !heap()->linear_allocation()) { - if (allocation_info_.limit - old_top >= size_in_bytes * 4) { + if (allocation_info_.limit() - old_top >= size_in_bytes * 4) { int filler_size = size_in_bytes * 4; for (int i = 0; i < filler_size; i += kPointerSize) { *(reinterpret_cast<Object**>(old_top + i)) = heap()->one_pointer_filler_map(); } old_top += filler_size; - allocation_info_.top += filler_size; + allocation_info_.set_top(allocation_info_.top() + filler_size); } } #endif - if (allocation_info_.limit - old_top < size_in_bytes) { + if (allocation_info_.limit() - old_top < size_in_bytes) { return SlowAllocateRaw(size_in_bytes); } - Object* obj = HeapObject::FromAddress(old_top); - allocation_info_.top += size_in_bytes; + HeapObject* obj = HeapObject::FromAddress(old_top); + allocation_info_.set_top(allocation_info_.top() + size_in_bytes); ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_); + HeapProfiler* profiler = heap()->isolate()->heap_profiler(); + if (profiler != NULL && profiler->is_tracking_allocations()) { + profiler->NewObjectEvent(obj->address(), size_in_bytes); + } + return obj; } diff --git a/deps/v8/src/spaces.cc b/deps/v8/src/spaces.cc index 2faf41912e..fe5eeb5e43 100644 --- a/deps/v8/src/spaces.cc +++ b/deps/v8/src/spaces.cc @@ -29,6 +29,7 @@ #include "macro-assembler.h" #include "mark-compact.h" +#include "msan.h" #include "platform.h" namespace v8 { @@ -717,6 +718,7 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size, executable, owner); result->set_reserved_memory(&reservation); + MSAN_MEMORY_IS_INITIALIZED(base, chunk_size); return result; } @@ -958,8 +960,8 @@ PagedSpace::PagedSpace(Heap* heap, * AreaSize(); accounting_stats_.Clear(); - allocation_info_.top = NULL; - allocation_info_.limit = NULL; + allocation_info_.set_top(NULL); + allocation_info_.set_limit(NULL); anchor_.InitializeAsAnchor(this); } @@ -988,7 +990,7 @@ void PagedSpace::TearDown() { size_t PagedSpace::CommittedPhysicalMemory() { if (!VirtualMemory::HasLazyCommits()) return CommittedMemory(); - MemoryChunk::UpdateHighWaterMark(allocation_info_.top); + MemoryChunk::UpdateHighWaterMark(allocation_info_.top()); size_t size = 0; PageIterator it(this); while (it.has_next()) { @@ -1056,7 +1058,7 @@ intptr_t PagedSpace::SizeOfFirstPage() { int size = 0; switch (identity()) { case OLD_POINTER_SPACE: - size = 64 * kPointerSize * KB; + size = 72 * kPointerSize * KB; break; case OLD_DATA_SPACE: size = 192 * KB; @@ -1077,7 +1079,12 @@ intptr_t PagedSpace::SizeOfFirstPage() { // upgraded to handle small pages. size = AreaSize(); } else { - size = 384 * KB; +#if V8_TARGET_ARCH_MIPS + // TODO(plind): Investigate larger code stubs size on MIPS. 
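Returning to AllocateLinearly at the top of this hunk: it is a plain bump-pointer allocator over [top, limit), advancing top by the request and failing when it would cross the limit, leaving the slow path to consult free lists or grow the space. A self-contained version over a fixed buffer (alignment and the new profiler hooks omitted); the code-space sizing change continues just below.

#include <cstdio>

struct AllocationInfo {
  char* top;    // next free byte
  char* limit;  // one past the usable area
};

// Returns the old top on success, nullptr when the request does not fit
// (the caller then takes a slow path: free list, expansion, or GC).
static char* AllocateLinearly(AllocationInfo* info, int size_in_bytes) {
  char* current_top = info->top;
  char* new_top = current_top + size_in_bytes;
  if (new_top > info->limit) return nullptr;
  info->top = new_top;
  return current_top;
}

int main() {
  static char arena[64];
  AllocationInfo info = {arena, arena + sizeof(arena)};
  printf("%d\n", AllocateLinearly(&info, 48) != nullptr);  // 1
  printf("%d\n", AllocateLinearly(&info, 48) != nullptr);  // 0: past limit
}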
+ size = 480 * KB; +#else + size = 416 * KB; +#endif } break; default: @@ -1135,8 +1142,9 @@ void PagedSpace::ReleasePage(Page* page, bool unlink) { DecreaseUnsweptFreeBytes(page); } - if (Page::FromAllocationTop(allocation_info_.top) == page) { - allocation_info_.top = allocation_info_.limit = NULL; + if (Page::FromAllocationTop(allocation_info_.top()) == page) { + allocation_info_.set_top(NULL); + allocation_info_.set_limit(NULL); } if (unlink) { @@ -1163,12 +1171,12 @@ void PagedSpace::Verify(ObjectVisitor* visitor) { if (was_swept_conservatively_) return; bool allocation_pointer_found_in_space = - (allocation_info_.top == allocation_info_.limit); + (allocation_info_.top() == allocation_info_.limit()); PageIterator page_iterator(this); while (page_iterator.has_next()) { Page* page = page_iterator.next(); CHECK(page->owner() == this); - if (page == Page::FromAllocationTop(allocation_info_.top)) { + if (page == Page::FromAllocationTop(allocation_info_.top())) { allocation_pointer_found_in_space = true; } CHECK(page->WasSweptPrecisely()); @@ -1279,8 +1287,8 @@ void NewSpace::TearDown() { } start_ = NULL; - allocation_info_.top = NULL; - allocation_info_.limit = NULL; + allocation_info_.set_top(NULL); + allocation_info_.set_limit(NULL); to_space_.TearDown(); from_space_.TearDown(); @@ -1337,22 +1345,22 @@ void NewSpace::Shrink() { } } } - allocation_info_.limit = to_space_.page_high(); + allocation_info_.set_limit(to_space_.page_high()); ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_); } void NewSpace::UpdateAllocationInfo() { - MemoryChunk::UpdateHighWaterMark(allocation_info_.top); - allocation_info_.top = to_space_.page_low(); - allocation_info_.limit = to_space_.page_high(); + MemoryChunk::UpdateHighWaterMark(allocation_info_.top()); + allocation_info_.set_top(to_space_.page_low()); + allocation_info_.set_limit(to_space_.page_high()); // Lower limit during incremental marking. if (heap()->incremental_marking()->IsMarking() && inline_allocation_limit_step() != 0) { Address new_limit = - allocation_info_.top + inline_allocation_limit_step(); - allocation_info_.limit = Min(new_limit, allocation_info_.limit); + allocation_info_.top() + inline_allocation_limit_step(); + allocation_info_.set_limit(Min(new_limit, allocation_info_.limit())); } ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_); } @@ -1371,7 +1379,7 @@ void NewSpace::ResetAllocationInfo() { bool NewSpace::AddFreshPage() { - Address top = allocation_info_.top; + Address top = allocation_info_.top(); if (NewSpacePage::IsAtStart(top)) { // The current page is already empty. Don't try to make another. @@ -1403,15 +1411,16 @@ bool NewSpace::AddFreshPage() { MaybeObject* NewSpace::SlowAllocateRaw(int size_in_bytes) { - Address old_top = allocation_info_.top; + Address old_top = allocation_info_.top(); Address new_top = old_top + size_in_bytes; Address high = to_space_.page_high(); - if (allocation_info_.limit < high) { + if (allocation_info_.limit() < high) { // Incremental marking has lowered the limit to get a // chance to do a step. 
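During incremental marking, NewSpace keeps the allocation limit artificially below the real page end so that allocation periodically enters the slow path, which performs a marking step sized by the bytes allocated since the previous step and then raises the soft limit by one more increment, as the SlowAllocateRaw hunk continuing below does. A compact model of that throttle with invented sizes:

#include <algorithm>
#include <cstdio>

// Toy inline-allocation throttle: |limit| trails |high| (the real page
// end) by at most |step| bytes, forcing periodic slow-path entries.
struct Space {
  long top, limit, high, step, bytes_since_step;

  bool AllocateFast(long size) {
    if (limit - top < size) return false;  // take the slow path
    top += size;
    bytes_since_step += size;
    return true;
  }

  void SlowPath() {
    if (limit < high) {
      limit = std::min(limit + step, high);  // raise the soft limit
      printf("marking step after %ld bytes\n", bytes_since_step);
      bytes_since_step = 0;                  // incremental work done here
    }
  }
};

int main() {
  Space s{0, 32, 128, 32, 0};
  for (int i = 0; i < 8; i++) {
    while (!s.AllocateFast(16)) s.SlowPath();
  }
}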
- allocation_info_.limit = Min( - allocation_info_.limit + inline_allocation_limit_step_, + Address new_limit = Min( + allocation_info_.limit() + inline_allocation_limit_step_, high); + allocation_info_.set_limit(new_limit); int bytes_allocated = static_cast<int>(new_top - top_on_previous_step_); heap()->incremental_marking()->Step( bytes_allocated, IncrementalMarking::GC_VIA_STACK_GUARD); @@ -1520,20 +1529,18 @@ void SemiSpace::TearDown() { bool SemiSpace::Commit() { ASSERT(!is_committed()); int pages = capacity_ / Page::kPageSize; - Address end = start_ + maximum_capacity_; - Address start = end - pages * Page::kPageSize; - if (!heap()->isolate()->memory_allocator()->CommitBlock(start, + if (!heap()->isolate()->memory_allocator()->CommitBlock(start_, capacity_, executable())) { return false; } - NewSpacePage* page = anchor(); - for (int i = 1; i <= pages; i++) { + NewSpacePage* current = anchor(); + for (int i = 0; i < pages; i++) { NewSpacePage* new_page = - NewSpacePage::Initialize(heap(), end - i * Page::kPageSize, this); - new_page->InsertAfter(page); - page = new_page; + NewSpacePage::Initialize(heap(), start_ + i * Page::kPageSize, this); + new_page->InsertAfter(current); + current = new_page; } committed_ = true; @@ -1577,20 +1584,18 @@ bool SemiSpace::GrowTo(int new_capacity) { int pages_before = capacity_ / Page::kPageSize; int pages_after = new_capacity / Page::kPageSize; - Address end = start_ + maximum_capacity_; - Address start = end - new_capacity; size_t delta = new_capacity - capacity_; ASSERT(IsAligned(delta, OS::AllocateAlignment())); if (!heap()->isolate()->memory_allocator()->CommitBlock( - start, delta, executable())) { + start_ + capacity_, delta, executable())) { return false; } capacity_ = new_capacity; NewSpacePage* last_page = anchor()->prev_page(); ASSERT(last_page != anchor()); - for (int i = pages_before + 1; i <= pages_after; i++) { - Address page_address = end - i * Page::kPageSize; + for (int i = pages_before; i < pages_after; i++) { + Address page_address = start_ + i * Page::kPageSize; NewSpacePage* new_page = NewSpacePage::Initialize(heap(), page_address, this); @@ -1610,25 +1615,20 @@ bool SemiSpace::ShrinkTo(int new_capacity) { ASSERT(new_capacity >= initial_capacity_); ASSERT(new_capacity < capacity_); if (is_committed()) { - // Semispaces grow backwards from the end of their allocated capacity, - // so we find the before and after start addresses relative to the - // end of the space. 
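Commit and GrowTo used to lay semispace pages out backwards from the end of the reservation, as the deleted comment below still describes; after this change they run forward from start_, so page i lives at start_ + i * Page::kPageSize. The two addressing schemes side by side, with the constants invented for the demo:

#include <cstdint>
#include <cstdio>

int main() {
  const uintptr_t kPageSize = 0x1000;
  uintptr_t start = 0x100000;           // base of the reservation
  uintptr_t max_capacity = 8 * kPageSize;
  int pages = 4;                        // currently committed pages

  for (int i = 0; i < pages; i++) {
    // New scheme: grow forward from the base.
    uintptr_t forward = start + i * kPageSize;
    // Old scheme: grow backward from the end of the reservation
    // (the loop there ran 1..pages, hence i + 1 below).
    uintptr_t backward = (start + max_capacity) - (i + 1) * kPageSize;
    printf("page %d: forward %#lx, backward %#lx\n",
           i, (unsigned long)forward, (unsigned long)backward);
  }
}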
- Address space_end = start_ + maximum_capacity_; - Address old_start = space_end - capacity_; size_t delta = capacity_ - new_capacity; ASSERT(IsAligned(delta, OS::AllocateAlignment())); MemoryAllocator* allocator = heap()->isolate()->memory_allocator(); - if (!allocator->UncommitBlock(old_start, delta)) { + if (!allocator->UncommitBlock(start_ + new_capacity, delta)) { return false; } int pages_after = new_capacity / Page::kPageSize; NewSpacePage* new_last_page = - NewSpacePage::FromAddress(space_end - pages_after * Page::kPageSize); + NewSpacePage::FromAddress(start_ + (pages_after - 1) * Page::kPageSize); new_last_page->set_next_page(anchor()); anchor()->set_prev_page(new_last_page); - ASSERT((current_page_ <= first_page()) && (current_page_ >= new_last_page)); + ASSERT((current_page_ >= first_page()) && (current_page_ <= new_last_page)); } capacity_ = new_capacity; @@ -1975,7 +1975,7 @@ void NewSpace::RecordPromotion(HeapObject* obj) { size_t NewSpace::CommittedPhysicalMemory() { if (!VirtualMemory::HasLazyCommits()) return CommittedMemory(); - MemoryChunk::UpdateHighWaterMark(allocation_info_.top); + MemoryChunk::UpdateHighWaterMark(allocation_info_.top()); size_t size = to_space_.CommittedPhysicalMemory(); if (from_space_.is_committed()) { size += from_space_.CommittedPhysicalMemory(); @@ -2501,9 +2501,9 @@ bool NewSpace::ReserveSpace(int bytes) { Object* object = NULL; if (!maybe->ToObject(&object)) return false; HeapObject* allocation = HeapObject::cast(object); - Address top = allocation_info_.top; + Address top = allocation_info_.top(); if ((top - bytes) == allocation->address()) { - allocation_info_.top = allocation->address(); + allocation_info_.set_top(allocation->address()); return true; } // There may be a borderline case here where the allocation succeeded, but @@ -2549,9 +2549,9 @@ void PagedSpace::PrepareForMarkCompact() { bool PagedSpace::ReserveSpace(int size_in_bytes) { ASSERT(size_in_bytes <= AreaSize()); ASSERT(size_in_bytes == RoundSizeDownToObjectAlignment(size_in_bytes)); - Address current_top = allocation_info_.top; + Address current_top = allocation_info_.top(); Address new_top = current_top + size_in_bytes; - if (new_top <= allocation_info_.limit) return true; + if (new_top <= allocation_info_.limit()) return true; HeapObject* new_area = free_list_.Allocate(size_in_bytes); if (new_area == NULL) new_area = SlowAllocateRaw(size_in_bytes); @@ -2626,16 +2626,17 @@ bool PagedSpace::AdvanceSweeper(intptr_t bytes_to_sweep) { void PagedSpace::EvictEvacuationCandidatesFromFreeLists() { - if (allocation_info_.top >= allocation_info_.limit) return; + if (allocation_info_.top() >= allocation_info_.limit()) return; - if (Page::FromAllocationTop(allocation_info_.top)->IsEvacuationCandidate()) { + if (Page::FromAllocationTop(allocation_info_.top())-> + IsEvacuationCandidate()) { // Create filler object to keep page iterable if it was iterable. int remaining = - static_cast<int>(allocation_info_.limit - allocation_info_.top); - heap()->CreateFillerObjectAt(allocation_info_.top, remaining); + static_cast<int>(allocation_info_.limit() - allocation_info_.top()); + heap()->CreateFillerObjectAt(allocation_info_.top(), remaining); - allocation_info_.top = NULL; - allocation_info_.limit = NULL; + allocation_info_.set_top(NULL); + allocation_info_.set_limit(NULL); } } @@ -2685,6 +2686,7 @@ HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) { // Try to expand the space and allocate in the new next page. 
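
EvictEvacuationCandidatesFromFreeLists above relies on a filler trick: if the page holding the bump pointer becomes an evacuation candidate, the unused tail [top, limit) is plugged with a filler object so heap iteration still sees a contiguous run of valid objects, and the window is dropped. A condensed sketch of that shape, with a function pointer standing in for Heap::CreateFillerObjectAt:

#include <stdint.h>

typedef uintptr_t Address;

struct Window {
  Address top;
  Address limit;
};

// Plug the unused tail of the current allocation page with filler and
// drop the window; `fill` stands in for Heap::CreateFillerObjectAt.
inline void EvictWindow(Window* w, void (*fill)(Address, int)) {
  if (w->top >= w->limit) return;  // nothing left to plug
  fill(w->top, static_cast<int>(w->limit - w->top));
  w->top = 0;    // the patch sets both fields to NULL
  w->limit = 0;
}
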
if (Expand()) { + ASSERT(CountTotalPages() > 1 || size_in_bytes <= free_list_.available()); return free_list_.Allocate(size_in_bytes); } diff --git a/deps/v8/src/spaces.h b/deps/v8/src/spaces.h index 43f44a5c70..2cd92c59d8 100644 --- a/deps/v8/src/spaces.h +++ b/deps/v8/src/spaces.h @@ -1317,18 +1317,53 @@ class PageIterator BASE_EMBEDDED { // space. class AllocationInfo { public: - AllocationInfo() : top(NULL), limit(NULL) { + AllocationInfo() : top_(NULL), limit_(NULL) { } - Address top; // Current allocation top. - Address limit; // Current allocation limit. + INLINE(void set_top(Address top)) { + SLOW_ASSERT(top == NULL || + (reinterpret_cast<intptr_t>(top) & HeapObjectTagMask()) == 0); + top_ = top; + } + + INLINE(Address top()) const { + SLOW_ASSERT(top_ == NULL || + (reinterpret_cast<intptr_t>(top_) & HeapObjectTagMask()) == 0); + return top_; + } + + Address* top_address() { + return &top_; + } + + INLINE(void set_limit(Address limit)) { + SLOW_ASSERT(limit == NULL || + (reinterpret_cast<intptr_t>(limit) & HeapObjectTagMask()) == 0); + limit_ = limit; + } + + INLINE(Address limit()) const { + SLOW_ASSERT(limit_ == NULL || + (reinterpret_cast<intptr_t>(limit_) & HeapObjectTagMask()) == 0); + return limit_; + } + + Address* limit_address() { + return &limit_; + } #ifdef DEBUG bool VerifyPagedAllocation() { - return (Page::FromAllocationTop(top) == Page::FromAllocationTop(limit)) - && (top <= limit); + return (Page::FromAllocationTop(top_) == Page::FromAllocationTop(limit_)) + && (top_ <= limit_); } #endif + + private: + // Current allocation top. + Address top_; + // Current allocation limit. + Address limit_; }; @@ -1707,16 +1742,29 @@ class PagedSpace : public Space { virtual intptr_t Waste() { return accounting_stats_.Waste(); } // Returns the allocation pointer in this space. - Address top() { return allocation_info_.top; } - Address limit() { return allocation_info_.limit; } + Address top() { return allocation_info_.top(); } + Address limit() { return allocation_info_.limit(); } + + // The allocation top address. + Address* allocation_top_address() { + return allocation_info_.top_address(); + } - // The allocation top and limit addresses. - Address* allocation_top_address() { return &allocation_info_.top; } - Address* allocation_limit_address() { return &allocation_info_.limit; } + // The allocation limit address. + Address* allocation_limit_address() { + return allocation_info_.limit_address(); + } + + enum AllocationType { + NEW_OBJECT, + MOVE_OBJECT + }; // Allocate the requested number of bytes in the space if possible, return a // failure object if not. - MUST_USE_RESULT inline MaybeObject* AllocateRaw(int size_in_bytes); + MUST_USE_RESULT inline MaybeObject* AllocateRaw( + int size_in_bytes, + AllocationType event = NEW_OBJECT); virtual bool ReserveSpace(int bytes); @@ -1738,9 +1786,9 @@ class PagedSpace : public Space { void SetTop(Address top, Address limit) { ASSERT(top == limit || Page::FromAddress(top) == Page::FromAddress(limit - 1)); - MemoryChunk::UpdateHighWaterMark(allocation_info_.top); - allocation_info_.top = top; - allocation_info_.limit = limit; + MemoryChunk::UpdateHighWaterMark(allocation_info_.top()); + allocation_info_.set_top(top); + allocation_info_.set_limit(limit); } void Allocate(int bytes) { @@ -2381,9 +2429,15 @@ class NewSpace : public Space { // Return the address of the allocation pointer in the active semispace. 
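
The AllocationInfo rewrite above is the heart of this patch: the raw top/limit fields become private, and every read or write asserts the stored address is untagged, so a tagged HeapObject pointer can no longer be smuggled in as an allocation address. A minimal sketch of the pattern (assert and kTagMask are simplified stand-ins for SLOW_ASSERT and HeapObjectTagMask()):

#include <assert.h>
#include <stdint.h>

typedef uintptr_t Address;
static const uintptr_t kTagMask = 1;  // assumed stand-in for HeapObjectTagMask()

class AllocationWindowSketch {
 public:
  AllocationWindowSketch() : top_(0), limit_(0) {}

  void set_top(Address top) {
    assert(top == 0 || (top & kTagMask) == 0);  // must be untagged
    top_ = top;
  }

  Address top() const {
    assert(top_ == 0 || (top_ & kTagMask) == 0);
    return top_;
  }

  // Generated code patches allocation tops directly, hence the escape hatch.
  Address* top_address() { return &top_; }

 private:
  Address top_;
  Address limit_;  // limit()/set_limit()/limit_address() mirror the top trio
};
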
Address top() { - ASSERT(to_space_.current_page()->ContainsLimit(allocation_info_.top)); - return allocation_info_.top; + ASSERT(to_space_.current_page()->ContainsLimit(allocation_info_.top())); + return allocation_info_.top(); } + + void set_top(Address top) { + ASSERT(to_space_.current_page()->ContainsLimit(top)); + allocation_info_.set_top(top); + } + // Return the address of the first object in the active semispace. Address bottom() { return to_space_.space_start(); } @@ -2408,9 +2462,15 @@ class NewSpace : public Space { return reinterpret_cast<Address>(index << kPointerSizeLog2); } - // The allocation top and limit addresses. - Address* allocation_top_address() { return &allocation_info_.top; } - Address* allocation_limit_address() { return &allocation_info_.limit; } + // The allocation top and limit address. + Address* allocation_top_address() { + return allocation_info_.top_address(); + } + + // The allocation limit address. + Address* allocation_limit_address() { + return allocation_info_.limit_address(); + } MUST_USE_RESULT INLINE(MaybeObject* AllocateRaw(int size_in_bytes)); @@ -2420,13 +2480,14 @@ class NewSpace : public Space { void LowerInlineAllocationLimit(intptr_t step) { inline_allocation_limit_step_ = step; if (step == 0) { - allocation_info_.limit = to_space_.page_high(); + allocation_info_.set_limit(to_space_.page_high()); } else { - allocation_info_.limit = Min( - allocation_info_.top + inline_allocation_limit_step_, - allocation_info_.limit); + Address new_limit = Min( + allocation_info_.top() + inline_allocation_limit_step_, + allocation_info_.limit()); + allocation_info_.set_limit(new_limit); } - top_on_previous_step_ = allocation_info_.top; + top_on_previous_step_ = allocation_info_.top(); } // Get the extent of the inactive semispace (for use as a marking stack, @@ -2573,9 +2634,9 @@ class OldSpace : public PagedSpace { // For contiguous spaces, top should be in the space (or at the end) and limit // should be the end of the space. 
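
The macro that follows encodes the invariant stated in the comment above. The same check as a plain-function sketch (names simplified):

#include <stdint.h>

typedef uintptr_t Address;

inline bool SemiSpaceWindowOK(Address page_low, Address page_high,
                              Address top, Address limit) {
  return page_low <= top && top <= page_high && limit <= page_high;
}
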
#define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space) \ - SLOW_ASSERT((space).page_low() <= (info).top \ - && (info).top <= (space).page_high() \ - && (info).limit <= (space).page_high()) + SLOW_ASSERT((space).page_low() <= (info).top() \ + && (info).top() <= (space).page_high() \ + && (info).limit() <= (space).page_high()) // ----------------------------------------------------------------------------- diff --git a/deps/v8/src/store-buffer-inl.h b/deps/v8/src/store-buffer-inl.h index e1fcdee661..7e5432c841 100644 --- a/deps/v8/src/store-buffer-inl.h +++ b/deps/v8/src/store-buffer-inl.h @@ -41,6 +41,7 @@ Address StoreBuffer::TopAddress() { void StoreBuffer::Mark(Address addr) { ASSERT(!heap_->cell_space()->Contains(addr)); ASSERT(!heap_->code_space()->Contains(addr)); + ASSERT(!heap_->old_data_space()->Contains(addr)); Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top()); *top++ = addr; heap_->public_set_store_buffer_top(top); diff --git a/deps/v8/src/string.js b/deps/v8/src/string.js index cb82c16634..14b44ca41f 100644 --- a/deps/v8/src/string.js +++ b/deps/v8/src/string.js @@ -28,7 +28,6 @@ // This file relies on the fact that the following declaration has been made // in runtime.js: // var $String = global.String; -// var $NaN = 0/0; // ------------------------------------------------------------------- @@ -574,7 +573,7 @@ function StringSlice(start, end) { var s_len = s.length; var start_i = TO_INTEGER(start); var end_i = s_len; - if (end !== void 0) { + if (!IS_UNDEFINED(end)) { end_i = TO_INTEGER(end); } @@ -699,7 +698,7 @@ function StringSplitOnRegExp(subject, separator, limit, length) { %_CallFunction(result, %_SubString(subject, start, end), ArrayPushBuiltin); } else { - %_CallFunction(result, void 0, ArrayPushBuiltin); + %_CallFunction(result, UNDEFINED, ArrayPushBuiltin); } if (result.length === limit) break outer_loop; } @@ -756,7 +755,7 @@ function StringSubstr(start, n) { // Correct n: If not given, set to string length; if explicitly // set to undefined, zero, or negative, returns empty string. - if (n === void 0) { + if (IS_UNDEFINED(n)) { len = s.length; } else { len = TO_INTEGER(n); @@ -765,7 +764,7 @@ function StringSubstr(start, n) { // Correct start: If not given (or undefined), set to zero; otherwise // convert to integer and handle negative case. 
- if (start === void 0) { + if (IS_UNDEFINED(start)) { start = 0; } else { start = TO_INTEGER(start); diff --git a/deps/v8/src/stub-cache.cc b/deps/v8/src/stub-cache.cc index 7b23d0c96a..67002a36b1 100644 --- a/deps/v8/src/stub-cache.cc +++ b/deps/v8/src/stub-cache.cc @@ -99,21 +99,11 @@ Code* StubCache::Set(Name* name, Map* map, Code* code) { } -Handle<JSObject> StubCache::StubHolder(Handle<JSObject> receiver, - Handle<JSObject> holder) { - InlineCacheHolderFlag cache_holder = - IC::GetCodeCacheForObject(*receiver, *holder); - return Handle<JSObject>(IC::GetCodeCacheHolder( - isolate_, *receiver, cache_holder)); -} - - Handle<Code> StubCache::FindIC(Handle<Name> name, Handle<Map> stub_holder_map, Code::Kind kind, - Code::StubType type, Code::ExtraICState extra_state) { - Code::Flags flags = Code::ComputeMonomorphicFlags(kind, extra_state, type); + Code::Flags flags = Code::ComputeMonomorphicFlags(kind, extra_state); Handle<Object> probe(stub_holder_map->FindInCodeCache(*name, flags), isolate_); if (probe->IsCode()) return Handle<Code>::cast(probe); @@ -124,41 +114,22 @@ Handle<Code> StubCache::FindIC(Handle<Name> name, Handle<Code> StubCache::FindIC(Handle<Name> name, Handle<JSObject> stub_holder, Code::Kind kind, - Code::StubType type, Code::ExtraICState extra_ic_state) { - return FindIC(name, Handle<Map>(stub_holder->map()), kind, - type, extra_ic_state); + return FindIC(name, Handle<Map>(stub_holder->map()), kind, extra_ic_state); } -Handle<Code> StubCache::FindLoadHandler(Handle<Name> name, - Handle<JSObject> receiver, - Handle<JSObject> stub_holder, - Code::Kind kind, - Code::StubType type) { - Code::ExtraICState extra_ic_state = Code::ComputeExtraICState( - receiver.is_identical_to(stub_holder) ? Code::OWN_STUB - : Code::PROTOTYPE_STUB); - ASSERT(type != Code::NORMAL); - Code::Flags flags = Code::ComputeMonomorphicFlags( - Code::STUB, extra_ic_state, type, kind); - Handle<Object> probe(stub_holder->map()->FindInCodeCache(*name, flags), - isolate_); - if (probe->IsCode()) return Handle<Code>::cast(probe); - return Handle<Code>::null(); -} - - -Handle<Code> StubCache::FindStoreHandler(Handle<Name> name, - Handle<JSObject> receiver, - Code::Kind kind, - Code::StubType type, - StrictModeFlag strict_mode) { - Code::ExtraICState extra_ic_state = Code::ComputeExtraICState( - STANDARD_STORE, strict_mode); - ASSERT(type != Code::NORMAL); +Handle<Code> StubCache::FindHandler(Handle<Name> name, + Handle<JSObject> receiver, + Code::Kind kind, + StrictModeFlag strict_mode) { + Code::ExtraICState extra_ic_state = Code::kNoExtraICState; + if (kind == Code::STORE_IC || kind == Code::KEYED_STORE_IC) { + extra_ic_state = Code::ComputeExtraICState( + STANDARD_STORE, strict_mode); + } Code::Flags flags = Code::ComputeMonomorphicFlags( - Code::STUB, extra_ic_state, type, kind); + Code::HANDLER, extra_ic_state, Code::NORMAL, kind); Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags), isolate_); if (probe->IsCode()) return Handle<Code>::cast(probe); @@ -166,66 +137,29 @@ Handle<Code> StubCache::FindStoreHandler(Handle<Name> name, } -Handle<Code> StubCache::ComputeMonomorphicLoadIC(Handle<HeapObject> receiver, - Handle<Code> handler, - Handle<Name> name) { +Handle<Code> StubCache::ComputeMonomorphicIC(Handle<HeapObject> receiver, + Handle<Code> handler, + Handle<Name> name, + StrictModeFlag strict_mode) { + Code::Kind kind = handler->handler_kind(); Handle<Map> map(receiver->map()); - Handle<Code> ic = FindIC(name, map, Code::LOAD_IC, handler->type()); + Handle<Code> ic = FindIC(name, map, 
kind, strict_mode); if (!ic.is_null()) return ic; - LoadStubCompiler ic_compiler(isolate()); - ic = ic_compiler.CompileMonomorphicIC(map, handler, name); - - HeapObject::UpdateMapCodeCache(receiver, name, ic); - return ic; -} - - -Handle<Code> StubCache::ComputeMonomorphicKeyedLoadIC( - Handle<HeapObject> receiver, - Handle<Code> handler, - Handle<Name> name) { - Handle<Map> map(receiver->map()); - Handle<Code> ic = FindIC(name, map, Code::KEYED_LOAD_IC, handler->type()); - if (!ic.is_null()) return ic; - - KeyedLoadStubCompiler ic_compiler(isolate()); - ic = ic_compiler.CompileMonomorphicIC(map, handler, name); - - HeapObject::UpdateMapCodeCache(receiver, name, ic); - return ic; -} - - -Handle<Code> StubCache::ComputeMonomorphicStoreIC(Handle<HeapObject> receiver, - Handle<Code> handler, - Handle<Name> name, - StrictModeFlag strict_mode) { - Handle<Map> map(receiver->map()); - Handle<Code> ic = FindIC( - name, map, Code::STORE_IC, handler->type(), strict_mode); - if (!ic.is_null()) return ic; - - StoreStubCompiler ic_compiler(isolate(), strict_mode); - ic = ic_compiler.CompileMonomorphicIC(map, handler, name); - - HeapObject::UpdateMapCodeCache(receiver, name, ic); - return ic; -} - - -Handle<Code> StubCache::ComputeMonomorphicKeyedStoreIC( - Handle<HeapObject> receiver, - Handle<Code> handler, - Handle<Name> name, - StrictModeFlag strict_mode) { - Handle<Map> map(receiver->map()); - Handle<Code> ic = FindIC( - name, map, Code::KEYED_STORE_IC, handler->type(), strict_mode); - if (!ic.is_null()) return ic; - - KeyedStoreStubCompiler ic_compiler(isolate(), strict_mode, STANDARD_STORE); - ic = ic_compiler.CompileMonomorphicIC(map, handler, name); + if (kind == Code::LOAD_IC) { + LoadStubCompiler ic_compiler(isolate()); + ic = ic_compiler.CompileMonomorphicIC(map, handler, name); + } else if (kind == Code::KEYED_LOAD_IC) { + KeyedLoadStubCompiler ic_compiler(isolate()); + ic = ic_compiler.CompileMonomorphicIC(map, handler, name); + } else if (kind == Code::STORE_IC) { + StoreStubCompiler ic_compiler(isolate(), strict_mode); + ic = ic_compiler.CompileMonomorphicIC(map, handler, name); + } else { + ASSERT(kind == Code::KEYED_STORE_IC); + KeyedStoreStubCompiler ic_compiler(isolate(), strict_mode, STANDARD_STORE); + ic = ic_compiler.CompileMonomorphicIC(map, handler, name); + } HeapObject::UpdateMapCodeCache(receiver, name, ic); return ic; @@ -257,8 +191,7 @@ Handle<Code> StubCache::ComputeLoadNonexistent(Handle<Name> name, // Compile the stub that is either shared for all names or // name specific if there are global objects involved. 
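
ComputeMonomorphicIC above collapses four nearly identical entry points into one: the handler now records which IC kind it serves (handler_kind()), so the cache can recover the right stub compiler from the handler alone. A toy sketch of that dispatch, with strings standing in for compiled Code objects:

#include <assert.h>
#include <string>

enum IcKind { LOAD_IC, KEYED_LOAD_IC, STORE_IC, KEYED_STORE_IC };

// Each branch stands in for constructing the matching *StubCompiler and
// calling CompileMonomorphicIC; only the store kinds consume strict mode.
static std::string CompileMonomorphic(IcKind kind, bool strict_mode) {
  switch (kind) {
    case LOAD_IC:        return "load-ic";
    case KEYED_LOAD_IC:  return "keyed-load-ic";
    case STORE_IC:       return strict_mode ? "store-ic-strict" : "store-ic";
    case KEYED_STORE_IC: return strict_mode ? "keyed-store-ic-strict"
                                            : "keyed-store-ic";
  }
  assert(false);
  return std::string();
}
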
- Handle<Code> handler = FindLoadHandler( - cache_name, receiver, receiver, Code::LOAD_IC, Code::NONEXISTENT); + Handle<Code> handler = FindHandler(cache_name, receiver, Code::LOAD_IC); if (!handler.is_null()) return handler; LoadStubCompiler compiler(isolate_); @@ -269,268 +202,22 @@ Handle<Code> StubCache::ComputeLoadNonexistent(Handle<Name> name, } -Handle<Code> StubCache::ComputeLoadField(Handle<Name> name, - Handle<JSObject> receiver, - Handle<JSObject> holder, - PropertyIndex field, - Representation representation) { - if (receiver.is_identical_to(holder)) { - LoadFieldStub stub(field.is_inobject(holder), - field.translate(holder), - representation); - return stub.GetCode(isolate()); - } - - Handle<JSObject> stub_holder = StubHolder(receiver, holder); - Handle<Code> stub = FindLoadHandler( - name, receiver, stub_holder, Code::LOAD_IC, Code::FIELD); - if (!stub.is_null()) return stub; - - LoadStubCompiler compiler(isolate_); - Handle<Code> handler = - compiler.CompileLoadField(receiver, holder, name, field, representation); - HeapObject::UpdateMapCodeCache(stub_holder, name, handler); - return handler; -} - - -Handle<Code> StubCache::ComputeLoadCallback( - Handle<Name> name, - Handle<JSObject> receiver, - Handle<JSObject> holder, - Handle<ExecutableAccessorInfo> callback) { - ASSERT(v8::ToCData<Address>(callback->getter()) != 0); - Handle<JSObject> stub_holder = StubHolder(receiver, holder); - Handle<Code> stub = FindLoadHandler( - name, receiver, stub_holder, Code::LOAD_IC, Code::CALLBACKS); - if (!stub.is_null()) return stub; - - LoadStubCompiler compiler(isolate_); - Handle<Code> handler = - compiler.CompileLoadCallback(receiver, holder, name, callback); - HeapObject::UpdateMapCodeCache(stub_holder, name, handler); - return handler; -} - - -Handle<Code> StubCache::ComputeLoadCallback( - Handle<Name> name, - Handle<JSObject> receiver, - Handle<JSObject> holder, - const CallOptimization& call_optimization) { - Handle<JSObject> stub_holder = StubHolder(receiver, holder); - Handle<Code> stub = FindLoadHandler( - name, receiver, stub_holder, Code::LOAD_IC, Code::CALLBACKS); - if (!stub.is_null()) return stub; - - LoadStubCompiler compiler(isolate_); - Handle<Code> handler = - compiler.CompileLoadCallback(receiver, holder, name, call_optimization); - HeapObject::UpdateMapCodeCache(stub_holder, name, handler); - return handler; -} - - -Handle<Code> StubCache::ComputeLoadViaGetter(Handle<Name> name, - Handle<JSObject> receiver, - Handle<JSObject> holder, - Handle<JSFunction> getter) { - Handle<JSObject> stub_holder = StubHolder(receiver, holder); - Handle<Code> stub = FindLoadHandler( - name, receiver, stub_holder, Code::LOAD_IC, Code::CALLBACKS); - if (!stub.is_null()) return stub; - - LoadStubCompiler compiler(isolate_); - Handle<Code> handler = - compiler.CompileLoadViaGetter(receiver, holder, name, getter); - HeapObject::UpdateMapCodeCache(stub_holder, name, handler); - return handler; -} - - -Handle<Code> StubCache::ComputeLoadConstant(Handle<Name> name, - Handle<JSObject> receiver, - Handle<JSObject> holder, - Handle<Object> value) { - Handle<JSObject> stub_holder = StubHolder(receiver, holder); - Handle<Code> handler = FindLoadHandler( - name, receiver, stub_holder, Code::LOAD_IC, Code::CONSTANT); - if (!handler.is_null()) return handler; - - LoadStubCompiler compiler(isolate_); - handler = compiler.CompileLoadConstant(receiver, holder, name, value); - HeapObject::UpdateMapCodeCache(stub_holder, name, handler); - - return handler; -} - - -Handle<Code> 
StubCache::ComputeLoadInterceptor(Handle<Name> name, - Handle<JSObject> receiver, - Handle<JSObject> holder) { - Handle<JSObject> stub_holder = StubHolder(receiver, holder); - Handle<Code> stub = FindLoadHandler( - name, receiver, stub_holder, Code::LOAD_IC, Code::INTERCEPTOR); - if (!stub.is_null()) return stub; - - LoadStubCompiler compiler(isolate_); - Handle<Code> handler = - compiler.CompileLoadInterceptor(receiver, holder, name); - HeapObject::UpdateMapCodeCache(stub_holder, name, handler); - return handler; -} - - -Handle<Code> StubCache::ComputeLoadNormal(Handle<Name> name, - Handle<JSObject> receiver) { - return isolate_->builtins()->LoadIC_Normal(); -} - - Handle<Code> StubCache::ComputeLoadGlobal(Handle<Name> name, Handle<JSObject> receiver, Handle<GlobalObject> holder, Handle<PropertyCell> cell, bool is_dont_delete) { - Handle<JSObject> stub_holder = StubHolder(receiver, holder); - Handle<Code> stub = FindIC(name, stub_holder, Code::LOAD_IC, Code::NORMAL); + Handle<Code> stub = FindIC(name, receiver, Code::LOAD_IC); if (!stub.is_null()) return stub; LoadStubCompiler compiler(isolate_); Handle<Code> ic = compiler.CompileLoadGlobal(receiver, holder, cell, name, is_dont_delete); - HeapObject::UpdateMapCodeCache(stub_holder, name, ic); + HeapObject::UpdateMapCodeCache(receiver, name, ic); return ic; } -Handle<Code> StubCache::ComputeKeyedLoadField(Handle<Name> name, - Handle<JSObject> receiver, - Handle<JSObject> holder, - PropertyIndex field, - Representation representation) { - if (receiver.is_identical_to(holder)) { - // TODO(titzer): this should use an HObjectAccess - KeyedLoadFieldStub stub(field.is_inobject(holder), - field.translate(holder), - representation); - return stub.GetCode(isolate()); - } - - Handle<JSObject> stub_holder = StubHolder(receiver, holder); - Handle<Code> stub = FindLoadHandler( - name, receiver, stub_holder, Code::KEYED_LOAD_IC, Code::FIELD); - if (!stub.is_null()) return stub; - - KeyedLoadStubCompiler compiler(isolate_); - Handle<Code> handler = - compiler.CompileLoadField(receiver, holder, name, field, representation); - HeapObject::UpdateMapCodeCache(stub_holder, name, handler); - return handler; -} - - -Handle<Code> StubCache::ComputeKeyedLoadConstant(Handle<Name> name, - Handle<JSObject> receiver, - Handle<JSObject> holder, - Handle<Object> value) { - Handle<JSObject> stub_holder = StubHolder(receiver, holder); - Handle<Code> handler = FindLoadHandler( - name, receiver, stub_holder, Code::KEYED_LOAD_IC, - Code::CONSTANT); - if (!handler.is_null()) return handler; - - KeyedLoadStubCompiler compiler(isolate_); - handler = compiler.CompileLoadConstant(receiver, holder, name, value); - HeapObject::UpdateMapCodeCache(stub_holder, name, handler); - return handler; -} - - -Handle<Code> StubCache::ComputeKeyedLoadInterceptor(Handle<Name> name, - Handle<JSObject> receiver, - Handle<JSObject> holder) { - Handle<JSObject> stub_holder = StubHolder(receiver, holder); - Handle<Code> stub = FindLoadHandler( - name, receiver, stub_holder, Code::KEYED_LOAD_IC, Code::INTERCEPTOR); - if (!stub.is_null()) return stub; - - KeyedLoadStubCompiler compiler(isolate_); - Handle<Code> handler = - compiler.CompileLoadInterceptor(receiver, holder, name); - HeapObject::UpdateMapCodeCache(stub_holder, name, handler); - return handler; -} - - -Handle<Code> StubCache::ComputeKeyedLoadCallback( - Handle<Name> name, - Handle<JSObject> receiver, - Handle<JSObject> holder, - Handle<ExecutableAccessorInfo> callback) { - Handle<JSObject> stub_holder = StubHolder(receiver, holder); - 
Handle<Code> stub = FindLoadHandler( - name, receiver, stub_holder, Code::KEYED_LOAD_IC, Code::CALLBACKS); - if (!stub.is_null()) return stub; - - KeyedLoadStubCompiler compiler(isolate_); - Handle<Code> handler = - compiler.CompileLoadCallback(receiver, holder, name, callback); - HeapObject::UpdateMapCodeCache(stub_holder, name, handler); - return handler; -} - - -Handle<Code> StubCache::ComputeKeyedLoadCallback( - Handle<Name> name, - Handle<JSObject> receiver, - Handle<JSObject> holder, - const CallOptimization& call_optimization) { - Handle<JSObject> stub_holder = StubHolder(receiver, holder); - Handle<Code> stub = FindLoadHandler( - name, receiver, stub_holder, Code::KEYED_LOAD_IC, Code::CALLBACKS); - if (!stub.is_null()) return stub; - - KeyedLoadStubCompiler compiler(isolate_); - Handle<Code> handler = - compiler.CompileLoadCallback(receiver, holder, name, call_optimization); - HeapObject::UpdateMapCodeCache(stub_holder, name, handler); - return handler; -} - - -Handle<Code> StubCache::ComputeStoreField(Handle<Name> name, - Handle<JSObject> receiver, - LookupResult* lookup, - StrictModeFlag strict_mode) { - Handle<Code> stub = FindStoreHandler( - name, receiver, Code::STORE_IC, Code::FIELD, strict_mode); - if (!stub.is_null()) return stub; - - StoreStubCompiler compiler(isolate_, strict_mode); - Handle<Code> handler = compiler.CompileStoreField(receiver, lookup, name); - HeapObject::UpdateMapCodeCache(receiver, name, handler); - return handler; -} - - -Handle<Code> StubCache::ComputeStoreTransition(Handle<Name> name, - Handle<JSObject> receiver, - LookupResult* lookup, - Handle<Map> transition, - StrictModeFlag strict_mode) { - Handle<Code> stub = FindStoreHandler( - name, receiver, Code::STORE_IC, Code::MAP_TRANSITION, strict_mode); - if (!stub.is_null()) return stub; - - StoreStubCompiler compiler(isolate_, strict_mode); - Handle<Code> handler = - compiler.CompileStoreTransition(receiver, lookup, transition, name); - HeapObject::UpdateMapCodeCache(receiver, name, handler); - return handler; -} - - Handle<Code> StubCache::ComputeKeyedLoadElement(Handle<Map> receiver_map) { Code::Flags flags = Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC); Handle<Name> name = @@ -575,26 +262,18 @@ Handle<Code> StubCache::ComputeKeyedStoreElement( } -Handle<Code> StubCache::ComputeStoreNormal(StrictModeFlag strict_mode) { - return (strict_mode == kStrictMode) - ? 
isolate_->builtins()->Builtins::StoreIC_Normal_Strict() - : isolate_->builtins()->Builtins::StoreIC_Normal(); -} - - Handle<Code> StubCache::ComputeStoreGlobal(Handle<Name> name, Handle<GlobalObject> receiver, Handle<PropertyCell> cell, Handle<Object> value, StrictModeFlag strict_mode) { - Isolate* isolate = cell->GetIsolate(); - Handle<Type> union_type(PropertyCell::UpdateType(cell, value), isolate); + Handle<Type> union_type = PropertyCell::UpdatedType(cell, value); bool is_constant = union_type->IsConstant(); StoreGlobalStub stub(strict_mode, is_constant); Handle<Code> code = FindIC( name, Handle<JSObject>::cast(receiver), - Code::STORE_IC, Code::NORMAL, stub.GetExtraICState()); + Code::STORE_IC, stub.GetExtraICState()); if (!code.is_null()) return code; // Replace the placeholder cell and global object map with the actual global @@ -612,107 +291,6 @@ Handle<Code> StubCache::ComputeStoreGlobal(Handle<Name> name, } -Handle<Code> StubCache::ComputeStoreCallback( - Handle<Name> name, - Handle<JSObject> receiver, - Handle<JSObject> holder, - Handle<ExecutableAccessorInfo> callback, - StrictModeFlag strict_mode) { - ASSERT(v8::ToCData<Address>(callback->setter()) != 0); - Handle<Code> stub = FindStoreHandler( - name, receiver, Code::STORE_IC, Code::CALLBACKS, strict_mode); - if (!stub.is_null()) return stub; - - StoreStubCompiler compiler(isolate_, strict_mode); - Handle<Code> handler = compiler.CompileStoreCallback( - receiver, holder, name, callback); - HeapObject::UpdateMapCodeCache(receiver, name, handler); - return handler; -} - - -Handle<Code> StubCache::ComputeStoreCallback( - Handle<Name> name, - Handle<JSObject> receiver, - Handle<JSObject> holder, - const CallOptimization& call_optimization, - StrictModeFlag strict_mode) { - Handle<Code> stub = FindStoreHandler( - name, receiver, Code::STORE_IC, Code::CALLBACKS, strict_mode); - if (!stub.is_null()) return stub; - - StoreStubCompiler compiler(isolate_, strict_mode); - Handle<Code> handler = compiler.CompileStoreCallback( - receiver, holder, name, call_optimization); - HeapObject::UpdateMapCodeCache(receiver, name, handler); - return handler; -} - - -Handle<Code> StubCache::ComputeStoreViaSetter(Handle<Name> name, - Handle<JSObject> receiver, - Handle<JSObject> holder, - Handle<JSFunction> setter, - StrictModeFlag strict_mode) { - Handle<Code> stub = FindStoreHandler( - name, receiver, Code::STORE_IC, Code::CALLBACKS, strict_mode); - if (!stub.is_null()) return stub; - - StoreStubCompiler compiler(isolate_, strict_mode); - Handle<Code> handler = compiler.CompileStoreViaSetter( - receiver, holder, name, setter); - HeapObject::UpdateMapCodeCache(receiver, name, handler); - return handler; -} - - -Handle<Code> StubCache::ComputeStoreInterceptor(Handle<Name> name, - Handle<JSObject> receiver, - StrictModeFlag strict_mode) { - Handle<Code> stub = FindStoreHandler( - name, receiver, Code::STORE_IC, Code::INTERCEPTOR, strict_mode); - if (!stub.is_null()) return stub; - - StoreStubCompiler compiler(isolate_, strict_mode); - Handle<Code> handler = compiler.CompileStoreInterceptor(receiver, name); - HeapObject::UpdateMapCodeCache(receiver, name, handler); - return handler; -} - - -Handle<Code> StubCache::ComputeKeyedStoreField(Handle<Name> name, - Handle<JSObject> receiver, - LookupResult* lookup, - StrictModeFlag strict_mode) { - Handle<Code> stub = FindStoreHandler( - name, receiver, Code::KEYED_STORE_IC, Code::FIELD, strict_mode); - if (!stub.is_null()) return stub; - - KeyedStoreStubCompiler compiler(isolate(), strict_mode, 
STANDARD_STORE); - Handle<Code> handler = compiler.CompileStoreField(receiver, lookup, name); - HeapObject::UpdateMapCodeCache(receiver, name, handler); - return handler; -} - - -Handle<Code> StubCache::ComputeKeyedStoreTransition( - Handle<Name> name, - Handle<JSObject> receiver, - LookupResult* lookup, - Handle<Map> transition, - StrictModeFlag strict_mode) { - Handle<Code> stub = FindStoreHandler( - name, receiver, Code::KEYED_STORE_IC, Code::MAP_TRANSITION, strict_mode); - if (!stub.is_null()) return stub; - - KeyedStoreStubCompiler compiler(isolate(), strict_mode, STANDARD_STORE); - Handle<Code> handler = - compiler.CompileStoreTransition(receiver, lookup, transition, name); - HeapObject::UpdateMapCodeCache(receiver, name, handler); - return handler; -} - - #define CALL_LOGGER_TAG(kind, type) \ (kind == Code::CALL_IC ? Logger::type : Logger::KEYED_##type) @@ -858,17 +436,13 @@ Handle<Code> StubCache::ComputeCallGlobal(int argc, Handle<GlobalObject> holder, Handle<PropertyCell> cell, Handle<JSFunction> function) { - InlineCacheHolderFlag cache_holder = - IC::GetCodeCacheForObject(*receiver, *holder); - Handle<JSObject> stub_holder(IC::GetCodeCacheHolder( - isolate_, *receiver, cache_holder)); Code::Flags flags = Code::ComputeMonomorphicFlags( - kind, extra_state, Code::NORMAL, argc, cache_holder); - Handle<Object> probe(stub_holder->map()->FindInCodeCache(*name, flags), + kind, extra_state, Code::NORMAL, argc); + Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags), isolate_); if (probe->IsCode()) return Handle<Code>::cast(probe); - CallStubCompiler compiler(isolate(), argc, kind, extra_state, cache_holder); + CallStubCompiler compiler(isolate(), argc, kind, extra_state); Handle<Code> code = compiler.CompileCallGlobal(receiver, holder, cell, function, name); ASSERT(flags == code->flags()); @@ -876,7 +450,7 @@ Handle<Code> StubCache::ComputeCallGlobal(int argc, CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG), *code, *name)); GDBJIT(AddCode(GDBJITInterface::CALL_IC, *name, *code)); if (CallStubCompiler::CanBeCached(function)) { - HeapObject::UpdateMapCodeCache(stub_holder, name, code); + HeapObject::UpdateMapCodeCache(receiver, name, code); } return code; } @@ -1036,7 +610,7 @@ Handle<Code> StubCache::ComputeCompareNil(Handle<Map> receiver_map, Handle<String> name(isolate_->heap()->empty_string()); if (!receiver_map->is_shared()) { Handle<Code> cached_ic = FindIC(name, receiver_map, Code::COMPARE_NIL_IC, - Code::NORMAL, stub.GetExtraICState()); + stub.GetExtraICState()); if (!cached_ic.is_null()) return cached_ic; } @@ -1073,30 +647,25 @@ Handle<Code> StubCache::ComputeLoadElementPolymorphic( } -Handle<Code> StubCache::ComputePolymorphicLoadIC(MapHandleList* receiver_maps, - CodeHandleList* handlers, - int number_of_valid_maps, - Handle<Name> name) { - LoadStubCompiler ic_compiler(isolate_); - Code::StubType type = number_of_valid_maps == 1 ? handlers->at(0)->type() - : Code::NORMAL; - Handle<Code> ic = ic_compiler.CompilePolymorphicIC( - receiver_maps, handlers, name, type, PROPERTY); - return ic; -} - - -Handle<Code> StubCache::ComputePolymorphicStoreIC(MapHandleList* receiver_maps, - CodeHandleList* handlers, - int number_of_valid_maps, - Handle<Name> name, - StrictModeFlag strict_mode) { - StoreStubCompiler ic_compiler(isolate_, strict_mode); - Code::StubType type = number_of_valid_maps == 1 ? 
handlers->at(0)->type() +Handle<Code> StubCache::ComputePolymorphicIC(MapHandleList* receiver_maps, + CodeHandleList* handlers, + int number_of_valid_maps, + Handle<Name> name, + StrictModeFlag strict_mode) { + Handle<Code> handler = handlers->at(0); + Code::Kind kind = handler->handler_kind(); + Code::StubType type = number_of_valid_maps == 1 ? handler->type() : Code::NORMAL; - Handle<Code> ic = ic_compiler.CompilePolymorphicIC( - receiver_maps, handlers, name, type, PROPERTY); - return ic; + if (kind == Code::LOAD_IC) { + LoadStubCompiler ic_compiler(isolate_); + return ic_compiler.CompilePolymorphicIC( + receiver_maps, handlers, name, type, PROPERTY); + } else { + ASSERT(kind == Code::STORE_IC); + StoreStubCompiler ic_compiler(isolate_, strict_mode); + return ic_compiler.CompilePolymorphicIC( + receiver_maps, handlers, name, type, PROPERTY); + } } @@ -1300,12 +869,12 @@ static MaybeObject* ThrowReferenceError(Isolate* isolate, Name* name) { // If the load is non-contextual, just return the undefined result. // Note that both keyed and non-keyed loads may end up here, so we // can't use either LoadIC or KeyedLoadIC constructors. + HandleScope scope(isolate); IC ic(IC::NO_EXTRA_FRAME, isolate); ASSERT(ic.target()->is_load_stub() || ic.target()->is_keyed_load_stub()); if (!ic.SlowIsUndeclaredGlobal()) return isolate->heap()->undefined_value(); // Throw a reference error. - HandleScope scope(isolate); Handle<Name> name_handle(name); Handle<Object> error = isolate->factory()->NewReferenceError("not_defined", @@ -1314,8 +883,8 @@ static MaybeObject* ThrowReferenceError(Isolate* isolate, Name* name) { } -static MaybeObject* LoadWithInterceptor(Arguments* args, - PropertyAttributes* attrs) { +static Handle<Object> LoadWithInterceptor(Arguments* args, + PropertyAttributes* attrs) { ASSERT(args->length() == StubCache::kInterceptorArgsLength); Handle<Name> name_handle = args->at<Name>(StubCache::kInterceptorArgsNameIndex); @@ -1329,9 +898,10 @@ static MaybeObject* LoadWithInterceptor(Arguments* args, Isolate* isolate = receiver_handle->GetIsolate(); // TODO(rossberg): Support symbols in the API. - if (name_handle->IsSymbol()) - return holder_handle->GetPropertyPostInterceptor( - *receiver_handle, *name_handle, attrs); + if (name_handle->IsSymbol()) { + return JSObject::GetPropertyPostInterceptor( + holder_handle, receiver_handle, name_handle, attrs); + } Handle<String> name = Handle<String>::cast(name_handle); Address getter_address = v8::ToCData<Address>(interceptor_info->getter()); @@ -1344,24 +914,21 @@ static MaybeObject* LoadWithInterceptor(Arguments* args, *receiver_handle, *holder_handle); { - // Use the interceptor getter. HandleScope scope(isolate); + // Use the interceptor getter. 
v8::Handle<v8::Value> r = callback_args.Call(getter, v8::Utils::ToLocal(name)); - RETURN_IF_SCHEDULED_EXCEPTION(isolate); + RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object); if (!r.IsEmpty()) { *attrs = NONE; Handle<Object> result = v8::Utils::OpenHandle(*r); result->VerifyApiCallResultType(); - return *result; + return scope.CloseAndEscape(result); } } - MaybeObject* result = holder_handle->GetPropertyPostInterceptor( - *receiver_handle, - *name_handle, - attrs); - RETURN_IF_SCHEDULED_EXCEPTION(isolate); + Handle<Object> result = JSObject::GetPropertyPostInterceptor( + holder_handle, receiver_handle, name_handle, attrs); return result; } @@ -1372,40 +939,42 @@ static MaybeObject* LoadWithInterceptor(Arguments* args, */ RUNTIME_FUNCTION(MaybeObject*, LoadPropertyWithInterceptorForLoad) { PropertyAttributes attr = NONE; - Object* result; - { MaybeObject* maybe_result = LoadWithInterceptor(&args, &attr); - if (!maybe_result->ToObject(&result)) return maybe_result; - } + HandleScope scope(isolate); + Handle<Object> result = LoadWithInterceptor(&args, &attr); + RETURN_IF_EMPTY_HANDLE(isolate, result); // If the property is present, return it. - if (attr != ABSENT) return result; + if (attr != ABSENT) return *result; return ThrowReferenceError(isolate, Name::cast(args[0])); } RUNTIME_FUNCTION(MaybeObject*, LoadPropertyWithInterceptorForCall) { PropertyAttributes attr; - MaybeObject* result = LoadWithInterceptor(&args, &attr); - RETURN_IF_SCHEDULED_EXCEPTION(isolate); + HandleScope scope(isolate); + Handle<Object> result = LoadWithInterceptor(&args, &attr); + RETURN_IF_EMPTY_HANDLE(isolate, result); // This is call IC. In this case, we simply return the undefined result which // will lead to an exception when trying to invoke the result as a // function. - return result; + return *result; } RUNTIME_FUNCTION(MaybeObject*, StoreInterceptorProperty) { + HandleScope scope(isolate); ASSERT(args.length() == 4); - JSObject* recv = JSObject::cast(args[0]); - Name* name = Name::cast(args[1]); - Object* value = args[2]; + Handle<JSObject> recv(JSObject::cast(args[0])); + Handle<Name> name(Name::cast(args[1])); + Handle<Object> value(args[2], isolate); ASSERT(args.smi_at(3) == kStrictMode || args.smi_at(3) == kNonStrictMode); StrictModeFlag strict_mode = static_cast<StrictModeFlag>(args.smi_at(3)); ASSERT(recv->HasNamedInterceptor()); PropertyAttributes attr = NONE; - MaybeObject* result = recv->SetPropertyWithInterceptor( - name, value, attr, strict_mode); - return result; + Handle<Object> result = JSObject::SetPropertyWithInterceptor( + recv, name, value, attr, strict_mode); + RETURN_IF_EMPTY_HANDLE(isolate, result); + return *result; } @@ -1599,7 +1168,7 @@ void StubCompiler::LookupPostInterceptor(Handle<JSObject> holder, #define __ ACCESS_MASM(masm()) -Register BaseLoadStubCompiler::HandlerFrontendHeader( +Register LoadStubCompiler::HandlerFrontendHeader( Handle<JSObject> object, Register object_reg, Handle<JSObject> holder, @@ -1613,7 +1182,7 @@ Register BaseLoadStubCompiler::HandlerFrontendHeader( // HandlerFrontend for store uses the name register. It has to be restored // before a miss. 
-Register BaseStoreStubCompiler::HandlerFrontendHeader( +Register StoreStubCompiler::HandlerFrontendHeader( Handle<JSObject> object, Register object_reg, Handle<JSObject> holder, @@ -1639,7 +1208,7 @@ Register BaseLoadStoreStubCompiler::HandlerFrontend(Handle<JSObject> object, } -Handle<Code> BaseLoadStubCompiler::CompileLoadField( +Handle<Code> LoadStubCompiler::CompileLoadField( Handle<JSObject> object, Handle<JSObject> holder, Handle<Name> name, @@ -1659,7 +1228,7 @@ Handle<Code> BaseLoadStubCompiler::CompileLoadField( } -Handle<Code> BaseLoadStubCompiler::CompileLoadConstant( +Handle<Code> LoadStubCompiler::CompileLoadConstant( Handle<JSObject> object, Handle<JSObject> holder, Handle<Name> name, @@ -1674,7 +1243,7 @@ Handle<Code> BaseLoadStubCompiler::CompileLoadConstant( } -Handle<Code> BaseLoadStubCompiler::CompileLoadCallback( +Handle<Code> LoadStubCompiler::CompileLoadCallback( Handle<JSObject> object, Handle<JSObject> holder, Handle<Name> name, @@ -1691,7 +1260,7 @@ Handle<Code> BaseLoadStubCompiler::CompileLoadCallback( } -Handle<Code> BaseLoadStubCompiler::CompileLoadCallback( +Handle<Code> LoadStubCompiler::CompileLoadCallback( Handle<JSObject> object, Handle<JSObject> holder, Handle<Name> name, @@ -1710,7 +1279,7 @@ Handle<Code> BaseLoadStubCompiler::CompileLoadCallback( } -Handle<Code> BaseLoadStubCompiler::CompileLoadInterceptor( +Handle<Code> LoadStubCompiler::CompileLoadInterceptor( Handle<JSObject> object, Handle<JSObject> holder, Handle<Name> name) { @@ -1730,7 +1299,7 @@ Handle<Code> BaseLoadStubCompiler::CompileLoadInterceptor( } -void BaseLoadStubCompiler::GenerateLoadPostInterceptor( +void LoadStubCompiler::GenerateLoadPostInterceptor( Register interceptor_reg, Handle<JSObject> interceptor_holder, Handle<Name> name, @@ -1789,14 +1358,14 @@ Handle<Code> LoadStubCompiler::CompileLoadViaGetter( HandlerFrontend(object, receiver(), holder, name, &success); __ bind(&success); - GenerateLoadViaGetter(masm(), getter); + GenerateLoadViaGetter(masm(), receiver(), getter); // Return the generated code. return GetCode(kind(), Code::CALLBACKS, name); } -Handle<Code> BaseStoreStubCompiler::CompileStoreTransition( +Handle<Code> StoreStubCompiler::CompileStoreTransition( Handle<JSObject> object, LookupResult* lookup, Handle<Map> transition, @@ -1849,13 +1418,13 @@ Handle<Code> BaseStoreStubCompiler::CompileStoreTransition( TailCallBuiltin(masm(), SlowBuiltin(kind())); // Return the generated code. 
- return GetCode(kind(), Code::MAP_TRANSITION, name); + return GetCode(kind(), Code::TRANSITION, name); } -Handle<Code> BaseStoreStubCompiler::CompileStoreField(Handle<JSObject> object, - LookupResult* lookup, - Handle<Name> name) { +Handle<Code> StoreStubCompiler::CompileStoreField(Handle<JSObject> object, + LookupResult* lookup, + Handle<Name> name) { Label miss; HandlerFrontendHeader(object, receiver(), object, name, &miss); @@ -1948,23 +1517,33 @@ void StubCompiler::TailCallBuiltin(MacroAssembler* masm, Builtins::Name name) { } -void LoadStubCompiler::JitEvent(Handle<Name> name, Handle<Code> code) { - GDBJIT(AddCode(GDBJITInterface::LOAD_IC, *name, *code)); -} - - -void KeyedLoadStubCompiler::JitEvent(Handle<Name> name, Handle<Code> code) { - GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, *name, *code)); -} - - -void StoreStubCompiler::JitEvent(Handle<Name> name, Handle<Code> code) { - GDBJIT(AddCode(GDBJITInterface::STORE_IC, *name, *code)); +void BaseLoadStoreStubCompiler::JitEvent(Handle<Name> name, Handle<Code> code) { +#ifdef ENABLE_GDB_JIT_INTERFACE + GDBJITInterface::CodeTag tag; + if (kind_ == Code::LOAD_IC) { + tag = GDBJITInterface::LOAD_IC; + } else if (kind_ == Code::KEYED_LOAD_IC) { + tag = GDBJITInterface::KEYED_LOAD_IC; + } else if (kind_ == Code::STORE_IC) { + tag = GDBJITInterface::STORE_IC; + } else { + tag = GDBJITInterface::KEYED_STORE_IC; + } + GDBJIT(AddCode(tag, *name, *code)); +#endif } -void KeyedStoreStubCompiler::JitEvent(Handle<Name> name, Handle<Code> code) { - GDBJIT(AddCode(GDBJITInterface::KEYED_STORE_IC, *name, *code)); +void BaseLoadStoreStubCompiler::InitializeRegisters() { + if (kind_ == Code::LOAD_IC) { + registers_ = LoadStubCompiler::registers(); + } else if (kind_ == Code::KEYED_LOAD_IC) { + registers_ = KeyedLoadStubCompiler::registers(); + } else if (kind_ == Code::STORE_IC) { + registers_ = StoreStubCompiler::registers(); + } else { + registers_ = KeyedStoreStubCompiler::registers(); + } } @@ -1972,21 +1551,7 @@ Handle<Code> BaseLoadStoreStubCompiler::GetICCode(Code::Kind kind, Code::StubType type, Handle<Name> name, InlineCacheState state) { - Code::Flags flags = Code::ComputeFlags( - kind, state, extra_state(), type); - Handle<Code> code = GetCodeWithFlags(flags, name); - PROFILE(isolate(), CodeCreateEvent(log_kind(code), *code, *name)); - JitEvent(name, code); - return code; -} - - -Handle<Code> BaseLoadStubCompiler::GetCode(Code::Kind kind, - Code::StubType type, - Handle<Name> name) { - ASSERT(type != Code::NORMAL); - Code::Flags flags = Code::ComputeFlags( - Code::STUB, MONOMORPHIC, Code::kNoExtraICState, type, kind); + Code::Flags flags = Code::ComputeFlags(kind, state, extra_state(), type); Handle<Code> code = GetCodeWithFlags(flags, name); PROFILE(isolate(), CodeCreateEvent(log_kind(code), *code, *name)); JitEvent(name, code); @@ -1994,12 +1559,12 @@ Handle<Code> BaseLoadStubCompiler::GetCode(Code::Kind kind, } -Handle<Code> BaseStoreStubCompiler::GetCode(Code::Kind kind, - Code::StubType type, - Handle<Name> name) { +Handle<Code> BaseLoadStoreStubCompiler::GetCode(Code::Kind kind, + Code::StubType type, + Handle<Name> name) { ASSERT(type != Code::NORMAL); Code::Flags flags = Code::ComputeFlags( - Code::STUB, MONOMORPHIC, extra_state(), type, kind); + Code::HANDLER, MONOMORPHIC, extra_state(), type, kind); Handle<Code> code = GetCodeWithFlags(flags, name); PROFILE(isolate(), CodeCreateEvent(log_kind(code), *code, *name)); JitEvent(name, code); diff --git a/deps/v8/src/stub-cache.h b/deps/v8/src/stub-cache.h index 
63cb42b46e..38bc7a3c3a 100644 --- a/deps/v8/src/stub-cache.h +++ b/deps/v8/src/stub-cache.h @@ -83,83 +83,28 @@ class StubCache { Handle<Code> FindIC(Handle<Name> name, Handle<Map> stub_holder_map, Code::Kind kind, - Code::StubType type, Code::ExtraICState extra_state = Code::kNoExtraICState); Handle<Code> FindIC(Handle<Name> name, Handle<JSObject> stub_holder, Code::Kind kind, - Code::StubType type, Code::ExtraICState extra_state = Code::kNoExtraICState); - Handle<Code> FindLoadHandler(Handle<Name> name, - Handle<JSObject> receiver, - Handle<JSObject> stub_holder, - Code::Kind kind, - Code::StubType type); - - Handle<Code> FindStoreHandler(Handle<Name> name, - Handle<JSObject> receiver, - Code::Kind kind, - Code::StubType type, - StrictModeFlag strict_mode); - - Handle<Code> ComputeMonomorphicLoadIC(Handle<HeapObject> receiver, - Handle<Code> handler, - Handle<Name> name); - - Handle<Code> ComputeMonomorphicKeyedLoadIC(Handle<HeapObject> receiver, - Handle<Code> handler, - Handle<Name> name); + Handle<Code> FindHandler(Handle<Name> name, + Handle<JSObject> receiver, + Code::Kind kind, + StrictModeFlag strict_mode = kNonStrictMode); - Handle<Code> ComputeMonomorphicStoreIC(Handle<HeapObject> receiver, - Handle<Code> handler, - Handle<Name> name, - StrictModeFlag strict_mode); - - Handle<Code> ComputeMonomorphicKeyedStoreIC(Handle<HeapObject> receiver, - Handle<Code> handler, - Handle<Name> name, - StrictModeFlag strict_mode); + Handle<Code> ComputeMonomorphicIC(Handle<HeapObject> receiver, + Handle<Code> handler, + Handle<Name> name, + StrictModeFlag strict_mode); // Computes the right stub matching. Inserts the result in the // cache before returning. This might compile a stub if needed. Handle<Code> ComputeLoadNonexistent(Handle<Name> name, Handle<JSObject> object); - Handle<Code> ComputeLoadField(Handle<Name> name, - Handle<JSObject> object, - Handle<JSObject> holder, - PropertyIndex field_index, - Representation representation); - - Handle<Code> ComputeLoadCallback(Handle<Name> name, - Handle<JSObject> object, - Handle<JSObject> holder, - Handle<ExecutableAccessorInfo> callback); - - Handle<Code> ComputeLoadCallback(Handle<Name> name, - Handle<JSObject> object, - Handle<JSObject> holder, - const CallOptimization& call_optimization); - - Handle<Code> ComputeLoadViaGetter(Handle<Name> name, - Handle<JSObject> object, - Handle<JSObject> holder, - Handle<JSFunction> getter); - - Handle<Code> ComputeLoadConstant(Handle<Name> name, - Handle<JSObject> object, - Handle<JSObject> holder, - Handle<Object> value); - - Handle<Code> ComputeLoadInterceptor(Handle<Name> name, - Handle<JSObject> object, - Handle<JSObject> holder); - - Handle<Code> ComputeLoadNormal(Handle<Name> name, - Handle<JSObject> object); - Handle<Code> ComputeLoadGlobal(Handle<Name> name, Handle<JSObject> object, Handle<GlobalObject> holder, @@ -195,69 +140,18 @@ class StubCache { Handle<JSObject> object, Handle<JSObject> holder); - // --- - - Handle<Code> ComputeStoreField(Handle<Name> name, - Handle<JSObject> object, - LookupResult* lookup, - StrictModeFlag strict_mode); - - Handle<Code> ComputeStoreTransition(Handle<Name> name, - Handle<JSObject> object, - LookupResult* lookup, - Handle<Map> transition, - StrictModeFlag strict_mode); - - Handle<Code> ComputeStoreNormal(StrictModeFlag strict_mode); - Handle<Code> ComputeStoreGlobal(Handle<Name> name, Handle<GlobalObject> object, Handle<PropertyCell> cell, Handle<Object> value, StrictModeFlag strict_mode); - Handle<Code> ComputeStoreCallback(Handle<Name> name, - 
Handle<JSObject> object, - Handle<JSObject> holder, - Handle<ExecutableAccessorInfo> callback, - StrictModeFlag strict_mode); - - Handle<Code> ComputeStoreCallback(Handle<Name> name, - Handle<JSObject> object, - Handle<JSObject> holder, - const CallOptimization& call_optimation, - StrictModeFlag strict_mode); - - Handle<Code> ComputeStoreViaSetter(Handle<Name> name, - Handle<JSObject> object, - Handle<JSObject> holder, - Handle<JSFunction> setter, - StrictModeFlag strict_mode); - - Handle<Code> ComputeStoreInterceptor(Handle<Name> name, - Handle<JSObject> object, - StrictModeFlag strict_mode); - - // --- - - Handle<Code> ComputeKeyedStoreField(Handle<Name> name, - Handle<JSObject> object, - LookupResult* lookup, - StrictModeFlag strict_mode); - Handle<Code> ComputeKeyedStoreTransition(Handle<Name> name, - Handle<JSObject> object, - LookupResult* lookup, - Handle<Map> transition, - StrictModeFlag strict_mode); - Handle<Code> ComputeKeyedLoadElement(Handle<Map> receiver_map); Handle<Code> ComputeKeyedStoreElement(Handle<Map> receiver_map, StrictModeFlag strict_mode, KeyedAccessStoreMode store_mode); - // --- - Handle<Code> ComputeCallField(int argc, Code::Kind, Code::ExtraICState extra_state, @@ -326,16 +220,11 @@ class StubCache { KeyedAccessStoreMode store_mode, StrictModeFlag strict_mode); - Handle<Code> ComputePolymorphicLoadIC(MapHandleList* receiver_maps, - CodeHandleList* handlers, - int number_of_valid_maps, - Handle<Name> name); - - Handle<Code> ComputePolymorphicStoreIC(MapHandleList* receiver_maps, - CodeHandleList* handlers, - int number_of_valid_maps, - Handle<Name> name, - StrictModeFlag strict_mode); + Handle<Code> ComputePolymorphicIC(MapHandleList* receiver_maps, + CodeHandleList* handlers, + int number_of_valid_maps, + Handle<Name> name, + StrictModeFlag strict_mode); // Finds the Code object stored in the Heap::non_monomorphic_cache(). Code* FindCallInitialize(int argc, RelocInfo::Mode mode, Code::Kind kind); @@ -572,8 +461,7 @@ class StubCompiler BASE_EMBEDDED { Register receiver, Register scratch1, Register scratch2, - Label* miss_label, - bool support_wrappers); + Label* miss_label); static void GenerateLoadFunctionPrototype(MacroAssembler* masm, Register receiver, @@ -652,8 +540,10 @@ enum FrontendCheckType { PERFORM_INITIAL_CHECKS, SKIP_INITIAL_CHECKS }; class BaseLoadStoreStubCompiler: public StubCompiler { public: - BaseLoadStoreStubCompiler(Isolate* isolate, Register* registers) - : StubCompiler(isolate), registers_(registers) { } + BaseLoadStoreStubCompiler(Isolate* isolate, Code::Kind kind) + : StubCompiler(isolate), kind_(kind) { + InitializeRegisters(); + } virtual ~BaseLoadStoreStubCompiler() { } Handle<Code> CompileMonomorphicIC(Handle<Map> receiver_map, @@ -698,30 +588,53 @@ class BaseLoadStoreStubCompiler: public StubCompiler { Handle<Name> name, Label* success); + Handle<Code> GetCode(Code::Kind kind, + Code::StubType type, + Handle<Name> name); + Handle<Code> GetICCode(Code::Kind kind, Code::StubType type, Handle<Name> name, InlineCacheState state = MONOMORPHIC); + Code::Kind kind() { return kind_; } + + Logger::LogEventsAndTags log_kind(Handle<Code> code) { + if (!code->is_inline_cache_stub()) return Logger::STUB_TAG; + if (kind_ == Code::LOAD_IC) { + return code->ic_state() == MONOMORPHIC + ? Logger::LOAD_IC_TAG : Logger::LOAD_POLYMORPHIC_IC_TAG; + } else if (kind_ == Code::KEYED_LOAD_IC) { + return code->ic_state() == MONOMORPHIC + ? 
Logger::KEYED_LOAD_IC_TAG : Logger::KEYED_LOAD_POLYMORPHIC_IC_TAG; + } else if (kind_ == Code::STORE_IC) { + return code->ic_state() == MONOMORPHIC + ? Logger::STORE_IC_TAG : Logger::STORE_POLYMORPHIC_IC_TAG; + } else { + return code->ic_state() == MONOMORPHIC + ? Logger::KEYED_STORE_IC_TAG : Logger::KEYED_STORE_POLYMORPHIC_IC_TAG; + } + } + void JitEvent(Handle<Name> name, Handle<Code> code); virtual Code::ExtraICState extra_state() { return Code::kNoExtraICState; } - virtual Logger::LogEventsAndTags log_kind(Handle<Code> code) = 0; - virtual void JitEvent(Handle<Name> name, Handle<Code> code) = 0; - virtual Code::Kind kind() = 0; virtual Register receiver() = 0; virtual Register name() = 0; virtual Register scratch1() = 0; virtual Register scratch2() = 0; virtual Register scratch3() = 0; + void InitializeRegisters(); + + Code::Kind kind_; Register* registers_; }; -class BaseLoadStubCompiler: public BaseLoadStoreStubCompiler { +class LoadStubCompiler: public BaseLoadStoreStubCompiler { public: - BaseLoadStubCompiler(Isolate* isolate, Register* registers) - : BaseLoadStoreStubCompiler(isolate, registers) { } - virtual ~BaseLoadStubCompiler() { } + LoadStubCompiler(Isolate* isolate, Code::Kind kind = Code::LOAD_IC) + : BaseLoadStoreStubCompiler(isolate, kind) { } + virtual ~LoadStubCompiler() { } Handle<Code> CompileLoadField(Handle<JSObject> object, Handle<JSObject> holder, @@ -748,6 +661,28 @@ class BaseLoadStubCompiler: public BaseLoadStoreStubCompiler { Handle<JSObject> holder, Handle<Name> name); + Handle<Code> CompileLoadViaGetter(Handle<JSObject> object, + Handle<JSObject> holder, + Handle<Name> name, + Handle<JSFunction> getter); + + static void GenerateLoadViaGetter(MacroAssembler* masm, + Register receiver, + Handle<JSFunction> getter); + + Handle<Code> CompileLoadNonexistent(Handle<JSObject> object, + Handle<JSObject> last, + Handle<Name> name, + Handle<GlobalObject> global); + + Handle<Code> CompileLoadGlobal(Handle<JSObject> object, + Handle<GlobalObject> holder, + Handle<PropertyCell> cell, + Handle<Name> name, + bool is_dont_delete); + + static Register* registers(); + protected: virtual Register HandlerFrontendHeader(Handle<JSObject> object, Register object_reg, @@ -789,10 +724,6 @@ class BaseLoadStubCompiler: public BaseLoadStoreStubCompiler { Handle<Name> name, LookupResult* lookup); - Handle<Code> GetCode(Code::Kind kind, - Code::StubType type, - Handle<Name> name); - virtual Register receiver() { return registers_[0]; } virtual Register name() { return registers_[1]; } virtual Register scratch1() { return registers_[2]; } @@ -802,46 +733,10 @@ class BaseLoadStubCompiler: public BaseLoadStoreStubCompiler { }; -class LoadStubCompiler: public BaseLoadStubCompiler { - public: - explicit LoadStubCompiler(Isolate* isolate) - : BaseLoadStubCompiler(isolate, registers()) { } - - Handle<Code> CompileLoadNonexistent(Handle<JSObject> object, - Handle<JSObject> last, - Handle<Name> name, - Handle<GlobalObject> global); - - static void GenerateLoadViaGetter(MacroAssembler* masm, - Handle<JSFunction> getter); - - Handle<Code> CompileLoadViaGetter(Handle<JSObject> object, - Handle<JSObject> holder, - Handle<Name> name, - Handle<JSFunction> getter); - - Handle<Code> CompileLoadGlobal(Handle<JSObject> object, - Handle<GlobalObject> holder, - Handle<PropertyCell> cell, - Handle<Name> name, - bool is_dont_delete); - - private: - static Register* registers(); - virtual Code::Kind kind() { return Code::LOAD_IC; } - virtual Logger::LogEventsAndTags log_kind(Handle<Code> code) { - if 
(!code->is_inline_cache_stub()) return Logger::STUB_TAG; - return code->ic_state() == MONOMORPHIC - ? Logger::LOAD_IC_TAG : Logger::LOAD_POLYMORPHIC_IC_TAG; - } - virtual void JitEvent(Handle<Name> name, Handle<Code> code); -}; - - -class KeyedLoadStubCompiler: public BaseLoadStubCompiler { +class KeyedLoadStubCompiler: public LoadStubCompiler { public: explicit KeyedLoadStubCompiler(Isolate* isolate) - : BaseLoadStubCompiler(isolate, registers()) { } + : LoadStubCompiler(isolate, Code::KEYED_LOAD_IC) { } Handle<Code> CompileLoadElement(Handle<Map> receiver_map); @@ -850,30 +745,26 @@ class KeyedLoadStubCompiler: public BaseLoadStubCompiler { static void GenerateLoadDictionaryElement(MacroAssembler* masm); - private: + protected: static Register* registers(); - virtual Code::Kind kind() { return Code::KEYED_LOAD_IC; } - virtual Logger::LogEventsAndTags log_kind(Handle<Code> code) { - if (!code->is_inline_cache_stub()) return Logger::STUB_TAG; - return code->ic_state() == MONOMORPHIC - ? Logger::KEYED_LOAD_IC_TAG : Logger::KEYED_LOAD_POLYMORPHIC_IC_TAG; - } - virtual void JitEvent(Handle<Name> name, Handle<Code> code); + + private: virtual void GenerateNameCheck(Handle<Name> name, Register name_reg, Label* miss); + friend class BaseLoadStoreStubCompiler; }; -class BaseStoreStubCompiler: public BaseLoadStoreStubCompiler { +class StoreStubCompiler: public BaseLoadStoreStubCompiler { public: - BaseStoreStubCompiler(Isolate* isolate, - StrictModeFlag strict_mode, - Register* registers) - : BaseLoadStoreStubCompiler(isolate, registers), + StoreStubCompiler(Isolate* isolate, + StrictModeFlag strict_mode, + Code::Kind kind = Code::STORE_IC) + : BaseLoadStoreStubCompiler(isolate, kind), strict_mode_(strict_mode) { } - virtual ~BaseStoreStubCompiler() { } + virtual ~StoreStubCompiler() { } Handle<Code> CompileStoreTransition(Handle<JSObject> object, LookupResult* lookup, @@ -914,16 +805,27 @@ class BaseStoreStubCompiler: public BaseLoadStoreStubCompiler { Register scratch2, Label* miss_label); - static Builtins::Name MissBuiltin(Code::Kind kind) { - switch (kind) { - case Code::LOAD_IC: return Builtins::kLoadIC_Miss; - case Code::STORE_IC: return Builtins::kStoreIC_Miss; - case Code::KEYED_LOAD_IC: return Builtins::kKeyedLoadIC_Miss; - case Code::KEYED_STORE_IC: return Builtins::kKeyedStoreIC_Miss; - default: UNREACHABLE(); - } - return Builtins::kLoadIC_Miss; - } + Handle<Code> CompileStoreCallback(Handle<JSObject> object, + Handle<JSObject> holder, + Handle<Name> name, + Handle<ExecutableAccessorInfo> callback); + + Handle<Code> CompileStoreCallback(Handle<JSObject> object, + Handle<JSObject> holder, + Handle<Name> name, + const CallOptimization& call_optimization); + + static void GenerateStoreViaSetter(MacroAssembler* masm, + Handle<JSFunction> setter); + + Handle<Code> CompileStoreViaSetter(Handle<JSObject> object, + Handle<JSObject> holder, + Handle<Name> name, + Handle<JSFunction> setter); + + Handle<Code> CompileStoreInterceptor(Handle<JSObject> object, + Handle<Name> name); + static Builtins::Name SlowBuiltin(Code::Kind kind) { switch (kind) { case Code::STORE_IC: return Builtins::kStoreIC_Slow; @@ -943,10 +845,6 @@ class BaseStoreStubCompiler: public BaseLoadStoreStubCompiler { virtual void HandlerFrontendFooter(Handle<Name> name, Label* success, Label* miss); - Handle<Code> GetCode(Code::Kind kind, - Code::StubType type, - Handle<Name> name); - void GenerateRestoreName(MacroAssembler* masm, Label* label, Handle<Name> name); @@ -960,56 +858,21 @@ class BaseStoreStubCompiler: public 
BaseLoadStoreStubCompiler { StrictModeFlag strict_mode() { return strict_mode_; } virtual Code::ExtraICState extra_state() { return strict_mode_; } - private: - StrictModeFlag strict_mode_; -}; - - -class StoreStubCompiler: public BaseStoreStubCompiler { - public: - StoreStubCompiler(Isolate* isolate, StrictModeFlag strict_mode) - : BaseStoreStubCompiler(isolate, strict_mode, registers()) { } - - - Handle<Code> CompileStoreCallback(Handle<JSObject> object, - Handle<JSObject> holder, - Handle<Name> name, - Handle<ExecutableAccessorInfo> callback); - - Handle<Code> CompileStoreCallback(Handle<JSObject> object, - Handle<JSObject> holder, - Handle<Name> name, - const CallOptimization& call_optimization); - - static void GenerateStoreViaSetter(MacroAssembler* masm, - Handle<JSFunction> setter); - - Handle<Code> CompileStoreViaSetter(Handle<JSObject> object, - Handle<JSObject> holder, - Handle<Name> name, - Handle<JSFunction> setter); - - Handle<Code> CompileStoreInterceptor(Handle<JSObject> object, - Handle<Name> name); + protected: + static Register* registers(); private: - static Register* registers(); - virtual Code::Kind kind() { return Code::STORE_IC; } - virtual Logger::LogEventsAndTags log_kind(Handle<Code> code) { - if (!code->is_inline_cache_stub()) return Logger::STUB_TAG; - return code->ic_state() == MONOMORPHIC - ? Logger::STORE_IC_TAG : Logger::STORE_POLYMORPHIC_IC_TAG; - } - virtual void JitEvent(Handle<Name> name, Handle<Code> code); + StrictModeFlag strict_mode_; + friend class BaseLoadStoreStubCompiler; }; -class KeyedStoreStubCompiler: public BaseStoreStubCompiler { +class KeyedStoreStubCompiler: public StoreStubCompiler { public: KeyedStoreStubCompiler(Isolate* isolate, StrictModeFlag strict_mode, KeyedAccessStoreMode store_mode) - : BaseStoreStubCompiler(isolate, strict_mode, registers()), + : StoreStubCompiler(isolate, strict_mode, Code::KEYED_STORE_IC), store_mode_(store_mode) { } Handle<Code> CompileStoreElement(Handle<Map> receiver_map); @@ -1026,24 +889,18 @@ class KeyedStoreStubCompiler: public BaseStoreStubCompiler { virtual Code::ExtraICState extra_state() { return Code::ComputeExtraICState(store_mode_, strict_mode()); } + static Register* registers(); private: Register transition_map() { return registers()[3]; } - static Register* registers(); - virtual Code::Kind kind() { return Code::KEYED_STORE_IC; } - virtual Logger::LogEventsAndTags log_kind(Handle<Code> code) { - if (!code->is_inline_cache_stub()) return Logger::STUB_TAG; - return code->ic_state() == MONOMORPHIC - ? 
Logger::KEYED_STORE_IC_TAG : Logger::KEYED_STORE_POLYMORPHIC_IC_TAG; - } - virtual void JitEvent(Handle<Name> name, Handle<Code> code); virtual void GenerateNameCheck(Handle<Name> name, Register name_reg, Label* miss); KeyedAccessStoreMode store_mode_; + friend class BaseLoadStoreStubCompiler; }; @@ -1070,7 +927,7 @@ class CallStubCompiler: public StubCompiler { int argc, Code::Kind kind, Code::ExtraICState extra_state, - InlineCacheHolderFlag cache_holder); + InlineCacheHolderFlag cache_holder = OWN_MAP); Handle<Code> CompileCallField(Handle<JSObject> object, Handle<JSObject> holder, diff --git a/deps/v8/src/type-info.cc b/deps/v8/src/type-info.cc index 190eb3e6ff..65d1364058 100644 --- a/deps/v8/src/type-info.cc +++ b/deps/v8/src/type-info.cc @@ -128,6 +128,16 @@ bool TypeFeedbackOracle::LoadIsMonomorphicNormal(Property* expr) { } +bool TypeFeedbackOracle::LoadIsPreMonomorphic(Property* expr) { + Handle<Object> map_or_code = GetInfo(expr->PropertyFeedbackId()); + if (map_or_code->IsCode()) { + Handle<Code> code = Handle<Code>::cast(map_or_code); + return code->is_inline_cache_stub() && code->ic_state() == PREMONOMORPHIC; + } + return false; +} + + bool TypeFeedbackOracle::LoadIsPolymorphic(Property* expr) { Handle<Object> map_or_code = GetInfo(expr->PropertyFeedbackId()); if (map_or_code->IsCode()) { @@ -166,6 +176,16 @@ bool TypeFeedbackOracle::StoreIsMonomorphicNormal(TypeFeedbackId ast_id) { } +bool TypeFeedbackOracle::StoreIsPreMonomorphic(TypeFeedbackId ast_id) { + Handle<Object> map_or_code = GetInfo(ast_id); + if (map_or_code->IsCode()) { + Handle<Code> code = Handle<Code>::cast(map_or_code); + return code->ic_state() == PREMONOMORPHIC; + } + return false; +} + + bool TypeFeedbackOracle::StoreIsKeyedPolymorphic(TypeFeedbackId ast_id) { Handle<Object> map_or_code = GetInfo(ast_id); if (map_or_code->IsCode()) { @@ -251,7 +271,7 @@ void TypeFeedbackOracle::LoadReceiverTypes(Property* expr, Handle<String> name, SmallMapList* types) { Code::Flags flags = Code::ComputeFlags( - Code::STUB, MONOMORPHIC, Code::kNoExtraICState, + Code::HANDLER, MONOMORPHIC, Code::kNoExtraICState, Code::NORMAL, Code::LOAD_IC); CollectReceiverTypes(expr->PropertyFeedbackId(), name, flags, types); } @@ -261,7 +281,7 @@ void TypeFeedbackOracle::StoreReceiverTypes(Assignment* expr, Handle<String> name, SmallMapList* types) { Code::Flags flags = Code::ComputeFlags( - Code::STUB, MONOMORPHIC, Code::kNoExtraICState, + Code::HANDLER, MONOMORPHIC, Code::kNoExtraICState, Code::NORMAL, Code::STORE_IC); CollectReceiverTypes(expr->AssignmentFeedbackId(), name, flags, types); } @@ -381,20 +401,29 @@ void TypeFeedbackOracle::BinaryType(TypeFeedbackId id, Handle<Type>* left, Handle<Type>* right, Handle<Type>* result, - Maybe<int>* fixed_right_arg) { + Maybe<int>* fixed_right_arg, + Token::Value operation) { Handle<Object> object = GetInfo(id); if (!object->IsCode()) { - // For some binary ops we don't have ICs, e.g. Token::COMMA. + // For some binary ops we don't have ICs, e.g. Token::COMMA, but for the + // operations covered by the BinaryOpStub we should always have them. 
+ ASSERT(!(operation >= BinaryOpStub::FIRST_TOKEN && + operation <= BinaryOpStub::LAST_TOKEN)); *left = *right = *result = handle(Type::None(), isolate_); return; } Handle<Code> code = Handle<Code>::cast(object); ASSERT(code->is_binary_op_stub()); - int minor_key = code->stub_info(); - BinaryOpIC::StubInfoToType(minor_key, left, right, result, isolate()); - *fixed_right_arg = - BinaryOpStub::decode_fixed_right_arg_from_minor_key(minor_key); + BinaryOpStub stub(code->extended_extra_ic_state()); + + // Sanity check. + ASSERT(stub.operation() == operation); + + *left = stub.GetLeftType(isolate()); + *right = stub.GetRightType(isolate()); + *result = stub.GetResultType(isolate()); + *fixed_right_arg = stub.fixed_right_arg(); } @@ -410,36 +439,15 @@ Handle<Type> TypeFeedbackOracle::ClauseType(TypeFeedbackId id) { } -TypeInfo TypeFeedbackOracle::IncrementType(CountOperation* expr) { +Handle<Type> TypeFeedbackOracle::IncrementType(CountOperation* expr) { Handle<Object> object = GetInfo(expr->CountBinOpFeedbackId()); - TypeInfo unknown = TypeInfo::Unknown(); + Handle<Type> unknown(Type::None(), isolate_); if (!object->IsCode()) return unknown; Handle<Code> code = Handle<Code>::cast(object); if (!code->is_binary_op_stub()) return unknown; - BinaryOpIC::TypeInfo left_type, right_type, unused_result_type; - BinaryOpStub::decode_types_from_minor_key(code->stub_info(), &left_type, - &right_type, &unused_result_type); - // CountOperations should always have +1 or -1 as their right input. - ASSERT(right_type == BinaryOpIC::SMI || - right_type == BinaryOpIC::UNINITIALIZED); - - switch (left_type) { - case BinaryOpIC::UNINITIALIZED: - case BinaryOpIC::SMI: - return TypeInfo::Smi(); - case BinaryOpIC::INT32: - return TypeInfo::Integer32(); - case BinaryOpIC::NUMBER: - return TypeInfo::Double(); - case BinaryOpIC::STRING: - case BinaryOpIC::GENERIC: - return unknown; - default: - return unknown; - } - UNREACHABLE(); - return unknown; + BinaryOpStub stub(code->extended_extra_ic_state()); + return stub.GetLeftType(isolate()); } @@ -634,12 +642,6 @@ void TypeFeedbackOracle::ProcessRelocInfos(ZoneList<RelocInfo>* infos) { case Code::KEYED_LOAD_IC: case Code::KEYED_STORE_IC: - if (target->ic_state() == MONOMORPHIC || - target->ic_state() == POLYMORPHIC) { - SetInfo(ast_id, target); - } - break; - case Code::BINARY_OP_IC: case Code::COMPARE_IC: case Code::TO_BOOLEAN_IC: diff --git a/deps/v8/src/type-info.h b/deps/v8/src/type-info.h index 4b376c84bd..f295c06dac 100644 --- a/deps/v8/src/type-info.h +++ b/deps/v8/src/type-info.h @@ -243,9 +243,11 @@ class TypeFeedbackOracle: public ZoneObject { bool LoadIsMonomorphicNormal(Property* expr); bool LoadIsUninitialized(Property* expr); + bool LoadIsPreMonomorphic(Property* expr); bool LoadIsPolymorphic(Property* expr); bool StoreIsUninitialized(TypeFeedbackId ast_id); bool StoreIsMonomorphicNormal(TypeFeedbackId ast_id); + bool StoreIsPreMonomorphic(TypeFeedbackId ast_id); bool StoreIsKeyedPolymorphic(TypeFeedbackId ast_id); bool CallIsMonomorphic(Call* expr); bool CallNewIsMonomorphic(CallNew* expr); @@ -301,7 +303,8 @@ class TypeFeedbackOracle: public ZoneObject { Handle<Type>* left, Handle<Type>* right, Handle<Type>* result, - Maybe<int>* fixed_right_arg); + Maybe<int>* fixed_right_arg, + Token::Value operation); void CompareType(TypeFeedbackId id, Handle<Type>* left, @@ -310,7 +313,7 @@ class TypeFeedbackOracle: public ZoneObject { Handle<Type> ClauseType(TypeFeedbackId id); - TypeInfo IncrementType(CountOperation* expr); + Handle<Type> IncrementType(CountOperation* 
expr); Zone* zone() const { return zone_; } Isolate* isolate() const { return isolate_; } diff --git a/deps/v8/src/typedarray.js b/deps/v8/src/typedarray.js index 7bd16f670b..1e67bc30c6 100644 --- a/deps/v8/src/typedarray.js +++ b/deps/v8/src/typedarray.js @@ -30,7 +30,7 @@ // This file relies on the fact that the following declaration has been made // in runtime.js: // var $Array = global.Array; - +var $ArrayBuffer = global.ArrayBuffer; // --------------- Typed Arrays --------------------- @@ -70,15 +70,17 @@ function CreateTypedArrayConstructor(name, elementSize, arrayId, constructor) { function ConstructByLength(obj, length) { var l = ToPositiveInteger(length, "invalid_typed_array_length"); var byteLength = l * elementSize; - var buffer = new global.ArrayBuffer(byteLength); + var buffer = new $ArrayBuffer(byteLength); %TypedArrayInitialize(obj, arrayId, buffer, 0, byteLength); } function ConstructByArrayLike(obj, arrayLike) { var length = arrayLike.length; - var l = ToPositiveInteger(length, "invalid_typed_array_length"); + var l = ToPositiveInteger(length, "invalid_typed_array_length"); if(!%TypedArrayInitializeFromArrayLike(obj, arrayId, arrayLike, l)) { for (var i = 0; i < l; i++) { + // It is crucial that we let any exceptions from arrayLike[i] + // propagate outside the function. obj[i] = arrayLike[i]; } } diff --git a/deps/v8/src/types.cc b/deps/v8/src/types.cc index 70ddccd6a7..17a19b29e4 100644 --- a/deps/v8/src/types.cc +++ b/deps/v8/src/types.cc @@ -128,11 +128,19 @@ int Type::LubBitset() { Handle<v8::internal::Object> value = this->as_constant(); if (value->IsSmi()) return kSmi; map = HeapObject::cast(*value)->map(); + if (map->instance_type() == HEAP_NUMBER_TYPE) { + int32_t i; + uint32_t u; + if (value->ToInt32(&i)) return Smi::IsValid(i) ? kSmi : kOtherSigned32; + if (value->ToUint32(&u)) return kUnsigned32; + return kDouble; + } if (map->instance_type() == ODDBALL_TYPE) { if (value->IsUndefined()) return kUndefined; if (value->IsNull()) return kNull; if (value->IsTrue() || value->IsFalse()) return kBoolean; - if (value->IsTheHole()) return kAny; + if (value->IsTheHole()) return kAny; // TODO(rossberg): kNone? + UNREACHABLE(); } } switch (map->instance_type()) { @@ -230,8 +238,9 @@ int Type::GlbBitset() { // Check this <= that. -bool Type::IsSlowCase(Type* that) { +bool Type::SlowIs(Type* that) { // Fast path for bitsets.
+ if (this->is_none()) return true; if (that->is_bitset()) { return (this->LubBitset() | that->as_bitset()) == that->as_bitset(); } @@ -518,9 +527,13 @@ void Type::TypePrint(FILE* out) { } PrintF(out, "}"); } else if (is_constant()) { - PrintF(out, "Constant(%p)", static_cast<void*>(*as_constant())); + PrintF(out, "Constant(%p : ", static_cast<void*>(*as_constant())); + from_bitset(LubBitset())->TypePrint(out); + PrintF(")"); } else if (is_class()) { - PrintF(out, "Class(%p)", static_cast<void*>(*as_class())); + PrintF(out, "Class(%p < ", static_cast<void*>(*as_class())); + from_bitset(LubBitset())->TypePrint(out); + PrintF(")"); } else if (is_union()) { PrintF(out, "{"); Handle<Unioned> unioned = as_union(); diff --git a/deps/v8/src/types.h b/deps/v8/src/types.h index 2810ffc8a1..5d437e26b2 100644 --- a/deps/v8/src/types.h +++ b/deps/v8/src/types.h @@ -128,6 +128,7 @@ namespace internal { V(Receiver, kObject | kProxy) \ V(Allocated, kDouble | kName | kReceiver) \ V(Any, kOddball | kNumber | kAllocated | kInternal) \ + V(NonNumber, kAny - kNumber) \ V(Detectable, kAllocated - kUndetectable) #define TYPE_LIST(V) \ @@ -155,7 +156,7 @@ class Type : public Object { static Type* Intersect(Handle<Type> type1, Handle<Type> type2); static Type* Optional(Handle<Type> type); // type \/ Undefined - bool Is(Type* that) { return (this == that) ? true : IsSlowCase(that); } + bool Is(Type* that) { return (this == that) ? true : SlowIs(that); } bool Is(Handle<Type> that) { return this->Is(*that); } bool Maybe(Type* that); bool Maybe(Handle<Type> that) { return this->Maybe(*that); } @@ -225,12 +226,13 @@ class Type : public Object { kUnusedEOL = 0 }; + bool is_none() { return this == None(); } bool is_bitset() { return this->IsSmi(); } bool is_class() { return this->IsMap(); } bool is_constant() { return this->IsBox(); } bool is_union() { return this->IsFixedArray(); } - bool IsSlowCase(Type* that); + bool SlowIs(Type* that); int as_bitset() { return Smi::cast(this)->value(); } Handle<Map> as_class() { return Handle<Map>::cast(handle()); } @@ -298,10 +300,18 @@ struct Bounds { Handle<Type> upper; Bounds() {} - Bounds(Handle<Type> l, Handle<Type> u) : lower(l), upper(u) {} - Bounds(Type* l, Type* u, Isolate* isl) : lower(l, isl), upper(u, isl) {} - explicit Bounds(Handle<Type> t) : lower(t), upper(t) {} - Bounds(Type* t, Isolate* isl) : lower(t, isl), upper(t, isl) {} + Bounds(Handle<Type> l, Handle<Type> u) : lower(l), upper(u) { + ASSERT(lower->Is(upper)); + } + Bounds(Type* l, Type* u, Isolate* isl) : lower(l, isl), upper(u, isl) { + ASSERT(lower->Is(upper)); + } + explicit Bounds(Handle<Type> t) : lower(t), upper(t) { + ASSERT(lower->Is(upper)); + } + Bounds(Type* t, Isolate* isl) : lower(t, isl), upper(t, isl) { + ASSERT(lower->Is(upper)); + } // Unrestricted bounds. static Bounds Unbounded(Isolate* isl) { @@ -310,9 +320,11 @@ struct Bounds { // Meet: both b1 and b2 are known to hold. static Bounds Both(Bounds b1, Bounds b2, Isolate* isl) { - return Bounds( - handle(Type::Union(b1.lower, b2.lower), isl), - handle(Type::Intersect(b1.upper, b2.upper), isl)); + Handle<Type> lower(Type::Union(b1.lower, b2.lower), isl); + Handle<Type> upper(Type::Intersect(b1.upper, b2.upper), isl); + // Lower bounds are considered approximate, correct as necessary. + lower = handle(Type::Intersect(lower, upper), isl); + return Bounds(lower, upper); } // Join: either b1 or b2 is known to hold. 
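The ASSERT(lower->Is(upper)) added to every Bounds constructor above makes the bounds invariant explicit, and it is why Both() now intersects the joined lower bound with the new upper bound: the union of two lower bounds need not lie inside the intersection of the two upper bounds. A minimal, self-contained sketch of the corrected meet, using a toy bitset lattice rather than V8's real Type class (all names here are illustrative):

#include <cassert>
#include <cstdint>

typedef uint32_t TypeBits;  // toy stand-in for Type*: subtyping is inclusion
const TypeBits kNone = 0, kSmi = 1, kOtherNumber = 2,
               kNumber = kSmi | kOtherNumber, kString = 4;

bool Is(TypeBits a, TypeBits b) { return (a | b) == b; }  // a <= b

struct Bounds {
  TypeBits lower, upper;
  Bounds(TypeBits l, TypeBits u) : lower(l), upper(u) { assert(Is(l, u)); }
};

// Meet of two bounds: both b1 and b2 are known to hold. Upper bounds are
// intersected; the union of the lowers is then corrected by intersecting it
// with the new upper, exactly as the patched Bounds::Both does.
Bounds Both(Bounds b1, Bounds b2) {
  TypeBits upper = b1.upper & b2.upper;
  TypeBits lower = (b1.lower | b2.lower) & upper;  // the correction step
  return Bounds(lower, upper);
}

int main() {
  Bounds b1(kSmi, kNumber), b2(kString, kString);
  // Without the correction, lower would be Smi|String, which is not a
  // subtype of upper = Number /\ String = None, violating the invariant.
  Bounds b = Both(b1, b2);
  assert(b.lower == kNone && b.upper == kNone);
  return 0;
}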
@@ -323,10 +335,14 @@ struct Bounds { } static Bounds NarrowLower(Bounds b, Handle<Type> t, Isolate* isl) { + // Lower bounds are considered approximate, correct as necessary. + t = handle(Type::Intersect(t, b.upper), isl); return Bounds(handle(Type::Union(b.lower, t), isl), b.upper); } static Bounds NarrowUpper(Bounds b, Handle<Type> t, Isolate* isl) { - return Bounds(b.lower, handle(Type::Intersect(b.upper, t), isl)); + return Bounds( + handle(Type::Intersect(b.lower, t), isl), + handle(Type::Intersect(b.upper, t), isl)); } }; diff --git a/deps/v8/src/typing.cc b/deps/v8/src/typing.cc index 34bb64bd7d..03c1ad16ef 100644 --- a/deps/v8/src/typing.cc +++ b/deps/v8/src/typing.cc @@ -206,6 +206,11 @@ void AstTyper::VisitSwitchStatement(SwitchStatement* stmt) { } +void AstTyper::VisitCaseClause(CaseClause* clause) { + UNREACHABLE(); +} + + void AstTyper::VisitDoWhileStatement(DoWhileStatement* stmt) { // Collect type feedback. if (!stmt->cond()->ToBooleanIsTrue()) { @@ -247,8 +252,8 @@ void AstTyper::VisitForStatement(ForStatement* stmt) { RECURSE(Visit(stmt->cond())); } RECURSE(Visit(stmt->body())); - store_.Forget(); // Control may transfer here via 'continue'. if (stmt->next() != NULL) { + store_.Forget(); // Control may transfer here via 'continue'. RECURSE(Visit(stmt->next())); } store_.Forget(); // Control may transfer here via termination or 'break'. @@ -305,7 +310,7 @@ void AstTyper::VisitFunctionLiteral(FunctionLiteral* expr) { } -void AstTyper::VisitSharedFunctionInfoLiteral(SharedFunctionInfoLiteral* expr) { +void AstTyper::VisitNativeFunctionLiteral(NativeFunctionLiteral* expr) { } @@ -543,7 +548,7 @@ void AstTyper::VisitBinaryOperation(BinaryOperation* expr) { Handle<Type> type, left_type, right_type; Maybe<int> fixed_right_arg; oracle()->BinaryType(expr->BinaryOperationFeedbackId(), - &left_type, &right_type, &type, &fixed_right_arg); + &left_type, &right_type, &type, &fixed_right_arg, expr->op()); NarrowLowerType(expr, type); NarrowLowerType(expr->left(), left_type); NarrowLowerType(expr->right(), right_type); @@ -577,10 +582,15 @@ void AstTyper::VisitBinaryOperation(BinaryOperation* expr) { case Token::BIT_AND: { RECURSE(Visit(expr->left())); RECURSE(Visit(expr->right())); - Type* upper = Type::Union( - expr->left()->bounds().upper, expr->right()->bounds().upper); - if (!upper->Is(Type::Signed32())) upper = Type::Signed32(); - NarrowType(expr, Bounds(Type::Smi(), upper, isolate_)); + Handle<Type> upper( + Type::Union( + expr->left()->bounds().upper, expr->right()->bounds().upper), + isolate_); + if (!upper->Is(Type::Signed32())) + upper = handle(Type::Signed32(), isolate_); + Handle<Type> lower(Type::Intersect( + handle(Type::Smi(), isolate_), upper), isolate_); + NarrowType(expr, Bounds(lower, upper)); break; } case Token::BIT_XOR: @@ -593,7 +603,10 @@ void AstTyper::VisitBinaryOperation(BinaryOperation* expr) { case Token::SHR: RECURSE(Visit(expr->left())); RECURSE(Visit(expr->right())); - NarrowType(expr, Bounds(Type::Smi(), Type::Unsigned32(), isolate_)); + // TODO(rossberg): The upper bound would be Unsigned32, but since there + // is no 'positive Smi' type for the lower bound, we use the smallest + // union of Smi and Unsigned32 as upper bound instead. 
+ NarrowType(expr, Bounds(Type::Smi(), Type::Number(), isolate_)); break; case Token::ADD: { RECURSE(Visit(expr->left())); @@ -601,15 +614,17 @@ void AstTyper::VisitBinaryOperation(BinaryOperation* expr) { Bounds l = expr->left()->bounds(); Bounds r = expr->right()->bounds(); Type* lower = - l.lower->Is(Type::Number()) && r.lower->Is(Type::Number()) ? - Type::Smi() : + l.lower->Is(Type::None()) || r.lower->Is(Type::None()) ? + Type::None() : l.lower->Is(Type::String()) || r.lower->Is(Type::String()) ? - Type::String() : Type::None(); + Type::String() : + l.lower->Is(Type::Number()) && r.lower->Is(Type::Number()) ? + Type::Smi() : Type::None(); Type* upper = - l.upper->Is(Type::Number()) && r.upper->Is(Type::Number()) ? - Type::Number() : l.upper->Is(Type::String()) || r.upper->Is(Type::String()) ? - Type::String() : Type::NumberOrString(); + Type::String() : + l.upper->Is(Type::Number()) && r.upper->Is(Type::Number()) ? + Type::Number() : Type::NumberOrString(); NarrowType(expr, Bounds(lower, upper, isolate_)); break; } diff --git a/deps/v8/src/unicode.h b/deps/v8/src/unicode.h index 42a81824ba..f1dcad0bcb 100644 --- a/deps/v8/src/unicode.h +++ b/deps/v8/src/unicode.h @@ -29,7 +29,7 @@ #define V8_UNICODE_H_ #include <sys/types.h> -#include <globals.h> +#include "globals.h" /** * \file * Definitions and convenience functions for working with unicode. diff --git a/deps/v8/src/unique.h b/deps/v8/src/unique.h index 7ae704a26a..a93b046993 100644 --- a/deps/v8/src/unique.h +++ b/deps/v8/src/unique.h @@ -29,6 +29,7 @@ #define V8_HYDROGEN_UNIQUE_H_ #include "handles.h" +#include "objects.h" #include "utils.h" #include "zone.h" @@ -53,19 +54,30 @@ class UniqueSet; template <typename T> class Unique V8_FINAL { public: - // TODO(titzer): make private and introduce some builder/owner class. + // TODO(titzer): make private and introduce a uniqueness scope. explicit Unique(Handle<T> handle) { if (handle.is_null()) { raw_address_ = NULL; } else { + // This is a best-effort check to prevent comparing Unique<T>'s created + // in different GC eras; we require heap allocation to be disallowed at + // creation time. + // NOTE: we currently consider maps to be non-movable, so no special + // assurance is required for creating a Unique<Map>. + // TODO(titzer): other immortal immovable objects are also fine. + ASSERT(!AllowHeapAllocation::IsAllowed() || handle->IsMap()); raw_address_ = reinterpret_cast<Address>(*handle); - ASSERT_NE(raw_address_, NULL); + ASSERT_NE(raw_address_, NULL); // Non-null should imply non-zero address. } handle_ = handle; } + // TODO(titzer): this is a hack to migrate to Unique<T> incrementally. + Unique(Address raw_address, Handle<T> handle) + : raw_address_(raw_address), handle_(handle) { } + // Constructor for handling automatic up casting. - // Ex. Unique<JSFunction> can be passed when Unique<Object> is expected. + // E.g. Unique<JSFunction> can be passed when Unique<Object> is expected. template <class S> Unique(Unique<S> uniq) { #ifdef DEBUG T* a = NULL; @@ -74,34 +86,57 @@ class Unique V8_FINAL { USE(a); #endif raw_address_ = uniq.raw_address_; - handle_ = uniq.handle_; // Creates a new handle sharing the same location.
+ handle_ = uniq.handle_; } template <typename U> - bool operator==(const Unique<U>& other) const { + inline bool operator==(const Unique<U>& other) const { + ASSERT(IsInitialized() && other.IsInitialized()); return raw_address_ == other.raw_address_; } template <typename U> - bool operator!=(const Unique<U>& other) const { + inline bool operator!=(const Unique<U>& other) const { + ASSERT(IsInitialized() && other.IsInitialized()); return raw_address_ != other.raw_address_; } - intptr_t Hashcode() const { + inline intptr_t Hashcode() const { + ASSERT(IsInitialized()); return reinterpret_cast<intptr_t>(raw_address_); } - bool IsNull() { + inline bool IsNull() const { + ASSERT(IsInitialized()); return raw_address_ == NULL; } - // Don't do this unless you have access to the heap! - // No, seriously! You can compare and hash and set-ify uniques that were - // all created at the same time; please don't dereference. - Handle<T> handle() { + inline bool IsKnownGlobal(void* global) const { + ASSERT(IsInitialized()); + return raw_address_ == reinterpret_cast<Address>(global); + } + + inline Handle<T> handle() const { return handle_; } + template <class S> static Unique<T> cast(Unique<S> that) { + return Unique<T>(that.raw_address_, Handle<T>::cast(that.handle_)); + } + + inline bool IsInitialized() const { + return raw_address_ != NULL || handle_.is_null(); + } + + // TODO(titzer): this is a hack to migrate to Unique<T> incrementally. + static Unique<T> CreateUninitialized(Handle<T> handle) { + return Unique<T>(reinterpret_cast<Address>(NULL), handle); + } + + static Unique<T> CreateImmovable(Handle<T> handle) { + return Unique<T>(reinterpret_cast<Address>(*handle), handle); + } + friend class UniqueSet<T>; // Uses internal details for speed. template <class U> friend class Unique; // For comparing raw_address values. @@ -120,6 +155,7 @@ class UniqueSet V8_FINAL : public ZoneObject { // Add a new element to this unique set. Mutates this set. O(|this|). void Add(Unique<T> uniq, Zone* zone) { + ASSERT(uniq.IsInitialized()); // Keep the set sorted by the {raw_address} of the unique elements. for (int i = 0; i < size_; i++) { if (array_[i] == uniq) return; @@ -137,8 +173,19 @@ class UniqueSet V8_FINAL : public ZoneObject { array_[size_++] = uniq; } + // Remove an element from this set. Mutates this set. O(|this|) + void Remove(Unique<T> uniq) { + for (int i = 0; i < size_; i++) { + if (array_[i] == uniq) { + while (++i < size_) array_[i - 1] = array_[i]; + size_--; + return; + } + } + } + // Compare this set against another set. O(|this|). - bool Equals(UniqueSet<T>* that) { + bool Equals(UniqueSet<T>* that) const { if (that->size_ != this->size_) return false; for (int i = 0; i < this->size_; i++) { if (this->array_[i] != that->array_[i]) return false; @@ -146,8 +193,18 @@ class UniqueSet V8_FINAL : public ZoneObject { return true; } + // Check whether this set contains the given element. O(|this|) + // TODO(titzer): use binary search for large sets to make this O(log|this|) + template <typename U> + bool Contains(Unique<U> elem) const { + for (int i = 0; i < size_; i++) { + if (this->array_[i] == elem) return true; + } + return false; + } + // Check if this set is a subset of the given set. O(|this| + |that|). 
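A note on the UniqueSet additions above: Add keeps the array sorted by the raw address of the elements, so the new Remove preserves that order by shifting the tail left, and Contains is the linear scan that the in-source TODO suggests could become a binary search. A minimal sketch of these operations over a plain sorted vector (standalone C++, not the zone-allocated V8 class):

#include <cassert>
#include <cstdint>
#include <vector>

class AddressSet {
 public:
  void Add(uintptr_t a) {  // insert in sorted position, ignore duplicates
    size_t i = 0;
    while (i < elems_.size() && elems_[i] < a) i++;
    if (i < elems_.size() && elems_[i] == a) return;
    elems_.insert(elems_.begin() + i, a);
  }
  void Remove(uintptr_t a) {  // O(|this|); shifting keeps the order intact
    for (size_t i = 0; i < elems_.size(); i++) {
      if (elems_[i] == a) { elems_.erase(elems_.begin() + i); return; }
    }
  }
  bool Contains(uintptr_t a) const {  // O(|this|); sortedness would allow O(log)
    for (uintptr_t e : elems_) if (e == a) return true;
    return false;
  }
 private:
  std::vector<uintptr_t> elems_;  // always sorted ascending
};

int main() {
  AddressSet s;
  s.Add(0x30); s.Add(0x10); s.Add(0x20);
  assert(s.Contains(0x20));
  s.Remove(0x20);
  assert(!s.Contains(0x20));
  return 0;
}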
- bool IsSubset(UniqueSet<T>* that) { + bool IsSubset(UniqueSet<T>* that) const { if (that->size_ < this->size_) return false; int j = 0; for (int i = 0; i < this->size_; i++) { @@ -163,7 +220,7 @@ class UniqueSet V8_FINAL : public ZoneObject { // Returns a new set representing the intersection of this set and the other. // O(|this| + |that|). - UniqueSet<T>* Intersect(UniqueSet<T>* that, Zone* zone) { + UniqueSet<T>* Intersect(UniqueSet<T>* that, Zone* zone) const { if (that->size_ == 0 || this->size_ == 0) return new(zone) UniqueSet<T>(); UniqueSet<T>* out = new(zone) UniqueSet<T>(); @@ -190,7 +247,7 @@ class UniqueSet V8_FINAL : public ZoneObject { // Returns a new set representing the union of this set and the other. // O(|this| + |that|). - UniqueSet<T>* Union(UniqueSet<T>* that, Zone* zone) { + UniqueSet<T>* Union(UniqueSet<T>* that, Zone* zone) const { if (that->size_ == 0) return this->Copy(zone); if (this->size_ == 0) return that->Copy(zone); @@ -222,7 +279,7 @@ class UniqueSet V8_FINAL : public ZoneObject { } // Makes an exact copy of this set. O(|this| + |that|). - UniqueSet<T>* Copy(Zone* zone) { + UniqueSet<T>* Copy(Zone* zone) const { UniqueSet<T>* copy = new(zone) UniqueSet<T>(); copy->size_ = this->size_; copy->capacity_ = this->size_; @@ -231,10 +288,19 @@ class UniqueSet V8_FINAL : public ZoneObject { return copy; } - inline int size() { + void Clear() { + size_ = 0; + } + + inline int size() const { return size_; } + inline Unique<T> at(int index) const { + ASSERT(index >= 0 && index < size_); + return array_[index]; + } + private: // These sets should be small, since operations are implemented with simple // linear algorithms. Enforce a maximum size. diff --git a/deps/v8/src/utils.h b/deps/v8/src/utils.h index 4a08319044..062019af46 100644 --- a/deps/v8/src/utils.h +++ b/deps/v8/src/utils.h @@ -419,8 +419,8 @@ class Vector { // Returns a vector using the same backing storage as this one, // spanning from and including 'from', to but not including 'to'. Vector<T> SubVector(int from, int to) { - ASSERT(to <= length_); - ASSERT(from < to); + SLOW_ASSERT(to <= length_); + SLOW_ASSERT(from < to); ASSERT(0 <= from); return Vector<T>(start() + from, to - from); } diff --git a/deps/v8/src/utils/random-number-generator.cc b/deps/v8/src/utils/random-number-generator.cc index 1e03ee2449..fe273315a7 100644 --- a/deps/v8/src/utils/random-number-generator.cc +++ b/deps/v8/src/utils/random-number-generator.cc @@ -28,6 +28,7 @@ #include "utils/random-number-generator.h" #include <cstdio> +#include <cstdlib> #include "flags.h" #include "platform/mutex.h" @@ -67,6 +68,16 @@ RandomNumberGenerator::RandomNumberGenerator() { } } +#if V8_OS_CYGWIN || V8_OS_WIN + // Use rand_s() to gather entropy on Windows. See: + // https://code.google.com/p/v8/issues/detail?id=2905 + unsigned first_half, second_half; + errno_t result = rand_s(&first_half); + ASSERT_EQ(0, result); + result = rand_s(&second_half); + ASSERT_EQ(0, result); + SetSeed((static_cast<int64_t>(first_half) << 32) + second_half); +#else // Gather entropy from /dev/urandom if available. FILE* fp = fopen("/dev/urandom", "rb"); if (fp != NULL) { @@ -82,10 +93,16 @@ RandomNumberGenerator::RandomNumberGenerator() { // We cannot assume that random() or rand() were seeded // properly, so instead of relying on random() or rand(), // we just seed our PRNG using timing data as fallback. 
+ // This is weak entropy, but it's sufficient, because + // it is the responsibility of the embedder to install + // an entropy source using v8::V8::SetEntropySource(), + // which provides reasonable entropy, see: + // https://code.google.com/p/v8/issues/detail?id=2905 int64_t seed = Time::NowFromSystemTime().ToInternalValue() << 24; - seed ^= TimeTicks::HighResNow().ToInternalValue() << 16; + seed ^= TimeTicks::HighResolutionNow().ToInternalValue() << 16; seed ^= TimeTicks::Now().ToInternalValue() << 8; SetSeed(seed); +#endif // V8_OS_CYGWIN || V8_OS_WIN } diff --git a/deps/v8/src/utils/random-number-generator.h b/deps/v8/src/utils/random-number-generator.h index bd7dca7e65..cc7d7395e6 100644 --- a/deps/v8/src/utils/random-number-generator.h +++ b/deps/v8/src/utils/random-number-generator.h @@ -42,6 +42,10 @@ namespace internal { // If two instances of RandomNumberGenerator are created with the same seed, and // the same sequence of method calls is made for each, they will generate and // return identical sequences of numbers. +// This class uses (probably) weak entropy by default, but it's sufficient, +// because it is the responsibility of the embedder to install an entropy source +// using v8::V8::SetEntropySource(), which provides reasonable entropy, see: +// https://code.google.com/p/v8/issues/detail?id=2905 // This class is neither reentrant nor threadsafe. class RandomNumberGenerator V8_FINAL { diff --git a/deps/v8/src/v8-counters.cc b/deps/v8/src/v8-counters.cc index 6711c80203..a0c3ebd07c 100644 --- a/deps/v8/src/v8-counters.cc +++ b/deps/v8/src/v8-counters.cc @@ -76,6 +76,14 @@ Counters::Counters(Isolate* isolate) { StatsCounter(isolate, "c:" "V8.SizeOf_FIXED_ARRAY-" #name); FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(SC) #undef SC + +#define SC(name) \ + count_of_CODE_AGE_##name##_ = \ + StatsCounter(isolate, "c:" "V8.CountOf_CODE_AGE-" #name); \ + size_of_CODE_AGE_##name##_ = \ + StatsCounter(isolate, "c:" "V8.SizeOf_CODE_AGE-" #name); + CODE_AGE_LIST_WITH_NO_AGE(SC) +#undef SC } diff --git a/deps/v8/src/v8-counters.h b/deps/v8/src/v8-counters.h index ff2247cba1..476021cdbb 100644 --- a/deps/v8/src/v8-counters.h +++ b/deps/v8/src/v8-counters.h @@ -51,6 +51,7 @@ namespace internal { HT(compile_lazy, V8.CompileLazy) #define HISTOGRAM_PERCENTAGE_LIST(HP) \ + /* Heap fragmentation. */ \ HP(external_fragmentation_total, \ V8.MemoryExternalFragmentationTotal) \ HP(external_fragmentation_old_pointer_space, \ @@ -67,12 +68,26 @@ namespace internal { V8.MemoryExternalFragmentationPropertyCellSpace) \ HP(external_fragmentation_lo_space, \ V8.MemoryExternalFragmentationLoSpace) \ + /* Percentages of heap committed to each space. */ \ + HP(heap_fraction_new_space, \ + V8.MemoryHeapFractionNewSpace) \ + HP(heap_fraction_old_pointer_space, \ + V8.MemoryHeapFractionOldPointerSpace) \ + HP(heap_fraction_old_data_space, \ + V8.MemoryHeapFractionOldDataSpace) \ + HP(heap_fraction_code_space, \ + V8.MemoryHeapFractionCodeSpace) \ HP(heap_fraction_map_space, \ V8.MemoryHeapFractionMapSpace) \ HP(heap_fraction_cell_space, \ V8.MemoryHeapFractionCellSpace) \ HP(heap_fraction_property_cell_space, \ V8.MemoryHeapFractionPropertyCellSpace) \ + HP(heap_fraction_lo_space, \ + V8.MemoryHeapFractionLoSpace) \ + /* Percentage of crankshafted codegen. 
*/ \ + HP(codegen_fraction_crankshaft, \ + V8.CodegenFractionCrankshaft) \ #define HISTOGRAM_MEMORY_LIST(HM) \ @@ -84,6 +99,8 @@ namespace internal { V8.MemoryHeapSampleCellSpaceCommitted) \ HM(heap_sample_property_cell_space_committed, \ V8.MemoryHeapSamplePropertyCellSpaceCommitted) \ + HM(heap_sample_code_space_committed, \ + V8.MemoryHeapSampleCodeSpaceCommitted) \ // WARNING: STATS_COUNTER_LIST_* is a very large macro that is causing MSVC @@ -204,7 +221,6 @@ namespace internal { SC(enum_cache_hits, V8.EnumCacheHits) \ SC(enum_cache_misses, V8.EnumCacheMisses) \ SC(zone_segment_bytes, V8.ZoneSegmentBytes) \ - SC(generic_binary_stub_calls, V8.GenericBinaryStubCalls) \ SC(fast_new_closure_total, V8.FastNewClosureTotal) \ SC(fast_new_closure_try_optimized, V8.FastNewClosureTryOptimized) \ SC(fast_new_closure_install_optimized, V8.FastNewClosureInstallOptimized) \ @@ -320,6 +336,14 @@ class Counters { FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(SC) #undef SC +#define SC(name) \ + StatsCounter* count_of_CODE_AGE_##name() \ + { return &count_of_CODE_AGE_##name##_; } \ + StatsCounter* size_of_CODE_AGE_##name() \ + { return &size_of_CODE_AGE_##name##_; } + CODE_AGE_LIST_WITH_NO_AGE(SC) +#undef SC + enum Id { #define RATE_ID(name, caption) k_##name, HISTOGRAM_TIMER_LIST(RATE_ID) @@ -345,6 +369,10 @@ class Counters { kSizeOfFIXED_ARRAY__##name, FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(COUNTER_ID) #undef COUNTER_ID +#define COUNTER_ID(name) kCountOfCODE_AGE__##name, \ + kSizeOfCODE_AGE__##name, + CODE_AGE_LIST_WITH_NO_AGE(COUNTER_ID) +#undef COUNTER_ID stats_counter_count }; @@ -390,6 +418,12 @@ class Counters { FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(SC) #undef SC +#define SC(name) \ + StatsCounter size_of_CODE_AGE_##name##_; \ + StatsCounter count_of_CODE_AGE_##name##_; + CODE_AGE_LIST_WITH_NO_AGE(SC) +#undef SC + friend class Isolate; explicit Counters(Isolate* isolate); diff --git a/deps/v8/src/v8.cc b/deps/v8/src/v8.cc index e894164cd1..62330c32d4 100644 --- a/deps/v8/src/v8.cc +++ b/deps/v8/src/v8.cc @@ -227,19 +227,6 @@ void V8::InitializeOncePerProcessImpl() { FLAG_sweeper_threads = 0; } - if (FLAG_parallel_marking) { - if (FLAG_marking_threads <= 0) { - FLAG_marking_threads = SystemThreadManager:: - NumberOfParallelSystemThreads( - SystemThreadManager::PARALLEL_MARKING); - } - if (FLAG_marking_threads == 0) { - FLAG_parallel_marking = false; - } - } else { - FLAG_marking_threads = 0; - } - if (FLAG_concurrent_recompilation && SystemThreadManager::NumberOfParallelSystemThreads( SystemThreadManager::PARALLEL_RECOMPILATION) == 0) { diff --git a/deps/v8/src/v8natives.js b/deps/v8/src/v8natives.js index 76eeac6a58..c42d5c4d35 100644 --- a/deps/v8/src/v8natives.js +++ b/deps/v8/src/v8natives.js @@ -32,7 +32,6 @@ // var $Number = global.Number; // var $Function = global.Function; // var $Array = global.Array; -// var $NaN = 0/0; // // in math.js: // var $floor = MathFloor @@ -95,7 +94,7 @@ function SetUpLockedPrototype(constructor, fields, methods) { } if (fields) { for (var i = 0; i < fields.length; i++) { - %SetProperty(prototype, fields[i], void 0, DONT_ENUM | DONT_DELETE); + %SetProperty(prototype, fields[i], UNDEFINED, DONT_ENUM | DONT_DELETE); } } for (var i = 0; i < methods.length; i += 2) { @@ -148,7 +147,7 @@ function GlobalParseInt(string, radix) { string = TO_STRING_INLINE(string); radix = TO_INT32(radix); if (!(radix == 0 || (2 <= radix && radix <= 36))) { - return $NaN; + return NAN; } } @@ -197,15 +196,16 @@ function GlobalEval(x) { function SetUpGlobal() { %CheckIsBootstrapping(); + var attributes = 
DONT_ENUM | DONT_DELETE | READ_ONLY; + // ECMA 262 - 15.1.1.1. - %SetProperty(global, "NaN", $NaN, DONT_ENUM | DONT_DELETE | READ_ONLY); + %SetProperty(global, "NaN", NAN, attributes); // ECMA-262 - 15.1.1.2. - %SetProperty(global, "Infinity", 1/0, DONT_ENUM | DONT_DELETE | READ_ONLY); + %SetProperty(global, "Infinity", INFINITY, attributes); // ECMA-262 - 15.1.1.3. - %SetProperty(global, "undefined", void 0, - DONT_ENUM | DONT_DELETE | READ_ONLY); + %SetProperty(global, "undefined", UNDEFINED, attributes); // Set up non-enumerable function on the global object. InstallFunctions(global, DONT_ENUM, $Array( @@ -475,12 +475,12 @@ function ToPropertyDescriptor(obj) { function ToCompletePropertyDescriptor(obj) { var desc = ToPropertyDescriptor(obj); if (IsGenericDescriptor(desc) || IsDataDescriptor(desc)) { - if (!desc.hasValue()) desc.setValue(void 0); + if (!desc.hasValue()) desc.setValue(UNDEFINED); if (!desc.hasWritable()) desc.setWritable(false); } else { // Is accessor descriptor. - if (!desc.hasGetter()) desc.setGet(void 0); - if (!desc.hasSetter()) desc.setSet(void 0); + if (!desc.hasGetter()) desc.setGet(UNDEFINED); + if (!desc.hasSetter()) desc.setSet(UNDEFINED); } if (!desc.hasEnumerable()) desc.setEnumerable(false); if (!desc.hasConfigurable()) desc.setConfigurable(false); @@ -491,7 +491,7 @@ function ToCompletePropertyDescriptor(obj) { function PropertyDescriptor() { // Initialize here so they are all in-object and have the same map. // Default values from ES5 8.6.1. - this.value_ = void 0; + this.value_ = UNDEFINED; this.hasValue_ = false; this.writable_ = false; this.hasWritable_ = false; @@ -499,9 +499,9 @@ function PropertyDescriptor() { this.hasEnumerable_ = false; this.configurable_ = false; this.hasConfigurable_ = false; - this.get_ = void 0; + this.get_ = UNDEFINED; this.hasGetter_ = false; - this.set_ = void 0; + this.set_ = UNDEFINED; this.hasSetter_ = false; } @@ -593,7 +593,7 @@ function ConvertDescriptorArrayToDescriptor(desc_array) { } if (IS_UNDEFINED(desc_array)) { - return void 0; + return UNDEFINED; } var desc = new PropertyDescriptor(); @@ -647,10 +647,11 @@ function GetOwnProperty(obj, v) { var p = ToName(v); if (%IsJSProxy(obj)) { // TODO(rossberg): adjust once there is a story for symbols vs proxies. - if (IS_SYMBOL(v)) return void 0; + if (IS_SYMBOL(v)) return UNDEFINED; var handler = %GetHandler(obj); - var descriptor = CallTrap1(handler, "getOwnPropertyDescriptor", void 0, p); + var descriptor = CallTrap1( + handler, "getOwnPropertyDescriptor", UNDEFINED, p); if (IS_UNDEFINED(descriptor)) return descriptor; var desc = ToCompletePropertyDescriptor(descriptor); if (!desc.isConfigurable()) { @@ -666,7 +667,7 @@ function GetOwnProperty(obj, v) { var props = %GetOwnProperty(ToObject(obj), p); // A false value here means that access checks failed. 
- if (props === false) return void 0; + if (props === false) return UNDEFINED; return ConvertDescriptorArrayToDescriptor(props); } @@ -693,7 +694,7 @@ function DefineProxyProperty(obj, p, attributes, should_throw) { if (IS_SYMBOL(p)) return false; var handler = %GetHandler(obj); - var result = CallTrap2(handler, "defineProperty", void 0, p, attributes); + var result = CallTrap2(handler, "defineProperty", UNDEFINED, p, attributes); if (!ToBoolean(result)) { if (should_throw) { throw MakeTypeError("handler_returned_false", @@ -710,7 +711,7 @@ function DefineProxyProperty(obj, p, attributes, should_throw) { function DefineObjectProperty(obj, p, desc, should_throw) { var current_or_access = %GetOwnProperty(ToObject(obj), ToName(p)); // A false value here means that access checks failed. - if (current_or_access === false) return void 0; + if (current_or_access === false) return UNDEFINED; var current = ConvertDescriptorArrayToDescriptor(current_or_access); var extensible = %IsExtensible(ToObject(obj)); @@ -841,7 +842,7 @@ function DefineObjectProperty(obj, p, desc, should_throw) { flag |= READ_ONLY; } - var value = void 0; // Default value is undefined. + var value = UNDEFINED; // Default value is undefined. if (desc.hasValue()) { value = desc.getValue(); } else if (!IS_UNDEFINED(current) && IsDataDescriptor(current)) { @@ -920,7 +921,7 @@ function DefineArrayProperty(obj, p, desc, should_throw) { // For the time being, we need a hack to prevent Object.observe from // generating two change records. obj.length = new_length; - desc.value_ = void 0; + desc.value_ = UNDEFINED; desc.hasValue_ = false; threw = !DefineObjectProperty(obj, "length", desc, should_throw) || threw; if (emit_splice) { @@ -1045,7 +1046,7 @@ function ObjectGetOwnPropertyNames(obj) { // Special handling for proxies. if (%IsJSProxy(obj)) { var handler = %GetHandler(obj); - var names = CallTrap0(handler, "getOwnPropertyNames", void 0); + var names = CallTrap0(handler, "getOwnPropertyNames", UNDEFINED); return ToNameArray(names, "getOwnPropertyNames", false); } @@ -1194,7 +1195,7 @@ function ObjectDefineProperties(obj, properties) { // Harmony proxies. function ProxyFix(obj) { var handler = %GetHandler(obj); - var props = CallTrap0(handler, "fix", void 0); + var props = CallTrap0(handler, "fix", UNDEFINED); if (IS_UNDEFINED(props)) { throw MakeTypeError("handler_returned_undefined", [handler, "fix"]); } @@ -1560,8 +1561,8 @@ function NumberToFixed(fractionDigits) { } if (NUMBER_IS_NAN(x)) return "NaN"; - if (x == 1/0) return "Infinity"; - if (x == -1/0) return "-Infinity"; + if (x == INFINITY) return "Infinity"; + if (x == -INFINITY) return "-Infinity"; return %NumberToFixed(x, f); } @@ -1578,11 +1579,11 @@ function NumberToExponential(fractionDigits) { // Get the value of this number in case it's an object. x = %_ValueOf(this); } - var f = IS_UNDEFINED(fractionDigits) ? void 0 : TO_INTEGER(fractionDigits); + var f = IS_UNDEFINED(fractionDigits) ? UNDEFINED : TO_INTEGER(fractionDigits); if (NUMBER_IS_NAN(x)) return "NaN"; - if (x == 1/0) return "Infinity"; - if (x == -1/0) return "-Infinity"; + if (x == INFINITY) return "Infinity"; + if (x == -INFINITY) return "-Infinity"; if (IS_UNDEFINED(f)) { f = -1; // Signal for runtime function that f is not defined. 
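On the Number.prototype changes above: toFixed, toExponential and toPrecision each filter out NaN and the infinities before reaching the runtime, and toExponential encodes "fractionDigits not supplied" with the sentinel -1. A rough C++ analogue of that guard logic (illustrative names only, not V8's runtime API):

#include <cmath>
#include <cstdio>
#include <limits>
#include <string>

// Mirrors the natives' order of checks: NaN first, then the infinities;
// only finite values fall through to the formatting runtime.
std::string SpecialValueString(double x) {
  if (std::isnan(x)) return "NaN";
  if (std::isinf(x)) return x > 0 ? "Infinity" : "-Infinity";
  return "";  // empty: caller proceeds to the real formatting code
}

int main() {
  double nan = std::numeric_limits<double>::quiet_NaN();
  double ninf = -std::numeric_limits<double>::infinity();
  printf("%s\n", SpecialValueString(nan).c_str());   // NaN
  printf("%s\n", SpecialValueString(ninf).c_str());  // -Infinity
  return 0;
}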
@@ -1608,8 +1609,8 @@ function NumberToPrecision(precision) { var p = TO_INTEGER(precision); if (NUMBER_IS_NAN(x)) return "NaN"; - if (x == 1/0) return "Infinity"; - if (x == -1/0) return "-Infinity"; + if (x == INFINITY) return "Infinity"; + if (x == -INFINITY) return "-Infinity"; if (p < 1 || p > 21) { throw new $RangeError("toPrecision() argument must be between 1 and 21"); @@ -1654,18 +1655,18 @@ function SetUpNumber() { DONT_ENUM | DONT_DELETE | READ_ONLY); // ECMA-262 section 15.7.3.3. - %SetProperty($Number, "NaN", $NaN, DONT_ENUM | DONT_DELETE | READ_ONLY); + %SetProperty($Number, "NaN", NAN, DONT_ENUM | DONT_DELETE | READ_ONLY); // ECMA-262 section 15.7.3.4. %SetProperty($Number, "NEGATIVE_INFINITY", - -1/0, + -INFINITY, DONT_ENUM | DONT_DELETE | READ_ONLY); // ECMA-262 section 15.7.3.5. %SetProperty($Number, "POSITIVE_INFINITY", - 1/0, + INFINITY, DONT_ENUM | DONT_DELETE | READ_ONLY); %ToFastProperties($Number); diff --git a/deps/v8/src/v8threads.cc b/deps/v8/src/v8threads.cc index 33b620d8ea..cc4f43965f 100644 --- a/deps/v8/src/v8threads.cc +++ b/deps/v8/src/v8threads.cc @@ -42,11 +42,6 @@ namespace v8 { bool Locker::active_ = false; -Locker::Locker() { - Initialize(i::Isolate::GetDefaultIsolateForLocking()); -} - - // Once the Locker is initialized, the current thread will be guaranteed to have // the lock for a given isolate. void Locker::Initialize(v8::Isolate* isolate) { @@ -116,11 +111,6 @@ Locker::~Locker() { } -Unlocker::Unlocker() { - Initialize(i::Isolate::GetDefaultIsolateForLocking()); -} - - void Unlocker::Initialize(v8::Isolate* isolate) { ASSERT(isolate != NULL); isolate_ = reinterpret_cast<i::Isolate*>(isolate); @@ -143,14 +133,15 @@ Unlocker::~Unlocker() { } -void Locker::StartPreemption(int every_n_ms) { +void Locker::StartPreemption(v8::Isolate* isolate, int every_n_ms) { v8::internal::ContextSwitcher::StartPreemption( - i::Isolate::Current(), every_n_ms); + reinterpret_cast<i::Isolate*>(isolate), every_n_ms); } -void Locker::StopPreemption() { - v8::internal::ContextSwitcher::StopPreemption(i::Isolate::Current()); +void Locker::StopPreemption(v8::Isolate* isolate) { + v8::internal::ContextSwitcher::StopPreemption( + reinterpret_cast<i::Isolate*>(isolate)); } @@ -481,7 +472,6 @@ void ContextSwitcher::Run() { // Acknowledge the preemption by the receiving thread. void ContextSwitcher::PreemptionReceived() { - ASSERT(Locker::IsLocked(i::Isolate::GetDefaultIsolateForLocking())); // There is currently no accounting being done for this. But could be in the // future, which is why we leave this in. } diff --git a/deps/v8/src/v8utils.h b/deps/v8/src/v8utils.h index fd3f4a5095..02e57ebe72 100644 --- a/deps/v8/src/v8utils.h +++ b/deps/v8/src/v8utils.h @@ -194,61 +194,6 @@ inline void CopyBytes(T* dst, const T* src, size_t num_bytes) { } -// Copies data from |src| to |dst|. No restrictions. 
-template <typename T> -inline void MoveBytes(T* dst, const T* src, size_t num_bytes) { - STATIC_ASSERT(sizeof(T) == 1); - switch (num_bytes) { - case 0: return; - case 1: - *dst = *src; - return; -#ifdef V8_HOST_CAN_READ_UNALIGNED - case 2: - *reinterpret_cast<uint16_t*>(dst) = *reinterpret_cast<const uint16_t*>(src); - return; - case 3: { - uint16_t part1 = *reinterpret_cast<const uint16_t*>(src); - byte part2 = *(src + 2); - *reinterpret_cast<uint16_t*>(dst) = part1; - *(dst + 2) = part2; - return; - } - case 4: - *reinterpret_cast<uint32_t*>(dst) = *reinterpret_cast<const uint32_t*>(src); - return; - case 5: - case 6: - case 7: - case 8: { - uint32_t part1 = *reinterpret_cast<const uint32_t*>(src); - uint32_t part2 = *reinterpret_cast<const uint32_t*>(src + num_bytes - 4); - *reinterpret_cast<uint32_t*>(dst) = part1; - *reinterpret_cast<uint32_t*>(dst + num_bytes - 4) = part2; - return; - } - case 9: - case 10: - case 11: - case 12: - case 13: - case 14: - case 15: - case 16: { - double part1 = *reinterpret_cast<const double*>(src); - double part2 = *reinterpret_cast<const double*>(src + num_bytes - 8); - *reinterpret_cast<double*>(dst) = part1; - *reinterpret_cast<double*>(dst + num_bytes - 8) = part2; - return; - } -#endif - default: - OS::MemMove(dst, src, num_bytes); - return; - } -} - - template <typename T, typename U> inline void MemsetPointer(T** dest, U* value, int counter) { #ifdef DEBUG diff --git a/deps/v8/src/version.cc b/deps/v8/src/version.cc index a65b54f674..6d4efa2261 100644 --- a/deps/v8/src/version.cc +++ b/deps/v8/src/version.cc @@ -33,9 +33,9 @@ // NOTE these macros are used by some of the tool scripts and the build // system so their names cannot be changed without changing the scripts. #define MAJOR_VERSION 3 -#define MINOR_VERSION 21 -#define BUILD_NUMBER 18 -#define PATCH_LEVEL 3 +#define MINOR_VERSION 22 +#define BUILD_NUMBER 24 +#define PATCH_LEVEL 0 // Use 1 for candidates and 0 otherwise. // (Boolean macro values are not supported by all preprocessors.) #define IS_CANDIDATE_VERSION 0 diff --git a/deps/v8/src/win32-math.cc b/deps/v8/src/win32-math.cc index 88fa3a684b..8f6d077431 100644 --- a/deps/v8/src/win32-math.cc +++ b/deps/v8/src/win32-math.cc @@ -29,7 +29,7 @@ // refer to The Open Group Base Specification for specification of the correct // semantics for these functions. // (http://www.opengroup.org/onlinepubs/000095399/) -#ifdef _MSC_VER +#if defined(_MSC_VER) && (_MSC_VER < 1800) #include "win32-headers.h" #include <limits.h> // Required for INT_MAX etc. diff --git a/deps/v8/src/win32-math.h b/deps/v8/src/win32-math.h index 0397c7e14e..fd9312b0f5 100644 --- a/deps/v8/src/win32-math.h +++ b/deps/v8/src/win32-math.h @@ -37,6 +37,8 @@ #error Wrong environment, expected MSVC. #endif // _MSC_VER +// MSVC 2013+ provides implementations of all standard math functions. 
+#if (_MSC_VER < 1800) enum { FP_NAN, FP_INFINITE, @@ -58,4 +60,6 @@ int signbit(double x); } // namespace std +#endif // _MSC_VER < 1800 + #endif // V8_WIN32_MATH_H_ diff --git a/deps/v8/src/x64/assembler-x64-inl.h b/deps/v8/src/x64/assembler-x64-inl.h index 07d07033e9..afac886c73 100644 --- a/deps/v8/src/x64/assembler-x64-inl.h +++ b/deps/v8/src/x64/assembler-x64-inl.h @@ -43,6 +43,7 @@ namespace internal { static const byte kCallOpcode = 0xE8; +static const int kNoCodeAgeSequenceLength = 6; void Assembler::emitl(uint32_t x) { @@ -61,11 +62,8 @@ void Assembler::emitp(void* x, RelocInfo::Mode rmode) { } -void Assembler::emitq(uint64_t x, RelocInfo::Mode rmode) { +void Assembler::emitq(uint64_t x) { Memory::uint64_at(pc_) = x; - if (!RelocInfo::IsNone(rmode)) { - RecordRelocInfo(rmode, x); - } pc_ += sizeof(uint64_t); } @@ -79,7 +77,8 @@ void Assembler::emitw(uint16_t x) { void Assembler::emit_code_target(Handle<Code> target, RelocInfo::Mode rmode, TypeFeedbackId ast_id) { - ASSERT(RelocInfo::IsCodeTarget(rmode)); + ASSERT(RelocInfo::IsCodeTarget(rmode) || + rmode == RelocInfo::CODE_AGE_SEQUENCE); if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) { RecordRelocInfo(RelocInfo::CODE_TARGET_WITH_ID, ast_id.ToInt()); } else { @@ -392,6 +391,13 @@ bool RelocInfo::IsPatchedDebugBreakSlotSequence() { } +Handle<Object> RelocInfo::code_age_stub_handle(Assembler* origin) { + ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE); + ASSERT(*pc_ == kCallOpcode); + return origin->code_target_object_handle_at(pc_ + 1); +} + + Code* RelocInfo::code_age_stub() { ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE); ASSERT(*pc_ == kCallOpcode); diff --git a/deps/v8/src/x64/assembler-x64.cc b/deps/v8/src/x64/assembler-x64.cc index 41bf297b38..dcb9fa5621 100644 --- a/deps/v8/src/x64/assembler-x64.cc +++ b/deps/v8/src/x64/assembler-x64.cc @@ -44,7 +44,7 @@ bool CpuFeatures::initialized_ = false; #endif uint64_t CpuFeatures::supported_ = CpuFeatures::kDefaultCpuFeatures; uint64_t CpuFeatures::found_by_runtime_probing_only_ = 0; - +uint64_t CpuFeatures::cross_compile_ = 0; ExternalReference ExternalReference::cpu_features() { ASSERT(CpuFeatures::initialized_); @@ -110,8 +110,8 @@ void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) { #endif // Patch the code. - patcher.masm()->movq(r10, target, RelocInfo::NONE64); - patcher.masm()->call(r10); + patcher.masm()->movq(kScratchRegister, target, RelocInfo::NONE64); + patcher.masm()->call(kScratchRegister); // Check that the size of the code generated is as expected. ASSERT_EQ(Assembler::kCallSequenceLength, @@ -1465,26 +1465,24 @@ void Assembler::movq(Register dst, void* value, RelocInfo::Mode rmode) { void Assembler::movq(Register dst, int64_t value, RelocInfo::Mode rmode) { // Non-relocatable values might not need a 64-bit representation. - if (RelocInfo::IsNone(rmode)) { - if (is_uint32(value)) { - movl(dst, Immediate(static_cast<int32_t>(value))); - return; - } else if (is_int32(value)) { - movq(dst, Immediate(static_cast<int32_t>(value))); - return; - } + ASSERT(RelocInfo::IsNone(rmode)); + if (is_uint32(value)) { + movl(dst, Immediate(static_cast<int32_t>(value))); + } else if (is_int32(value)) { + movq(dst, Immediate(static_cast<int32_t>(value))); + } else { // Value cannot be represented by 32 bits, so do a full 64 bit immediate // value. 
+ EnsureSpace ensure_space(this); + emit_rex_64(dst); + emit(0xB8 | dst.low_bits()); + emitq(value); } - EnsureSpace ensure_space(this); - emit_rex_64(dst); - emit(0xB8 | dst.low_bits()); - emitq(value, rmode); } void Assembler::movq(Register dst, ExternalReference ref) { - int64_t value = reinterpret_cast<int64_t>(ref.address()); + Address value = reinterpret_cast<Address>(ref.address()); movq(dst, value, RelocInfo::EXTERNAL_REFERENCE); } @@ -1899,7 +1897,7 @@ void Assembler::shrd(Register dst, Register src) { } -void Assembler::xchg(Register dst, Register src) { +void Assembler::xchgq(Register dst, Register src) { EnsureSpace ensure_space(this); if (src.is(rax) || dst.is(rax)) { // Single-byte encoding Register other = src.is(rax) ? dst : src; @@ -1917,6 +1915,24 @@ void Assembler::xchg(Register dst, Register src) { } +void Assembler::xchgl(Register dst, Register src) { + EnsureSpace ensure_space(this); + if (src.is(rax) || dst.is(rax)) { // Single-byte encoding + Register other = src.is(rax) ? dst : src; + emit_optional_rex_32(other); + emit(0x90 | other.low_bits()); + } else if (dst.low_bits() == 4) { + emit_optional_rex_32(dst, src); + emit(0x87); + emit_modrm(dst, src); + } else { + emit_optional_rex_32(src, dst); + emit(0x87); + emit_modrm(src, dst); + } +} + + void Assembler::store_rax(void* dst, RelocInfo::Mode mode) { EnsureSpace ensure_space(this); emit(0x48); // REX.W @@ -2035,6 +2051,14 @@ void Assembler::testl(const Operand& op, Immediate mask) { } +void Assembler::testl(const Operand& op, Register reg) { + EnsureSpace ensure_space(this); + emit_optional_rex_32(reg, op); + emit(0x85); + emit_operand(reg, op); +} + + void Assembler::testq(const Operand& op, Register reg) { EnsureSpace ensure_space(this); emit_rex_64(reg, op); @@ -2058,6 +2082,10 @@ void Assembler::testq(Register dst, Register src) { void Assembler::testq(Register dst, Immediate mask) { + if (is_uint8(mask.value_)) { + testb(dst, mask); + return; + } EnsureSpace ensure_space(this); if (dst.is(rax)) { emit_rex_64(); @@ -2448,6 +2476,17 @@ void Assembler::emit_farith(int b1, int b2, int i) { } +// SSE operations. + +void Assembler::andps(XMMRegister dst, XMMRegister src) { + EnsureSpace ensure_space(this); + emit_optional_rex_32(dst, src); + emit(0x0F); + emit(0x54); + emit_sse_operand(dst, src); +} + + // SSE 2 operations. void Assembler::movd(XMMRegister dst, Register src) { @@ -2550,15 +2589,15 @@ void Assembler::movdqu(XMMRegister dst, const Operand& src) { void Assembler::extractps(Register dst, XMMRegister src, byte imm8) { - ASSERT(CpuFeatures::IsSupported(SSE4_1)); + ASSERT(IsEnabled(SSE4_1)); ASSERT(is_uint8(imm8)); EnsureSpace ensure_space(this); emit(0x66); - emit_optional_rex_32(dst, src); + emit_optional_rex_32(src, dst); emit(0x0F); emit(0x3A); emit(0x17); - emit_sse_operand(dst, src); + emit_sse_operand(src, dst); emit(imm8); } @@ -3000,8 +3039,8 @@ void Assembler::dd(uint32_t data) { void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) { ASSERT(!RelocInfo::IsNone(rmode)); - // Don't record external references unless the heap will be serialized. if (rmode == RelocInfo::EXTERNAL_REFERENCE) { + // Don't record external references unless the heap will be serialized. 
#ifdef DEBUG if (!Serializer::enabled()) { Serializer::TooLateToEnableNow(); } #endif if (!Serializer::enabled() && !emit_debug_code()) { return; } + } else if (rmode == RelocInfo::CODE_AGE_SEQUENCE) { + // Don't record pseudo relocation info for code age sequence mode. + return; } RelocInfo rinfo(pc_, rmode, data, NULL); reloc_info_writer.Write(&rinfo); } diff --git a/deps/v8/src/x64/assembler-x64.h b/deps/v8/src/x64/assembler-x64.h index f2e37fe863..508c622112 100644 --- a/deps/v8/src/x64/assembler-x64.h +++ b/deps/v8/src/x64/assembler-x64.h @@ -471,26 +471,45 @@ class CpuFeatures : public AllStatic { // Check whether a feature is supported by the target CPU. static bool IsSupported(CpuFeature f) { + if (Check(f, cross_compile_)) return true; ASSERT(initialized_); if (f == SSE3 && !FLAG_enable_sse3) return false; if (f == SSE4_1 && !FLAG_enable_sse4_1) return false; if (f == CMOV && !FLAG_enable_cmov) return false; if (f == SAHF && !FLAG_enable_sahf) return false; - return (supported_ & (static_cast<uint64_t>(1) << f)) != 0; + return Check(f, supported_); } static bool IsFoundByRuntimeProbingOnly(CpuFeature f) { ASSERT(initialized_); - return (found_by_runtime_probing_only_ & - (static_cast<uint64_t>(1) << f)) != 0; + return Check(f, found_by_runtime_probing_only_); } static bool IsSafeForSnapshot(CpuFeature f) { - return (IsSupported(f) && + return Check(f, cross_compile_) || + (IsSupported(f) && (!Serializer::enabled() || !IsFoundByRuntimeProbingOnly(f))); } + static bool VerifyCrossCompiling() { + return cross_compile_ == 0; + } + + static bool VerifyCrossCompiling(CpuFeature f) { + uint64_t mask = flag2set(f); + return cross_compile_ == 0 || + (cross_compile_ & mask) == mask; + } + private: + static bool Check(CpuFeature f, uint64_t set) { + return (set & flag2set(f)) != 0; + } + + static uint64_t flag2set(CpuFeature f) { + return static_cast<uint64_t>(1) << f; + } + // Safe defaults include CMOV for X64. It is always available, if // anyone checks, but they shouldn't need to check. // The required user mode extensions in X64 are (from AMD64 ABI Table A.1): @@ -503,7 +522,10 @@ class CpuFeatures : public AllStatic { static uint64_t supported_; static uint64_t found_by_runtime_probing_only_; + static uint64_t cross_compile_; + friend class ExternalReference; + friend class PlatformFeatureScope; DISALLOW_COPY_AND_ASSIGN(CpuFeatures); }; @@ -701,7 +723,6 @@ class Assembler : public AssemblerBase { // All 64-bit immediates must have a relocation mode. void movq(Register dst, void* ptr, RelocInfo::Mode rmode); void movq(Register dst, int64_t value, RelocInfo::Mode rmode); - void movq(Register dst, const char* s, RelocInfo::Mode rmode); // Moves the address of the external reference into the register.
void movq(Register dst, ExternalReference ext); void movq(Register dst, Handle<Object> handle, RelocInfo::Mode rmode); @@ -734,7 +755,8 @@ class Assembler : public AssemblerBase { void cmovl(Condition cc, Register dst, const Operand& src); // Exchange two registers - void xchg(Register dst, Register src); + void xchgq(Register dst, Register src); + void xchgl(Register dst, Register src); // Arithmetics void addl(Register dst, Register src) { @@ -969,6 +991,10 @@ class Assembler : public AssemblerBase { arithmetic_op(0x09, src, dst); } + void orl(const Operand& dst, Register src) { + arithmetic_op_32(0x09, src, dst); + } + void or_(Register dst, Immediate src) { immediate_arithmetic_op(0x1, dst, src); } @@ -994,6 +1020,10 @@ class Assembler : public AssemblerBase { shift(dst, imm8, 0x0); } + void roll(Register dst, Immediate imm8) { + shift_32(dst, imm8, 0x0); + } + void rcr(Register dst, Immediate imm8) { shift(dst, imm8, 0x3); } @@ -1101,6 +1131,10 @@ class Assembler : public AssemblerBase { arithmetic_op_32(0x2B, dst, src); } + void subl(const Operand& dst, Register src) { + arithmetic_op_32(0x29, src, dst); + } + void subl(const Operand& dst, Immediate src) { immediate_arithmetic_op_32(0x5, dst, src); } @@ -1119,6 +1153,7 @@ class Assembler : public AssemblerBase { void testb(const Operand& op, Register reg); void testl(Register dst, Register src); void testl(Register reg, Immediate mask); + void testl(const Operand& op, Register reg); void testl(const Operand& op, Immediate mask); void testq(const Operand& op, Register reg); void testq(Register dst, Register src); @@ -1144,6 +1179,10 @@ class Assembler : public AssemblerBase { immediate_arithmetic_op_32(0x6, dst, src); } + void xorl(const Operand& dst, Register src) { + arithmetic_op_32(0x31, src, dst); + } + void xorl(const Operand& dst, Immediate src) { immediate_arithmetic_op_32(0x6, dst, src); } @@ -1307,13 +1346,26 @@ class Assembler : public AssemblerBase { void sahf(); + // SSE instructions + void movaps(XMMRegister dst, XMMRegister src); + void movss(XMMRegister dst, const Operand& src); + void movss(const Operand& dst, XMMRegister src); + + void cvttss2si(Register dst, const Operand& src); + void cvttss2si(Register dst, XMMRegister src); + void cvtlsi2ss(XMMRegister dst, Register src); + + void xorps(XMMRegister dst, XMMRegister src); + void andps(XMMRegister dst, XMMRegister src); + + void movmskps(Register dst, XMMRegister src); + // SSE2 instructions void movd(XMMRegister dst, Register src); void movd(Register dst, XMMRegister src); void movq(XMMRegister dst, Register src); void movq(Register dst, XMMRegister src); void movq(XMMRegister dst, XMMRegister src); - void extractps(Register dst, XMMRegister src, byte imm8); // Don't use this unless it's important to keep the // top half of the destination register unchanged. 
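A side note on the xchg split above (xchg becomes xchgq and xchgl): the emitters special-case rax because exchanging with rax/eax has a compact one-byte 0x90+reg encoding, while the general register-register form needs the 0x87 opcode plus a ModR/M byte; 0x90 itself, xchg eax, eax, is the canonical x86 NOP. A simplified standalone encoder for the 32-bit case, mirroring the shape of Assembler::xchgl but omitting the REX-prefix and operand-ordering special cases:

#include <cstdint>
#include <cstdio>
#include <vector>

const int kEax = 0;  // low three bits of the register number

std::vector<uint8_t> EncodeXchg32(int dst, int src) {
  std::vector<uint8_t> code;
  if (dst == kEax || src == kEax) {
    int other = (src == kEax) ? dst : src;
    code.push_back(0x90 | (other & 7));  // one byte: xchg eax, r32
  } else {
    code.push_back(0x87);                // xchg r/m32, r32
    code.push_back(0xC0 | ((src & 7) << 3) | (dst & 7));  // reg-reg ModR/M
  }
  return code;
}

int main() {
  std::vector<uint8_t> a = EncodeXchg32(kEax, 3);  // xchg eax, ebx -> 93
  std::vector<uint8_t> b = EncodeXchg32(1, 3);     // xchg ecx, ebx -> 87 D9
  printf("%02X  %02X %02X\n", a[0], b[0], b[1]);
  return 0;
}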
@@ -1331,13 +1383,7 @@ class Assembler : public AssemblerBase { void movdqu(XMMRegister dst, const Operand& src); void movapd(XMMRegister dst, XMMRegister src); - void movaps(XMMRegister dst, XMMRegister src); - - void movss(XMMRegister dst, const Operand& src); - void movss(const Operand& dst, XMMRegister src); - void cvttss2si(Register dst, const Operand& src); - void cvttss2si(Register dst, XMMRegister src); void cvttsd2si(Register dst, const Operand& src); void cvttsd2si(Register dst, XMMRegister src); void cvttsd2siq(Register dst, XMMRegister src); @@ -1347,7 +1393,6 @@ class Assembler : public AssemblerBase { void cvtqsi2sd(XMMRegister dst, const Operand& src); void cvtqsi2sd(XMMRegister dst, Register src); - void cvtlsi2ss(XMMRegister dst, Register src); void cvtss2sd(XMMRegister dst, XMMRegister src); void cvtss2sd(XMMRegister dst, const Operand& src); @@ -1366,11 +1411,16 @@ class Assembler : public AssemblerBase { void andpd(XMMRegister dst, XMMRegister src); void orpd(XMMRegister dst, XMMRegister src); void xorpd(XMMRegister dst, XMMRegister src); - void xorps(XMMRegister dst, XMMRegister src); void sqrtsd(XMMRegister dst, XMMRegister src); void ucomisd(XMMRegister dst, XMMRegister src); void ucomisd(XMMRegister dst, const Operand& src); + void cmpltsd(XMMRegister dst, XMMRegister src); + + void movmskpd(Register dst, XMMRegister src); + + // SSE 4.1 instruction + void extractps(Register dst, XMMRegister src, byte imm8); enum RoundingMode { kRoundToNearest = 0x0, @@ -1381,17 +1431,6 @@ class Assembler : public AssemblerBase { void roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode); - void movmskpd(Register dst, XMMRegister src); - void movmskps(Register dst, XMMRegister src); - - void cmpltsd(XMMRegister dst, XMMRegister src); - - // The first argument is the reg field, the second argument is the r/m field. - void emit_sse_operand(XMMRegister dst, XMMRegister src); - void emit_sse_operand(XMMRegister reg, const Operand& adr); - void emit_sse_operand(XMMRegister dst, Register src); - void emit_sse_operand(Register dst, XMMRegister src); - // Debugging void Print(); @@ -1452,7 +1491,7 @@ class Assembler : public AssemblerBase { void emit(byte x) { *pc_++ = x; } inline void emitl(uint32_t x); inline void emitp(void* x, RelocInfo::Mode rmode); - inline void emitq(uint64_t x, RelocInfo::Mode rmode); + inline void emitq(uint64_t x); inline void emitw(uint16_t x); inline void emit_code_target(Handle<Code> target, RelocInfo::Mode rmode, @@ -1572,6 +1611,12 @@ class Assembler : public AssemblerBase { // Emit the code-object-relative offset of the label's position inline void emit_code_relative_offset(Label* label); + // The first argument is the reg field, the second argument is the r/m field. + void emit_sse_operand(XMMRegister dst, XMMRegister src); + void emit_sse_operand(XMMRegister reg, const Operand& adr); + void emit_sse_operand(XMMRegister dst, Register src); + void emit_sse_operand(Register dst, XMMRegister src); + // Emit machine code for one of the operations ADD, ADC, SUB, SBC, // AND, OR, XOR, or CMP. The encodings of these operations are all // similar, differing just in the opcode or in the reg field of the diff --git a/deps/v8/src/x64/builtins-x64.cc b/deps/v8/src/x64/builtins-x64.cc index 81721c25e1..f65b25c652 100644 --- a/deps/v8/src/x64/builtins-x64.cc +++ b/deps/v8/src/x64/builtins-x64.cc @@ -600,6 +600,8 @@ static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) { // the stub returns. 
__ subq(Operand(rsp, 0), Immediate(5)); __ Pushad(); + __ movq(arg_reg_2, + ExternalReference::isolate_address(masm->isolate())); __ movq(arg_reg_1, Operand(rsp, kNumSafepointRegisters * kPointerSize)); { // NOLINT FrameScope scope(masm, StackFrame::MANUAL); @@ -625,6 +627,42 @@ CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR) #undef DEFINE_CODE_AGE_BUILTIN_GENERATOR +void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) { + // For now, as in GenerateMakeCodeYoungAgainCommon, we are relying on the fact + // that make_code_young doesn't do any garbage collection which allows us to + // save/restore the registers without worrying about which of them contain + // pointers. + __ Pushad(); + __ movq(arg_reg_2, ExternalReference::isolate_address(masm->isolate())); + __ movq(arg_reg_1, Operand(rsp, kNumSafepointRegisters * kPointerSize)); + __ subq(arg_reg_1, Immediate(Assembler::kShortCallInstructionLength)); + { // NOLINT + FrameScope scope(masm, StackFrame::MANUAL); + __ PrepareCallCFunction(1); + __ CallCFunction( + ExternalReference::get_mark_code_as_executed_function(masm->isolate()), + 1); + } + __ Popad(); + + // Perform prologue operations usually performed by the young code stub. + __ PopReturnAddressTo(kScratchRegister); + __ push(rbp); // Caller's frame pointer. + __ movq(rbp, rsp); + __ push(rsi); // Callee's context. + __ push(rdi); // Callee's JS Function. + __ PushReturnAddressFrom(kScratchRegister); + + // Jump to point after the code-age stub. + __ ret(0); +} + + +void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) { + GenerateMakeCodeYoungAgainCommon(masm); +} + + void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) { // Enter an internal frame. { @@ -658,17 +696,17 @@ static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm, } // Get the full codegen state from the stack and untag it. - __ SmiToInteger32(r10, Operand(rsp, kPCOnStackSize)); + __ SmiToInteger32(kScratchRegister, Operand(rsp, kPCOnStackSize)); // Switch on the state. Label not_no_registers, not_tos_rax; - __ cmpq(r10, Immediate(FullCodeGenerator::NO_REGISTERS)); + __ cmpq(kScratchRegister, Immediate(FullCodeGenerator::NO_REGISTERS)); __ j(not_equal, &not_no_registers, Label::kNear); __ ret(1 * kPointerSize); // Remove state. __ bind(&not_no_registers); __ movq(rax, Operand(rsp, kPCOnStackSize + kPointerSize)); - __ cmpq(r10, Immediate(FullCodeGenerator::TOS_REG)); + __ cmpq(kScratchRegister, Immediate(FullCodeGenerator::TOS_REG)); __ j(not_equal, &not_tos_rax, Label::kNear); __ ret(2 * kPointerSize); // Remove state, rax. @@ -692,21 +730,6 @@ void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) { } -void Builtins::Generate_NotifyOSR(MacroAssembler* masm) { - // For now, we are relying on the fact that Runtime::NotifyOSR - // doesn't do any garbage collection which allows us to save/restore - // the registers without worrying about which of them contain - // pointers. This seems a bit fragile.
- __ Pushad(); - { - FrameScope scope(masm, StackFrame::INTERNAL); - __ CallRuntime(Runtime::kNotifyOSR, 0); - } - __ Popad(); - __ ret(0); -} - - void Builtins::Generate_FunctionCall(MacroAssembler* masm) { // Stack Layout: // rsp[0] : Return address @@ -894,9 +917,9 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) { // rbp[16] : function arguments // rbp[24] : receiver // rbp[32] : function - static const int kArgumentsOffset = 2 * kPointerSize; - static const int kReceiverOffset = 3 * kPointerSize; - static const int kFunctionOffset = 4 * kPointerSize; + static const int kArgumentsOffset = kFPOnStackSize + kPCOnStackSize; + static const int kReceiverOffset = kArgumentsOffset + kPointerSize; + static const int kFunctionOffset = kReceiverOffset + kPointerSize; __ push(Operand(rbp, kFunctionOffset)); __ push(Operand(rbp, kArgumentsOffset)); @@ -1140,13 +1163,11 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) { // Lookup the argument in the number to string cache. Label not_cached, argument_is_string; - NumberToStringStub::GenerateLookupNumberStringCache( - masm, - rax, // Input. - rbx, // Result. - rcx, // Scratch 1. - rdx, // Scratch 2. - &not_cached); + __ LookupNumberStringCache(rax, // Input. + rbx, // Result. + rcx, // Scratch 1. + rdx, // Scratch 2. + &not_cached); __ IncrementCounter(counters->string_ctor_cached_number(), 1); __ bind(&argument_is_string); @@ -1401,6 +1422,23 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) { } +void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) { + // We check the stack limit as indicator that recompilation might be done. + Label ok; + __ CompareRoot(rsp, Heap::kStackLimitRootIndex); + __ j(above_equal, &ok); + { + FrameScope scope(masm, StackFrame::INTERNAL); + __ CallRuntime(Runtime::kStackGuard, 0); + } + __ jmp(masm->isolate()->builtins()->OnStackReplacement(), + RelocInfo::CODE_TARGET); + + __ bind(&ok); + __ ret(0); +} + + #undef __ } } // namespace v8::internal diff --git a/deps/v8/src/x64/code-stubs-x64.cc b/deps/v8/src/x64/code-stubs-x64.cc index 51e1a5395c..b3ab8c1e75 100644 --- a/deps/v8/src/x64/code-stubs-x64.cc +++ b/deps/v8/src/x64/code-stubs-x64.cc @@ -60,6 +60,17 @@ void ToNumberStub::InitializeInterfaceDescriptor( } +void NumberToStringStub::InitializeInterfaceDescriptor( + Isolate* isolate, + CodeStubInterfaceDescriptor* descriptor) { + static Register registers[] = { rax }; + descriptor->register_param_count_ = 1; + descriptor->register_params_ = registers; + descriptor->deoptimization_handler_ = + Runtime::FunctionForId(Runtime::kNumberToString)->entry; +} + + void FastCloneShallowArrayStub::InitializeInterfaceDescriptor( Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { @@ -78,7 +89,7 @@ void FastCloneShallowObjectStub::InitializeInterfaceDescriptor( descriptor->register_param_count_ = 4; descriptor->register_params_ = registers; descriptor->deoptimization_handler_ = - Runtime::FunctionForId(Runtime::kCreateObjectLiteralShallow)->entry; + Runtime::FunctionForId(Runtime::kCreateObjectLiteral)->entry; } @@ -145,6 +156,18 @@ void TransitionElementsKindStub::InitializeInterfaceDescriptor( } +void BinaryOpStub::InitializeInterfaceDescriptor( + Isolate* isolate, + CodeStubInterfaceDescriptor* descriptor) { + static Register registers[] = { rdx, rax }; + descriptor->register_param_count_ = 2; + descriptor->register_params_ = registers; + descriptor->deoptimization_handler_ = FUNCTION_ADDR(BinaryOpIC_Miss); + descriptor->SetMissHandler(
ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate)); +} + + static void InitializeArrayConstructorDescriptor( Isolate* isolate, CodeStubInterfaceDescriptor* descriptor, @@ -157,7 +180,7 @@ static void InitializeArrayConstructorDescriptor( descriptor->register_param_count_ = 2; if (constant_stack_parameter_count != 0) { // stack param count needs (constructor pointer, and single argument) - descriptor->stack_parameter_count_ = &rax; + descriptor->stack_parameter_count_ = rax; } descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count; descriptor->register_params_ = registers; @@ -179,7 +202,7 @@ static void InitializeInternalArrayConstructorDescriptor( if (constant_stack_parameter_count != 0) { // stack param count needs (constructor pointer, and single argument) - descriptor->stack_parameter_count_ = &rax; + descriptor->stack_parameter_count_ = rax; } descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count; descriptor->register_params_ = registers; @@ -437,35 +460,8 @@ class FloatingPointHelper : public AllStatic { // If the operands are not both numbers, jump to not_numbers. // Leaves rdx and rax unchanged. SmiOperands assumes both are smis. // NumberOperands assumes both are smis or heap numbers. - static void LoadSSE2SmiOperands(MacroAssembler* masm); static void LoadSSE2UnknownOperands(MacroAssembler* masm, Label* not_numbers); - - // Takes the operands in rdx and rax and loads them as integers in rax - // and rcx. - static void LoadAsIntegers(MacroAssembler* masm, - Label* operand_conversion_failure, - Register heap_number_map); - - // Tries to convert two values to smis losslessly. - // This fails if either argument is not a Smi nor a HeapNumber, - // or if it's a HeapNumber with a value that can't be converted - // losslessly to a Smi. In that case, control transitions to the - // on_not_smis label. - // On success, either control goes to the on_success label (if one is - // provided), or it falls through at the end of the code (if on_success - // is NULL). - // On success, both first and second holds Smi tagged values. - // One of first or second must be non-Smi when entering. - static void NumbersToSmis(MacroAssembler* masm, - Register first, - Register second, - Register scratch1, - Register scratch2, - Register scratch3, - Label* on_success, - Label* on_not_smis, - ConvertUndefined convert_undefined); }; @@ -553,569 +549,6 @@ void DoubleToIStub::Generate(MacroAssembler* masm) { } -void BinaryOpStub::Initialize() {} - - -void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) { - __ PopReturnAddressTo(rcx); - __ push(rdx); - __ push(rax); - // Left and right arguments are now on top. - __ Push(Smi::FromInt(MinorKey())); - - __ PushReturnAddressFrom(rcx); - - // Patch the caller to an appropriate specialized stub and return the - // operation result to the caller of the stub. - __ TailCallExternalReference( - ExternalReference(IC_Utility(IC::kBinaryOp_Patch), - masm->isolate()), - 3, - 1); -} - - -static void BinaryOpStub_GenerateSmiCode( - MacroAssembler* masm, - Label* slow, - BinaryOpStub::SmiCodeGenerateHeapNumberResults allow_heapnumber_results, - Token::Value op) { - - // Arguments to BinaryOpStub are in rdx and rax. - const Register left = rdx; - const Register right = rax; - - // We only generate heapnumber answers for overflowing calculations - // for the four basic arithmetic operations and logical right shift by 0. 
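To make the overflow cases named in that comment concrete (an editorial aside, using the 32-bit smi payload this file asserts later via STATIC_ASSERT(kSmiValueSize == 32)): ADD can leave the int32 range, e.g. 0x7fffffff + 1, and SHR by zero reinterprets a negative int32 as a uint32 beyond the smi maximum:

int32_t v = -1;
uint32_t u = static_cast<uint32_t>(v) >> 0;  // 4294967295 > smi max 2147483647

which is why exactly these operations get the inline heap-number fallback.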
- bool generate_inline_heapnumber_results = - (allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS) && - (op == Token::ADD || op == Token::SUB || - op == Token::MUL || op == Token::DIV || op == Token::SHR); - - // Smi check of both operands. If op is BIT_OR, the check is delayed - // until after the OR operation. - Label not_smis; - Label use_fp_on_smis; - Label fail; - - if (op != Token::BIT_OR) { - Comment smi_check_comment(masm, "-- Smi check arguments"); - __ JumpIfNotBothSmi(left, right, &not_smis); - } - - Label smi_values; - __ bind(&smi_values); - // Perform the operation. - Comment perform_smi(masm, "-- Perform smi operation"); - switch (op) { - case Token::ADD: - ASSERT(right.is(rax)); - __ SmiAdd(right, right, left, &use_fp_on_smis); // ADD is commutative. - break; - - case Token::SUB: - __ SmiSub(left, left, right, &use_fp_on_smis); - __ movq(rax, left); - break; - - case Token::MUL: - ASSERT(right.is(rax)); - __ SmiMul(right, right, left, &use_fp_on_smis); // MUL is commutative. - break; - - case Token::DIV: - // SmiDiv will not accept left in rdx or right in rax. - __ movq(rbx, rax); - __ movq(rcx, rdx); - __ SmiDiv(rax, rcx, rbx, &use_fp_on_smis); - break; - - case Token::MOD: - // SmiMod will not accept left in rdx or right in rax. - __ movq(rbx, rax); - __ movq(rcx, rdx); - __ SmiMod(rax, rcx, rbx, &use_fp_on_smis); - break; - - case Token::BIT_OR: { - ASSERT(right.is(rax)); - __ SmiOrIfSmis(right, right, left, &not_smis); // BIT_OR is commutative. - break; - } - case Token::BIT_XOR: - ASSERT(right.is(rax)); - __ SmiXor(right, right, left); // BIT_XOR is commutative. - break; - - case Token::BIT_AND: - ASSERT(right.is(rax)); - __ SmiAnd(right, right, left); // BIT_AND is commutative. - break; - - case Token::SHL: - __ SmiShiftLeft(left, left, right); - __ movq(rax, left); - break; - - case Token::SAR: - __ SmiShiftArithmeticRight(left, left, right); - __ movq(rax, left); - break; - - case Token::SHR: - __ SmiShiftLogicalRight(left, left, right, &use_fp_on_smis); - __ movq(rax, left); - break; - - default: - UNREACHABLE(); - } - - // 5. Emit return of result in rax. Some operations have registers pushed. - __ ret(0); - - if (use_fp_on_smis.is_linked()) { - // 6. For some operations emit inline code to perform floating point - // operations on known smis (e.g., if the result of the operation - // overflowed the smi range). - __ bind(&use_fp_on_smis); - if (op == Token::DIV || op == Token::MOD) { - // Restore left and right to rdx and rax. - __ movq(rdx, rcx); - __ movq(rax, rbx); - } - - if (generate_inline_heapnumber_results) { - __ AllocateHeapNumber(rcx, rbx, slow); - Comment perform_float(masm, "-- Perform float operation on smis"); - if (op == Token::SHR) { - __ SmiToInteger32(left, left); - __ cvtqsi2sd(xmm0, left); - } else { - FloatingPointHelper::LoadSSE2SmiOperands(masm); - switch (op) { - case Token::ADD: __ addsd(xmm0, xmm1); break; - case Token::SUB: __ subsd(xmm0, xmm1); break; - case Token::MUL: __ mulsd(xmm0, xmm1); break; - case Token::DIV: __ divsd(xmm0, xmm1); break; - default: UNREACHABLE(); - } - } - __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0); - __ movq(rax, rcx); - __ ret(0); - } else { - __ jmp(&fail); - } - } - - // 7. Non-smi operands reach the end of the code generated by - // GenerateSmiCode, and fall through to subsequent code, - // with the operands in rdx and rax. - // But first we check if non-smi values are HeapNumbers holding - // values that could be smi.
- __ bind(&not_smis); - Comment done_comment(masm, "-- Enter non-smi code"); - FloatingPointHelper::ConvertUndefined convert_undefined = - FloatingPointHelper::BAILOUT_ON_UNDEFINED; - // This list must be in sync with BinaryOpPatch() behavior in ic.cc. - if (op == Token::BIT_AND || - op == Token::BIT_OR || - op == Token::BIT_XOR || - op == Token::SAR || - op == Token::SHL || - op == Token::SHR) { - convert_undefined = FloatingPointHelper::CONVERT_UNDEFINED_TO_ZERO; - } - FloatingPointHelper::NumbersToSmis(masm, left, right, rbx, rdi, rcx, - &smi_values, &fail, convert_undefined); - __ jmp(&smi_values); - __ bind(&fail); -} - - -static void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm, - Label* alloc_failure, - OverwriteMode mode); - - -static void BinaryOpStub_GenerateFloatingPointCode(MacroAssembler* masm, - Label* allocation_failure, - Label* non_numeric_failure, - Token::Value op, - OverwriteMode mode) { - switch (op) { - case Token::ADD: - case Token::SUB: - case Token::MUL: - case Token::DIV: { - FloatingPointHelper::LoadSSE2UnknownOperands(masm, non_numeric_failure); - - switch (op) { - case Token::ADD: __ addsd(xmm0, xmm1); break; - case Token::SUB: __ subsd(xmm0, xmm1); break; - case Token::MUL: __ mulsd(xmm0, xmm1); break; - case Token::DIV: __ divsd(xmm0, xmm1); break; - default: UNREACHABLE(); - } - BinaryOpStub_GenerateHeapResultAllocation( - masm, allocation_failure, mode); - __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0); - __ ret(0); - break; - } - case Token::MOD: { - // For MOD we jump to the allocation_failure label, to call runtime. - __ jmp(allocation_failure); - break; - } - case Token::BIT_OR: - case Token::BIT_AND: - case Token::BIT_XOR: - case Token::SAR: - case Token::SHL: - case Token::SHR: { - Label non_smi_shr_result; - Register heap_number_map = r9; - __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); - FloatingPointHelper::LoadAsIntegers(masm, non_numeric_failure, - heap_number_map); - switch (op) { - case Token::BIT_OR: __ orl(rax, rcx); break; - case Token::BIT_AND: __ andl(rax, rcx); break; - case Token::BIT_XOR: __ xorl(rax, rcx); break; - case Token::SAR: __ sarl_cl(rax); break; - case Token::SHL: __ shll_cl(rax); break; - case Token::SHR: { - __ shrl_cl(rax); - // Check if result is negative. This can only happen for a shift - // by zero. - __ testl(rax, rax); - __ j(negative, &non_smi_shr_result); - break; - } - default: UNREACHABLE(); - } - STATIC_ASSERT(kSmiValueSize == 32); - // Tag smi result and return. - __ Integer32ToSmi(rax, rax); - __ Ret(); - - // Logical shift right can produce an unsigned int32 that is not - // an int32, and so is not in the smi range. Allocate a heap number - // in that case. - if (op == Token::SHR) { - __ bind(&non_smi_shr_result); - Label allocation_failed; - __ movl(rbx, rax); // rbx holds result value (uint32 value as int64). - // Allocate heap number in new space. - // Not using AllocateHeapNumber macro in order to reuse - // already loaded heap_number_map. - __ Allocate(HeapNumber::kSize, rax, rdx, no_reg, &allocation_failed, - TAG_OBJECT); - // Set the map. - __ AssertRootValue(heap_number_map, - Heap::kHeapNumberMapRootIndex, - kHeapNumberMapRegisterClobbered); - __ movq(FieldOperand(rax, HeapObject::kMapOffset), - heap_number_map); - __ cvtqsi2sd(xmm0, rbx); - __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0); - __ Ret(); - - __ bind(&allocation_failed); - // We need tagged values in rdx and rax for the following code, - // not int32 in rax and rcx.
- __ Integer32ToSmi(rax, rcx); - __ Integer32ToSmi(rdx, rbx); - __ jmp(allocation_failure); - } - break; - } - default: UNREACHABLE(); break; - } - // No fall-through from this generated code. - if (FLAG_debug_code) { - __ Abort(kUnexpectedFallThroughInBinaryStubGenerateFloatingPointCode); - } -} - - -static void BinaryOpStub_GenerateRegisterArgsPushUnderReturn( - MacroAssembler* masm) { - // Push arguments, but ensure they are under the return address - // for a tail call. - __ PopReturnAddressTo(rcx); - __ push(rdx); - __ push(rax); - __ PushReturnAddressFrom(rcx); -} - - -void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) { - ASSERT(op_ == Token::ADD); - Label left_not_string, call_runtime; - - // Registers containing left and right operands respectively. - Register left = rdx; - Register right = rax; - - // Test if left operand is a string. - __ JumpIfSmi(left, &left_not_string, Label::kNear); - __ CmpObjectType(left, FIRST_NONSTRING_TYPE, rcx); - __ j(above_equal, &left_not_string, Label::kNear); - StringAddStub string_add_left_stub( - (StringAddFlags)(STRING_ADD_CHECK_RIGHT | STRING_ADD_ERECT_FRAME)); - BinaryOpStub_GenerateRegisterArgsPushUnderReturn(masm); - __ TailCallStub(&string_add_left_stub); - - // Left operand is not a string, test right. - __ bind(&left_not_string); - __ JumpIfSmi(right, &call_runtime, Label::kNear); - __ CmpObjectType(right, FIRST_NONSTRING_TYPE, rcx); - __ j(above_equal, &call_runtime, Label::kNear); - - StringAddStub string_add_right_stub( - (StringAddFlags)(STRING_ADD_CHECK_LEFT | STRING_ADD_ERECT_FRAME)); - BinaryOpStub_GenerateRegisterArgsPushUnderReturn(masm); - __ TailCallStub(&string_add_right_stub); - - // Neither argument is a string. - __ bind(&call_runtime); -} - - -void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) { - Label right_arg_changed, call_runtime; - - if (op_ == Token::MOD && encoded_right_arg_.has_value) { - // It is guaranteed that the value will fit into a Smi, because if it - // didn't, we wouldn't be here, see BinaryOp_Patch. - __ Cmp(rax, Smi::FromInt(fixed_right_arg_value())); - __ j(not_equal, &right_arg_changed); - } - - if (result_type_ == BinaryOpIC::UNINITIALIZED || - result_type_ == BinaryOpIC::SMI) { - // Only allow smi results. - BinaryOpStub_GenerateSmiCode(masm, NULL, NO_HEAPNUMBER_RESULTS, op_); - } else { - // Allow heap number result and don't make a transition if a heap number - // cannot be allocated. - BinaryOpStub_GenerateSmiCode( - masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS, op_); - } - - // Code falls through if the result is not returned as either a smi or heap - // number. - __ bind(&right_arg_changed); - GenerateTypeTransition(masm); - - if (call_runtime.is_linked()) { - __ bind(&call_runtime); - { - FrameScope scope(masm, StackFrame::INTERNAL); - GenerateRegisterArgsPush(masm); - GenerateCallRuntime(masm); - } - __ Ret(); - } -} - - -void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { - // The int32 case is identical to the Smi case. We avoid creating this - // ic state on x64. - UNREACHABLE(); -} - - -void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) { - Label call_runtime; - ASSERT(left_type_ == BinaryOpIC::STRING && right_type_ == BinaryOpIC::STRING); - ASSERT(op_ == Token::ADD); - // If both arguments are strings, call the string add stub. - // Otherwise, do a transition. - - // Registers containing left and right operands respectively. - Register left = rdx; - Register right = rax; - - // Test if left operand is a string. 
- __ JumpIfSmi(left, &call_runtime); - __ CmpObjectType(left, FIRST_NONSTRING_TYPE, rcx); - __ j(above_equal, &call_runtime); - - // Test if right operand is a string. - __ JumpIfSmi(right, &call_runtime); - __ CmpObjectType(right, FIRST_NONSTRING_TYPE, rcx); - __ j(above_equal, &call_runtime); - - StringAddStub string_add_stub( - (StringAddFlags)(STRING_ADD_CHECK_NONE | STRING_ADD_ERECT_FRAME)); - BinaryOpStub_GenerateRegisterArgsPushUnderReturn(masm); - __ TailCallStub(&string_add_stub); - - __ bind(&call_runtime); - GenerateTypeTransition(masm); -} - - -void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) { - Label call_runtime; - - if (op_ == Token::ADD) { - // Handle string addition here, because it is the only operation - // that does not do a ToNumber conversion on the operands. - GenerateAddStrings(masm); - } - - // Convert oddball arguments to numbers. - Label check, done; - __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex); - __ j(not_equal, &check, Label::kNear); - if (Token::IsBitOp(op_)) { - __ xor_(rdx, rdx); - } else { - __ LoadRoot(rdx, Heap::kNanValueRootIndex); - } - __ jmp(&done, Label::kNear); - __ bind(&check); - __ CompareRoot(rax, Heap::kUndefinedValueRootIndex); - __ j(not_equal, &done, Label::kNear); - if (Token::IsBitOp(op_)) { - __ xor_(rax, rax); - } else { - __ LoadRoot(rax, Heap::kNanValueRootIndex); - } - __ bind(&done); - - GenerateNumberStub(masm); -} - - -static void BinaryOpStub_CheckSmiInput(MacroAssembler* masm, - Register input, - Label* fail) { - Label ok; - __ JumpIfSmi(input, &ok, Label::kNear); - Register heap_number_map = r8; - Register scratch1 = r9; - Register scratch2 = r10; - // HeapNumbers containing 32bit integer values are also allowed. - __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); - __ cmpq(FieldOperand(input, HeapObject::kMapOffset), heap_number_map); - __ j(not_equal, fail); - __ movsd(xmm0, FieldOperand(input, HeapNumber::kValueOffset)); - // Convert, convert back, and compare the two doubles' bits. - __ cvttsd2siq(scratch2, xmm0); - __ cvtlsi2sd(xmm1, scratch2); - __ movq(scratch1, xmm0); - __ movq(scratch2, xmm1); - __ cmpq(scratch1, scratch2); - __ j(not_equal, fail); - __ bind(&ok); -} - - -void BinaryOpStub::GenerateNumberStub(MacroAssembler* masm) { - Label gc_required, not_number; - - // It could be that only SMIs have been seen at either the left - // or the right operand. For precise type feedback, patch the IC - // again if this changes. 
- if (left_type_ == BinaryOpIC::SMI) { - BinaryOpStub_CheckSmiInput(masm, rdx, &not_number); - } - if (right_type_ == BinaryOpIC::SMI) { - BinaryOpStub_CheckSmiInput(masm, rax, &not_number); - } - - BinaryOpStub_GenerateFloatingPointCode( - masm, &gc_required, &not_number, op_, mode_); - - __ bind(&not_number); - GenerateTypeTransition(masm); - - __ bind(&gc_required); - { - FrameScope scope(masm, StackFrame::INTERNAL); - GenerateRegisterArgsPush(masm); - GenerateCallRuntime(masm); - } - __ Ret(); -} - - -void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) { - Label call_runtime, call_string_add_or_runtime; - - BinaryOpStub_GenerateSmiCode( - masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS, op_); - - BinaryOpStub_GenerateFloatingPointCode( - masm, &call_runtime, &call_string_add_or_runtime, op_, mode_); - - __ bind(&call_string_add_or_runtime); - if (op_ == Token::ADD) { - GenerateAddStrings(masm); - } - - __ bind(&call_runtime); - { - FrameScope scope(masm, StackFrame::INTERNAL); - GenerateRegisterArgsPush(masm); - GenerateCallRuntime(masm); - } - __ Ret(); -} - - -static void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm, - Label* alloc_failure, - OverwriteMode mode) { - Label skip_allocation; - switch (mode) { - case OVERWRITE_LEFT: { - // If the argument in rdx is already an object, we skip the - // allocation of a heap number. - __ JumpIfNotSmi(rdx, &skip_allocation); - // Allocate a heap number for the result. Keep rax and rdx intact - // for the possible runtime call. - __ AllocateHeapNumber(rbx, rcx, alloc_failure); - // Now rdx can be overwritten losing one of the arguments as we are - // now done and will not need it any more. - __ movq(rdx, rbx); - __ bind(&skip_allocation); - // Use object in rdx as a result holder - __ movq(rax, rdx); - break; - } - case OVERWRITE_RIGHT: - // If the argument in rax is already an object, we skip the - // allocation of a heap number. - __ JumpIfNotSmi(rax, &skip_allocation); - // Fall through! - case NO_OVERWRITE: - // Allocate a heap number for the result. Keep rax and rdx intact - // for the possible runtime call. - __ AllocateHeapNumber(rbx, rcx, alloc_failure); - // Now rax can be overwritten losing one of the arguments as we are - // now done and will not need it any more. - __ movq(rax, rbx); - __ bind(&skip_allocation); - break; - default: UNREACHABLE(); - } -} - - -void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) { - __ push(rdx); - __ push(rax); -} - - void TranscendentalCacheStub::Generate(MacroAssembler* masm) { // TAGGED case: // Input: @@ -1145,7 +578,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) { // Then load the bits of the double into rbx. __ SmiToInteger32(rax, rax); __ subq(rsp, Immediate(kDoubleSize)); - __ cvtlsi2sd(xmm1, rax); + __ Cvtlsi2sd(xmm1, rax); __ movsd(Operand(rsp, 0), xmm1); __ movq(rbx, xmm1); __ movq(rdx, xmm1); @@ -1161,7 +594,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) { // Input is a HeapNumber. Push it on the FPU stack and load its // bits into rbx. __ fld_d(FieldOperand(rax, HeapNumber::kValueOffset)); - __ movq(rbx, FieldOperand(rax, HeapNumber::kValueOffset)); + __ MoveDouble(rbx, FieldOperand(rax, HeapNumber::kValueOffset)); __ movq(rdx, rbx); __ bind(&loaded); @@ -1422,67 +855,6 @@ void TranscendentalCacheStub::GenerateOperation( } -// Input: rdx, rax are the left and right objects of a bit op. -// Output: rax, rcx are left and right integers for a bit op. -// Jump to conversion_failure: rdx and rax are unchanged.
-void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm, - Label* conversion_failure, - Register heap_number_map) { - // Check float operands. - Label arg1_is_object, check_undefined_arg1; - Label arg2_is_object, check_undefined_arg2; - Label load_arg2, done; - - __ JumpIfNotSmi(rdx, &arg1_is_object); - __ SmiToInteger32(r8, rdx); - __ jmp(&load_arg2); - - // If the argument is undefined it converts to zero (ECMA-262, section 9.5). - __ bind(&check_undefined_arg1); - __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex); - __ j(not_equal, conversion_failure); - __ Set(r8, 0); - __ jmp(&load_arg2); - - __ bind(&arg1_is_object); - __ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), heap_number_map); - __ j(not_equal, &check_undefined_arg1); - // Get the untagged integer version of the rdx heap number in r8. - __ TruncateHeapNumberToI(r8, rdx); - - // Here r8 has the untagged integer, rax has a Smi or a heap number. - __ bind(&load_arg2); - // Test if arg2 is a Smi. - __ JumpIfNotSmi(rax, &arg2_is_object); - __ SmiToInteger32(rcx, rax); - __ jmp(&done); - - // If the argument is undefined it converts to zero (ECMA-262, section 9.5). - __ bind(&check_undefined_arg2); - __ CompareRoot(rax, Heap::kUndefinedValueRootIndex); - __ j(not_equal, conversion_failure); - __ Set(rcx, 0); - __ jmp(&done); - - __ bind(&arg2_is_object); - __ cmpq(FieldOperand(rax, HeapObject::kMapOffset), heap_number_map); - __ j(not_equal, &check_undefined_arg2); - // Get the untagged integer version of the rax heap number in rcx. - __ TruncateHeapNumberToI(rcx, rax); - - __ bind(&done); - __ movl(rax, r8); -} - - -void FloatingPointHelper::LoadSSE2SmiOperands(MacroAssembler* masm) { - __ SmiToInteger32(kScratchRegister, rdx); - __ cvtlsi2sd(xmm0, kScratchRegister); - __ SmiToInteger32(kScratchRegister, rax); - __ cvtlsi2sd(xmm1, kScratchRegister); -} - - void FloatingPointHelper::LoadSSE2UnknownOperands(MacroAssembler* masm, Label* not_numbers) { Label load_smi_rdx, load_nonsmi_rax, load_smi_rax, load_float_rax, done; @@ -1503,89 +875,12 @@ void FloatingPointHelper::LoadSSE2UnknownOperands(MacroAssembler* masm, __ bind(&load_smi_rdx); __ SmiToInteger32(kScratchRegister, rdx); - __ cvtlsi2sd(xmm0, kScratchRegister); + __ Cvtlsi2sd(xmm0, kScratchRegister); __ JumpIfNotSmi(rax, &load_nonsmi_rax); __ bind(&load_smi_rax); __ SmiToInteger32(kScratchRegister, rax); - __ cvtlsi2sd(xmm1, kScratchRegister); - __ bind(&done); -} - - -void FloatingPointHelper::NumbersToSmis(MacroAssembler* masm, - Register first, - Register second, - Register scratch1, - Register scratch2, - Register scratch3, - Label* on_success, - Label* on_not_smis, - ConvertUndefined convert_undefined) { - Register heap_number_map = scratch3; - Register smi_result = scratch1; - Label done, maybe_undefined_first, maybe_undefined_second, first_done; - - __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); - - Label first_smi; - __ JumpIfSmi(first, &first_smi, Label::kNear); - __ cmpq(FieldOperand(first, HeapObject::kMapOffset), heap_number_map); - __ j(not_equal, - (convert_undefined == CONVERT_UNDEFINED_TO_ZERO) - ? &maybe_undefined_first - : on_not_smis); - // Convert HeapNumber to smi if possible. - __ movsd(xmm0, FieldOperand(first, HeapNumber::kValueOffset)); - __ movq(scratch2, xmm0); - __ cvttsd2siq(smi_result, xmm0); - // Check if conversion was successful by converting back and - // comparing to the original double's bits. 
- __ cvtlsi2sd(xmm1, smi_result); - __ movq(kScratchRegister, xmm1); - __ cmpq(scratch2, kScratchRegister); - __ j(not_equal, on_not_smis); - __ Integer32ToSmi(first, smi_result); - - __ bind(&first_done); - __ JumpIfSmi(second, (on_success != NULL) ? on_success : &done); - __ bind(&first_smi); - __ AssertNotSmi(second); - __ cmpq(FieldOperand(second, HeapObject::kMapOffset), heap_number_map); - __ j(not_equal, - (convert_undefined == CONVERT_UNDEFINED_TO_ZERO) - ? &maybe_undefined_second - : on_not_smis); - // Convert second to smi, if possible. - __ movsd(xmm0, FieldOperand(second, HeapNumber::kValueOffset)); - __ movq(scratch2, xmm0); - __ cvttsd2siq(smi_result, xmm0); - __ cvtlsi2sd(xmm1, smi_result); - __ movq(kScratchRegister, xmm1); - __ cmpq(scratch2, kScratchRegister); - __ j(not_equal, on_not_smis); - __ Integer32ToSmi(second, smi_result); - if (on_success != NULL) { - __ jmp(on_success); - } else { - __ jmp(&done); - } - - __ bind(&maybe_undefined_first); - __ CompareRoot(first, Heap::kUndefinedValueRootIndex); - __ j(not_equal, on_not_smis); - __ xor_(first, first); - __ jmp(&first_done); - - __ bind(&maybe_undefined_second); - __ CompareRoot(second, Heap::kUndefinedValueRootIndex); - __ j(not_equal, on_not_smis); - __ xor_(second, second); - if (on_success != NULL) { - __ jmp(on_success); - } - // Else: fall through. - + __ Cvtlsi2sd(xmm1, kScratchRegister); __ bind(&done); } @@ -1603,7 +898,7 @@ void MathPowStub::Generate(MacroAssembler* masm) { // Save 1 in double_result - we need this several times later on. __ movq(scratch, Immediate(1)); - __ cvtlsi2sd(double_result, scratch); + __ Cvtlsi2sd(double_result, scratch); if (exponent_type_ == ON_STACK) { Label base_is_smi, unpack_exponent; @@ -1623,7 +918,7 @@ void MathPowStub::Generate(MacroAssembler* masm) { __ bind(&base_is_smi); __ SmiToInteger32(base, base); - __ cvtlsi2sd(double_base, base); + __ Cvtlsi2sd(double_base, base); __ bind(&unpack_exponent); __ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear); @@ -1812,7 +1107,7 @@ void MathPowStub::Generate(MacroAssembler* masm) { // and may not have contained the exponent value in the first place when the // input was a smi. We reset it with exponent value before bailing out. __ j(not_equal, &done); - __ cvtlsi2sd(double_exponent, exponent); + __ Cvtlsi2sd(double_exponent, exponent); // Returning or bailing out. Counters* counters = masm->isolate()->counters(); @@ -1902,8 +1197,7 @@ void StringLengthStub::Generate(MacroAssembler* masm) { receiver = rax; } - StubCompiler::GenerateLoadStringLength(masm, receiver, r8, r9, &miss, - support_wrapper_); + StubCompiler::GenerateLoadStringLength(masm, receiver, r8, r9, &miss); __ bind(&miss); StubCompiler::TailCallBuiltin( masm, BaseLoadStoreStubCompiler::MissBuiltin(kind())); @@ -1977,11 +1271,6 @@ void StoreArrayLengthStub::Generate(MacroAssembler* masm) { void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) { // The key is in rdx and the parameter count is in rax. - // The displacement is used for skipping the frame pointer on the - // stack. It is the offset of the last parameter (if any) relative - // to the frame pointer. - static const int kDisplacement = 1 * kPointerSize; - // Check that the key is a smi. Label slow; __ JumpIfNotSmi(rdx, &slow); @@ -2003,10 +1292,10 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) { __ j(above_equal, &slow); // Read the argument from the stack and return it. 
- SmiIndex index = masm->SmiToIndex(rax, rax, kPointerSizeLog2); - __ lea(rbx, Operand(rbp, index.reg, index.scale, 0)); - index = masm->SmiToNegativeIndex(rdx, rdx, kPointerSizeLog2); - __ movq(rax, Operand(rbx, index.reg, index.scale, kDisplacement)); + __ SmiSub(rax, rax, rdx); + __ SmiToInteger32(rax, rax); + StackArgumentsAccessor args(rbp, rax, ARGUMENTS_DONT_CONTAIN_RECEIVER); + __ movq(rax, args.GetArgumentOperand(0)); __ Ret(); // Arguments adaptor case: Check index against actual arguments @@ -2018,10 +1307,11 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) { __ j(above_equal, &slow); // Read the argument from the stack and return it. - index = masm->SmiToIndex(rax, rcx, kPointerSizeLog2); - __ lea(rbx, Operand(rbx, index.reg, index.scale, 0)); - index = masm->SmiToNegativeIndex(rdx, rdx, kPointerSizeLog2); - __ movq(rax, Operand(rbx, index.reg, index.scale, kDisplacement)); + __ SmiSub(rcx, rcx, rdx); + __ SmiToInteger32(rcx, rcx); + StackArgumentsAccessor adaptor_args(rbx, rcx, + ARGUMENTS_DONT_CONTAIN_RECEIVER); + __ movq(rax, adaptor_args.GetArgumentOperand(0)); __ Ret(); // Slow-case: Handle non-smi or out-of-bounds access to arguments @@ -2395,11 +1685,16 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { // rsp[24] : subject string // rsp[32] : JSRegExp object - static const int kLastMatchInfoOffset = 1 * kPointerSize; - static const int kPreviousIndexOffset = 2 * kPointerSize; - static const int kSubjectOffset = 3 * kPointerSize; - static const int kJSRegExpOffset = 4 * kPointerSize; + enum RegExpExecStubArgumentIndices { + JS_REG_EXP_OBJECT_ARGUMENT_INDEX, + SUBJECT_STRING_ARGUMENT_INDEX, + PREVIOUS_INDEX_ARGUMENT_INDEX, + LAST_MATCH_INFO_ARGUMENT_INDEX, + REG_EXP_EXEC_ARGUMENT_COUNT + }; + StackArgumentsAccessor args(rsp, REG_EXP_EXEC_ARGUMENT_COUNT, + ARGUMENTS_DONT_CONTAIN_RECEIVER); Label runtime; // Ensure that a RegExp stack is allocated. Isolate* isolate = masm->isolate(); @@ -2412,7 +1707,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { __ j(zero, &runtime); // Check that the first argument is a JSRegExp object. - __ movq(rax, Operand(rsp, kJSRegExpOffset)); + __ movq(rax, args.GetArgumentOperand(JS_REG_EXP_OBJECT_ARGUMENT_INDEX)); __ JumpIfSmi(rax, &runtime); __ CmpObjectType(rax, JS_REGEXP_TYPE, kScratchRegister); __ j(not_equal, &runtime); @@ -2445,7 +1740,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { // Reset offset for possibly sliced string. __ Set(r14, 0); - __ movq(rdi, Operand(rsp, kSubjectOffset)); + __ movq(rdi, args.GetArgumentOperand(SUBJECT_STRING_ARGUMENT_INDEX)); __ JumpIfSmi(rdi, &runtime); __ movq(r15, rdi); // Make a copy of the original subject string. __ movq(rbx, FieldOperand(rdi, HeapObject::kMapOffset)); @@ -2547,7 +1842,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { // We have to use r15 instead of rdi to load the length because rdi might // have been only made to look like a sequential string when it actually // is an external string. - __ movq(rbx, Operand(rsp, kPreviousIndexOffset)); + __ movq(rbx, args.GetArgumentOperand(PREVIOUS_INDEX_ARGUMENT_INDEX)); __ JumpIfNotSmi(rbx, &runtime); __ SmiCompare(rbx, FieldOperand(r15, String::kLengthOffset)); __ j(above_equal, &runtime); @@ -2649,7 +1944,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { __ addq(r11, Immediate(Code::kHeaderSize - kHeapObjectTag)); __ call(r11); - __ LeaveApiExitFrame(); + __ LeaveApiExitFrame(true); // Check the result. 
Label success; @@ -2667,11 +1962,11 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { // For failure return null. __ LoadRoot(rax, Heap::kNullValueRootIndex); - __ ret(4 * kPointerSize); + __ ret(REG_EXP_EXEC_ARGUMENT_COUNT * kPointerSize); // Load RegExp data. __ bind(&success); - __ movq(rax, Operand(rsp, kJSRegExpOffset)); + __ movq(rax, args.GetArgumentOperand(JS_REG_EXP_OBJECT_ARGUMENT_INDEX)); __ movq(rcx, FieldOperand(rax, JSRegExp::kDataOffset)); __ SmiToInteger32(rax, FieldOperand(rcx, JSRegExp::kIrregexpCaptureCountOffset)); @@ -2680,7 +1975,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { // rdx: Number of capture registers // Check that the fourth object is a JSArray object. - __ movq(r15, Operand(rsp, kLastMatchInfoOffset)); + __ movq(r15, args.GetArgumentOperand(LAST_MATCH_INFO_ARGUMENT_INDEX)); __ JumpIfSmi(r15, &runtime); __ CmpObjectType(r15, JS_ARRAY_TYPE, kScratchRegister); __ j(not_equal, &runtime); @@ -2704,7 +1999,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { __ movq(FieldOperand(rbx, RegExpImpl::kLastCaptureCountOffset), kScratchRegister); // Store last subject and last input. - __ movq(rax, Operand(rsp, kSubjectOffset)); + __ movq(rax, args.GetArgumentOperand(SUBJECT_STRING_ARGUMENT_INDEX)); __ movq(FieldOperand(rbx, RegExpImpl::kLastSubjectOffset), rax); __ movq(rcx, rax); __ RecordWriteField(rbx, @@ -2747,7 +2042,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { // Return last match info. __ movq(rax, r15); - __ ret(4 * kPointerSize); + __ ret(REG_EXP_EXEC_ARGUMENT_COUNT * kPointerSize); __ bind(&exception); // Result must now be exception. If there is no pending exception already a @@ -2910,112 +2205,6 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) { } -void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm, - Register object, - Register result, - Register scratch1, - Register scratch2, - Label* not_found) { - // Use of registers. Register result is used as a temporary. - Register number_string_cache = result; - Register mask = scratch1; - Register scratch = scratch2; - - // Load the number string cache. - __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex); - - // Make the hash mask from the length of the number string cache. It - // contains two elements (number and string) for each cache entry. - __ SmiToInteger32( - mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset)); - __ shrl(mask, Immediate(1)); - __ subq(mask, Immediate(1)); // Make mask. - - // Calculate the entry in the number string cache. The hash value in the - // number string cache for smis is just the smi value, and the hash for - // doubles is the xor of the upper and lower words. See - // Heap::GetNumberStringCache. 
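As a worked example of that hashing scheme (an editorial sketch; the shift comes from GenerateConvertHashCodeToIndex just below, and kPointerSizeLog2 is 3 on x64), the byte offset of a cache entry is

int entry_offset = (hash & mask) << (kPointerSizeLog2 + 1);  // entry index * 16 bytes

so for a cache FixedArray of length 128 the mask is (128 >> 1) - 1 = 63, each entry spans two pointer-sized fields (key, cached string), and a smi hash of 200 lands at entry 200 & 63 = 8, byte offset 8 << 4 = 128.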
- Label is_smi; - Label load_result_from_cache; - Factory* factory = masm->isolate()->factory(); - __ JumpIfSmi(object, &is_smi); - __ CheckMap(object, - factory->heap_number_map(), - not_found, - DONT_DO_SMI_CHECK); - - STATIC_ASSERT(8 == kDoubleSize); - __ movl(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4)); - __ xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset)); - GenerateConvertHashCodeToIndex(masm, scratch, mask); - - Register index = scratch; - Register probe = mask; - __ movq(probe, - FieldOperand(number_string_cache, - index, - times_1, - FixedArray::kHeaderSize)); - __ JumpIfSmi(probe, not_found); - __ movsd(xmm0, FieldOperand(object, HeapNumber::kValueOffset)); - __ movsd(xmm1, FieldOperand(probe, HeapNumber::kValueOffset)); - __ ucomisd(xmm0, xmm1); - __ j(parity_even, not_found); // Bail out if NaN is involved. - __ j(not_equal, not_found); // The cache did not contain this value. - __ jmp(&load_result_from_cache); - - __ bind(&is_smi); - __ SmiToInteger32(scratch, object); - GenerateConvertHashCodeToIndex(masm, scratch, mask); - - // Check if the entry is the smi we are looking for. - __ cmpq(object, - FieldOperand(number_string_cache, - index, - times_1, - FixedArray::kHeaderSize)); - __ j(not_equal, not_found); - - // Get the result from the cache. - __ bind(&load_result_from_cache); - __ movq(result, - FieldOperand(number_string_cache, - index, - times_1, - FixedArray::kHeaderSize + kPointerSize)); - Counters* counters = masm->isolate()->counters(); - __ IncrementCounter(counters->number_to_string_native(), 1); -} - - -void NumberToStringStub::GenerateConvertHashCodeToIndex(MacroAssembler* masm, - Register hash, - Register mask) { - __ and_(hash, mask); - // Each entry in string cache consists of two pointer sized fields, - // but times_twice_pointer_size (multiplication by 16) scale factor - // is not supported by addrmode on x64 platform. - // So we have to premultiply entry index before lookup. - __ shl(hash, Immediate(kPointerSizeLog2 + 1)); -} - - -void NumberToStringStub::Generate(MacroAssembler* masm) { - Label runtime; - - StackArgumentsAccessor args(rsp, 1, ARGUMENTS_DONT_CONTAIN_RECEIVER); - __ movq(rbx, args.GetArgumentOperand(0)); - - // Generate code to lookup number in the number string cache. - GenerateLookupNumberStringCache(masm, rbx, rax, r8, r9, &runtime); - __ ret(1 * kPointerSize); - - __ bind(&runtime); - // Handle number to string in the runtime system if not found in the cache. - __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1); -} - - static int NegativeComparisonResult(Condition cc) { ASSERT(cc != equal); ASSERT((cc == less) || (cc == less_equal) @@ -3322,6 +2511,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) { // Cache the called function in a global property cell. Cache states // are uninitialized, monomorphic (indicated by a JSFunction), and // megamorphic. + // rax : number of arguments to the construct function // rbx : cache cell for call target // rdi : the function to call Isolate* isolate = masm->isolate(); @@ -3341,9 +2531,8 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) { // If we didn't have a matching function, and we didn't find the megamorph // sentinel, then we have in the cell either some other function or an // AllocationSite. Do a map check on the object in rcx. 
- Handle<Map> allocation_site_map( - masm->isolate()->heap()->allocation_site_map(), - masm->isolate()); + Handle<Map> allocation_site_map = + masm->isolate()->factory()->allocation_site_map(); __ Cmp(FieldOperand(rcx, 0), allocation_site_map); __ j(not_equal, &miss); @@ -3379,6 +2568,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) { { FrameScope scope(masm, StackFrame::INTERNAL); + // Arguments register must be smi-tagged to call out. __ Integer32ToSmi(rax, rax); __ push(rax); __ push(rdi); @@ -3562,6 +2752,7 @@ void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) { RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate); ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate); CreateAllocationSiteStub::GenerateAheadOfTime(isolate); + BinaryOpStub::GenerateAheadOfTime(isolate); } @@ -3619,6 +2810,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm, // PerformGC. No need to use PrepareCallCFunction/CallCFunction here as the // stack is known to be aligned. This function takes one argument which is // passed in register. + __ movq(arg_reg_2, ExternalReference::isolate_address(masm->isolate())); __ movq(arg_reg_1, rax); __ movq(kScratchRegister, ExternalReference::perform_gc_function(masm->isolate())); @@ -4583,34 +3775,11 @@ void StringAddStub::Generate(MacroAssembler* masm) { // Just jump to runtime to add the two strings. __ bind(&call_runtime); - - if ((flags_ & STRING_ADD_ERECT_FRAME) != 0) { - GenerateRegisterArgsPop(masm, rcx); - // Build a frame - { - FrameScope scope(masm, StackFrame::INTERNAL); - GenerateRegisterArgsPush(masm); - __ CallRuntime(Runtime::kStringAdd, 2); - } - __ Ret(); - } else { - __ TailCallRuntime(Runtime::kStringAdd, 2, 1); - } + __ TailCallRuntime(Runtime::kStringAdd, 2, 1); if (call_builtin.is_linked()) { __ bind(&call_builtin); - if ((flags_ & STRING_ADD_ERECT_FRAME) != 0) { - GenerateRegisterArgsPop(masm, rcx); - // Build a frame - { - FrameScope scope(masm, StackFrame::INTERNAL); - GenerateRegisterArgsPush(masm); - __ InvokeBuiltin(builtin_id, CALL_FUNCTION); - } - __ Ret(); - } else { - __ InvokeBuiltin(builtin_id, JUMP_FUNCTION); - } + __ InvokeBuiltin(builtin_id, JUMP_FUNCTION); } } @@ -4646,12 +3815,7 @@ void StringAddStub::GenerateConvertArgument(MacroAssembler* masm, // Check the number to string cache. __ bind(&not_string); // Puts the cached result into scratch1. - NumberToStringStub::GenerateLookupNumberStringCache(masm, - arg, - scratch1, - scratch2, - scratch3, - slow); + __ LookupNumberStringCache(arg, scratch1, scratch2, scratch3, slow); __ movq(arg, scratch1); __ movq(Operand(rsp, stack_offset), arg); __ bind(&done); @@ -4935,13 +4099,18 @@ void SubStringStub::Generate(MacroAssembler* masm) { // rsp[16] : from // rsp[24] : string - const int kToOffset = 1 * kPointerSize; - const int kFromOffset = kToOffset + kPointerSize; - const int kStringOffset = kFromOffset + kPointerSize; - const int kArgumentsSize = (kStringOffset + kPointerSize) - kToOffset; + enum SubStringStubArgumentIndices { + STRING_ARGUMENT_INDEX, + FROM_ARGUMENT_INDEX, + TO_ARGUMENT_INDEX, + SUB_STRING_ARGUMENT_COUNT + }; + + StackArgumentsAccessor args(rsp, SUB_STRING_ARGUMENT_COUNT, + ARGUMENTS_DONT_CONTAIN_RECEIVER); // Make sure first argument is a string.
- __ movq(rax, Operand(rsp, kStringOffset)); + __ movq(rax, args.GetArgumentOperand(STRING_ARGUMENT_INDEX)); STATIC_ASSERT(kSmiTag == 0); __ testl(rax, Immediate(kSmiTagMask)); __ j(zero, &runtime); @@ -4951,8 +4120,8 @@ void SubStringStub::Generate(MacroAssembler* masm) { // rax: string // rbx: instance type // Calculate length of sub string using the smi values. - __ movq(rcx, Operand(rsp, kToOffset)); - __ movq(rdx, Operand(rsp, kFromOffset)); + __ movq(rcx, args.GetArgumentOperand(TO_ARGUMENT_INDEX)); + __ movq(rdx, args.GetArgumentOperand(FROM_ARGUMENT_INDEX)); __ JumpUnlessBothNonNegativeSmi(rcx, rdx, &runtime); __ SmiSub(rcx, rcx, rdx); // Overflow doesn't happen. @@ -4965,7 +4134,7 @@ void SubStringStub::Generate(MacroAssembler* masm) { // Return original string. Counters* counters = masm->isolate()->counters(); __ IncrementCounter(counters->sub_string_native(), 1); - __ ret(kArgumentsSize); + __ ret(SUB_STRING_ARGUMENT_COUNT * kPointerSize); __ bind(&not_original_string); Label single_char; @@ -5035,9 +4204,15 @@ void SubStringStub::Generate(MacroAssembler* masm) { STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0); STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0); __ testb(rbx, Immediate(kStringEncodingMask)); - __ j(zero, &two_byte_slice, Label::kNear); + // Make long jumps when allocations tracking is on due to + // RecordObjectAllocation inside MacroAssembler::Allocate. + Label::Distance jump_distance = + masm->isolate()->heap_profiler()->is_tracking_allocations() + ? Label::kFar + : Label::kNear; + __ j(zero, &two_byte_slice, jump_distance); __ AllocateAsciiSlicedString(rax, rbx, r14, &runtime); - __ jmp(&set_slice_header, Label::kNear); + __ jmp(&set_slice_header, jump_distance); __ bind(&two_byte_slice); __ AllocateTwoByteSlicedString(rax, rbx, r14, &runtime); __ bind(&set_slice_header); @@ -5048,7 +4223,7 @@ ... __ movq(FieldOperand(rax, SlicedString::kParentOffset), rdi); __ movq(FieldOperand(rax, SlicedString::kOffsetOffset), rdx); __ IncrementCounter(counters->sub_string_native(), 1); - __ ret(kArgumentsSize); + __ ret(3 * kPointerSize); __ bind(&copy_routine); } @@ -5102,7 +4277,7 ... StringHelper::GenerateCopyCharactersREP(masm, rdi, rsi, rcx, true); __ movq(rsi, r14); // Restore rsi. __ IncrementCounter(counters->sub_string_native(), 1); - __ ret(kArgumentsSize); + __ ret(SUB_STRING_ARGUMENT_COUNT * kPointerSize); __ bind(&two_byte_sequential); // Allocate the result. @@ -5127,7 +4302,7 ... StringHelper::GenerateCopyCharactersREP(masm, rdi, rsi, rcx, false); __ movq(rsi, r14); // Restore esi. __ IncrementCounter(counters->sub_string_native(), 1); - __ ret(kArgumentsSize); + __ ret(SUB_STRING_ARGUMENT_COUNT * kPointerSize); // Just jump to runtime to create the sub string. __ bind(&runtime); @@ -5141,7 +4316,7 ... StringCharAtGenerator generator( rax, rdx, rcx, rax, &runtime, &runtime, &runtime, STRING_INDEX_IS_NUMBER); generator.GenerateFast(masm); - __ ret(kArgumentsSize); + __ ret(SUB_STRING_ARGUMENT_COUNT * kPointerSize); generator.SkipSlow(masm, &runtime); } @@ -5376,7 +4551,7 @@ void ICCompareStub::GenerateNumbers(MacroAssembler* masm) { __ jmp(&left, Label::kNear); __ bind(&right_smi); __ SmiToInteger32(rcx, rax); // Can't clobber rax yet.
- __ cvtlsi2sd(xmm1, rcx); + __ Cvtlsi2sd(xmm1, rcx); __ bind(&left); __ JumpIfSmi(rdx, &left_smi, Label::kNear); @@ -5386,7 +4561,7 @@ void ICCompareStub::GenerateNumbers(MacroAssembler* masm) { __ jmp(&done); __ bind(&left_smi); __ SmiToInteger32(rcx, rdx); // Can't clobber rdx yet. - __ cvtlsi2sd(xmm0, rcx); + __ Cvtlsi2sd(xmm0, rcx); __ bind(&done); // Compare operands @@ -6392,9 +5567,8 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm, __ incl(rdx); __ movq(rcx, FieldOperand(rbx, Cell::kValueOffset)); if (FLAG_debug_code) { - Handle<Map> allocation_site_map( - masm->isolate()->heap()->allocation_site_map(), - masm->isolate()); + Handle<Map> allocation_site_map = + masm->isolate()->factory()->allocation_site_map(); __ Cmp(FieldOperand(rcx, 0), allocation_site_map); __ Assert(equal, kExpectedAllocationSiteInCell); } @@ -6541,7 +5715,7 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) { __ j(equal, &no_info); __ movq(rdx, FieldOperand(rbx, Cell::kValueOffset)); __ Cmp(FieldOperand(rdx, 0), - Handle<Map>(masm->isolate()->heap()->allocation_site_map())); + masm->isolate()->factory()->allocation_site_map()); __ j(not_equal, &no_info); __ movq(rdx, FieldOperand(rdx, AllocationSite::kTransitionInfoOffset)); diff --git a/deps/v8/src/x64/code-stubs-x64.h b/deps/v8/src/x64/code-stubs-x64.h index 41678ecd20..c76abcf001 100644 --- a/deps/v8/src/x64/code-stubs-x64.h +++ b/deps/v8/src/x64/code-stubs-x64.h @@ -208,34 +208,6 @@ class StringCompareStub: public PlatformCodeStub { }; -class NumberToStringStub: public PlatformCodeStub { - public: - NumberToStringStub() { } - - // Generate code to do a lookup in the number string cache. If the number in - // the register object is found in the cache the generated code falls through - // with the result in the result register. The object and the result register - // can be the same. If the number is not found in the cache the code jumps to - // the label not_found with only the content of register object unchanged. - static void GenerateLookupNumberStringCache(MacroAssembler* masm, - Register object, - Register result, - Register scratch1, - Register scratch2, - Label* not_found); - - private: - static void GenerateConvertHashCodeToIndex(MacroAssembler* masm, - Register hash, - Register mask); - - Major MajorKey() { return NumberToString; } - int MinorKey() { return 0; } - - void Generate(MacroAssembler* masm); -}; - - class NameDictionaryLookupStub: public PlatformCodeStub { public: enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP }; diff --git a/deps/v8/src/x64/codegen-x64.cc b/deps/v8/src/x64/codegen-x64.cc index 24773c2595..390ec7c9c9 100644 --- a/deps/v8/src/x64/codegen-x64.cc +++ b/deps/v8/src/x64/codegen-x64.cc @@ -263,8 +263,7 @@ void ElementsTransitionGenerator::GenerateMapChangeElementsTransition( // ----------------------------------- if (mode == TRACK_ALLOCATION_SITE) { ASSERT(allocation_memento_found != NULL); - __ TestJSArrayForAllocationMemento(rdx, rdi); - __ j(equal, allocation_memento_found); + __ JumpIfJSArrayHasAllocationMemento(rdx, rdi, allocation_memento_found); } // Set transitioned map. 
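Judging from the three call sites replaced in this file, the new JumpIfJSArrayHasAllocationMemento helper presumably just folds the test-and-branch pair each transition generator used to spell out; a sketch of that shape (body assumed from the call sites, not quoted from the patch):

void MacroAssembler::JumpIfJSArrayHasAllocationMemento(
    Register receiver, Register scratch, Label* memento_found) {
  // Probe for an AllocationMemento directly behind the JSArray, then
  // branch to the caller's bailout label on a hit.
  TestJSArrayForAllocationMemento(receiver, scratch);
  j(equal, memento_found);
}

The parallel cvtlsi2sd-to-Cvtlsi2sd renames point at a similar MacroAssembler wrapper; presumably it zeroes the destination XMM register before converting, since cvtlsi2sd writes only the low lanes and would otherwise carry a false dependence on the register's stale contents.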
@@ -292,8 +291,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble( Label allocated, new_backing_store, only_change_map, done; if (mode == TRACK_ALLOCATION_SITE) { - __ TestJSArrayForAllocationMemento(rdx, rdi); - __ j(equal, fail); + __ JumpIfJSArrayHasAllocationMemento(rdx, rdi, fail); } // Check for empty arrays, which only require a map transition and no changes @@ -386,7 +384,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble( // rbx: current element (smi-tagged) __ JumpIfNotSmi(rbx, &convert_hole); __ SmiToInteger32(rbx, rbx); - __ cvtlsi2sd(xmm0, rbx); + __ Cvtlsi2sd(xmm0, rbx); __ movsd(FieldOperand(r14, r9, times_8, FixedDoubleArray::kHeaderSize), xmm0); __ jmp(&entry); @@ -418,8 +416,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject( Label loop, entry, convert_hole, gc_required, only_change_map; if (mode == TRACK_ALLOCATION_SITE) { - __ TestJSArrayForAllocationMemento(rdx, rdi); - __ j(equal, fail); + __ JumpIfJSArrayHasAllocationMemento(rdx, rdi, fail); } // Check for empty arrays, which only require a map transition and no changes @@ -469,7 +466,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject( // Non-hole double, copy value into a heap number. __ AllocateHeapNumber(rax, r15, &gc_required); // rax: new heap number - __ movq(FieldOperand(rax, HeapNumber::kValueOffset), r14); + __ MoveDouble(FieldOperand(rax, HeapNumber::kValueOffset), r14); __ movq(FieldOperand(r11, r9, times_pointer_size, @@ -678,8 +675,6 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm, #undef __ -static const int kNoCodeAgeSequenceLength = 6; - static byte* GetNoCodeAgeSequence(uint32_t* length) { static bool initialized = false; static byte sequence[kNoCodeAgeSequenceLength]; @@ -711,7 +706,7 @@ bool Code::IsYoungSequence(byte* sequence) { void Code::GetCodeAgeAndParity(byte* sequence, Age* age, MarkingParity* parity) { if (IsYoungSequence(sequence)) { - *age = kNoAge; + *age = kNoAgeCodeAge; *parity = NO_MARKING_PARITY; } else { sequence++; // Skip the kCallOpcode byte @@ -723,30 +718,27 @@ void Code::GetCodeAgeAndParity(byte* sequence, Age* age, } -void Code::PatchPlatformCodeAge(byte* sequence, +void Code::PatchPlatformCodeAge(Isolate* isolate, + byte* sequence, Code::Age age, MarkingParity parity) { uint32_t young_length; byte* young_sequence = GetNoCodeAgeSequence(&young_length); - if (age == kNoAge) { + if (age == kNoAgeCodeAge) { CopyBytes(sequence, young_sequence, young_length); CPU::FlushICache(sequence, young_length); } else { - Code* stub = GetCodeAgeStub(age, parity); + Code* stub = GetCodeAgeStub(isolate, age, parity); CodePatcher patcher(sequence, young_length); patcher.masm()->call(stub->instruction_start()); - for (int i = 0; - i < kNoCodeAgeSequenceLength - Assembler::kShortCallInstructionLength; - i++) { - patcher.masm()->nop(); - } + patcher.masm()->Nop( + kNoCodeAgeSequenceLength - Assembler::kShortCallInstructionLength); } } Operand StackArgumentsAccessor::GetArgumentOperand(int index) { ASSERT(index >= 0); - ASSERT(base_reg_.is(rsp) || base_reg_.is(rbp)); int receiver = (receiver_mode_ == ARGUMENTS_CONTAIN_RECEIVER) ? 1 : 0; int displacement_to_last_argument = base_reg_.is(rsp) ? 
kPCOnStackSize : kFPOnStackSize + kPCOnStackSize; diff --git a/deps/v8/src/x64/codegen-x64.h b/deps/v8/src/x64/codegen-x64.h index 7d1f59ad5f..811ac507d5 100644 --- a/deps/v8/src/x64/codegen-x64.h +++ b/deps/v8/src/x64/codegen-x64.h @@ -156,7 +156,7 @@ class StackArgumentsAccessor BASE_EMBEDDED { Operand GetArgumentOperand(int index); Operand GetReceiverOperand() { ASSERT(receiver_mode_ == ARGUMENTS_CONTAIN_RECEIVER); - return GetArgumentOperand(0);; + return GetArgumentOperand(0); } private: diff --git a/deps/v8/src/x64/deoptimizer-x64.cc b/deps/v8/src/x64/deoptimizer-x64.cc index 303b756cac..bf11e0860f 100644 --- a/deps/v8/src/x64/deoptimizer-x64.cc +++ b/deps/v8/src/x64/deoptimizer-x64.cc @@ -82,87 +82,6 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) { } -static const byte kJnsInstruction = 0x79; -static const byte kJnsOffset = 0x1d; -static const byte kCallInstruction = 0xe8; -static const byte kNopByteOne = 0x66; -static const byte kNopByteTwo = 0x90; - -// The back edge bookkeeping code matches the pattern: -// -// add <profiling_counter>, <-delta> -// jns ok -// call <stack guard> -// ok: -// -// We will patch away the branch so the code is: -// -// add <profiling_counter>, <-delta> ;; Not changed -// nop -// nop -// call <on-stack replacement> -// ok: -void Deoptimizer::PatchInterruptCodeAt(Code* unoptimized_code, - Address pc_after, - Code* replacement_code) { - // Turn the jump into nops. - Address call_target_address = pc_after - kIntSize; - *(call_target_address - 3) = kNopByteOne; - *(call_target_address - 2) = kNopByteTwo; - // Replace the call address. - Assembler::set_target_address_at(call_target_address, - replacement_code->entry()); - - unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch( - unoptimized_code, call_target_address, replacement_code); -} - - -void Deoptimizer::RevertInterruptCodeAt(Code* unoptimized_code, - Address pc_after, - Code* interrupt_code) { - // Restore the original jump. - Address call_target_address = pc_after - kIntSize; - *(call_target_address - 3) = kJnsInstruction; - *(call_target_address - 2) = kJnsOffset; - // Restore the original call address. - Assembler::set_target_address_at(call_target_address, - interrupt_code->entry()); - - interrupt_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch( - unoptimized_code, call_target_address, interrupt_code); -} - - -#ifdef DEBUG -Deoptimizer::InterruptPatchState Deoptimizer::GetInterruptPatchState( - Isolate* isolate, - Code* unoptimized_code, - Address pc_after) { - Address call_target_address = pc_after - kIntSize; - ASSERT_EQ(kCallInstruction, *(call_target_address - 1)); - if (*(call_target_address - 3) == kNopByteOne) { - ASSERT_EQ(kNopByteTwo, *(call_target_address - 2)); - Code* osr_builtin = - isolate->builtins()->builtin(Builtins::kOnStackReplacement); - ASSERT_EQ(osr_builtin->entry(), - Assembler::target_address_at(call_target_address)); - return PATCHED_FOR_OSR; - } else { - // Get the interrupt stub code object to match against from cache. - Code* interrupt_builtin = - isolate->builtins()->builtin(Builtins::kInterruptCheck); - ASSERT_EQ(interrupt_builtin->entry(), - Assembler::target_address_at(call_target_address)); - ASSERT_EQ(kJnsInstruction, *(call_target_address - 3)); - ASSERT_EQ(kJnsOffset, *(call_target_address - 2)); - return NOT_PATCHED; - } -} -#endif // DEBUG - - void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) { // Set the register values.
The values are not important as there are no // callee saved registers in JavaScript frames, so all registers are @@ -187,10 +106,7 @@ void Deoptimizer::SetPlatformCompiledStubRegisters( FrameDescription* output_frame, CodeStubInterfaceDescriptor* descriptor) { intptr_t handler = reinterpret_cast<intptr_t>(descriptor->deoptimization_handler_); - int params = descriptor->register_param_count_; - if (descriptor->stack_parameter_count_ != NULL) { - params++; - } + int params = descriptor->environment_length(); output_frame->SetRegister(rax.code(), params); output_frame->SetRegister(rbx.code(), handler); } diff --git a/deps/v8/src/x64/disasm-x64.cc b/deps/v8/src/x64/disasm-x64.cc index 9984a46307..7735b552fe 100644 --- a/deps/v8/src/x64/disasm-x64.cc +++ b/deps/v8/src/x64/disasm-x64.cc @@ -93,7 +93,7 @@ static const ByteMnemonic two_operands_instr[] = { { 0x39, OPER_REG_OP_ORDER, "cmp" }, { 0x3A, BYTE_REG_OPER_OP_ORDER, "cmp" }, { 0x3B, REG_OPER_OP_ORDER, "cmp" }, - { 0x63, REG_OPER_OP_ORDER, "movsxlq" }, + { 0x63, REG_OPER_OP_ORDER, "movsxl" }, { 0x84, BYTE_REG_OPER_OP_ORDER, "test" }, { 0x85, REG_OPER_OP_ORDER, "test" }, { 0x86, BYTE_REG_OPER_OP_ORDER, "xchg" }, @@ -1036,14 +1036,14 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) { get_modrm(*current, &mod, &regop, &rm); AppendToBuffer("extractps "); // reg/m32, xmm, imm8 current += PrintRightOperand(current); - AppendToBuffer(", %s, %d", NameOfCPURegister(regop), (*current) & 3); + AppendToBuffer(",%s,%d", NameOfXMMRegister(regop), (*current) & 3); current += 1; } else if (third_byte == 0x0b) { get_modrm(*current, &mod, &regop, &rm); // roundsd xmm, xmm/m64, imm8 - AppendToBuffer("roundsd %s, ", NameOfCPURegister(regop)); - current += PrintRightOperand(current); - AppendToBuffer(", %d", (*current) & 3); + AppendToBuffer("roundsd %s,", NameOfXMMRegister(regop)); + current += PrintRightXMMOperand(current); + AppendToBuffer(",%d", (*current) & 3); current += 1; } else { UnimplementedInstruction(); } @@ -1062,12 +1062,12 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) { } // else no immediate displacement. AppendToBuffer("nop"); } else if (opcode == 0x28) { - AppendToBuffer("movapd %s, ", NameOfXMMRegister(regop)); + AppendToBuffer("movapd %s,", NameOfXMMRegister(regop)); current += PrintRightXMMOperand(current); } else if (opcode == 0x29) { AppendToBuffer("movapd "); current += PrintRightXMMOperand(current); - AppendToBuffer(", %s", NameOfXMMRegister(regop)); + AppendToBuffer(",%s", NameOfXMMRegister(regop)); } else if (opcode == 0x6E) { AppendToBuffer("mov%c %s,", rex_w() ? 'q' : 'd', @@ -1081,15 +1081,15 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) { AppendToBuffer("mov%c ", rex_w() ?
'q' : 'd'); current += PrintRightOperand(current); - AppendToBuffer(", %s", NameOfXMMRegister(regop)); + AppendToBuffer(",%s", NameOfXMMRegister(regop)); } else if (opcode == 0x7F) { AppendToBuffer("movdqa "); current += PrintRightXMMOperand(current); - AppendToBuffer(", %s", NameOfXMMRegister(regop)); + AppendToBuffer(",%s", NameOfXMMRegister(regop)); } else if (opcode == 0xD6) { AppendToBuffer("movq "); current += PrintRightXMMOperand(current); - AppendToBuffer(", %s", NameOfXMMRegister(regop)); + AppendToBuffer(",%s", NameOfXMMRegister(regop)); } else if (opcode == 0x50) { AppendToBuffer("movmskpd %s,", NameOfCPURegister(regop)); current += PrintRightXMMOperand(current); @@ -1214,7 +1214,7 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) { } else if (opcode == 0x7E) { int mod, regop, rm; get_modrm(*current, &mod, &regop, &rm); - AppendToBuffer("movq %s, ", NameOfXMMRegister(regop)); + AppendToBuffer("movq %s,", NameOfXMMRegister(regop)); current += PrintRightXMMOperand(current); } else { UnimplementedInstruction(); } @@ -1238,7 +1238,7 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) { // movaps xmm, xmm/m128 int mod, regop, rm; get_modrm(*current, &mod, &regop, &rm); - AppendToBuffer("movaps %s, ", NameOfXMMRegister(regop)); + AppendToBuffer("movaps %s,", NameOfXMMRegister(regop)); current += PrintRightXMMOperand(current); } else if (opcode == 0x29) { @@ -1247,7 +1247,7 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) { get_modrm(*current, &mod, &regop, &rm); AppendToBuffer("movaps "); current += PrintRightXMMOperand(current); - AppendToBuffer(", %s", NameOfXMMRegister(regop)); + AppendToBuffer(",%s", NameOfXMMRegister(regop)); } else if (opcode == 0xA2) { // CPUID @@ -1260,18 +1260,25 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) { byte_size_operand_ = idesc.byte_size_operation; current += PrintOperands(idesc.mnem, idesc.op_order_, current); + } else if (opcode == 0x54) { + // andps xmm, xmm/m128 + int mod, regop, rm; + get_modrm(*current, &mod, &regop, &rm); + AppendToBuffer("andps %s,", NameOfXMMRegister(regop)); + current += PrintRightXMMOperand(current); + } else if (opcode == 0x57) { // xorps xmm, xmm/m128 int mod, regop, rm; get_modrm(*current, &mod, &regop, &rm); - AppendToBuffer("xorps %s, ", NameOfXMMRegister(regop)); + AppendToBuffer("xorps %s,", NameOfXMMRegister(regop)); current += PrintRightXMMOperand(current); } else if (opcode == 0x50) { // movmskps reg, xmm int mod, regop, rm; get_modrm(*current, &mod, &regop, &rm); - AppendToBuffer("movmskps %s, ", NameOfCPURegister(regop)); + AppendToBuffer("movmskps %s,", NameOfCPURegister(regop)); current += PrintRightXMMOperand(current); } else if ((opcode & 0xF0) == 0x80) { @@ -1450,7 +1457,7 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer, case SHORT_IMMEDIATE_INSTR: { byte* addr = reinterpret_cast<byte*>(*reinterpret_cast<int32_t*>(data + 1)); - AppendToBuffer("%s rax, %s", idesc.mnem, NameOfAddress(addr)); + AppendToBuffer("%s rax,%s", idesc.mnem, NameOfAddress(addr)); data += 5; break; } @@ -1599,7 +1606,7 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer, if (reg == 0) { AppendToBuffer("nop"); // Common name for xchg rax,rax. } else { - AppendToBuffer("xchg%c rax, %s", + AppendToBuffer("xchg%c rax,%s", operand_size_code(), NameOfCPURegister(reg)); } @@ -1628,12 +1635,12 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer, bool is_32bit = (opcode >= 0xB8); int reg = (opcode & 0x7) | (rex_b() ?
8 : 0); if (is_32bit) { - AppendToBuffer("mov%c %s, ", + AppendToBuffer("mov%c %s,", operand_size_code(), NameOfCPURegister(reg)); data += PrintImmediate(data, OPERAND_DOUBLEWORD_SIZE); } else { - AppendToBuffer("movb %s, ", + AppendToBuffer("movb %s,", NameOfByteCPURegister(reg)); data += PrintImmediate(data, OPERAND_BYTE_SIZE); } @@ -1755,7 +1762,7 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer, break; case 0x3C: - AppendToBuffer("cmp al, 0x%x", *reinterpret_cast<int8_t*>(data + 1)); + AppendToBuffer("cmp al,0x%x", *reinterpret_cast<int8_t*>(data + 1)); data +=2; break; diff --git a/deps/v8/src/x64/frames-x64.h b/deps/v8/src/x64/frames-x64.h index 2af5a81bb5..fb17964ada 100644 --- a/deps/v8/src/x64/frames-x64.h +++ b/deps/v8/src/x64/frames-x64.h @@ -70,11 +70,11 @@ class ExitFrameConstants : public AllStatic { static const int kSPOffset = -1 * kPointerSize; static const int kCallerFPOffset = +0 * kPointerSize; - static const int kCallerPCOffset = +1 * kPointerSize; + static const int kCallerPCOffset = kFPOnStackSize; // FP-relative displacement of the caller's SP. It points just // below the saved PC. - static const int kCallerSPDisplacement = +2 * kPointerSize; + static const int kCallerSPDisplacement = kCallerPCOffset + kPCOnStackSize; }; @@ -82,7 +82,7 @@ class JavaScriptFrameConstants : public AllStatic { public: // FP-relative. static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset; - static const int kLastParameterOffset = +2 * kPointerSize; + static const int kLastParameterOffset = kFPOnStackSize + kPCOnStackSize; static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset; // Caller SP-relative. diff --git a/deps/v8/src/x64/full-codegen-x64.cc b/deps/v8/src/x64/full-codegen-x64.cc index c24512ecae..02ba67b90e 100644 --- a/deps/v8/src/x64/full-codegen-x64.cc +++ b/deps/v8/src/x64/full-codegen-x64.cc @@ -140,10 +140,9 @@ void FullCodeGenerator::Generate() { Label ok; __ testq(rcx, rcx); __ j(zero, &ok, Label::kNear); - // +1 for return address. - int receiver_offset = (info->scope()->num_parameters() + 1) * kPointerSize; + StackArgumentsAccessor args(rsp, info->scope()->num_parameters()); __ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex); - __ movq(Operand(rsp, receiver_offset), kScratchRegister); + __ movq(args.GetReceiverOperand(), kScratchRegister); __ bind(&ok); } @@ -153,10 +152,7 @@ void FullCodeGenerator::Generate() { FrameScope frame_scope(masm_, StackFrame::MANUAL); info->set_prologue_offset(masm_->pc_offset()); - __ push(rbp); // Caller's frame pointer. - __ movq(rbp, rsp); - __ push(rsi); // Callee's context. - __ push(rdi); // Callee's JS Function. + __ Prologue(BUILD_FUNCTION_FRAME); info->AddNoFrameRange(0, masm_->pc_offset()); { Comment cmnt(masm_, "[ Allocate locals"); @@ -678,7 +674,8 @@ MemOperand FullCodeGenerator::StackOperand(Variable* var) { int offset = -var->index() * kPointerSize; // Adjust by a (parameter or local) base offset. 
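// (Editorial note, assuming kFPOnStackSize == kPCOnStackSize == kPointerSize as on
// stock x64: kFPOnStackSize + kPCOnStackSize + (n - 1) * kPointerSize equals
// (n + 1) * kPointerSize, so the rewritten parameter offset below is value-identical
// to the old "+1 for return address" form; it merely names the saved-FP and
// saved-PC slots instead of hard-coding pointer-size multiples.)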
if (var->IsParameter()) { - offset += (info_->scope()->num_parameters() + 1) * kPointerSize; + offset += kFPOnStackSize + kPCOnStackSize + + (info_->scope()->num_parameters() - 1) * kPointerSize; } else { offset += JavaScriptFrameConstants::kLocal0Offset; } @@ -1129,7 +1126,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) { Handle<Object>(Smi::FromInt(TypeFeedbackCells::kForInFastCaseMarker), isolate())); RecordTypeFeedbackCell(stmt->ForInFeedbackId(), cell); - __ LoadHeapObject(rbx, cell); + __ Move(rbx, cell); __ Move(FieldOperand(rbx, Cell::kValueOffset), Smi::FromInt(TypeFeedbackCells::kForInSlowCaseMarker)); @@ -1609,21 +1606,15 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) { : ObjectLiteral::kNoFlags; int properties_count = constant_properties->length() / 2; if ((FLAG_track_double_fields && expr->may_store_doubles()) || - expr->depth() > 1) { - __ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset)); - __ push(FieldOperand(rdi, JSFunction::kLiteralsOffset)); - __ Push(Smi::FromInt(expr->literal_index())); - __ Push(constant_properties); - __ Push(Smi::FromInt(flags)); - __ CallRuntime(Runtime::kCreateObjectLiteral, 4); - } else if (Serializer::enabled() || flags != ObjectLiteral::kFastElements || + expr->depth() > 1 || Serializer::enabled() || + flags != ObjectLiteral::kFastElements || properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) { __ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset)); __ push(FieldOperand(rdi, JSFunction::kLiteralsOffset)); __ Push(Smi::FromInt(expr->literal_index())); __ Push(constant_properties); __ Push(Smi::FromInt(flags)); - __ CallRuntime(Runtime::kCreateObjectLiteralShallow, 4); + __ CallRuntime(Runtime::kCreateObjectLiteral, 4); } else { __ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset)); __ movq(rax, FieldOperand(rdi, JSFunction::kLiteralsOffset)); @@ -2638,7 +2629,8 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) { } // Push the receiver of the enclosing function and do runtime call. - __ push(Operand(rbp, (2 + info_->scope()->num_parameters()) * kPointerSize)); + StackArgumentsAccessor args(rbp, info_->scope()->num_parameters()); + __ push(args.GetReceiverOperand()); // Push the language mode. __ Push(Smi::FromInt(language_mode())); @@ -3513,8 +3505,8 @@ void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); ASSERT_EQ(args->length(), 1); - // Load the argument on the stack and call the stub. - VisitForStackValue(args->at(0)); + // Load the argument into rax and call the stub. 
+ VisitForAccumulatorValue(args->at(0)); NumberToStringStub stub; __ CallStub(&stub); @@ -4883,6 +4875,79 @@ FullCodeGenerator::NestedStatement* FullCodeGenerator::TryFinally::Exit( #undef __ + + +static const byte kJnsInstruction = 0x79; +static const byte kJnsOffset = 0x1d; +static const byte kCallInstruction = 0xe8; +static const byte kNopByteOne = 0x66; +static const byte kNopByteTwo = 0x90; + + +void BackEdgeTable::PatchAt(Code* unoptimized_code, + Address pc, + BackEdgeState target_state, + Code* replacement_code) { + Address call_target_address = pc - kIntSize; + Address jns_instr_address = call_target_address - 3; + Address jns_offset_address = call_target_address - 2; + + switch (target_state) { + case INTERRUPT: + // sub <profiling_counter>, <delta> ;; Not changed + // jns ok + // call <interrupt stub> + // ok: + *jns_instr_address = kJnsInstruction; + *jns_offset_address = kJnsOffset; + break; + case ON_STACK_REPLACEMENT: + case OSR_AFTER_STACK_CHECK: + // sub <profiling_counter>, <delta> ;; Not changed + // nop + // nop + // call <on-stack replacement> + // ok: + *jns_instr_address = kNopByteOne; + *jns_offset_address = kNopByteTwo; + break; + } + + Assembler::set_target_address_at(call_target_address, + replacement_code->entry()); + unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch( + unoptimized_code, call_target_address, replacement_code); +} + + +BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState( + Isolate* isolate, + Code* unoptimized_code, + Address pc) { + Address call_target_address = pc - kIntSize; + Address jns_instr_address = call_target_address - 3; + ASSERT_EQ(kCallInstruction, *(call_target_address - 1)); + + if (*jns_instr_address == kJnsInstruction) { + ASSERT_EQ(kJnsOffset, *(call_target_address - 2)); + ASSERT_EQ(isolate->builtins()->InterruptCheck()->entry(), + Assembler::target_address_at(call_target_address)); + return INTERRUPT; + } + + ASSERT_EQ(kNopByteOne, *jns_instr_address); + ASSERT_EQ(kNopByteTwo, *(call_target_address - 2)); + + if (Assembler::target_address_at(call_target_address) == + isolate->builtins()->OnStackReplacement()->entry()) { + return ON_STACK_REPLACEMENT; + } + + ASSERT_EQ(isolate->builtins()->OsrAfterStackCheck()->entry(), + Assembler::target_address_at(call_target_address)); + return OSR_AFTER_STACK_CHECK; +} + + } } // namespace v8::internal #endif // V8_TARGET_ARCH_X64 diff --git a/deps/v8/src/x64/ic-x64.cc b/deps/v8/src/x64/ic-x64.cc index 4a7c68a53c..15f410c134 100644 --- a/deps/v8/src/x64/ic-x64.cc +++ b/deps/v8/src/x64/ic-x64.cc @@ -1330,7 +1330,7 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm) { // Probe the stub cache. Code::Flags flags = Code::ComputeFlags( - Code::STUB, MONOMORPHIC, Code::kNoExtraICState, + Code::HANDLER, MONOMORPHIC, Code::kNoExtraICState, Code::NORMAL, Code::LOAD_IC); masm->isolate()->stub_cache()->GenerateProbe( masm, flags, rax, rcx, rbx, rdx); @@ -1451,7 +1451,7 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm, // Get the receiver from the stack and probe the stub cache.
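// (Editorial note: as in the LoadIC hunk just above, the megamorphic store path
// now asks the stub cache for Code::HANDLER entries rather than Code::STUB,
// presumably matching the kind under which IC handlers are registered after
// this change.)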
Code::Flags flags = Code::ComputeFlags( - Code::STUB, MONOMORPHIC, strict_mode, + Code::HANDLER, MONOMORPHIC, strict_mode, Code::NORMAL, Code::STORE_IC); masm->isolate()->stub_cache()->GenerateProbe( masm, flags, rdx, rcx, rbx, no_reg); diff --git a/deps/v8/src/x64/lithium-codegen-x64.cc b/deps/v8/src/x64/lithium-codegen-x64.cc index 9dca6b3e20..7c70094fbf 100644 --- a/deps/v8/src/x64/lithium-codegen-x64.cc +++ b/deps/v8/src/x64/lithium-codegen-x64.cc @@ -89,9 +89,7 @@ void LCodeGen::FinishCode(Handle<Code> code) { ASSERT(is_done()); code->set_stack_slots(GetStackSlotCount()); code->set_safepoint_table_offset(safepoints_.GetCodeOffset()); - if (FLAG_weak_embedded_maps_in_optimized_code) { - RegisterDependentCodeForEmbeddedMaps(code); - } + RegisterDependentCodeForEmbeddedMaps(code); PopulateDeoptimizationData(code); info()->CommitDependencies(code); } @@ -103,24 +101,6 @@ void LChunkBuilder::Abort(BailoutReason reason) { } -void LCodeGen::Comment(const char* format, ...) { - if (!FLAG_code_comments) return; - char buffer[4 * KB]; - StringBuilder builder(buffer, ARRAY_SIZE(buffer)); - va_list arguments; - va_start(arguments, format); - builder.AddFormattedList(format, arguments); - va_end(arguments); - - // Copy the string before recording it in the assembler to avoid - // issues when the stack allocated buffer goes out of scope. - int length = builder.position(); - Vector<char> copy = Vector<char>::New(length + 1); - OS::MemCopy(copy.start(), builder.Finalize(), copy.length()); - masm()->RecordComment(copy.start()); -} - - #ifdef _MSC_VER void LCodeGen::MakeSureStackPagesMapped(int offset) { const int kPageSize = 4 * KB; @@ -152,10 +132,9 @@ bool LCodeGen::GeneratePrologue() { Label ok; __ testq(rcx, rcx); __ j(zero, &ok, Label::kNear); - // +1 for return address. - int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize; + StackArgumentsAccessor args(rsp, scope()->num_parameters()); __ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex); - __ movq(Operand(rsp, receiver_offset), kScratchRegister); + __ movq(args.GetReceiverOperand(), kScratchRegister); __ bind(&ok); } } @@ -164,14 +143,7 @@ bool LCodeGen::GeneratePrologue() { if (NeedsEagerFrame()) { ASSERT(!frame_is_built_); frame_is_built_ = true; - __ push(rbp); // Caller's frame pointer. - __ movq(rbp, rsp); - __ push(rsi); // Callee's context. - if (info()->IsStub()) { - __ Push(Smi::FromInt(StackFrame::STUB)); - } else { - __ push(rdi); // Callee's JS function. - } + __ Prologue(info()->IsStub() ? BUILD_STUB_FRAME : BUILD_FUNCTION_FRAME); info()->AddNoFrameRange(0, masm_->pc_offset()); } @@ -273,36 +245,6 @@ void LCodeGen::GenerateOsrPrologue() { } -bool LCodeGen::GenerateBody() { - ASSERT(is_generating()); - bool emit_instructions = true; - for (current_instruction_ = 0; - !is_aborted() && current_instruction_ < instructions_->length(); - current_instruction_++) { - LInstruction* instr = instructions_->at(current_instruction_); - - // Don't emit code for basic blocks with a replacement. 
- if (instr->IsLabel()) { - emit_instructions = !LLabel::cast(instr)->HasReplacement(); - } - if (!emit_instructions) continue; - - if (FLAG_code_comments && instr->HasInterestingComment(this)) { - Comment(";;; <@%d,#%d> %s", - current_instruction_, - instr->hydrogen_value()->id(), - instr->Mnemonic()); - } - - RecordAndUpdatePosition(instr->position()); - - instr->CompileToNative(this); - } - EnsureSpaceForLazyDeopt(Deoptimizer::patch_size()); - return !is_aborted(); -} - - bool LCodeGen::GenerateJumpTable() { Label needs_frame; if (jump_table_.length() > 0) { @@ -350,8 +292,9 @@ bool LCodeGen::GenerateDeferredCode() { for (int i = 0; !is_aborted() && i < deferred_.length(); i++) { LDeferredCode* code = deferred_[i]; - int pos = instructions_->at(code->instruction_index())->position(); - RecordAndUpdatePosition(pos); + HValue* value = + instructions_->at(code->instruction_index())->hydrogen_value(); + RecordAndWritePosition(value->position()); Comment(";;; <@%d,#%d> " "-------------------- Deferred %s --------------------", @@ -614,8 +557,6 @@ void LCodeGen::CallCodeGeneric(Handle<Code> code, int argc) { EnsureSpaceForLazyDeopt(Deoptimizer::patch_size() - masm()->CallSize(code)); ASSERT(instr != NULL); - LPointerMap* pointers = instr->pointer_map(); - RecordPosition(pointers->position()); __ call(code, mode); RecordSafepointWithLazyDeopt(instr, safepoint_mode, argc); @@ -637,13 +578,13 @@ void LCodeGen::CallCode(Handle<Code> code, void LCodeGen::CallRuntime(const Runtime::Function* function, int num_arguments, - LInstruction* instr) { + LInstruction* instr, + SaveFPRegsMode save_doubles) { ASSERT(instr != NULL); ASSERT(instr->HasPointerMap()); - LPointerMap* pointers = instr->pointer_map(); - RecordPosition(pointers->position()); - __ CallRuntime(function, num_arguments); + __ CallRuntime(function, num_arguments, save_doubles); + RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT, 0); } @@ -754,26 +695,31 @@ void LCodeGen::DeoptimizeIf(Condition cc, void LCodeGen::RegisterDependentCodeForEmbeddedMaps(Handle<Code> code) { ZoneList<Handle<Map> > maps(1, zone()); + ZoneList<Handle<JSObject> > objects(1, zone()); int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT); for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) { - RelocInfo::Mode mode = it.rinfo()->rmode(); - if (mode == RelocInfo::EMBEDDED_OBJECT && - it.rinfo()->target_object()->IsMap()) { - Handle<Map> map(Map::cast(it.rinfo()->target_object())); - if (map->CanTransition()) { + if (Code::IsWeakEmbeddedObject(code->kind(), it.rinfo()->target_object())) { + if (it.rinfo()->target_object()->IsMap()) { + Handle<Map> map(Map::cast(it.rinfo()->target_object())); maps.Add(map, zone()); + } else if (it.rinfo()->target_object()->IsJSObject()) { + Handle<JSObject> object(JSObject::cast(it.rinfo()->target_object())); + objects.Add(object, zone()); } } } #ifdef VERIFY_HEAP - // This disables verification of weak embedded maps after full GC. + // This disables verification of weak embedded objects after full GC. // AddDependentCode can cause a GC, which would observe the state where // this code is not yet in the depended code lists of the embedded maps. 
- NoWeakEmbeddedMapsVerificationScope disable_verification_of_embedded_maps; + NoWeakObjectVerificationScope disable_verification_of_embedded_objects; #endif for (int i = 0; i < maps.length(); i++) { maps.at(i)->AddDependentCode(DependentCode::kWeaklyEmbeddedGroup, code); } + for (int i = 0; i < objects.length(); i++) { + AddWeakObjectToCodeDependency(isolate()->heap(), objects.at(i), code); + } } @@ -884,7 +830,7 @@ void LCodeGen::RecordSafepoint(LPointerMap* pointers, void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) { - LPointerMap empty_pointers(RelocInfo::kNoPosition, zone()); + LPointerMap empty_pointers(zone()); RecordSafepoint(&empty_pointers, deopt_mode); } @@ -896,17 +842,10 @@ void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers, } -void LCodeGen::RecordPosition(int position) { +void LCodeGen::RecordAndWritePosition(int position) { if (position == RelocInfo::kNoPosition) return; masm()->positions_recorder()->RecordPosition(position); -} - - -void LCodeGen::RecordAndUpdatePosition(int position) { - if (position >= 0 && position != old_position_) { - masm()->positions_recorder()->RecordPosition(position); - old_position_ = position; - } + masm()->positions_recorder()->WriteRecordedPositions(); } @@ -973,11 +912,6 @@ void LCodeGen::DoCallStub(LCallStub* instr) { CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); break; } - case CodeStub::NumberToString: { - NumberToStringStub stub; - CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); - break; - } case CodeStub::StringCompare: { StringCompareStub stub; CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); @@ -1615,8 +1549,7 @@ void LCodeGen::DoConstantE(LConstantE* instr) { void LCodeGen::DoConstantT(LConstantT* instr) { Handle<Object> value = instr->value(isolate()); - AllowDeferredHandleDereference smi_check; - __ LoadObject(ToRegister(instr->result()), value); + __ Move(ToRegister(instr->result()), value); } @@ -1832,7 +1765,7 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) { __ jmp(&return_right, Label::kNear); __ bind(&check_zero); - XMMRegister xmm_scratch = xmm0; + XMMRegister xmm_scratch = double_scratch0(); __ xorps(xmm_scratch, xmm_scratch); __ ucomisd(left_reg, xmm_scratch); __ j(not_equal, &return_left, Label::kNear); // left == right != 0. 
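(Editorial aside: this hunk and those that follow stop hard-coding xmm0 as the double scratch register and route every use through double_scratch0(), which the lithium-codegen-x64.h hunk later in this patch defines simply as XMMRegister double_scratch0() const { return xmm0; } — the register itself is unchanged, but the choice is now made in exactly one place.)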
@@ -1878,15 +1811,17 @@ void LCodeGen::DoArithmeticD(LArithmeticD* instr) { // when there is a mulsd depending on the result __ movaps(left, left); break; - case Token::MOD: + case Token::MOD: { + XMMRegister xmm_scratch = double_scratch0(); __ PrepareCallCFunction(2); - __ movaps(xmm0, left); + __ movaps(xmm_scratch, left); ASSERT(right.is(xmm1)); __ CallCFunction( ExternalReference::double_fp_operation(Token::MOD, isolate()), 2); __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset)); - __ movaps(result, xmm0); + __ movaps(result, xmm_scratch); break; + } default: UNREACHABLE(); break; @@ -1905,14 +1840,6 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) { } -int LCodeGen::GetNextEmittedBlock() const { - for (int i = current_block_ + 1; i < graph()->blocks()->length(); ++i) { - if (!chunk_->GetLabel(i)->HasReplacement()) return i; - } - return -1; -} - - template<class InstrType> void LCodeGen::EmitBranch(InstrType instr, Condition cc) { int left_block = instr->TrueDestination(chunk_); @@ -1947,25 +1874,6 @@ void LCodeGen::DoDebugBreak(LDebugBreak* instr) { } -void LCodeGen::DoIsNumberAndBranch(LIsNumberAndBranch* instr) { - Representation r = instr->hydrogen()->value()->representation(); - if (r.IsSmiOrInteger32() || r.IsDouble()) { - EmitBranch(instr, no_condition); - } else { - ASSERT(r.IsTagged()); - Register reg = ToRegister(instr->value()); - HType type = instr->hydrogen()->value()->type(); - if (type.IsTaggedNumber()) { - EmitBranch(instr, no_condition); - } - __ JumpIfSmi(reg, instr->TrueLabel(chunk_)); - __ CompareRoot(FieldOperand(reg, HeapObject::kMapOffset), - Heap::kHeapNumberMapRootIndex); - EmitBranch(instr, equal); - } -} - - void LCodeGen::DoBranch(LBranch* instr) { Representation r = instr->hydrogen()->value()->representation(); if (r.IsInteger32()) { @@ -1981,8 +1889,9 @@ } else if (r.IsDouble()) { ASSERT(!info()->IsStub()); XMMRegister reg = ToDoubleRegister(instr->value()); - __ xorps(xmm0, xmm0); - __ ucomisd(reg, xmm0); + XMMRegister xmm_scratch = double_scratch0(); + __ xorps(xmm_scratch, xmm_scratch); + __ ucomisd(reg, xmm_scratch); EmitBranch(instr, not_equal); } else { ASSERT(r.IsTagged()); @@ -2001,8 +1910,9 @@ EmitBranch(instr, no_condition); } else if (type.IsHeapNumber()) { ASSERT(!info()->IsStub()); - __ xorps(xmm0, xmm0); - __ ucomisd(xmm0, FieldOperand(reg, HeapNumber::kValueOffset)); + XMMRegister xmm_scratch = double_scratch0(); + __ xorps(xmm_scratch, xmm_scratch); + __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset)); EmitBranch(instr, not_equal); } else if (type.IsString()) { ASSERT(!info()->IsStub()); @@ -2083,8 +1993,9 @@ Label not_heap_number; __ CompareRoot(map, Heap::kHeapNumberMapRootIndex); __ j(not_equal, &not_heap_number, Label::kNear); - __ xorps(xmm0, xmm0); - __ ucomisd(xmm0, FieldOperand(reg, HeapNumber::kValueOffset)); + XMMRegister xmm_scratch = double_scratch0(); + __ xorps(xmm_scratch, xmm_scratch); + __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset)); __ j(zero, instr->FalseLabel(chunk_)); __ jmp(instr->TrueLabel(chunk_)); __ bind(&not_heap_number); @@ -2119,6 +2030,10 @@ inline Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) { case Token::EQ_STRICT: cond = equal; break; + case Token::NE: + case Token::NE_STRICT: + cond = not_equal; + break; case Token::LT: cond = is_unsigned ?
below : less; break; @@ -2206,7 +2121,7 @@ void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) { if (instr->right()->IsConstantOperand()) { Handle<Object> right = ToHandle(LConstantOperand::cast(instr->right())); - __ CmpObject(left, right); + __ Cmp(left, right); } else { Register right = ToRegister(instr->right()); __ cmpq(left, right); @@ -2574,7 +2489,7 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr, InstanceofStub stub(flags); __ push(ToRegister(instr->value())); - __ PushHeapObject(instr->function()); + __ Push(instr->function()); static const int kAdditionalDelta = 10; int delta = @@ -2610,14 +2525,6 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr, } -void LCodeGen::DoInstanceSize(LInstanceSize* instr) { - Register object = ToRegister(instr->object()); - Register result = ToRegister(instr->result()); - __ movq(result, FieldOperand(object, HeapObject::kMapOffset)); - __ movzxbq(result, FieldOperand(result, Map::kInstanceSizeOffset)); -} - - void LCodeGen::DoCmpT(LCmpT* instr) { Token::Value op = instr->op(); @@ -2682,7 +2589,7 @@ void LCodeGen::DoReturn(LReturn* instr) { void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) { Register result = ToRegister(instr->result()); - __ LoadGlobalCell(result, instr->hydrogen()->cell()); + __ LoadGlobalCell(result, instr->hydrogen()->cell().handle()); if (instr->hydrogen()->RequiresHoleCheck()) { __ CompareRoot(result, Heap::kTheHoleValueRootIndex); DeoptimizeIf(equal, instr->environment()); @@ -2704,7 +2611,7 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) { void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) { Register value = ToRegister(instr->value()); - Handle<Cell> cell_handle = instr->hydrogen()->cell(); + Handle<Cell> cell_handle = instr->hydrogen()->cell().handle(); // If the cell we are storing to contains the hole it could have // been deleted from the property dictionary. 
In that case, we need @@ -2805,7 +2712,7 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) { __ load_rax(ToExternalReference(LConstantOperand::cast(instr->object()))); } else { Register object = ToRegister(instr->object()); - __ movq(result, MemOperand(object, offset)); + __ Load(result, MemOperand(object, offset), access.representation()); } return; } @@ -2819,12 +2726,11 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) { } Register result = ToRegister(instr->result()); - if (access.IsInobject()) { - __ movq(result, FieldOperand(object, offset)); - } else { + if (!access.IsInobject()) { __ movq(result, FieldOperand(object, JSObject::kPropertiesOffset)); - __ movq(result, FieldOperand(result, offset)); + object = result; } + __ Load(result, FieldOperand(object, offset), access.representation()); } @@ -2879,6 +2785,12 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) { } +void LCodeGen::DoLoadRoot(LLoadRoot* instr) { + Register result = ToRegister(instr->result()); + __ LoadRoot(result, instr->index()); +} + + void LCodeGen::DoLoadExternalArrayPointer( LLoadExternalArrayPointer* instr) { Register result = ToRegister(instr->result()); @@ -2896,8 +2808,9 @@ void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) { instr->index()->IsConstantOperand()) { int32_t const_index = ToInteger32(LConstantOperand::cast(instr->index())); int32_t const_length = ToInteger32(LConstantOperand::cast(instr->length())); - int index = (const_length - const_index) + 1; - __ movq(result, Operand(arguments, index * kPointerSize)); + StackArgumentsAccessor args(arguments, const_length, + ARGUMENTS_DONT_CONTAIN_RECEIVER); + __ movq(result, args.GetArgumentOperand(const_index)); } else { Register length = ToRegister(instr->length()); // There are two words between the frame pointer and the last argument. @@ -2907,8 +2820,9 @@ void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) { } else { __ subl(length, ToOperand(instr->index())); } - __ movq(result, - Operand(arguments, length, times_pointer_size, kPointerSize)); + StackArgumentsAccessor args(arguments, length, + ARGUMENTS_DONT_CONTAIN_RECEIVER); + __ movq(result, args.GetArgumentOperand(0)); } } @@ -3112,7 +3026,7 @@ void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) { Register result = ToRegister(instr->result()); if (instr->hydrogen()->from_inlined()) { - __ lea(result, Operand(rsp, -2 * kPointerSize)); + __ lea(result, Operand(rsp, -kFPOnStackSize + -kPCOnStackSize)); } else { // Check for arguments adapter frame. Label done, adapted; @@ -3234,7 +3148,9 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) { __ testl(length, length); __ j(zero, &invoke, Label::kNear); __ bind(&loop); - __ push(Operand(elements, length, times_pointer_size, 1 * kPointerSize)); + StackArgumentsAccessor args(elements, length, + ARGUMENTS_DONT_CONTAIN_RECEIVER); + __ push(args.GetArgumentOperand(0)); __ decl(length); __ j(not_zero, &loop); @@ -3242,7 +3158,6 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) { __ bind(&invoke); ASSERT(instr->HasPointerMap()); LPointerMap* pointers = instr->pointer_map(); - RecordPosition(pointers->position()); SafepointGenerator safepoint_generator( this, pointers, Safepoint::kLazyDeopt); ParameterCount actual(rax); @@ -3285,7 +3200,7 @@ void LCodeGen::DoOuterContext(LOuterContext* instr) { void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) { __ push(rsi); // The context is the first argument. 
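// (Editorial note: here and in nearby hunks the special-purpose LoadHeapObject,
// PushHeapObject and CmpObject/CmpHeapObject helpers give way to the generic
// Move, Push and Cmp overloads, which now accept a Handle<Object> directly.)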
- __ PushHeapObject(instr->hydrogen()->pairs()); + __ Push(instr->hydrogen()->pairs()); __ Push(Smi::FromInt(instr->hydrogen()->flags())); CallRuntime(Runtime::kDeclareGlobals, 3, instr); } @@ -3316,11 +3231,10 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function, dont_adapt_arguments || formal_parameter_count == arity; LPointerMap* pointers = instr->pointer_map(); - RecordPosition(pointers->position()); if (can_invoke_directly) { if (rdi_state == RDI_UNINITIALIZED) { - __ LoadHeapObject(rdi, function); + __ Move(rdi, function); } // Change context. @@ -3401,10 +3315,10 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) { __ LoadFromSafepointRegisterSlot(input_reg, input_reg); __ bind(&allocated); - __ movq(tmp2, FieldOperand(input_reg, HeapNumber::kValueOffset)); + __ MoveDouble(tmp2, FieldOperand(input_reg, HeapNumber::kValueOffset)); __ shl(tmp2, Immediate(1)); __ shr(tmp2, Immediate(1)); - __ movq(FieldOperand(tmp, HeapNumber::kValueOffset), tmp2); + __ MoveDouble(FieldOperand(tmp, HeapNumber::kValueOffset), tmp2); __ StoreToSafepointRegisterSlot(input_reg, tmp); __ bind(&done); @@ -3451,11 +3365,11 @@ void LCodeGen::DoMathAbs(LMathAbs* instr) { Representation r = instr->hydrogen()->value()->representation(); if (r.IsDouble()) { - XMMRegister scratch = xmm0; + XMMRegister scratch = double_scratch0(); XMMRegister input_reg = ToDoubleRegister(instr->value()); __ xorps(scratch, scratch); __ subsd(scratch, input_reg); - __ andpd(input_reg, scratch); + __ andps(input_reg, scratch); } else if (r.IsInteger32()) { EmitIntegerMathAbs(instr); } else if (r.IsSmi()) { @@ -3473,7 +3387,7 @@ void LCodeGen::DoMathAbs(LMathAbs* instr) { void LCodeGen::DoMathFloor(LMathFloor* instr) { - XMMRegister xmm_scratch = xmm0; + XMMRegister xmm_scratch = double_scratch0(); Register output_reg = ToRegister(instr->result()); XMMRegister input_reg = ToDoubleRegister(instr->value()); @@ -3520,7 +3434,7 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) { __ bind(&negative_sign); // Truncate, then compare and compensate. __ cvttsd2si(output_reg, input_reg); - __ cvtlsi2sd(xmm_scratch, output_reg); + __ Cvtlsi2sd(xmm_scratch, output_reg); __ ucomisd(input_reg, xmm_scratch); __ j(equal, &done, Label::kNear); __ subl(output_reg, Immediate(1)); @@ -3532,7 +3446,7 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) { void LCodeGen::DoMathRound(LMathRound* instr) { - const XMMRegister xmm_scratch = xmm0; + const XMMRegister xmm_scratch = double_scratch0(); Register output_reg = ToRegister(instr->result()); XMMRegister input_reg = ToDoubleRegister(instr->value()); static int64_t one_half = V8_INT64_C(0x3FE0000000000000); // 0.5 @@ -3569,7 +3483,7 @@ void LCodeGen::DoMathRound(LMathRound* instr) { __ RecordComment("D2I conversion overflow"); DeoptimizeIf(equal, instr->environment()); - __ cvtlsi2sd(xmm_scratch, output_reg); + __ Cvtlsi2sd(xmm_scratch, output_reg); __ ucomisd(input_reg, xmm_scratch); __ j(equal, &restore, Label::kNear); __ subl(output_reg, Immediate(1)); @@ -3600,7 +3514,7 @@ void LCodeGen::DoMathSqrt(LMathSqrt* instr) { void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) { - XMMRegister xmm_scratch = xmm0; + XMMRegister xmm_scratch = double_scratch0(); XMMRegister input_reg = ToDoubleRegister(instr->value()); ASSERT(ToDoubleRegister(instr->result()).is(input_reg)); @@ -3717,8 +3631,7 @@ void LCodeGen::DoRandom(LRandom* instr) { // by computing: // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)). 
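// (Editorial note, working the constant through: 0x4130000000000000 has biased
// exponent 0x413 = 1043, i.e. 2^(1043 - 1023) = 2^20, so it encodes 1.0 x 2^20.
// With 32 random bits r in the low fraction bits the value is
// 2^20 * (1 + r / 2^52) = 2^20 + r * 2^-32; subtracting 1.0 x 2^20 leaves
// r / 2^32, uniformly distributed in [0, 1).)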
XMMRegister result = ToDoubleRegister(instr->result()); - // We use xmm0 as fixed scratch register here. - XMMRegister scratch4 = xmm0; + XMMRegister scratch4 = double_scratch0(); __ movq(scratch3, V8_INT64_C(0x4130000000000000), RelocInfo::NONE64); // 1.0 x 2^20 as double __ movq(scratch4, scratch3); @@ -3731,18 +3644,44 @@ void LCodeGen::DoRandom(LRandom* instr) { void LCodeGen::DoMathExp(LMathExp* instr) { XMMRegister input = ToDoubleRegister(instr->value()); XMMRegister result = ToDoubleRegister(instr->result()); + XMMRegister temp0 = double_scratch0(); Register temp1 = ToRegister(instr->temp1()); Register temp2 = ToRegister(instr->temp2()); - MathExpGenerator::EmitMathExp(masm(), input, result, xmm0, temp1, temp2); + MathExpGenerator::EmitMathExp(masm(), input, result, temp0, temp1, temp2); } void LCodeGen::DoMathLog(LMathLog* instr) { - ASSERT(ToDoubleRegister(instr->result()).is(xmm1)); - TranscendentalCacheStub stub(TranscendentalCache::LOG, - TranscendentalCacheStub::UNTAGGED); - CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); + ASSERT(instr->value()->Equals(instr->result())); + XMMRegister input_reg = ToDoubleRegister(instr->value()); + XMMRegister xmm_scratch = double_scratch0(); + Label positive, done, zero; + __ xorps(xmm_scratch, xmm_scratch); + __ ucomisd(input_reg, xmm_scratch); + __ j(above, &positive, Label::kNear); + __ j(equal, &zero, Label::kNear); + ExternalReference nan = + ExternalReference::address_of_canonical_non_hole_nan(); + Operand nan_operand = masm()->ExternalOperand(nan); + __ movsd(input_reg, nan_operand); + __ jmp(&done, Label::kNear); + __ bind(&zero); + ExternalReference ninf = + ExternalReference::address_of_negative_infinity(); + Operand ninf_operand = masm()->ExternalOperand(ninf); + __ movsd(input_reg, ninf_operand); + __ jmp(&done, Label::kNear); + __ bind(&positive); + __ fldln2(); + __ subq(rsp, Immediate(kDoubleSize)); + __ movsd(Operand(rsp, 0), input_reg); + __ fld_d(Operand(rsp, 0)); + __ fyl2x(); + __ fstp_d(Operand(rsp, 0)); + __ movsd(input_reg, Operand(rsp, 0)); + __ addq(rsp, Immediate(kDoubleSize)); + __ bind(&done); } @@ -3777,7 +3716,6 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) { Handle<JSFunction> known_function = instr->hydrogen()->known_function(); if (known_function.is_null()) { LPointerMap* pointers = instr->pointer_map(); - RecordPosition(pointers->position()); SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt); ParameterCount count(instr->arity()); __ InvokeFunction(rdi, count, CALL_FUNCTION, generator, CALL_AS_METHOD); @@ -3910,7 +3848,7 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) { void LCodeGen::DoCallRuntime(LCallRuntime* instr) { - CallRuntime(instr->function(), instr->arity(), instr); + CallRuntime(instr->function(), instr->arity(), instr, instr->save_doubles()); } @@ -3940,11 +3878,12 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) { Register value = ToRegister(instr->value()); if (instr->object()->IsConstantOperand()) { ASSERT(value.is(rax)); + ASSERT(!access.representation().IsSpecialization()); LConstantOperand* object = LConstantOperand::cast(instr->object()); __ store_rax(ToExternalReference(object)); } else { Register object = ToRegister(instr->object()); - __ movq(MemOperand(object, offset), value); + __ Store(MemOperand(object, offset), value, representation); } return; } @@ -4013,15 +3952,16 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) { if (instr->value()->IsConstantOperand()) { LConstantOperand* operand_value = 
LConstantOperand::cast(instr->value()); if (operand_value->IsRegister()) { - __ movq(FieldOperand(write_register, offset), - ToRegister(operand_value)); + Register value = ToRegister(operand_value); + __ Store(FieldOperand(write_register, offset), value, representation); } else { Handle<Object> handle_value = ToHandle(operand_value); ASSERT(!instr->hydrogen()->NeedsWriteBarrier()); __ Move(FieldOperand(write_register, offset), handle_value); } } else { - __ movq(FieldOperand(write_register, offset), ToRegister(instr->value())); + Register value = ToRegister(instr->value()); + __ Store(FieldOperand(write_register, offset), value, representation); } if (instr->hydrogen()->NeedsWriteBarrier()) { @@ -4325,8 +4265,10 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) { void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) { Register object = ToRegister(instr->object()); Register temp = ToRegister(instr->temp()); - __ TestJSArrayForAllocationMemento(object, temp); + Label no_memento_found; + __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found); DeoptimizeIf(equal, instr->environment()); + __ bind(&no_memento_found); } @@ -4449,9 +4391,9 @@ void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) { LOperand* output = instr->result(); ASSERT(output->IsDoubleRegister()); if (input->IsRegister()) { - __ cvtlsi2sd(ToDoubleRegister(output), ToRegister(input)); + __ Cvtlsi2sd(ToDoubleRegister(output), ToRegister(input)); } else { - __ cvtlsi2sd(ToDoubleRegister(output), ToOperand(input)); + __ Cvtlsi2sd(ToDoubleRegister(output), ToOperand(input)); } } @@ -4479,6 +4421,22 @@ void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) { } +void LCodeGen::DoUint32ToSmi(LUint32ToSmi* instr) { + LOperand* input = instr->value(); + ASSERT(input->IsRegister()); + LOperand* output = instr->result(); + if (!instr->hydrogen()->value()->HasRange() || + !instr->hydrogen()->value()->range()->IsInSmiRange() || + instr->hydrogen()->value()->range()->upper() == kMaxInt) { + // The Range class can't express upper bounds in the (kMaxInt, kMaxUint32] + // interval, so we treat kMaxInt as a sentinel for this entire interval. + __ testl(ToRegister(input), Immediate(0x80000000)); + DeoptimizeIf(not_zero, instr->environment()); + } + __ Integer32ToSmi(ToRegister(output), ToRegister(input)); +} + + void LCodeGen::DoNumberTagI(LNumberTagI* instr) { LOperand* input = instr->value(); ASSERT(input->IsRegister() && input->Equals(instr->result())); @@ -4517,15 +4475,17 @@ void LCodeGen::DoDeferredNumberTagU(LNumberTagU* instr) { Label slow; Register reg = ToRegister(instr->value()); Register tmp = reg.is(rax) ? rcx : rax; + XMMRegister temp_xmm = ToDoubleRegister(instr->temp()); // Preserve the value of all registers. PushSafepointRegistersScope scope(this); Label done; - // Load value into xmm1 which will be preserved across potential call to + // Load value into temp_xmm which will be preserved across potential call to // runtime (MacroAssembler::EnterExitFrameEpilogue preserves only allocatable // XMM registers on x64). - __ LoadUint32(xmm1, reg, xmm0); + XMMRegister xmm_scratch = double_scratch0(); + __ LoadUint32(temp_xmm, reg, xmm_scratch); if (FLAG_inline_new) { __ AllocateHeapNumber(reg, tmp, &slow); @@ -4543,10 +4503,10 @@ void LCodeGen::DoDeferredNumberTagU(LNumberTagU* instr) { CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr); if (!reg.is(rax)) __ movq(reg, rax); - // Done. Put the value in xmm1 into the value of the allocated heap + // Done. 
Put the value in temp_xmm into the value of the allocated heap // number. __ bind(&done); - __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), xmm1); + __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), temp_xmm); __ StoreToSafepointRegisterSlot(reg, reg); } @@ -4623,7 +4583,7 @@ void LCodeGen::EmitNumberUntagD(Register input_reg, bool deoptimize_on_minus_zero, LEnvironment* env, NumberUntagDMode mode) { - Label load_smi, done; + Label convert, load_smi, done; if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) { // Smi check. @@ -4632,27 +4592,19 @@ void LCodeGen::EmitNumberUntagD(Register input_reg, // Heap number map check. __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset), Heap::kHeapNumberMapRootIndex); - if (!can_convert_undefined_to_nan) { - DeoptimizeIf(not_equal, env); - } else { - Label heap_number, convert; - __ j(equal, &heap_number, Label::kNear); - // Convert undefined (and hole) to NaN. Compute NaN as 0/0. - __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex); - DeoptimizeIf(not_equal, env); - - __ bind(&convert); - __ xorps(result_reg, result_reg); - __ divsd(result_reg, result_reg); - __ jmp(&done, Label::kNear); + // On x64 it is safe to load at heap number offset before evaluating the map + // check, since all heap objects are at least two words long. + __ movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset)); - __ bind(&heap_number); + if (can_convert_undefined_to_nan) { + __ j(not_equal, &convert); + } else { + DeoptimizeIf(not_equal, env); } - // Heap number to XMM conversion. - __ movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset)); + if (deoptimize_on_minus_zero) { - XMMRegister xmm_scratch = xmm0; + XMMRegister xmm_scratch = double_scratch0(); __ xorps(xmm_scratch, xmm_scratch); __ ucomisd(xmm_scratch, result_reg); __ j(not_equal, &done, Label::kNear); @@ -4661,6 +4613,18 @@ void LCodeGen::EmitNumberUntagD(Register input_reg, DeoptimizeIf(not_zero, env); } __ jmp(&done, Label::kNear); + + if (can_convert_undefined_to_nan) { + __ bind(&convert); + + // Convert undefined (and hole) to NaN. Compute NaN as 0/0. + __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex); + DeoptimizeIf(not_equal, env); + + __ xorps(result_reg, result_reg); + __ divsd(result_reg, result_reg); + __ jmp(&done, Label::kNear); + } } else { ASSERT(mode == NUMBER_CANDIDATE_IS_SMI); } @@ -4668,30 +4632,44 @@ void LCodeGen::EmitNumberUntagD(Register input_reg, // Smi to XMM conversion __ bind(&load_smi); __ SmiToInteger32(kScratchRegister, input_reg); - __ cvtlsi2sd(result_reg, kScratchRegister); + __ Cvtlsi2sd(result_reg, kScratchRegister); __ bind(&done); } void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) { - Label heap_number; Register input_reg = ToRegister(instr->value()); - if (instr->truncating()) { + Label no_heap_number, check_bools, check_false; + // Heap number map check. __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset), Heap::kHeapNumberMapRootIndex); - __ j(equal, &heap_number, Label::kNear); - // Check for undefined. Undefined is converted to zero for truncating - // conversions. + __ j(not_equal, &no_heap_number, Label::kNear); + __ TruncateHeapNumberToI(input_reg, input_reg); + __ jmp(done); + + __ bind(&no_heap_number); + // Check for Oddballs. Undefined/False is converted to zero and True to one + // for truncating conversions. 
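// (Editorial summary of the dispatch below: undefined -> 0, true -> 1,
// false -> 0; any other non-heap-number fails the final CompareRoot and
// deoptimizes with "Deferred TaggedToI: cannot truncate".)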
__ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex); - DeoptimizeIf(not_equal, instr->environment()); + __ j(not_equal, &check_bools, Label::kNear); __ Set(input_reg, 0); __ jmp(done); - __ bind(&heap_number); - __ TruncateHeapNumberToI(input_reg, input_reg); + __ bind(&check_bools); + __ CompareRoot(input_reg, Heap::kTrueValueRootIndex); + __ j(not_equal, &check_false, Label::kNear); + __ Set(input_reg, 1); + __ jmp(done); + + __ bind(&check_false); + __ CompareRoot(input_reg, Heap::kFalseValueRootIndex); + __ RecordComment("Deferred TaggedToI: cannot truncate"); + DeoptimizeIf(not_equal, instr->environment()); + __ Set(input_reg, 0); + __ jmp(done); } else { Label bailout; XMMRegister xmm_temp = ToDoubleRegister(instr->temp()); @@ -4721,12 +4699,16 @@ void LCodeGen::DoTaggedToI(LTaggedToI* instr) { LOperand* input = instr->value(); ASSERT(input->IsRegister()); ASSERT(input->Equals(instr->result())); - Register input_reg = ToRegister(input); - DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr); - __ JumpIfNotSmi(input_reg, deferred->entry()); - __ SmiToInteger32(input_reg, input_reg); - __ bind(deferred->exit()); + + if (instr->hydrogen()->value()->representation().IsSmi()) { + __ SmiToInteger32(input_reg, input_reg); + } else { + DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr); + __ JumpIfNotSmi(input_reg, deferred->entry()); + __ SmiToInteger32(input_reg, input_reg); + __ bind(deferred->exit()); + } } @@ -4764,7 +4746,8 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) { __ TruncateDoubleToI(result_reg, input_reg); } else { Label bailout, done; - __ DoubleToI(result_reg, input_reg, xmm0, + XMMRegister xmm_scratch = double_scratch0(); + __ DoubleToI(result_reg, input_reg, xmm_scratch, instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear); __ jmp(&done, Label::kNear); @@ -4785,7 +4768,8 @@ void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) { Register result_reg = ToRegister(result); Label bailout, done; - __ DoubleToI(result_reg, input_reg, xmm0, + XMMRegister xmm_scratch = double_scratch0(); + __ DoubleToI(result_reg, input_reg, xmm_scratch, instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear); __ jmp(&done, Label::kNear); @@ -4862,8 +4846,7 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) { void LCodeGen::DoCheckValue(LCheckValue* instr) { Register reg = ToRegister(instr->value()); - Handle<HeapObject> object = instr->hydrogen()->object(); - __ CmpHeapObject(reg, object); + __ Cmp(reg, instr->hydrogen()->object().handle()); DeoptimizeIf(not_equal, instr->environment()); } @@ -4903,22 +4886,21 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) { ASSERT(input->IsRegister()); Register reg = ToRegister(input); - SmallMapList* map_set = instr->hydrogen()->map_set(); - DeferredCheckMaps* deferred = NULL; if (instr->hydrogen()->has_migration_target()) { deferred = new(zone()) DeferredCheckMaps(this, instr, reg); __ bind(deferred->check_maps()); } + UniqueSet<Map> map_set = instr->hydrogen()->map_set(); Label success; - for (int i = 0; i < map_set->length() - 1; i++) { - Handle<Map> map = map_set->at(i); + for (int i = 0; i < map_set.size() - 1; i++) { + Handle<Map> map = map_set.at(i).handle(); __ CompareMap(reg, map, &success); __ j(equal, &success); } - Handle<Map> map = map_set->last(); + Handle<Map> map = map_set.at(map_set.size() - 1).handle(); __ CompareMap(reg, map, &success); if (instr->hydrogen()->has_migration_target()) { __ j(not_equal, deferred->entry()); @@ -4932,8 +4914,9 @@ void 
LCodeGen::DoCheckMaps(LCheckMaps* instr) { void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { XMMRegister value_reg = ToDoubleRegister(instr->unclamped()); + XMMRegister xmm_scratch = double_scratch0(); Register result_reg = ToRegister(instr->result()); - __ ClampDoubleToUint8(value_reg, xmm0, result_reg); + __ ClampDoubleToUint8(value_reg, xmm_scratch, result_reg); } @@ -4948,6 +4931,7 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) { ASSERT(instr->unclamped()->Equals(instr->result())); Register input_reg = ToRegister(instr->unclamped()); XMMRegister temp_xmm_reg = ToDoubleRegister(instr->temp_xmm()); + XMMRegister xmm_scratch = double_scratch0(); Label is_smi, done, heap_number; __ JumpIfSmi(input_reg, &is_smi); @@ -4966,8 +4950,8 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) { // Heap number __ bind(&heap_number); - __ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset)); - __ ClampDoubleToUint8(xmm0, temp_xmm_reg, input_reg); + __ movsd(xmm_scratch, FieldOperand(input_reg, HeapNumber::kValueOffset)); + __ ClampDoubleToUint8(xmm_scratch, temp_xmm_reg, input_reg); __ jmp(&done, Label::kNear); // smi @@ -5089,7 +5073,7 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) { // rax = regexp literal clone. int literal_offset = FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index()); - __ LoadHeapObject(rcx, instr->hydrogen()->literals()); + __ Move(rcx, instr->hydrogen()->literals()); __ movq(rbx, FieldOperand(rcx, literal_offset)); __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex); __ j(not_equal, &materialized, Label::kNear); @@ -5160,13 +5144,7 @@ void LCodeGen::DoTypeof(LTypeof* instr) { void LCodeGen::EmitPushTaggedOperand(LOperand* operand) { ASSERT(!operand->IsDoubleRegister()); if (operand->IsConstantOperand()) { - Handle<Object> object = ToHandle(LConstantOperand::cast(operand)); - AllowDeferredHandleDereference smi_check; - if (object->IsSmi()) { - __ Push(Handle<Smi>::cast(object)); - } else { - __ PushHeapObject(Handle<HeapObject>::cast(object)); - } + __ Push(ToHandle(LConstantOperand::cast(operand))); } else if (operand->IsRegister()) { __ push(ToRegister(operand)); } else { @@ -5280,7 +5258,7 @@ void LCodeGen::EmitIsConstructCall(Register temp) { __ Cmp(Operand(temp, StandardFrameConstants::kContextOffset), Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)); __ j(not_equal, &check_frame_marker, Label::kNear); - __ movq(temp, Operand(rax, StandardFrameConstants::kCallerFPOffset)); + __ movq(temp, Operand(temp, StandardFrameConstants::kCallerFPOffset)); // Check the marker in the calling frame. 
__ bind(&check_frame_marker); diff --git a/deps/v8/src/x64/lithium-codegen-x64.h b/deps/v8/src/x64/lithium-codegen-x64.h index f994645019..f3f202a277 100644 --- a/deps/v8/src/x64/lithium-codegen-x64.h +++ b/deps/v8/src/x64/lithium-codegen-x64.h @@ -32,6 +32,7 @@ #include "checks.h" #include "deoptimizer.h" +#include "lithium-codegen.h" #include "safepoint-table.h" #include "scopes.h" #include "v8utils.h" @@ -44,42 +45,25 @@ namespace internal { class LDeferredCode; class SafepointGenerator; -class LCodeGen V8_FINAL BASE_EMBEDDED { +class LCodeGen: public LCodeGenBase { public: LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info) - : zone_(info->zone()), - chunk_(static_cast<LPlatformChunk*>(chunk)), - masm_(assembler), - info_(info), - current_block_(-1), - current_instruction_(-1), - instructions_(chunk->instructions()), + : LCodeGenBase(chunk, assembler, info), deoptimizations_(4, info->zone()), jump_table_(4, info->zone()), deoptimization_literals_(8, info->zone()), inlined_function_count_(0), scope_(info->scope()), - status_(UNUSED), translations_(info->zone()), deferred_(8, info->zone()), osr_pc_offset_(-1), - last_lazy_deopt_pc_(0), frame_is_built_(false), safepoints_(info->zone()), resolver_(this), - expected_safepoint_kind_(Safepoint::kSimple), - old_position_(RelocInfo::kNoPosition) { + expected_safepoint_kind_(Safepoint::kSimple) { PopulateDeoptimizationLiteralsWithInlinedFunctions(); } - // Simple accessors. - MacroAssembler* masm() const { return masm_; } - CompilationInfo* info() const { return info_; } - Isolate* isolate() const { return info_->isolate(); } - Factory* factory() const { return isolate()->factory(); } - Heap* heap() const { return isolate()->heap(); } - Zone* zone() const { return zone_; } - int LookupDestination(int block_id) const { return chunk()->LookupDestination(block_id); } @@ -146,18 +130,6 @@ class LCodeGen V8_FINAL BASE_EMBEDDED { #undef DECLARE_DO private: - enum Status { - UNUSED, - GENERATING, - DONE, - ABORTED - }; - - bool is_unused() const { return status_ == UNUSED; } - bool is_generating() const { return status_ == GENERATING; } - bool is_done() const { return status_ == DONE; } - bool is_aborted() const { return status_ == ABORTED; } - StrictModeFlag strict_mode_flag() const { return info()->is_classic_mode() ? kNonStrictMode : kStrictMode; } @@ -166,7 +138,7 @@ class LCodeGen V8_FINAL BASE_EMBEDDED { Scope* scope() const { return scope_; } HGraph* graph() const { return chunk()->graph(); } - int GetNextEmittedBlock() const; + XMMRegister double_scratch0() const { return xmm0; } void EmitClassOfTest(Label* if_true, Label* if_false, @@ -178,14 +150,12 @@ class LCodeGen V8_FINAL BASE_EMBEDDED { int GetStackSlotCount() const { return chunk()->spill_slot_count(); } void Abort(BailoutReason reason); - void FPRINTF_CHECKING Comment(const char* format, ...); void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); } // Code generation passes. Returns true if code generation should // continue. 
bool GeneratePrologue(); - bool GenerateBody(); bool GenerateDeferredCode(); bool GenerateJumpTable(); bool GenerateSafepointTable(); @@ -211,7 +181,8 @@ class LCodeGen V8_FINAL BASE_EMBEDDED { void CallRuntime(const Runtime::Function* function, int num_arguments, - LInstruction* instr); + LInstruction* instr, + SaveFPRegsMode save_doubles = kDontSaveFPRegs); void CallRuntime(Runtime::FunctionId id, int num_arguments, @@ -284,8 +255,7 @@ class LCodeGen V8_FINAL BASE_EMBEDDED { void RecordSafepointWithRegisters(LPointerMap* pointers, int arguments, Safepoint::DeoptMode mode); - void RecordPosition(int position); - void RecordAndUpdatePosition(int position); + void RecordAndWritePosition(int position) V8_OVERRIDE; static Condition TokenToCondition(Token::Value op, bool is_unsigned); void EmitGoto(int block); @@ -340,7 +310,7 @@ class LCodeGen V8_FINAL BASE_EMBEDDED { int* offset, AllocationSiteMode mode); - void EnsureSpaceForLazyDeopt(int space_needed); + void EnsureSpaceForLazyDeopt(int space_needed) V8_OVERRIDE; void DoLoadKeyedExternalArray(LLoadKeyed* instr); void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr); void DoLoadKeyedFixedArray(LLoadKeyed* instr); @@ -355,24 +325,14 @@ class LCodeGen V8_FINAL BASE_EMBEDDED { void MakeSureStackPagesMapped(int offset); #endif - Zone* zone_; - LPlatformChunk* const chunk_; - MacroAssembler* const masm_; - CompilationInfo* const info_; - - int current_block_; - int current_instruction_; - const ZoneList<LInstruction*>* instructions_; ZoneList<LEnvironment*> deoptimizations_; ZoneList<Deoptimizer::JumpTableEntry> jump_table_; ZoneList<Handle<Object> > deoptimization_literals_; int inlined_function_count_; Scope* const scope_; - Status status_; TranslationBuffer translations_; ZoneList<LDeferredCode*> deferred_; int osr_pc_offset_; - int last_lazy_deopt_pc_; bool frame_is_built_; // Builder that keeps track of safepoints in the code. The table @@ -384,8 +344,6 @@ class LCodeGen V8_FINAL BASE_EMBEDDED { Safepoint::Kind expected_safepoint_kind_; - int old_position_; - class PushSafepointRegistersScope V8_FINAL BASE_EMBEDDED { public: explicit PushSafepointRegistersScope(LCodeGen* codegen) diff --git a/deps/v8/src/x64/lithium-gap-resolver-x64.cc b/deps/v8/src/x64/lithium-gap-resolver-x64.cc index 71db17c931..8d1c2a2835 100644 --- a/deps/v8/src/x64/lithium-gap-resolver-x64.cc +++ b/deps/v8/src/x64/lithium-gap-resolver-x64.cc @@ -200,7 +200,7 @@ void LGapResolver::EmitMove(int index) { } else if (cgen_->IsInteger32Constant(constant_source)) { __ movl(dst, Immediate(cgen_->ToInteger32(constant_source))); } else { - __ LoadObject(dst, cgen_->ToHandle(constant_source)); + __ Move(dst, cgen_->ToHandle(constant_source)); } } else if (destination->IsDoubleRegister()) { double v = cgen_->ToDouble(constant_source); @@ -222,7 +222,7 @@ void LGapResolver::EmitMove(int index) { // value. __ movq(dst, Immediate(cgen_->ToInteger32(constant_source))); } else { - __ LoadObject(kScratchRegister, cgen_->ToHandle(constant_source)); + __ Move(kScratchRegister, cgen_->ToHandle(constant_source)); __ movq(dst, kScratchRegister); } } @@ -262,7 +262,7 @@ void LGapResolver::EmitSwap(int index) { // Swap two general-purpose registers. 
Register src = cgen_->ToRegister(source); Register dst = cgen_->ToRegister(destination); - __ xchg(dst, src); + __ xchgq(dst, src); } else if ((source->IsRegister() && destination->IsStackSlot()) || (source->IsStackSlot() && destination->IsRegister())) { diff --git a/deps/v8/src/x64/lithium-x64.cc b/deps/v8/src/x64/lithium-x64.cc index d9daaacca0..6262e7ede3 100644 --- a/deps/v8/src/x64/lithium-x64.cc +++ b/deps/v8/src/x64/lithium-x64.cc @@ -353,19 +353,20 @@ void LAccessArgumentsAt::PrintDataTo(StringStream* stream) { } -int LPlatformChunk::GetNextSpillIndex(bool is_double) { +int LPlatformChunk::GetNextSpillIndex(RegisterKind kind) { return spill_slot_count_++; } -LOperand* LPlatformChunk::GetNextSpillSlot(bool is_double) { +LOperand* LPlatformChunk::GetNextSpillSlot(RegisterKind kind) { // All stack slots are Double stack slots on x64. // Alternatively, at some point, start using half-size // stack slots for int32 values. - int index = GetNextSpillIndex(is_double); - if (is_double) { + int index = GetNextSpillIndex(kind); + if (kind == DOUBLE_REGISTERS) { return LDoubleStackSlot::Create(index, zone()); } else { + ASSERT(kind == GENERAL_REGISTERS); return LStackSlot::Create(index, zone()); } } @@ -445,7 +446,7 @@ LPlatformChunk* LChunkBuilder::Build() { // which will be subsumed into this frame. if (graph()->has_osr()) { for (int i = graph()->osr()->UnoptimizedFrameSlots(); i > 0; i--) { - chunk_->GetNextSpillIndex(false); + chunk_->GetNextSpillIndex(GENERAL_REGISTERS); } } @@ -664,7 +665,7 @@ LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr, LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) { ASSERT(!instr->HasPointerMap()); - instr->set_pointer_map(new(zone()) LPointerMap(position_, zone())); + instr->set_pointer_map(new(zone()) LPointerMap(zone())); return instr; } @@ -719,46 +720,39 @@ LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) { LInstruction* LChunkBuilder::DoShift(Token::Value op, HBitwiseBinaryOperation* instr) { - if (instr->representation().IsTagged()) { - ASSERT(instr->left()->representation().IsTagged()); - ASSERT(instr->right()->representation().IsTagged()); + if (instr->representation().IsSmiOrInteger32()) { + ASSERT(instr->left()->representation().Equals(instr->representation())); + ASSERT(instr->right()->representation().Equals(instr->representation())); + LOperand* left = UseRegisterAtStart(instr->left()); - LOperand* left = UseFixed(instr->left(), rdx); - LOperand* right = UseFixed(instr->right(), rax); - LArithmeticT* result = new(zone()) LArithmeticT(op, left, right); - return MarkAsCall(DefineFixed(result, rax), instr); - } + HValue* right_value = instr->right(); + LOperand* right = NULL; + int constant_value = 0; + if (right_value->IsConstant()) { + HConstant* constant = HConstant::cast(right_value); + right = chunk_->DefineConstantOperand(constant); + constant_value = constant->Integer32Value() & 0x1f; + } else { + right = UseFixed(right_value, rcx); + } - ASSERT(instr->representation().IsSmiOrInteger32()); - ASSERT(instr->left()->representation().Equals(instr->representation())); - ASSERT(instr->right()->representation().Equals(instr->representation())); - LOperand* left = UseRegisterAtStart(instr->left()); + // Shift operations can only deoptimize if we do a logical shift by 0 and + // the result cannot be truncated to int32. 
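The `does_deopt` logic that follows encodes a real JavaScript corner case: a logical shift by zero still reinterprets its operand as uint32, and the result may have no int32 representation. A standalone C++ illustration (variable names are mine):

```cpp
#include <cstdint>
#include <cstdio>

int main() {
  // In JS, (-1 >>> 0) === 4294967295: even with a shift count of 0 the
  // operand is reinterpreted as uint32. If the uses of the result cannot
  // truncate it back to int32, optimized code must deoptimize -- exactly
  // the Token::SHR && constant_value == 0 case checked below.
  int32_t x = -1;
  uint32_t shifted = static_cast<uint32_t>(x) >> 0;
  std::printf("%u\n", shifted);  // 4294967295, not representable as int32
  return 0;
}
```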
+ bool does_deopt = false; + if (op == Token::SHR && constant_value == 0) { + if (FLAG_opt_safe_uint32_operations) { + does_deopt = !instr->CheckFlag(HInstruction::kUint32); + } else { + does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToInt32); + } + } - HValue* right_value = instr->right(); - LOperand* right = NULL; - int constant_value = 0; - if (right_value->IsConstant()) { - HConstant* constant = HConstant::cast(right_value); - right = chunk_->DefineConstantOperand(constant); - constant_value = constant->Integer32Value() & 0x1f; + LInstruction* result = + DefineSameAsFirst(new(zone()) LShiftI(op, left, right, does_deopt)); + return does_deopt ? AssignEnvironment(result) : result; } else { - right = UseFixed(right_value, rcx); - } - - // Shift operations can only deoptimize if we do a logical shift by 0 and - // the result cannot be truncated to int32. - bool does_deopt = false; - if (op == Token::SHR && constant_value == 0) { - if (FLAG_opt_safe_uint32_operations) { - does_deopt = !instr->CheckFlag(HInstruction::kUint32); - } else { - does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToInt32); - } + return DoArithmeticT(op, instr); } - - LInstruction* result = - DefineSameAsFirst(new(zone()) LShiftI(op, left, right, does_deopt)); - return does_deopt ? AssignEnvironment(result) : result; } @@ -767,21 +761,22 @@ LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op, ASSERT(instr->representation().IsDouble()); ASSERT(instr->left()->representation().IsDouble()); ASSERT(instr->right()->representation().IsDouble()); - ASSERT(op != Token::MOD); - LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand()); - LOperand* right = UseRegisterAtStart(instr->BetterRightOperand()); - LArithmeticD* result = new(zone()) LArithmeticD(op, left, right); - return DefineSameAsFirst(result); + if (op == Token::MOD) { + LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand()); + LOperand* right = UseFixedDouble(instr->BetterRightOperand(), xmm1); + LArithmeticD* result = new(zone()) LArithmeticD(op, left, right); + return MarkAsCall(DefineSameAsFirst(result), instr); + } else { + LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand()); + LOperand* right = UseRegisterAtStart(instr->BetterRightOperand()); + LArithmeticD* result = new(zone()) LArithmeticD(op, left, right); + return DefineSameAsFirst(result); + } } LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op, - HArithmeticBinaryOperation* instr) { - ASSERT(op == Token::ADD || - op == Token::DIV || - op == Token::MOD || - op == Token::MUL || - op == Token::SUB); + HBinaryOperation* instr) { HValue* left = instr->left(); HValue* right = instr->right(); ASSERT(left->representation().IsTagged()); @@ -864,10 +859,31 @@ void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) { void LChunkBuilder::VisitInstruction(HInstruction* current) { HInstruction* old_current = current_instruction_; current_instruction_ = current; - if (current->has_position()) position_ = current->position(); - LInstruction* instr = current->CompileToLithium(this); + + LInstruction* instr = NULL; + if (current->CanReplaceWithDummyUses()) { + HValue* first_operand = current->OperandCount() == 0 + ? 
graph()->GetConstant1() + : current->OperandAt(0); + instr = DefineAsRegister(new(zone()) LDummyUse(UseAny(first_operand))); + for (int i = 1; i < current->OperandCount(); ++i) { + LInstruction* dummy = + new(zone()) LDummyUse(UseAny(current->OperandAt(i))); + dummy->set_hydrogen_value(current); + chunk_->AddInstruction(dummy, current_block_); + } + } else { + instr = current->CompileToLithium(this); + } + + argument_count_ += current->argument_delta(); + ASSERT(argument_count_ >= 0); if (instr != NULL) { + // Associate the hydrogen instruction first, since we may need it for + // the ClobbersRegisters() or ClobbersDoubleRegisters() calls below. + instr->set_hydrogen_value(current); + #if DEBUG // Make sure that the lithium instruction has either no fixed register // constraints in temps or the result OR no uses that are only used at @@ -897,14 +913,12 @@ void LChunkBuilder::VisitInstruction(HInstruction* current) { } #endif - instr->set_position(position_); if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) { instr = AssignPointerMap(instr); } if (FLAG_stress_environments && !instr->HasEnvironment()) { instr = AssignEnvironment(instr); } - instr->set_hydrogen_value(current); chunk_->AddInstruction(instr, current_block_); } current_instruction_ = old_current; @@ -996,7 +1010,7 @@ LEnvironment* LChunkBuilder::CreateEnvironment( LInstruction* LChunkBuilder::DoGoto(HGoto* instr) { - return new(zone()) LGoto(instr->FirstSuccessor()->block_id()); + return new(zone()) LGoto(instr->FirstSuccessor()); } @@ -1006,16 +1020,10 @@ LInstruction* LChunkBuilder::DoDebugBreak(HDebugBreak* instr) { LInstruction* LChunkBuilder::DoBranch(HBranch* instr) { - HValue* value = instr->value(); - if (value->EmitAtUses()) { - ASSERT(value->IsConstant()); - ASSERT(!value->representation().IsDouble()); - HBasicBlock* successor = HConstant::cast(value)->BooleanValue() - ? instr->FirstSuccessor() - : instr->SecondSuccessor(); - return new(zone()) LGoto(successor->block_id()); - } + LInstruction* goto_instr = CheckElideControlInstruction(instr); + if (goto_instr != NULL) return goto_instr; + HValue* value = instr->value(); LBranch* result = new(zone()) LBranch(UseRegister(value)); // Tagged values that are not known smis or booleans require a // deoptimization environment. 
If the instruction is generic no @@ -1067,12 +1075,6 @@ LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal( } -LInstruction* LChunkBuilder::DoInstanceSize(HInstanceSize* instr) { - LOperand* object = UseRegisterAtStart(instr->object()); - return DefineAsRegister(new(zone()) LInstanceSize(object)); -} - - LInstruction* LChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) { LOperand* receiver = UseRegister(instr->receiver()); LOperand* function = UseRegisterAtStart(instr->function()); @@ -1095,7 +1097,6 @@ LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) { LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) { - ++argument_count_; LOperand* argument = UseOrConstant(instr->argument()); return new(zone()) LPushArgument(argument); } @@ -1161,14 +1162,12 @@ LInstruction* LChunkBuilder::DoGlobalReceiver(HGlobalReceiver* instr) { LInstruction* LChunkBuilder::DoCallConstantFunction( HCallConstantFunction* instr) { - argument_count_ -= instr->argument_count(); return MarkAsCall(DefineFixed(new(zone()) LCallConstantFunction, rax), instr); } LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) { LOperand* function = UseFixed(instr->function(), rdi); - argument_count_ -= instr->argument_count(); LInvokeFunction* result = new(zone()) LInvokeFunction(function); return MarkAsCall(DefineFixed(result, rax), instr, CANNOT_DEOPTIMIZE_EAGERLY); } @@ -1215,9 +1214,11 @@ LInstruction* LChunkBuilder::DoMathAbs(HUnaryMathOperation* instr) { LInstruction* LChunkBuilder::DoMathLog(HUnaryMathOperation* instr) { - LOperand* input = UseFixedDouble(instr->value(), xmm1); + ASSERT(instr->representation().IsDouble()); + ASSERT(instr->value()->representation().IsDouble()); + LOperand* input = UseRegisterAtStart(instr->value()); LMathLog* result = new(zone()) LMathLog(input); - return MarkAsCall(DefineFixedDouble(result, xmm1), instr); + return DefineSameAsFirst(result); } @@ -1270,33 +1271,28 @@ LInstruction* LChunkBuilder::DoMathPowHalf(HUnaryMathOperation* instr) { LInstruction* LChunkBuilder::DoCallKeyed(HCallKeyed* instr) { ASSERT(instr->key()->representation().IsTagged()); LOperand* key = UseFixed(instr->key(), rcx); - argument_count_ -= instr->argument_count(); LCallKeyed* result = new(zone()) LCallKeyed(key); return MarkAsCall(DefineFixed(result, rax), instr); } LInstruction* LChunkBuilder::DoCallNamed(HCallNamed* instr) { - argument_count_ -= instr->argument_count(); return MarkAsCall(DefineFixed(new(zone()) LCallNamed, rax), instr); } LInstruction* LChunkBuilder::DoCallGlobal(HCallGlobal* instr) { - argument_count_ -= instr->argument_count(); return MarkAsCall(DefineFixed(new(zone()) LCallGlobal, rax), instr); } LInstruction* LChunkBuilder::DoCallKnownGlobal(HCallKnownGlobal* instr) { - argument_count_ -= instr->argument_count(); return MarkAsCall(DefineFixed(new(zone()) LCallKnownGlobal, rax), instr); } LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) { LOperand* constructor = UseFixed(instr->constructor(), rdi); - argument_count_ -= instr->argument_count(); LCallNew* result = new(zone()) LCallNew(constructor); return MarkAsCall(DefineFixed(result, rax), instr); } @@ -1304,7 +1300,6 @@ LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) { LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) { LOperand* constructor = UseFixed(instr->constructor(), rdi); - argument_count_ -= instr->argument_count(); LCallNewArray* result = new(zone()) LCallNewArray(constructor); return MarkAsCall(DefineFixed(result, rax), instr); } @@ -1312,14 +1307,12 @@ 
LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) { LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) { LOperand* function = UseFixed(instr->function(), rdi); - argument_count_ -= instr->argument_count(); LCallFunction* result = new(zone()) LCallFunction(function); return MarkAsCall(DefineFixed(result, rax), instr); } LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) { - argument_count_ -= instr->argument_count(); return MarkAsCall(DefineFixed(new(zone()) LCallRuntime, rax), instr); } @@ -1348,27 +1341,19 @@ LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) { if (instr->representation().IsSmiOrInteger32()) { ASSERT(instr->left()->representation().Equals(instr->representation())); ASSERT(instr->right()->representation().Equals(instr->representation())); + ASSERT(instr->CheckFlag(HValue::kTruncatingToInt32)); LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand()); LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand()); return DefineSameAsFirst(new(zone()) LBitI(left, right)); } else { - ASSERT(instr->representation().IsTagged()); - ASSERT(instr->left()->representation().IsTagged()); - ASSERT(instr->right()->representation().IsTagged()); - - LOperand* left = UseFixed(instr->left(), rdx); - LOperand* right = UseFixed(instr->right(), rax); - LArithmeticT* result = new(zone()) LArithmeticT(instr->op(), left, right); - return MarkAsCall(DefineFixed(result, rax), instr); + return DoArithmeticT(instr->op(), instr); } } LInstruction* LChunkBuilder::DoDiv(HDiv* instr) { - if (instr->representation().IsDouble()) { - return DoArithmeticD(Token::DIV, instr); - } else if (instr->representation().IsSmiOrInteger32()) { + if (instr->representation().IsSmiOrInteger32()) { ASSERT(instr->left()->representation().Equals(instr->representation())); ASSERT(instr->right()->representation().Equals(instr->representation())); if (instr->HasPowerOf2Divisor()) { @@ -1385,8 +1370,9 @@ LInstruction* LChunkBuilder::DoDiv(HDiv* instr) { LOperand* divisor = UseRegister(instr->right()); LDivI* result = new(zone()) LDivI(dividend, divisor, temp); return AssignEnvironment(DefineFixed(result, rax)); + } else if (instr->representation().IsDouble()) { + return DoArithmeticD(Token::DIV, instr); } else { - ASSERT(instr->representation().IsTagged()); return DoArithmeticT(Token::DIV, instr); } } @@ -1485,17 +1471,10 @@ LInstruction* LChunkBuilder::DoMod(HMod* instr) { ? AssignEnvironment(result) : result; } - } else if (instr->representation().IsTagged()) { - return DoArithmeticT(Token::MOD, instr); + } else if (instr->representation().IsDouble()) { + return DoArithmeticD(Token::MOD, instr); } else { - ASSERT(instr->representation().IsDouble()); - // We call a C function for double modulo. It can't trigger a GC. We need to - // use fixed result register for the call. - // TODO(fschneider): Allow any register as input registers. 
- LArithmeticD* mod = new(zone()) LArithmeticD(Token::MOD, - UseFixedDouble(left, xmm2), - UseFixedDouble(right, xmm1)); - return MarkAsCall(DefineFixedDouble(mod, xmm1), instr); + return DoArithmeticT(Token::MOD, instr); } } @@ -1515,7 +1494,6 @@ LInstruction* LChunkBuilder::DoMul(HMul* instr) { } else if (instr->representation().IsDouble()) { return DoArithmeticD(Token::MUL, instr); } else { - ASSERT(instr->representation().IsTagged()); return DoArithmeticT(Token::MUL, instr); } } @@ -1536,7 +1514,6 @@ LInstruction* LChunkBuilder::DoSub(HSub* instr) { } else if (instr->representation().IsDouble()) { return DoArithmeticD(Token::SUB, instr); } else { - ASSERT(instr->representation().IsTagged()); return DoArithmeticT(Token::SUB, instr); } } @@ -1568,7 +1545,6 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) { } else if (instr->representation().IsDouble()) { return DoArithmeticD(Token::ADD, instr); } else { - ASSERT(instr->representation().IsTagged()); return DoArithmeticT(Token::ADD, instr); } return NULL; @@ -1662,6 +1638,8 @@ LInstruction* LChunkBuilder::DoCompareNumericAndBranch( LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch( HCompareObjectEqAndBranch* instr) { + LInstruction* goto_instr = CheckElideControlInstruction(instr); + if (goto_instr != NULL) return goto_instr; LOperand* left = UseRegisterAtStart(instr->left()); LOperand* right = UseRegisterOrConstantAtStart(instr->right()); return new(zone()) LCmpObjectEqAndBranch(left, right); @@ -1670,8 +1648,8 @@ LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch( LInstruction* LChunkBuilder::DoCompareHoleAndBranch( HCompareHoleAndBranch* instr) { - LOperand* object = UseRegisterAtStart(instr->object()); - return new(zone()) LCmpHoleAndBranch(object); + LOperand* value = UseRegisterAtStart(instr->value()); + return new(zone()) LCmpHoleAndBranch(value); } @@ -1803,6 +1781,13 @@ LInstruction* LChunkBuilder::DoBoundsCheckBaseIndexInformation( } +LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) { + // The control instruction marking the end of a block that completed + // abruptly (e.g., threw an exception). There is nothing specific to do. + return NULL; +} + + LInstruction* LChunkBuilder::DoThrow(HThrow* instr) { LOperand* value = UseFixed(instr->value(), rax); return MarkAsCall(new(zone()) LThrow(value), instr); @@ -1837,7 +1822,6 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) { // building a stack frame. 
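The MOD reorganization above keeps the long-standing behavior that double modulo is not emitted inline: DoArithmeticD still pins the right operand to xmm1 and marks the instruction as a call, because, as the removed comment notes, double modulo bottoms out in a C function. A short sketch of the underlying semantics in plain C++ (not the generated code):

```cpp
#include <cmath>
#include <cstdio>

int main() {
  // JavaScript's % on doubles follows C's fmod: the result carries the sign
  // of the dividend, which is why no simple instruction sequence suffices
  // and the compiled code calls out to a C routine instead.
  std::printf("%g\n", std::fmod(5.5, 2.0));    // 1.5
  std::printf("%g\n", std::fmod(-5.5, 2.0));   // -1.5
  return 0;
}
```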
if (from.IsTagged()) { if (to.IsDouble()) { - info()->MarkAsDeferredCalling(); LOperand* value = UseRegister(instr->value()); LNumberUntagD* res = new(zone()) LNumberUntagD(value); return AssignEnvironment(DefineAsRegister(res)); @@ -1899,10 +1883,18 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) { } else if (to.IsSmi()) { HValue* val = instr->value(); LOperand* value = UseRegister(val); - LInstruction* result = - DefineAsRegister(new(zone()) LInteger32ToSmi(value)); - if (val->HasRange() && val->range()->IsInSmiRange()) { - return result; + LInstruction* result = NULL; + if (val->CheckFlag(HInstruction::kUint32)) { + result = DefineAsRegister(new(zone()) LUint32ToSmi(value)); + if (val->HasRange() && val->range()->IsInSmiRange() && + val->range()->upper() != kMaxInt) { + return result; + } + } else { + result = DefineAsRegister(new(zone()) LInteger32ToSmi(value)); + if (val->HasRange() && val->range()->IsInSmiRange()) { + return result; + } } return AssignEnvironment(result); } else { @@ -1934,12 +1926,6 @@ LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) { } -LInstruction* LChunkBuilder::DoIsNumberAndBranch(HIsNumberAndBranch* instr) { - return new(zone()) LIsNumberAndBranch( - UseRegisterOrConstantAtStart(instr->value())); -} - - LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) { LOperand* value = UseRegisterAtStart(instr->value()); LCheckInstanceType* result = new(zone()) LCheckInstanceType(value); @@ -2075,7 +2061,14 @@ LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) { LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) { - if (instr->access().IsExternalMemory() && instr->access().offset() == 0) { + // Use the special mov rax, moffs64 encoding for external + // memory accesses with 64-bit word-sized values. 
+ if (instr->access().IsExternalMemory() && + instr->access().offset() == 0 && + (instr->access().representation().IsSmi() || + instr->access().representation().IsTagged() || + instr->access().representation().IsHeapObject() || + instr->access().representation().IsExternal())) { LOperand* obj = UseRegisterOrConstantAtStart(instr->object()); return DefineFixed(new(zone()) LLoadNamedField(obj), rax); } @@ -2098,6 +2091,11 @@ LInstruction* LChunkBuilder::DoLoadFunctionPrototype( } +LInstruction* LChunkBuilder::DoLoadRoot(HLoadRoot* instr) { + return DefineAsRegister(new(zone()) LLoadRoot); +} + + LInstruction* LChunkBuilder::DoLoadExternalArrayPointer( HLoadExternalArrayPointer* instr) { LOperand* input = UseRegisterAtStart(instr->value()); @@ -2391,7 +2389,6 @@ LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) { LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) { - argument_count_ -= instr->argument_count(); return MarkAsCall(DefineFixed(new(zone()) LCallStub, rax), instr); } @@ -2513,7 +2510,7 @@ LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) { if (env->entry()->arguments_pushed()) { int argument_count = env->arguments_environment()->parameter_count(); pop = new(zone()) LDrop(argument_count); - argument_count_ -= argument_count; + ASSERT(instr->argument_delta() == -argument_count); } HEnvironment* outer = current_block_->last_environment()-> diff --git a/deps/v8/src/x64/lithium-x64.h b/deps/v8/src/x64/lithium-x64.h index b3d08c8a4c..06cb171923 100644 --- a/deps/v8/src/x64/lithium-x64.h +++ b/deps/v8/src/x64/lithium-x64.h @@ -105,7 +105,6 @@ class LCodeGen; V(InnerAllocatedObject) \ V(InstanceOf) \ V(InstanceOfKnownGlobal) \ - V(InstanceSize) \ V(InstructionGap) \ V(Integer32ToDouble) \ V(Integer32ToSmi) \ @@ -114,12 +113,12 @@ class LCodeGen; V(IsObjectAndBranch) \ V(IsStringAndBranch) \ V(IsSmiAndBranch) \ - V(IsNumberAndBranch) \ V(IsUndetectableAndBranch) \ V(Label) \ V(LazyBailout) \ V(LoadContextSlot) \ V(LoadExternalArrayPointer) \ + V(LoadRoot) \ V(LoadFieldByIndex) \ V(LoadFunctionPrototype) \ V(LoadGlobalCell) \ @@ -182,6 +181,7 @@ class LCodeGen; V(Typeof) \ V(TypeofIsAndBranch) \ V(Uint32ToDouble) \ + V(Uint32ToSmi) \ V(UnknownOSRValue) \ V(ValueOf) \ V(WrapReceiver) @@ -213,7 +213,6 @@ class LInstruction : public ZoneObject { : environment_(NULL), hydrogen_value_(NULL), bit_field_(IsCallBits::encode(false)) { - set_position(RelocInfo::kNoPosition); } virtual ~LInstruction() {} @@ -254,15 +253,6 @@ class LInstruction : public ZoneObject { LPointerMap* pointer_map() const { return pointer_map_.get(); } bool HasPointerMap() const { return pointer_map_.is_set(); } - // The 31 bits PositionBits is used to store the int position value. And the - // position value may be RelocInfo::kNoPosition (-1). The accessor always - // +1/-1 so that the encoded value of position in bit_field_ is always >= 0 - // and can fit into the 31 bits PositionBits. - void set_position(int pos) { - bit_field_ = PositionBits::update(bit_field_, pos + 1); - } - int position() { return PositionBits::decode(bit_field_) - 1; } - void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; } HValue* hydrogen_value() const { return hydrogen_value_; } @@ -272,7 +262,7 @@ class LInstruction : public ZoneObject { // Interface to the register allocator and iterators. 
bool ClobbersTemps() const { return IsCall(); } bool ClobbersRegisters() const { return IsCall(); } - bool ClobbersDoubleRegisters() const { return IsCall(); } + virtual bool ClobbersDoubleRegisters() const { return IsCall(); } virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) { } @@ -302,7 +292,6 @@ class LInstruction : public ZoneObject { virtual LOperand* TempAt(int i) = 0; class IsCallBits: public BitField<bool, 0, 1> {}; - class PositionBits: public BitField<int, 1, 31> {}; LEnvironment* environment_; SetOncePointer<LPointerMap> pointer_map_; @@ -403,17 +392,17 @@ class LInstructionGap V8_FINAL : public LGap { class LGoto V8_FINAL : public LTemplateInstruction<0, 0, 0> { public: - explicit LGoto(int block_id) : block_id_(block_id) { } + explicit LGoto(HBasicBlock* block) : block_(block) { } virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE; DECLARE_CONCRETE_INSTRUCTION(Goto, "goto") virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; virtual bool IsControl() const V8_OVERRIDE { return true; } - int block_id() const { return block_id_; } + int block_id() const { return block_->block_id(); } private: - int block_id_; + HBasicBlock* block_; }; @@ -883,19 +872,6 @@ class LIsObjectAndBranch V8_FINAL : public LControlInstruction<1, 0> { }; -class LIsNumberAndBranch V8_FINAL : public LControlInstruction<1, 0> { - public: - explicit LIsNumberAndBranch(LOperand* value) { - inputs_[0] = value; - } - - LOperand* value() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(IsNumberAndBranch, "is-number-and-branch") - DECLARE_HYDROGEN_ACCESSOR(IsNumberAndBranch) -}; - - class LIsStringAndBranch V8_FINAL : public LControlInstruction<1, 1> { public: explicit LIsStringAndBranch(LOperand* value, LOperand* temp) { @@ -1091,19 +1067,6 @@ class LInstanceOfKnownGlobal V8_FINAL : public LTemplateInstruction<1, 1, 1> { }; -class LInstanceSize V8_FINAL : public LTemplateInstruction<1, 1, 0> { - public: - explicit LInstanceSize(LOperand* object) { - inputs_[0] = object; - } - - LOperand* object() { return inputs_[0]; } - - DECLARE_CONCRETE_INSTRUCTION(InstanceSize, "instance-size") - DECLARE_HYDROGEN_ACCESSOR(InstanceSize) -}; - - class LBoundsCheck V8_FINAL : public LTemplateInstruction<0, 2, 0> { public: LBoundsCheck(LOperand* index, LOperand* length) { @@ -1259,7 +1222,7 @@ class LCmpMapAndBranch V8_FINAL : public LControlInstruction<1, 0> { DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch") DECLARE_HYDROGEN_ACCESSOR(CompareMap) - Handle<Map> map() const { return hydrogen()->map(); } + Handle<Map> map() const { return hydrogen()->map().handle(); } }; @@ -1310,8 +1273,8 @@ class LDateField V8_FINAL : public LTemplateInstruction<1, 1, 0> { LOperand* date() { return inputs_[0]; } Smi* index() const { return index_; } - DECLARE_CONCRETE_INSTRUCTION(ValueOf, "date-field") - DECLARE_HYDROGEN_ACCESSOR(ValueOf) + DECLARE_CONCRETE_INSTRUCTION(DateField, "date-field") + DECLARE_HYDROGEN_ACCESSOR(DateField) private: Smi* index_; @@ -1535,6 +1498,15 @@ class LLoadFunctionPrototype V8_FINAL : public LTemplateInstruction<1, 1, 0> { }; +class LLoadRoot V8_FINAL : public LTemplateInstruction<1, 0, 0> { + public: + DECLARE_CONCRETE_INSTRUCTION(LoadRoot, "load-root") + DECLARE_HYDROGEN_ACCESSOR(LoadRoot) + + Heap::RootListIndex index() const { return hydrogen()->index(); } +}; + + class LLoadExternalArrayPointer V8_FINAL : public LTemplateInstruction<1, 1, 0> { public: @@ -1923,8 +1895,13 @@ class LCallRuntime V8_FINAL : public LTemplateInstruction<1, 0, 
0> { DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime") DECLARE_HYDROGEN_ACCESSOR(CallRuntime) + virtual bool ClobbersDoubleRegisters() const V8_OVERRIDE { + return save_doubles() == kDontSaveFPRegs; + } + const Runtime::Function* function() const { return hydrogen()->function(); } int arity() const { return hydrogen()->argument_count(); } + SaveFPRegsMode save_doubles() const { return hydrogen()->save_doubles(); } }; @@ -1967,6 +1944,19 @@ class LUint32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 1> { }; +class LUint32ToSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> { + public: + explicit LUint32ToSmi(LOperand* value) { + inputs_[0] = value; + } + + LOperand* value() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(Uint32ToSmi, "uint32-to-smi") + DECLARE_HYDROGEN_ACCESSOR(Change) +}; + + class LNumberTagI V8_FINAL : public LTemplateInstruction<1, 1, 0> { public: explicit LNumberTagI(LOperand* value) { @@ -2049,7 +2039,7 @@ class LTaggedToI V8_FINAL : public LTemplateInstruction<1, 1, 1> { LOperand* temp() { return temps_[0]; } DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i") - DECLARE_HYDROGEN_ACCESSOR(UnaryOperation) + DECLARE_HYDROGEN_ACCESSOR(Change) bool truncating() { return hydrogen()->CanTruncateToInt32(); } }; @@ -2205,8 +2195,10 @@ class LTransitionElementsKind V8_FINAL : public LTemplateInstruction<0, 1, 2> { virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE; - Handle<Map> original_map() { return hydrogen()->original_map(); } - Handle<Map> transitioned_map() { return hydrogen()->transitioned_map(); } + Handle<Map> original_map() { return hydrogen()->original_map().handle(); } + Handle<Map> transitioned_map() { + return hydrogen()->transitioned_map().handle(); + } ElementsKind from_kind() { return hydrogen()->from_kind(); } ElementsKind to_kind() { return hydrogen()->to_kind(); } }; @@ -2544,8 +2536,8 @@ class LPlatformChunk V8_FINAL : public LChunk { LPlatformChunk(CompilationInfo* info, HGraph* graph) : LChunk(info, graph) { } - int GetNextSpillIndex(bool is_double); - LOperand* GetNextSpillSlot(bool is_double); + int GetNextSpillIndex(RegisterKind kind); + LOperand* GetNextSpillSlot(RegisterKind kind); }; @@ -2562,13 +2554,14 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED { next_block_(NULL), argument_count_(0), allocator_(allocator), - position_(RelocInfo::kNoPosition), instruction_pending_deoptimization_environment_(NULL), pending_deoptimization_ast_id_(BailoutId::None()) { } // Build the sequence for the graph. LPlatformChunk* Build(); + LInstruction* CheckElideControlInstruction(HControlInstruction* instr); + // Declare methods that deal with the individual node types. 
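The new LUint32ToSmi instruction above exists because a uint32-typed value may exceed the smi payload range even on x64, where a smi carries a 32-bit signed integer; this is why the DoChange logic earlier in this patch keeps a deoptimization environment unless range analysis rules the overflow case out. A quick standalone check (the helper is mine, illustrative only):

```cpp
#include <cstdint>
#include <cstdio>

// On x64 a smi stores a 32-bit signed payload, so the largest smi value is
// 2^31 - 1. A uint32 with the top bit set therefore cannot be tagged
// directly and has to take the deoptimization path.
static bool Uint32FitsInSmi(uint32_t value) {
  return value <= static_cast<uint32_t>(INT32_MAX);
}

int main() {
  std::printf("%d %d\n",
              Uint32FitsInSmi(0x7fffffffu),   // 1: fits
              Uint32FitsInSmi(0x80000000u));  // 0: would need to deoptimize
  return 0;
}
```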
#define DECLARE_DO(type) LInstruction* Do##type(H##type* node); HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO) @@ -2701,7 +2694,7 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED { LInstruction* DoArithmeticD(Token::Value op, HArithmeticBinaryOperation* instr); LInstruction* DoArithmeticT(Token::Value op, - HArithmeticBinaryOperation* instr); + HBinaryOperation* instr); LPlatformChunk* chunk_; CompilationInfo* info_; @@ -2713,7 +2706,6 @@ class LChunkBuilder V8_FINAL BASE_EMBEDDED { HBasicBlock* next_block_; int argument_count_; LAllocator* allocator_; - int position_; LInstruction* instruction_pending_deoptimization_environment_; BailoutId pending_deoptimization_ast_id_; diff --git a/deps/v8/src/x64/macro-assembler-x64.cc b/deps/v8/src/x64/macro-assembler-x64.cc index 69abc5454f..a18ff0d274 100644 --- a/deps/v8/src/x64/macro-assembler-x64.cc +++ b/deps/v8/src/x64/macro-assembler-x64.cc @@ -37,6 +37,7 @@ #include "serialize.h" #include "debug.h" #include "heap.h" +#include "isolate-inl.h" namespace v8 { namespace internal { @@ -605,22 +606,9 @@ void MacroAssembler::IndexFromHash(Register hash, Register index) { } -void MacroAssembler::CallRuntime(Runtime::FunctionId id, int num_arguments) { - CallRuntime(Runtime::FunctionForId(id), num_arguments); -} - - -void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) { - const Runtime::Function* function = Runtime::FunctionForId(id); - Set(rax, function->nargs); - LoadAddress(rbx, ExternalReference(function, isolate())); - CEntryStub ces(1, kSaveFPRegs); - CallStub(&ces); -} - - void MacroAssembler::CallRuntime(const Runtime::Function* f, - int num_arguments) { + int num_arguments, + SaveFPRegsMode save_doubles) { // If the expected number of arguments of the runtime function is // constant, we check that the actual number of arguments match the // expectation. @@ -635,7 +623,7 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f, // smarter. Set(rax, num_arguments); LoadAddress(rbx, ExternalReference(f, isolate())); - CEntryStub ces(f->result_size); + CEntryStub ces(f->result_size, save_doubles); CallStub(&ces); } @@ -691,13 +679,16 @@ void MacroAssembler::PrepareCallApiFunction(int arg_stack_space) { } -void MacroAssembler::CallApiFunctionAndReturn(Address function_address, - Address thunk_address, - Register thunk_last_arg, - int stack_space, - int return_value_offset) { +void MacroAssembler::CallApiFunctionAndReturn( + Address function_address, + Address thunk_address, + Register thunk_last_arg, + int stack_space, + Operand return_value_operand, + Operand* context_restore_operand) { Label prologue; Label promote_scheduled_exception; + Label exception_handled; Label delete_allocated_handles; Label leave_exit_frame; Label write_back; @@ -750,7 +741,7 @@ void MacroAssembler::CallApiFunctionAndReturn(Address function_address, bind(&profiler_disabled); // Call the api function! - movq(rax, reinterpret_cast<int64_t>(function_address), + movq(rax, reinterpret_cast<Address>(function_address), RelocInfo::EXTERNAL_REFERENCE); bind(&end_profiler_check); @@ -768,7 +759,7 @@ void MacroAssembler::CallApiFunctionAndReturn(Address function_address, } // Load the value from ReturnValue - movq(rax, Operand(rbp, return_value_offset * kPointerSize)); + movq(rax, return_value_operand); bind(&prologue); // No more valid handles (the result handle was the last one). 
Restore @@ -783,6 +774,7 @@ void MacroAssembler::CallApiFunctionAndReturn(Address function_address, movq(rsi, scheduled_exception_address); Cmp(Operand(rsi, 0), factory->the_hole_value()); j(not_equal, &promote_scheduled_exception); + bind(&exception_handled); #if ENABLE_EXTRA_CHECKS // Check if the function returned a valid JavaScript value. @@ -819,11 +811,19 @@ void MacroAssembler::CallApiFunctionAndReturn(Address function_address, bind(&ok); #endif - LeaveApiExitFrame(); + bool restore_context = context_restore_operand != NULL; + if (restore_context) { + movq(rsi, *context_restore_operand); + } + LeaveApiExitFrame(!restore_context); ret(stack_space * kPointerSize); bind(&promote_scheduled_exception); - TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1); + { + FrameScope frame(this, StackFrame::INTERNAL); + CallRuntime(Runtime::kPromoteScheduledException, 0); + } + jmp(&exception_handled); // HandleScope limit has changed. Delete allocated extensions. bind(&delete_allocated_handles); @@ -936,6 +936,42 @@ void MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, } +void MacroAssembler::Cvtlsi2sd(XMMRegister dst, Register src) { + xorps(dst, dst); + cvtlsi2sd(dst, src); +} + + +void MacroAssembler::Cvtlsi2sd(XMMRegister dst, const Operand& src) { + xorps(dst, dst); + cvtlsi2sd(dst, src); +} + + +void MacroAssembler::Load(Register dst, const Operand& src, Representation r) { + ASSERT(!r.IsDouble()); + if (r.IsByte()) { + movzxbl(dst, src); + } else if (r.IsInteger32()) { + movl(dst, src); + } else { + movq(dst, src); + } +} + + +void MacroAssembler::Store(const Operand& dst, Register src, Representation r) { + ASSERT(!r.IsDouble()); + if (r.IsByte()) { + movb(dst, src); + } else if (r.IsInteger32()) { + movl(dst, src); + } else { + movq(dst, src); + } +} + + void MacroAssembler::Set(Register dst, int64_t x) { if (x == 0) { xorl(dst, dst); @@ -1423,28 +1459,6 @@ void MacroAssembler::JumpUnlessBothNonNegativeSmi(Register src1, } -void MacroAssembler::SmiTryAddConstant(Register dst, - Register src, - Smi* constant, - Label* on_not_smi_result, - Label::Distance near_jump) { - // Does not assume that src is a smi. - ASSERT_EQ(static_cast<int>(1), static_cast<int>(kSmiTagMask)); - STATIC_ASSERT(kSmiTag == 0); - ASSERT(!dst.is(kScratchRegister)); - ASSERT(!src.is(kScratchRegister)); - - JumpIfNotSmi(src, on_not_smi_result, near_jump); - Register tmp = (dst.is(src) ? kScratchRegister : dst); - LoadSmiConstant(tmp, constant); - addq(tmp, src); - j(overflow, on_not_smi_result, near_jump); - if (dst.is(src)) { - movq(dst, tmp); - } -} - - void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) { if (constant->value() == 0) { if (!dst.is(src)) { @@ -1513,10 +1527,14 @@ void MacroAssembler::SmiAddConstant(Register dst, } else if (dst.is(src)) { ASSERT(!dst.is(kScratchRegister)); + Label done; LoadSmiConstant(kScratchRegister, constant); - addq(kScratchRegister, src); - j(overflow, on_not_smi_result, near_jump); - movq(dst, kScratchRegister); + addq(dst, kScratchRegister); + j(no_overflow, &done, Label::kNear); + // Restore src. 
+ subq(dst, kScratchRegister); + jmp(on_not_smi_result, near_jump); + bind(&done); } else { LoadSmiConstant(dst, constant); addq(dst, src); @@ -1616,6 +1634,29 @@ void MacroAssembler::SmiNeg(Register dst, } +template<class T> +static void SmiAddHelper(MacroAssembler* masm, + Register dst, + Register src1, + T src2, + Label* on_not_smi_result, + Label::Distance near_jump) { + if (dst.is(src1)) { + Label done; + masm->addq(dst, src2); + masm->j(no_overflow, &done, Label::kNear); + // Restore src1. + masm->subq(dst, src2); + masm->jmp(on_not_smi_result, near_jump); + masm->bind(&done); + } else { + masm->movq(dst, src1); + masm->addq(dst, src2); + masm->j(overflow, on_not_smi_result, near_jump); + } +} + + void MacroAssembler::SmiAdd(Register dst, Register src1, Register src2, @@ -1623,16 +1664,7 @@ void MacroAssembler::SmiAdd(Register dst, Label::Distance near_jump) { ASSERT_NOT_NULL(on_not_smi_result); ASSERT(!dst.is(src2)); - if (dst.is(src1)) { - movq(kScratchRegister, src1); - addq(kScratchRegister, src2); - j(overflow, on_not_smi_result, near_jump); - movq(dst, kScratchRegister); - } else { - movq(dst, src1); - addq(dst, src2); - j(overflow, on_not_smi_result, near_jump); - } + SmiAddHelper<Register>(this, dst, src1, src2, on_not_smi_result, near_jump); } @@ -1642,17 +1674,8 @@ void MacroAssembler::SmiAdd(Register dst, Label* on_not_smi_result, Label::Distance near_jump) { ASSERT_NOT_NULL(on_not_smi_result); - if (dst.is(src1)) { - movq(kScratchRegister, src1); - addq(kScratchRegister, src2); - j(overflow, on_not_smi_result, near_jump); - movq(dst, kScratchRegister); - } else { - ASSERT(!src2.AddressUsesRegister(dst)); - movq(dst, src1); - addq(dst, src2); - j(overflow, on_not_smi_result, near_jump); - } + ASSERT(!src2.AddressUsesRegister(dst)); + SmiAddHelper<Operand>(this, dst, src1, src2, on_not_smi_result, near_jump); } @@ -1675,34 +1698,37 @@ void MacroAssembler::SmiAdd(Register dst, } -void MacroAssembler::SmiSub(Register dst, - Register src1, - Register src2, - Label* on_not_smi_result, - Label::Distance near_jump) { - ASSERT_NOT_NULL(on_not_smi_result); - ASSERT(!dst.is(src2)); +template<class T> +static void SmiSubHelper(MacroAssembler* masm, + Register dst, + Register src1, + T src2, + Label* on_not_smi_result, + Label::Distance near_jump) { if (dst.is(src1)) { - cmpq(dst, src2); - j(overflow, on_not_smi_result, near_jump); - subq(dst, src2); + Label done; + masm->subq(dst, src2); + masm->j(no_overflow, &done, Label::kNear); + // Restore src1. + masm->addq(dst, src2); + masm->jmp(on_not_smi_result, near_jump); + masm->bind(&done); } else { - movq(dst, src1); - subq(dst, src2); - j(overflow, on_not_smi_result, near_jump); + masm->movq(dst, src1); + masm->subq(dst, src2); + masm->j(overflow, on_not_smi_result, near_jump); } } -void MacroAssembler::SmiSub(Register dst, Register src1, Register src2) { - // No overflow checking. Use only when it's known that - // overflowing is impossible (e.g., subtracting two positive smis). 
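SmiAddHelper above replaces the old kScratchRegister dance with an add-then-restore pattern: when dst aliases src1, the addition is performed in place and undone on overflow, so the bailout path still observes the original value. The same shape modeled in C++ (the helper name is mine; __builtin_add_overflow is a GCC/Clang intrinsic):

```cpp
#include <cstdint>
#include <cstdio>

// C++ model of the dst.is(src1) branch of SmiAddHelper: try the addition in
// place and leave the input untouched on overflow, so the bailout path
// (on_not_smi_result) still observes the original value.
static bool TryAddInPlace(int64_t* dst, int64_t src2) {
  int64_t sum;
  if (__builtin_add_overflow(*dst, src2, &sum)) {
    return false;  // *dst unchanged, mirroring the subq(dst, src2) restore
  }
  *dst = sum;
  return true;
}

int main() {
  int64_t v = INT64_MAX - 1;
  std::printf("ok=%d v=%lld\n", TryAddInPlace(&v, 1), (long long)v);  // ok=1
  std::printf("ok=%d v=%lld\n", TryAddInPlace(&v, 1), (long long)v);  // ok=0, v intact
  return 0;
}
```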
+void MacroAssembler::SmiSub(Register dst, + Register src1, + Register src2, + Label* on_not_smi_result, + Label::Distance near_jump) { + ASSERT_NOT_NULL(on_not_smi_result); ASSERT(!dst.is(src2)); - if (!dst.is(src1)) { - movq(dst, src1); - } - subq(dst, src2); - Assert(no_overflow, kSmiSubtractionOverflow); + SmiSubHelper<Register>(this, dst, src1, src2, on_not_smi_result, near_jump); } @@ -1712,29 +1738,36 @@ void MacroAssembler::SmiSub(Register dst, Label* on_not_smi_result, Label::Distance near_jump) { ASSERT_NOT_NULL(on_not_smi_result); - if (dst.is(src1)) { - movq(kScratchRegister, src2); - cmpq(src1, kScratchRegister); - j(overflow, on_not_smi_result, near_jump); - subq(src1, kScratchRegister); - } else { - movq(dst, src1); - subq(dst, src2); - j(overflow, on_not_smi_result, near_jump); - } + ASSERT(!src2.AddressUsesRegister(dst)); + SmiSubHelper<Operand>(this, dst, src1, src2, on_not_smi_result, near_jump); } -void MacroAssembler::SmiSub(Register dst, - Register src1, - const Operand& src2) { +template<class T> +static void SmiSubNoOverflowHelper(MacroAssembler* masm, + Register dst, + Register src1, + T src2) { // No overflow checking. Use only when it's known that // overflowing is impossible (e.g., subtracting two positive smis). if (!dst.is(src1)) { - movq(dst, src1); + masm->movq(dst, src1); } - subq(dst, src2); - Assert(no_overflow, kSmiSubtractionOverflow); + masm->subq(dst, src2); + masm->Assert(no_overflow, kSmiSubtractionOverflow); +} + + +void MacroAssembler::SmiSub(Register dst, Register src1, Register src2) { + ASSERT(!dst.is(src2)); + SmiSubNoOverflowHelper<Register>(this, dst, src1, src2); +} + + +void MacroAssembler::SmiSub(Register dst, + Register src1, + const Operand& src2) { + SmiSubNoOverflowHelper<Operand>(this, dst, src1, src2); } @@ -2240,6 +2273,90 @@ void MacroAssembler::Test(const Operand& src, Smi* source) { // ---------------------------------------------------------------------------- +void MacroAssembler::LookupNumberStringCache(Register object, + Register result, + Register scratch1, + Register scratch2, + Label* not_found) { + // Use of registers. Register result is used as a temporary. + Register number_string_cache = result; + Register mask = scratch1; + Register scratch = scratch2; + + // Load the number string cache. + LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex); + + // Make the hash mask from the length of the number string cache. It + // contains two elements (number and string) for each cache entry. + SmiToInteger32( + mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset)); + shrl(mask, Immediate(1)); + subq(mask, Immediate(1)); // Make mask. + + // Calculate the entry in the number string cache. The hash value in the + // number string cache for smis is just the smi value, and the hash for + // doubles is the xor of the upper and lower words. See + // Heap::GetNumberStringCache. + Label is_smi; + Label load_result_from_cache; + JumpIfSmi(object, &is_smi); + CheckMap(object, + isolate()->factory()->heap_number_map(), + not_found, + DONT_DO_SMI_CHECK); + + STATIC_ASSERT(8 == kDoubleSize); + movl(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4)); + xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset)); + and_(scratch, mask); + // Each entry in string cache consists of two pointer sized fields, + // but times_twice_pointer_size (multiplication by 16) scale factor + // is not supported by addrmode on x64 platform. + // So we have to premultiply entry index before lookup. 
+ shl(scratch, Immediate(kPointerSizeLog2 + 1)); + + Register index = scratch; + Register probe = mask; + movq(probe, + FieldOperand(number_string_cache, + index, + times_1, + FixedArray::kHeaderSize)); + JumpIfSmi(probe, not_found); + movsd(xmm0, FieldOperand(object, HeapNumber::kValueOffset)); + ucomisd(xmm0, FieldOperand(probe, HeapNumber::kValueOffset)); + j(parity_even, not_found); // Bail out if NaN is involved. + j(not_equal, not_found); // The cache did not contain this value. + jmp(&load_result_from_cache); + + bind(&is_smi); + SmiToInteger32(scratch, object); + and_(scratch, mask); + // Each entry in string cache consists of two pointer sized fields, + // but times_twice_pointer_size (multiplication by 16) scale factor + // is not supported by addrmode on x64 platform. + // So we have to premultiply entry index before lookup. + shl(scratch, Immediate(kPointerSizeLog2 + 1)); + + // Check if the entry is the smi we are looking for. + cmpq(object, + FieldOperand(number_string_cache, + index, + times_1, + FixedArray::kHeaderSize)); + j(not_equal, not_found); + + // Get the result from the cache. + bind(&load_result_from_cache); + movq(result, + FieldOperand(number_string_cache, + index, + times_1, + FixedArray::kHeaderSize + kPointerSize)); + IncrementCounter(isolate()->counters()->number_to_string_native(), 1); +} + + void MacroAssembler::JumpIfNotString(Register object, Register object_map, Label* not_string, @@ -2376,8 +2493,7 @@ void MacroAssembler::Move(Register dst, Handle<Object> source) { if (source->IsSmi()) { Move(dst, Smi::cast(*source)); } else { - ASSERT(source->IsHeapObject()); - movq(dst, source, RelocInfo::EMBEDDED_OBJECT); + MoveHeapObject(dst, source); } } @@ -2387,8 +2503,7 @@ void MacroAssembler::Move(const Operand& dst, Handle<Object> source) { if (source->IsSmi()) { Move(dst, Smi::cast(*source)); } else { - ASSERT(source->IsHeapObject()); - movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT); + MoveHeapObject(kScratchRegister, source); movq(dst, kScratchRegister); } } @@ -2399,8 +2514,7 @@ void MacroAssembler::Cmp(Register dst, Handle<Object> source) { if (source->IsSmi()) { Cmp(dst, Smi::cast(*source)); } else { - ASSERT(source->IsHeapObject()); - movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT); + MoveHeapObject(kScratchRegister, source); cmpq(dst, kScratchRegister); } } @@ -2411,8 +2525,7 @@ void MacroAssembler::Cmp(const Operand& dst, Handle<Object> source) { if (source->IsSmi()) { Cmp(dst, Smi::cast(*source)); } else { - ASSERT(source->IsHeapObject()); - movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT); + MoveHeapObject(kScratchRegister, source); cmpq(dst, kScratchRegister); } } @@ -2423,47 +2536,22 @@ void MacroAssembler::Push(Handle<Object> source) { if (source->IsSmi()) { Push(Smi::cast(*source)); } else { - ASSERT(source->IsHeapObject()); - movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT); + MoveHeapObject(kScratchRegister, source); push(kScratchRegister); } } -void MacroAssembler::LoadHeapObject(Register result, - Handle<HeapObject> object) { +void MacroAssembler::MoveHeapObject(Register result, + Handle<Object> object) { AllowDeferredHandleDereference using_raw_address; + ASSERT(object->IsHeapObject()); if (isolate()->heap()->InNewSpace(*object)) { Handle<Cell> cell = isolate()->factory()->NewCell(object); movq(result, cell, RelocInfo::CELL); movq(result, Operand(result, 0)); } else { - Move(result, object); - } -} - - -void MacroAssembler::CmpHeapObject(Register reg, Handle<HeapObject> object) { - 
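The LookupNumberStringCache code above derives its cache index exactly as its comments describe: smis hash to their own value, heap numbers to the xor of their two 32-bit halves, and the index is premultiplied by the two-slot entry size because x64 addressing modes cannot scale by 16. A scalar model of that computation (helper names are mine, mirroring the comments, not Heap::GetNumberStringCache itself):

```cpp
#include <cstdint>
#include <cstdio>
#include <cstring>

// Each logical cache entry spans two array slots (number, string), so the
// mask comes from half the array length and the slot index is premultiplied
// by 2 -- in the assembly, a shift by kPointerSizeLog2 + 1.
static int SmiCacheSlot(int32_t smi_value, int array_length) {
  int mask = (array_length >> 1) - 1;
  return (smi_value & mask) * 2;
}

static int DoubleCacheSlot(double value, int array_length) {
  int mask = (array_length >> 1) - 1;
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof bits);  // xor of upper and lower words
  uint32_t hash = static_cast<uint32_t>(bits) ^ static_cast<uint32_t>(bits >> 32);
  return static_cast<int>(hash & static_cast<uint32_t>(mask)) * 2;
}

int main() {
  std::printf("%d %d\n", SmiCacheSlot(7, 128), DoubleCacheSlot(2.5, 128));
  return 0;
}
```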
AllowDeferredHandleDereference using_raw_address; - if (isolate()->heap()->InNewSpace(*object)) { - Handle<Cell> cell = isolate()->factory()->NewCell(object); - movq(kScratchRegister, cell, RelocInfo::CELL); - cmpq(reg, Operand(kScratchRegister, 0)); - } else { - Cmp(reg, object); - } -} - - -void MacroAssembler::PushHeapObject(Handle<HeapObject> object) { - AllowDeferredHandleDereference using_raw_address; - if (isolate()->heap()->InNewSpace(*object)) { - Handle<Cell> cell = isolate()->factory()->NewCell(object); - movq(kScratchRegister, cell, RelocInfo::CELL); - movq(kScratchRegister, Operand(kScratchRegister, 0)); - push(kScratchRegister); - } else { - Push(object); + movq(result, object, RelocInfo::EMBEDDED_OBJECT); } } @@ -2548,7 +2636,8 @@ void MacroAssembler::Call(Handle<Code> code_object, #ifdef DEBUG int end_position = pc_offset() + CallSize(code_object); #endif - ASSERT(RelocInfo::IsCodeTarget(rmode)); + ASSERT(RelocInfo::IsCodeTarget(rmode) || + rmode == RelocInfo::CODE_AGE_SEQUENCE); call(code_object, rmode, ast_id); #ifdef DEBUG CHECK_EQ(end_position, pc_offset()); @@ -2651,7 +2740,8 @@ Operand MacroAssembler::SafepointRegisterSlot(Register reg) { void MacroAssembler::PushTryHandler(StackHandler::Kind kind, int handler_index) { // Adjust this code if not the case. - STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize); + STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize + + kFPOnStackSize); STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize); STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize); @@ -2710,7 +2800,8 @@ void MacroAssembler::JumpToHandlerEntry() { void MacroAssembler::Throw(Register value) { // Adjust this code if not the case. - STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize); + STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize + + kFPOnStackSize); STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize); STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize); @@ -2750,7 +2841,8 @@ void MacroAssembler::Throw(Register value) { void MacroAssembler::ThrowUncatchable(Register value) { // Adjust this code if not the case. - STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize); + STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize + + kFPOnStackSize); STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize); STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize); @@ -2917,7 +3009,7 @@ void MacroAssembler::StoreNumberToDoubleElements( // Value is a smi. convert to a double and store. // Preserve original value. SmiToInteger32(kScratchRegister, maybe_number); - cvtlsi2sd(xmm_scratch, kScratchRegister); + Cvtlsi2sd(xmm_scratch, kScratchRegister); movsd(FieldOperand(elements, index, times_8, FixedDoubleArray::kHeaderSize - elements_offset), xmm_scratch); @@ -3050,7 +3142,7 @@ void MacroAssembler::DoubleToI(Register result_reg, Label* conversion_failed, Label::Distance dst) { cvttsd2si(result_reg, input_reg); - cvtlsi2sd(xmm0, result_reg); + Cvtlsi2sd(xmm0, result_reg); ucomisd(xmm0, input_reg); j(not_equal, conversion_failed, dst); j(parity_even, conversion_failed, dst); // NaN. 
@@ -3087,7 +3179,7 @@ void MacroAssembler::TaggedToI(Register result_reg, movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset)); cvttsd2si(result_reg, xmm0); - cvtlsi2sd(temp, result_reg); + Cvtlsi2sd(temp, result_reg); ucomisd(xmm0, temp); RecordComment("Deferred TaggedToI: lost precision"); j(not_equal, lost_precision, dst); @@ -3472,7 +3564,7 @@ void MacroAssembler::InvokeFunction(Handle<JSFunction> function, ASSERT(flag == JUMP_FUNCTION || has_frame()); // Get the function and setup the context. - LoadHeapObject(rdi, function); + Move(rdi, function); movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset)); // We call indirectly through the code field in the function to @@ -3559,6 +3651,30 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected, } +void MacroAssembler::Prologue(PrologueFrameMode frame_mode) { + if (frame_mode == BUILD_STUB_FRAME) { + push(rbp); // Caller's frame pointer. + movq(rbp, rsp); + push(rsi); // Callee's context. + Push(Smi::FromInt(StackFrame::STUB)); + } else { + PredictableCodeSizeScope predictible_code_size_scope(this, + kNoCodeAgeSequenceLength); + if (isolate()->IsCodePreAgingActive()) { + // Pre-age the code. + Call(isolate()->builtins()->MarkCodeAsExecutedOnce(), + RelocInfo::CODE_AGE_SEQUENCE); + Nop(kNoCodeAgeSequenceLength - Assembler::kShortCallInstructionLength); + } else { + push(rbp); // Caller's frame pointer. + movq(rbp, rsp); + push(rsi); // Callee's context. + push(rdi); // Callee's JS function. + } + } +} + + void MacroAssembler::EnterFrame(StackFrame::Type type) { push(rbp); movq(rbp, rsp); @@ -3590,9 +3706,10 @@ void MacroAssembler::LeaveFrame(StackFrame::Type type) { void MacroAssembler::EnterExitFramePrologue(bool save_rax) { // Set up the frame structure on the stack. // All constants are relative to the frame pointer of the exit frame. - ASSERT(ExitFrameConstants::kCallerSPDisplacement == +2 * kPointerSize); - ASSERT(ExitFrameConstants::kCallerPCOffset == +1 * kPointerSize); - ASSERT(ExitFrameConstants::kCallerFPOffset == 0 * kPointerSize); + ASSERT(ExitFrameConstants::kCallerSPDisplacement == + kFPOnStackSize + kPCOnStackSize); + ASSERT(ExitFrameConstants::kCallerPCOffset == kFPOnStackSize); + ASSERT(ExitFrameConstants::kCallerFPOffset == 0 * kPointerSize); push(rbp); movq(rbp, rsp); @@ -3620,7 +3737,7 @@ void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space, #endif // Optionally save all XMM registers. if (save_doubles) { - int space = XMMRegister::kMaxNumRegisters * kDoubleSize + + int space = XMMRegister::kMaxNumAllocatableRegisters * kDoubleSize + arg_stack_space * kPointerSize; subq(rsp, Immediate(space)); int offset = -2 * kPointerSize; @@ -3683,23 +3800,25 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles) { PushReturnAddressFrom(rcx); - LeaveExitFrameEpilogue(); + LeaveExitFrameEpilogue(true); } -void MacroAssembler::LeaveApiExitFrame() { +void MacroAssembler::LeaveApiExitFrame(bool restore_context) { movq(rsp, rbp); pop(rbp); - LeaveExitFrameEpilogue(); + LeaveExitFrameEpilogue(restore_context); } -void MacroAssembler::LeaveExitFrameEpilogue() { +void MacroAssembler::LeaveExitFrameEpilogue(bool restore_context) { // Restore current context from top and clear it in debug mode. 
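DoubleToI and TaggedToI above both rely on a convert-and-check pattern: truncate with cvttsd2si, convert the integer back with the new Cvtlsi2sd wrapper, and compare with ucomisd; any mismatch, or NaN via the parity flag, means precision was lost. The same test in scalar C++ (the helper name and the explicit range guard are mine; the guard stands in for cvttsd2si's out-of-range sentinel):

```cpp
#include <cmath>
#include <cstdint>
#include <cstdio>

static bool DoubleToInt32Exact(double input, int32_t* out) {
  if (std::isnan(input)) return false;                // the parity_even case
  if (input < -2147483648.0 || input > 2147483647.0)  // cast would be UB here;
    return false;                                     // cvttsd2si yields 0x80000000
  int32_t truncated = static_cast<int32_t>(input);    // like cvttsd2si
  if (static_cast<double>(truncated) != input)        // like Cvtlsi2sd + ucomisd
    return false;
  *out = truncated;
  return true;
}

int main() {
  int32_t v;
  std::printf("%d\n", DoubleToInt32Exact(42.0, &v));   // 1: exact
  std::printf("%d\n", DoubleToInt32Exact(42.5, &v));   // 0: lost precision
  return 0;
}
```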
ExternalReference context_address(Isolate::kContextAddress, isolate()); Operand context_operand = ExternalOperand(context_address); - movq(rsi, context_operand); + if (restore_context) { + movq(rsi, context_operand); + } #ifdef DEBUG movq(context_operand, Immediate(0)); #endif @@ -3971,6 +4090,10 @@ void MacroAssembler::Allocate(int object_size, // Load address of new object into result. LoadAllocationTopHelper(result, scratch, flags); + if (isolate()->heap_profiler()->is_tracking_allocations()) { + RecordObjectAllocation(isolate(), result, object_size); + } + // Align the next allocation. Storing the filler map without checking top is // safe in new-space because the limit of the heap is aligned there. if (((flags & DOUBLE_ALIGNMENT) != 0) && FLAG_debug_code) { @@ -4050,6 +4173,10 @@ void MacroAssembler::Allocate(Register object_size, // Load address of new object into result. LoadAllocationTopHelper(result, scratch, flags); + if (isolate()->heap_profiler()->is_tracking_allocations()) { + RecordObjectAllocation(isolate(), result, object_size); + } + // Align the next allocation. Storing the filler map without checking top is // safe in new-space because the limit of the heap is aligned there. if (((flags & DOUBLE_ALIGNMENT) != 0) && FLAG_debug_code) { @@ -4791,8 +4918,8 @@ void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) { void MacroAssembler::TestJSArrayForAllocationMemento( Register receiver_reg, - Register scratch_reg) { - Label no_memento_available; + Register scratch_reg, + Label* no_memento_found) { ExternalReference new_space_start = ExternalReference::new_space_start(isolate()); ExternalReference new_space_allocation_top = @@ -4802,12 +4929,43 @@ void MacroAssembler::TestJSArrayForAllocationMemento( JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag)); movq(kScratchRegister, new_space_start); cmpq(scratch_reg, kScratchRegister); - j(less, &no_memento_available); + j(less, no_memento_found); cmpq(scratch_reg, ExternalOperand(new_space_allocation_top)); - j(greater, &no_memento_available); + j(greater, no_memento_found); CompareRoot(MemOperand(scratch_reg, -AllocationMemento::kSize), Heap::kAllocationMementoMapRootIndex); - bind(&no_memento_available); +} + + +void MacroAssembler::RecordObjectAllocation(Isolate* isolate, + Register object, + Register object_size) { + FrameScope frame(this, StackFrame::EXIT); + PushSafepointRegisters(); + PrepareCallCFunction(3); + // In case object is rdx + movq(kScratchRegister, object); + movq(arg_reg_3, object_size); + movq(arg_reg_2, kScratchRegister); + movq(arg_reg_1, isolate, RelocInfo::EXTERNAL_REFERENCE); + CallCFunction( + ExternalReference::record_object_allocation_function(isolate), 3); + PopSafepointRegisters(); +} + + +void MacroAssembler::RecordObjectAllocation(Isolate* isolate, + Register object, + int object_size) { + FrameScope frame(this, StackFrame::EXIT); + PushSafepointRegisters(); + PrepareCallCFunction(3); + movq(arg_reg_2, object); + movq(arg_reg_3, Immediate(object_size)); + movq(arg_reg_1, isolate, RelocInfo::EXTERNAL_REFERENCE); + CallCFunction( + ExternalReference::record_object_allocation_function(isolate), 3); + PopSafepointRegisters(); } diff --git a/deps/v8/src/x64/macro-assembler-x64.h b/deps/v8/src/x64/macro-assembler-x64.h index 09c8a800cc..24374349a2 100644 --- a/deps/v8/src/x64/macro-assembler-x64.h +++ b/deps/v8/src/x64/macro-assembler-x64.h @@ -282,6 +282,9 @@ class MacroAssembler: public Assembler { void DebugBreak(); #endif + // Generates function and stub prologue code. 
+ void Prologue(PrologueFrameMode frame_mode); + // Enter specific kind of exit frame; either in normal or // debug mode. Expects the number of arguments in register rax and // sets up the number of arguments in register rdi and the pointer @@ -302,7 +305,7 @@ class MacroAssembler: public Assembler { // Leave the current exit frame. Expects/provides the return value in // register rax (untouched). - void LeaveApiExitFrame(); + void LeaveApiExitFrame(bool restore_context); // Push and pop the registers that can hold pointers. void PushSafepointRegisters() { Pushad(); } @@ -532,15 +535,6 @@ class MacroAssembler: public Assembler { // Smis represent a subset of integers. The subset is always equivalent to // a two's complement interpretation of a fixed number of bits. - // Optimistically adds an integer constant to a supposed smi. - // If the src is not a smi, or the result is not a smi, jump to - // the label. - void SmiTryAddConstant(Register dst, - Register src, - Smi* constant, - Label* on_not_smi_result, - Label::Distance near_jump = Label::kFar); - // Add an integer constant to a tagged smi, giving a tagged smi as result. // No overflow testing on the result is done. void SmiAddConstant(Register dst, Register src, Smi* constant); @@ -578,8 +572,8 @@ class MacroAssembler: public Assembler { Label::Distance near_jump = Label::kFar); // Adds smi values and return the result as a smi. - // If dst is src1, then src1 will be destroyed, even if - // the operation is unsuccessful. + // If dst is src1, then src1 will be destroyed if the operation is + // successful, otherwise kept intact. void SmiAdd(Register dst, Register src1, Register src2, @@ -596,18 +590,13 @@ class MacroAssembler: public Assembler { Register src2); // Subtracts smi values and return the result as a smi. - // If dst is src1, then src1 will be destroyed, even if - // the operation is unsuccessful. + // If dst is src1, then src1 will be destroyed if the operation is + // successful, otherwise kept intact. void SmiSub(Register dst, Register src1, Register src2, Label* on_not_smi_result, Label::Distance near_jump = Label::kFar); - - void SmiSub(Register dst, - Register src1, - Register src2); - void SmiSub(Register dst, Register src1, const Operand& src2, @@ -616,6 +605,10 @@ class MacroAssembler: public Assembler { void SmiSub(Register dst, Register src1, + Register src2); + + void SmiSub(Register dst, + Register src1, const Operand& src2); // Multiplies smi values and return the result as a smi, @@ -739,6 +732,17 @@ class MacroAssembler: public Assembler { // --------------------------------------------------------------------------- // String macros. + // Generate code to do a lookup in the number string cache. If the number in + // the register object is found in the cache the generated code falls through + // with the result in the result register. The object and the result register + // can be the same. If the number is not found in the cache the code jumps to + // the label not_found with only the content of register object unchanged. + void LookupNumberStringCache(Register object, + Register result, + Register scratch1, + Register scratch2, + Label* not_found); + // If object is a string, its map is loaded into object_map. void JumpIfNotString(Register object, Register object_map, @@ -780,10 +784,20 @@ class MacroAssembler: public Assembler { // --------------------------------------------------------------------------- // Macro instructions. + // Load/store with specific representation. 
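The representation-aware Load and Store declared below let call sites derive the access width from a Representation instead of hard-coding movq. A minimal sketch of the intended dispatch, assuming only the integer32 and tagged cases are distinguished:

    void MacroAssembler::Load(Register dst, const Operand& src, Representation r) {
      if (r.IsInteger32()) {
        movl(dst, src);  // Untagged 32-bit payload.
      } else {
        movq(dst, src);  // Smi or tagged pointer: full 64-bit load.
      }
    }

    void MacroAssembler::Store(const Operand& dst, Register src, Representation r) {
      if (r.IsInteger32()) {
        movl(dst, src);
      } else {
        movq(dst, src);
      }
    }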
+ void Load(Register dst, const Operand& src, Representation r); + void Store(const Operand& dst, Register src, Representation r); + // Load a register with a long value as efficiently as possible. void Set(Register dst, int64_t x); void Set(const Operand& dst, int64_t x); + // The cvtsi2sd instruction writes only to the low 64 bits of the dst + // register, which hinders register renaming and lengthens dependence + // chains. So we use xorps to clear the dst register before cvtsi2sd to + // break the false dependence. + void Cvtlsi2sd(XMMRegister dst, Register src); + void Cvtlsi2sd(XMMRegister dst, const Operand& src); + // Move if the registers are not identical. void Move(Register target, Register source); @@ -801,27 +815,7 @@ class MacroAssembler: public Assembler { // Load a heap object and handle the case of new-space objects by // indirecting via a global cell. - void LoadHeapObject(Register result, Handle<HeapObject> object); - void CmpHeapObject(Register reg, Handle<HeapObject> object); - void PushHeapObject(Handle<HeapObject> object); - - void LoadObject(Register result, Handle<Object> object) { - AllowDeferredHandleDereference heap_object_check; - if (object->IsHeapObject()) { - LoadHeapObject(result, Handle<HeapObject>::cast(object)); - } else { - Move(result, object); - } - } - - void CmpObject(Register reg, Handle<Object> object) { - AllowDeferredHandleDereference heap_object_check; - if (object->IsHeapObject()) { - CmpHeapObject(reg, Handle<HeapObject>::cast(object)); - } else { - Cmp(reg, object); - } - } + void MoveHeapObject(Register result, Handle<Object> object); // Load a global cell into a register. void LoadGlobalCell(Register dst, Handle<Cell> cell); @@ -835,6 +829,8 @@ class MacroAssembler: public Assembler { void Pop(Register dst) { pop(dst); } void PushReturnAddressFrom(Register src) { push(src); } void PopReturnAddressTo(Register dst) { pop(dst); } + void MoveDouble(Register dst, const Operand& src) { movq(dst, src); } + void MoveDouble(const Operand& dst, Register src) { movq(dst, src); } // Control Flow void Jump(Address destination, RelocInfo::Mode rmode); @@ -1104,6 +1100,15 @@ class MacroAssembler: public Assembler { Label* gc_required, AllocationFlags flags); + // Record a JS object allocation if allocation tracking is on. + void RecordObjectAllocation(Isolate* isolate, + Register object, + Register object_size); + + void RecordObjectAllocation(Isolate* isolate, + Register object, + int object_size); + // Undo allocation in new space. The object passed and objects allocated after // it will no longer be allocated. Make sure that no pointers are left to the // object(s) no longer allocated as they would be invalid when allocation is @@ -1232,13 +1237,20 @@ class MacroAssembler: public Assembler { void StubReturn(int argc); // Call a runtime routine. - void CallRuntime(const Runtime::Function* f, int num_arguments); + void CallRuntime(const Runtime::Function* f, + int num_arguments, + SaveFPRegsMode save_doubles = kDontSaveFPRegs); // Call a runtime function and save the value of XMM registers. - void CallRuntimeSaveDoubles(Runtime::FunctionId id); + void CallRuntimeSaveDoubles(Runtime::FunctionId id) { + const Runtime::Function* function = Runtime::FunctionForId(id); + CallRuntime(function, function->nargs, kSaveFPRegs); + } // Convenience function: Same as above, but takes the fid instead.
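With SaveFPRegsMode folded into CallRuntime, the fid overload below and CallRuntimeSaveDoubles both reduce to the single three-argument implementation. Typical call sites, with the runtime function id chosen only for illustration:

    __ CallRuntime(Runtime::kStackGuard, 0);          // XMM registers may be clobbered.
    __ CallRuntimeSaveDoubles(Runtime::kStackGuard);  // XMM registers saved and
                                                      // restored around the call.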
- void CallRuntime(Runtime::FunctionId id, int num_arguments); + void CallRuntime(Runtime::FunctionId id, int num_arguments) { + CallRuntime(Runtime::FunctionForId(id), num_arguments); + } // Convenience function: call an external reference. void CallExternalReference(const ExternalReference& ext, @@ -1274,7 +1286,8 @@ class MacroAssembler: public Assembler { Address thunk_address, Register thunk_last_arg, int stack_space, - int return_value_offset_from_rbp); + Operand return_value_operand, + Operand* context_restore_operand); // Before calling a C-function from generated code, align arguments on stack. // After aligning the frame, arguments must be stored in rsp[0], rsp[8], @@ -1384,9 +1397,20 @@ class MacroAssembler: public Assembler { // to another type. // On entry, receiver_reg should point to the array object. // scratch_reg gets clobbered. - // If allocation info is present, condition flags are set to equal + // If allocation info is present, condition flags are set to equal. void TestJSArrayForAllocationMemento(Register receiver_reg, - Register scratch_reg); + Register scratch_reg, + Label* no_memento_found); + + void JumpIfJSArrayHasAllocationMemento(Register receiver_reg, + Register scratch_reg, + Label* memento_found) { + Label no_memento_found; + TestJSArrayForAllocationMemento(receiver_reg, scratch_reg, + &no_memento_found); + j(equal, memento_found); + bind(&no_memento_found); + } private: // Order general registers are pushed by Pushad. @@ -1430,7 +1454,7 @@ class MacroAssembler: public Assembler { // accessible via StackSpaceOperand. void EnterExitFrameEpilogue(int arg_stack_space, bool save_doubles); - void LeaveExitFrameEpilogue(); + void LeaveExitFrameEpilogue(bool restore_context); // Allocation support helpers. // Loads the top of new-space into the result register. diff --git a/deps/v8/src/x64/stub-cache-x64.cc b/deps/v8/src/x64/stub-cache-x64.cc index 95276d530d..2a0c3675f2 100644 --- a/deps/v8/src/x64/stub-cache-x64.cc +++ b/deps/v8/src/x64/stub-cache-x64.cc @@ -304,32 +304,28 @@ void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm, Register receiver, Register scratch1, Register scratch2, - Label* miss, - bool support_wrappers) { + Label* miss) { Label check_wrapper; // Check if the object is a string leaving the instance type in the // scratch register. - GenerateStringCheck(masm, receiver, scratch1, miss, - support_wrappers ? &check_wrapper : miss); + GenerateStringCheck(masm, receiver, scratch1, miss, &check_wrapper); // Load length directly from the string. __ movq(rax, FieldOperand(receiver, String::kLengthOffset)); __ ret(0); - if (support_wrappers) { - // Check if the object is a JSValue wrapper. - __ bind(&check_wrapper); - __ cmpl(scratch1, Immediate(JS_VALUE_TYPE)); - __ j(not_equal, miss); + // Check if the object is a JSValue wrapper. + __ bind(&check_wrapper); + __ cmpl(scratch1, Immediate(JS_VALUE_TYPE)); + __ j(not_equal, miss); - // Check if the wrapped value is a string and load the length - // directly if it is. - __ movq(scratch2, FieldOperand(receiver, JSValue::kValueOffset)); - GenerateStringCheck(masm, scratch2, scratch1, miss, miss); - __ movq(rax, FieldOperand(scratch2, String::kLengthOffset)); - __ ret(0); - } + // Check if the wrapped value is a string and load the length + // directly if it is. 
+ __ movq(scratch2, FieldOperand(receiver, JSValue::kValueOffset)); + GenerateStringCheck(masm, scratch2, scratch1, miss, miss); + __ movq(rax, FieldOperand(scratch2, String::kLengthOffset)); + __ ret(0); } @@ -447,88 +443,96 @@ static void FreeSpaceForFastApiCall(MacroAssembler* masm, Register scratch) { // Generates call to API function. static void GenerateFastApiCall(MacroAssembler* masm, const CallOptimization& optimization, - int argc) { + int argc, + bool restore_context) { // ----------- S t a t e ------------- // -- rsp[0] : return address - // -- rsp[8] : object passing the type check - // (last fast api call extra argument, - // set by CheckPrototypes) - // -- rsp[16] : api function - // (first fast api call extra argument) - // -- rsp[24] : api call data - // -- rsp[32] : isolate - // -- rsp[40] : ReturnValue default value - // -- rsp[48] : ReturnValue - // - // -- rsp[56] : last argument + // -- rsp[8] - rsp[56] : FunctionCallbackInfo, incl. + // : object passing the type check + // (set by CheckPrototypes) + // -- rsp[64] : last argument // -- ... - // -- rsp[(argc + 6) * 8] : first argument - // -- rsp[(argc + 7) * 8] : receiver + // -- rsp[(argc + 7) * 8] : first argument + // -- rsp[(argc + 8) * 8] : receiver // ----------------------------------- + typedef FunctionCallbackArguments FCA; + StackArgumentsAccessor args(rsp, argc + kFastApiCallArguments); + + // Save calling context. + int offset = argc + kFastApiCallArguments; + __ movq(args.GetArgumentOperand(offset - FCA::kContextSaveIndex), rsi); + // Get the function and setup the context. Handle<JSFunction> function = optimization.constant_function(); - __ LoadHeapObject(rdi, function); + __ Move(rdi, function); __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset)); - - int api_call_argc = argc + kFastApiCallArguments; - StackArgumentsAccessor args(rsp, api_call_argc); - - // Pass the additional arguments. - __ movq(args.GetArgumentOperand(api_call_argc - 1), rdi); + // Construct the FunctionCallbackInfo on the stack. + __ movq(args.GetArgumentOperand(offset - FCA::kCalleeIndex), rdi); Handle<CallHandlerInfo> api_call_info = optimization.api_call_info(); Handle<Object> call_data(api_call_info->data(), masm->isolate()); if (masm->isolate()->heap()->InNewSpace(*call_data)) { __ Move(rcx, api_call_info); __ movq(rbx, FieldOperand(rcx, CallHandlerInfo::kDataOffset)); - __ movq(args.GetArgumentOperand(api_call_argc - 2), rbx); + __ movq(args.GetArgumentOperand(offset - FCA::kDataIndex), rbx); } else { - __ Move(args.GetArgumentOperand(api_call_argc - 2), call_data); + __ Move(args.GetArgumentOperand(offset - FCA::kDataIndex), call_data); } __ movq(kScratchRegister, ExternalReference::isolate_address(masm->isolate())); - __ movq(args.GetArgumentOperand(api_call_argc - 3), kScratchRegister); + __ movq(args.GetArgumentOperand(offset - FCA::kIsolateIndex), + kScratchRegister); __ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex); - __ movq(args.GetArgumentOperand(api_call_argc - 4), kScratchRegister); - __ movq(args.GetArgumentOperand(api_call_argc - 5), kScratchRegister); + __ movq(args.GetArgumentOperand(offset - FCA::kReturnValueDefaultValueIndex), + kScratchRegister); + __ movq(args.GetArgumentOperand(offset - FCA::kReturnValueOffset), + kScratchRegister); // Prepare arguments. 
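The STATIC_ASSERT change just below tracks a layout change: the fast API call now reserves one more implicit slot so the calling context (rsi) can be saved and, when requested, restored after the callback. The slot indices, as inferred from the stores above and the PropertyCallbackArguments asserts later in this file; treat the exact numbering as an assumption:

    enum FastApiCallSlot {               // One stack slot per implicit argument.
      kHolderIndex = 0,                  // Object passing the type check.
      kIsolateIndex = 1,
      kReturnValueDefaultValueIndex = 2,
      kReturnValueOffset = 3,
      kDataIndex = 4,
      kCalleeIndex = 5,                  // The API function itself.
      kContextSaveIndex = 6              // New slot; kFastApiCallArguments 6 -> 7.
    };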
- STATIC_ASSERT(kFastApiCallArguments == 6); - __ lea(rbx, Operand(rsp, kFastApiCallArguments * kPointerSize)); + STATIC_ASSERT(kFastApiCallArguments == 7); + __ lea(rbx, Operand(rsp, 1 * kPointerSize)); // Function address is a foreign pointer outside V8's heap. Address function_address = v8::ToCData<Address>(api_call_info->callback()); -#if defined(__MINGW64__) || defined(_WIN64) - Register arguments_arg = rcx; - Register callback_arg = rdx; -#else - Register arguments_arg = rdi; - Register callback_arg = rsi; -#endif - // Allocate the v8::Arguments structure in the arguments' space since // it's not controlled by GC. const int kApiStackSpace = 4; __ PrepareCallApiFunction(kApiStackSpace); - __ movq(StackSpaceOperand(0), rbx); // v8::Arguments::implicit_args_. - __ addq(rbx, Immediate(argc * kPointerSize)); - __ movq(StackSpaceOperand(1), rbx); // v8::Arguments::values_. - __ Set(StackSpaceOperand(2), argc); // v8::Arguments::length_. - // v8::Arguments::is_construct_call_. + __ movq(StackSpaceOperand(0), rbx); // FunctionCallbackInfo::implicit_args_. + __ addq(rbx, Immediate((argc + kFastApiCallArguments - 1) * kPointerSize)); + __ movq(StackSpaceOperand(1), rbx); // FunctionCallbackInfo::values_. + __ Set(StackSpaceOperand(2), argc); // FunctionCallbackInfo::length_. + // FunctionCallbackInfo::is_construct_call_. __ Set(StackSpaceOperand(3), 0); +#if defined(__MINGW64__) || defined(_WIN64) + Register arguments_arg = rcx; + Register callback_arg = rdx; +#else + Register arguments_arg = rdi; + Register callback_arg = rsi; +#endif + // v8::InvocationCallback's argument. __ lea(arguments_arg, StackSpaceOperand(0)); Address thunk_address = FUNCTION_ADDR(&InvokeFunctionCallback); + StackArgumentsAccessor args_from_rbp(rbp, kFastApiCallArguments, + ARGUMENTS_DONT_CONTAIN_RECEIVER); + Operand context_restore_operand = args_from_rbp.GetArgumentOperand( + kFastApiCallArguments - 1 - FCA::kContextSaveIndex); + Operand return_value_operand = args_from_rbp.GetArgumentOperand( + kFastApiCallArguments - 1 - FCA::kReturnValueOffset); - __ CallApiFunctionAndReturn(function_address, - thunk_address, - callback_arg, - api_call_argc + 1, - kFastApiCallArguments + 1); + __ CallApiFunctionAndReturn( + function_address, + thunk_address, + callback_arg, + argc + kFastApiCallArguments + 1, + return_value_operand, + restore_context ? &context_restore_operand : NULL); } @@ -542,26 +546,26 @@ static void GenerateFastApiCall(MacroAssembler* masm, ASSERT(optimization.is_simple_api_call()); ASSERT(!receiver.is(scratch)); - const int stack_space = kFastApiCallArguments + argc + 1; - // Copy return value. - __ movq(scratch, Operand(rsp, 0)); - // Assign stack space for the call arguments. - __ subq(rsp, Immediate(stack_space * kPointerSize)); - // Move the return address on top of the stack. - __ movq(Operand(rsp, 0), scratch); + const int fast_api_call_argc = argc + kFastApiCallArguments; + StackArgumentsAccessor args(rsp, fast_api_call_argc); + // The argc + 1 slots before the FastApiCall arguments hold the call's + // arguments plus the receiver (the extra 1). + const int kHolderIndex = argc + 1 + + kFastApiCallArguments - 1 - FunctionCallbackArguments::kHolderIndex; + __ movq(scratch, StackOperandForReturnAddress(0)); + // Assign stack space for the call arguments and receiver. + __ subq(rsp, Immediate((fast_api_call_argc + 1) * kPointerSize)); + __ movq(StackOperandForReturnAddress(0), scratch); // Write holder to stack frame. - __ movq(Operand(rsp, 1 * kPointerSize), receiver); - // Write receiver to stack frame.
- int index = stack_space; - __ movq(Operand(rsp, index-- * kPointerSize), receiver); + __ movq(args.GetArgumentOperand(kHolderIndex), receiver); + __ movq(args.GetReceiverOperand(), receiver); // Write the arguments to stack frame. for (int i = 0; i < argc; i++) { ASSERT(!receiver.is(values[i])); ASSERT(!scratch.is(values[i])); - __ movq(Operand(rsp, index-- * kPointerSize), values[i]); + __ movq(args.GetArgumentOperand(i + 1), values[i]); } - GenerateFastApiCall(masm, optimization, argc); + GenerateFastApiCall(masm, optimization, argc, true); } @@ -675,7 +679,7 @@ class CallInterceptorCompiler BASE_EMBEDDED { // Invoke function. if (can_do_fast_api_call) { - GenerateFastApiCall(masm, optimization, arguments_.immediate()); + GenerateFastApiCall(masm, optimization, arguments_.immediate(), false); } else { CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_) ? CALL_AS_FUNCTION @@ -763,9 +767,9 @@ class CallInterceptorCompiler BASE_EMBEDDED { }; -void BaseStoreStubCompiler::GenerateRestoreName(MacroAssembler* masm, - Label* label, - Handle<Name> name) { +void StoreStubCompiler::GenerateRestoreName(MacroAssembler* masm, + Label* label, + Handle<Name> name) { if (!label->is_unused()) { __ bind(label); __ Move(this->name(), name); @@ -791,7 +795,7 @@ static void GenerateCheckPropertyCell(MacroAssembler* masm, } -void BaseStoreStubCompiler::GenerateNegativeHolderLookup( +void StoreStubCompiler::GenerateNegativeHolderLookup( MacroAssembler* masm, Handle<JSObject> holder, Register holder_reg, @@ -809,19 +813,19 @@ void BaseStoreStubCompiler::GenerateNegativeHolderLookup( // Receiver_reg is preserved on jumps to miss_label, but may be destroyed if // store is successful. -void BaseStoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm, - Handle<JSObject> object, - LookupResult* lookup, - Handle<Map> transition, - Handle<Name> name, - Register receiver_reg, - Register storage_reg, - Register value_reg, - Register scratch1, - Register scratch2, - Register unused, - Label* miss_label, - Label* slow) { +void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm, + Handle<JSObject> object, + LookupResult* lookup, + Handle<Map> transition, + Handle<Name> name, + Register receiver_reg, + Register storage_reg, + Register value_reg, + Register scratch1, + Register scratch2, + Register unused, + Label* miss_label, + Label* slow) { int descriptor = transition->LastAdded(); DescriptorArray* descriptors = transition->instance_descriptors(); PropertyDetails details = descriptors->GetDetails(descriptor); @@ -830,7 +834,7 @@ void BaseStoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm, if (details.type() == CONSTANT) { Handle<Object> constant(descriptors->GetValue(descriptor), masm->isolate()); - __ CmpObject(value_reg, constant); + __ Cmp(value_reg, constant); __ j(not_equal, miss_label); } else if (FLAG_track_fields && representation.IsSmi()) { __ JumpIfNotSmi(value_reg, miss_label); @@ -842,7 +846,7 @@ void BaseStoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm, __ JumpIfNotSmi(value_reg, &heap_number); __ SmiToInteger32(scratch1, value_reg); - __ cvtlsi2sd(xmm0, scratch1); + __ Cvtlsi2sd(xmm0, scratch1); __ jmp(&do_store); __ bind(&heap_number); @@ -954,15 +958,15 @@ void BaseStoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm, // Both name_reg and receiver_reg are preserved on jumps to miss_label, // but may be destroyed if store is successful. 
-void BaseStoreStubCompiler::GenerateStoreField(MacroAssembler* masm, - Handle<JSObject> object, - LookupResult* lookup, - Register receiver_reg, - Register name_reg, - Register value_reg, - Register scratch1, - Register scratch2, - Label* miss_label) { +void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm, + Handle<JSObject> object, + LookupResult* lookup, + Register receiver_reg, + Register name_reg, + Register value_reg, + Register scratch1, + Register scratch2, + Label* miss_label) { // Stub never generated for non-global objects that require access // checks. ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded()); @@ -996,7 +1000,7 @@ void BaseStoreStubCompiler::GenerateStoreField(MacroAssembler* masm, Label do_store, heap_number; __ JumpIfNotSmi(value_reg, &heap_number); __ SmiToInteger32(scratch2, value_reg); - __ cvtlsi2sd(xmm0, scratch2); + __ Cvtlsi2sd(xmm0, scratch2); __ jmp(&do_store); __ bind(&heap_number); @@ -1107,8 +1111,13 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object, Register reg = object_reg; int depth = 0; + StackArgumentsAccessor args(rsp, kFastApiCallArguments, + ARGUMENTS_DONT_CONTAIN_RECEIVER); + const int kHolderIndex = kFastApiCallArguments - 1 - + FunctionCallbackArguments::kHolderIndex; + if (save_at_depth == depth) { - __ movq(Operand(rsp, kPCOnStackSize), object_reg); + __ movq(args.GetArgumentOperand(kHolderIndex), object_reg); } // Check the maps in the prototype chain. @@ -1168,7 +1177,7 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object, } if (save_at_depth == depth) { - __ movq(Operand(rsp, kPCOnStackSize), reg); + __ movq(args.GetArgumentOperand(kHolderIndex), reg); } // Go to the next object in the prototype chain. @@ -1200,9 +1209,9 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object, } -void BaseLoadStubCompiler::HandlerFrontendFooter(Handle<Name> name, - Label* success, - Label* miss) { +void LoadStubCompiler::HandlerFrontendFooter(Handle<Name> name, + Label* success, + Label* miss) { if (!miss->is_unused()) { __ jmp(success); __ bind(miss); @@ -1211,9 +1220,9 @@ void BaseLoadStubCompiler::HandlerFrontendFooter(Handle<Name> name, } -void BaseStoreStubCompiler::HandlerFrontendFooter(Handle<Name> name, - Label* success, - Label* miss) { +void StoreStubCompiler::HandlerFrontendFooter(Handle<Name> name, + Label* success, + Label* miss) { if (!miss->is_unused()) { __ jmp(success); GenerateRestoreName(masm(), miss, name); @@ -1222,7 +1231,7 @@ void BaseStoreStubCompiler::HandlerFrontendFooter(Handle<Name> name, } -Register BaseLoadStubCompiler::CallbackHandlerFrontend( +Register LoadStubCompiler::CallbackHandlerFrontend( Handle<JSObject> object, Register object_reg, Handle<JSObject> holder, @@ -1273,7 +1282,7 @@ Register BaseLoadStubCompiler::CallbackHandlerFrontend( } -void BaseLoadStubCompiler::NonexistentHandlerFrontend( +void LoadStubCompiler::NonexistentHandlerFrontend( Handle<JSObject> object, Handle<JSObject> last, Handle<Name> name, @@ -1293,7 +1302,7 @@ void BaseLoadStubCompiler::NonexistentHandlerFrontend( } -void BaseLoadStubCompiler::GenerateLoadField(Register reg, +void LoadStubCompiler::GenerateLoadField(Register reg, Handle<JSObject> holder, PropertyIndex field, Representation representation) { @@ -1312,26 +1321,27 @@ void BaseLoadStubCompiler::GenerateLoadField(Register reg, } -void BaseLoadStubCompiler::GenerateLoadCallback( +void LoadStubCompiler::GenerateLoadCallback( const CallOptimization& call_optimization) { GenerateFastApiCall( masm(), call_optimization, 
receiver(), scratch3(), 0, NULL); } -void BaseLoadStubCompiler::GenerateLoadCallback( +void LoadStubCompiler::GenerateLoadCallback( Register reg, Handle<ExecutableAccessorInfo> callback) { // Insert additional parameters into the stack frame above return address. ASSERT(!scratch4().is(reg)); __ PopReturnAddressTo(scratch4()); - STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 0); - STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == -1); - STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == -2); - STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == -3); - STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == -4); - STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == -5); + STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 0); + STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 1); + STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 2); + STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 3); + STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 4); + STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 5); + STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 6); __ push(receiver()); // receiver if (heap()->InNewSpace(callback->data())) { ASSERT(!scratch2().is(reg)); @@ -1349,7 +1359,7 @@ void BaseLoadStubCompiler::GenerateLoadCallback( __ push(reg); // holder __ push(name()); // name // Save a pointer to where we pushed the arguments pointer. This will be - // passed as the const ExecutableAccessorInfo& to the C++ callback. + // passed as the const PropertyAccessorInfo& to the C++ callback. Address getter_address = v8::ToCData<Address>(callback->getter()); @@ -1374,10 +1384,9 @@ void BaseLoadStubCompiler::GenerateLoadCallback( const int kArgStackSpace = 1; __ PrepareCallApiFunction(kArgStackSpace); - STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 6); - __ lea(rax, Operand(name_arg, 6 * kPointerSize)); + __ lea(rax, Operand(name_arg, 1 * kPointerSize)); - // v8::AccessorInfo::args_. + // v8::PropertyAccessorInfo::args_. __ movq(StackSpaceOperand(0), rax); // The context register (rsi) has been saved in PrepareCallApiFunction and @@ -1386,22 +1395,28 @@ void BaseLoadStubCompiler::GenerateLoadCallback( Address thunk_address = FUNCTION_ADDR(&InvokeAccessorGetterCallback); + // The name handler is counted as an argument. + StackArgumentsAccessor args(rbp, PropertyCallbackArguments::kArgsLength); + Operand return_value_operand = args.GetArgumentOperand( + PropertyCallbackArguments::kArgsLength - 1 - + PropertyCallbackArguments::kReturnValueOffset); __ CallApiFunctionAndReturn(getter_address, thunk_address, getter_arg, kStackSpace, - 6); + return_value_operand, + NULL); } -void BaseLoadStubCompiler::GenerateLoadConstant(Handle<Object> value) { +void LoadStubCompiler::GenerateLoadConstant(Handle<Object> value) { // Return the constant value. 
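The LoadObject call below becomes a plain Move: with LoadHeapObject, CmpObject and friends removed from the header, the smi-versus-heap-object dispatch presumably moves into Move itself. A sketch of that shape, inferred from the header change rather than quoted from the commit:

    void MacroAssembler::Move(Register dst, Handle<Object> source) {
      AllowDeferredHandleDereference smi_check;
      if (source->IsSmi()) {
        Move(dst, Smi::cast(*source));  // Immediate smi load.
      } else {
        MoveHeapObject(dst, source);    // New-space objects still go through
                                        // a global cell indirection.
      }
    }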
- __ LoadObject(rax, value); + __ Move(rax, value); __ ret(0); } -void BaseLoadStubCompiler::GenerateLoadInterceptor( +void LoadStubCompiler::GenerateLoadInterceptor( Register holder_reg, Handle<JSObject> object, Handle<JSObject> interceptor_holder, @@ -2183,7 +2198,7 @@ Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall( GenerateNameCheck(name, &miss); if (cell.is_null()) { - __ movq(rdx, args.GetArgumentOperand(argc - 1)); + __ movq(rdx, args.GetReceiverOperand()); __ JumpIfSmi(rdx, &miss); CheckPrototypes(Handle<JSObject>::cast(object), rdx, holder, rbx, rax, rdi, name, &miss); @@ -2196,7 +2211,7 @@ Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall( // Load the char code argument. Register code = rbx; - __ movq(code, args.GetArgumentOperand(argc)); + __ movq(code, args.GetArgumentOperand(1)); // Check the code is a smi. Label slow; @@ -2246,6 +2261,7 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall( // -- rsp[(argc + 1) * 4] : receiver // ----------------------------------- const int argc = arguments().immediate(); + StackArgumentsAccessor args(rsp, argc); // If the object is not a JSObject or we got an unexpected number of // arguments, bail out to the regular call. @@ -2257,7 +2273,7 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall( GenerateNameCheck(name, &miss); if (cell.is_null()) { - __ movq(rdx, Operand(rsp, 2 * kPointerSize)); + __ movq(rdx, args.GetReceiverOperand()); STATIC_ASSERT(kSmiTag == 0); __ JumpIfSmi(rdx, &miss); @@ -2272,7 +2288,7 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall( } // Load the (only) argument into rax. - __ movq(rax, Operand(rsp, 1 * kPointerSize)); + __ movq(rax, args.GetArgumentOperand(1)); // Check if the argument is a smi. Label smi; @@ -2339,7 +2355,7 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall( // Return the argument (when it's an already round heap number). __ bind(&already_round); - __ movq(rax, Operand(rsp, 1 * kPointerSize)); + __ movq(rax, args.GetArgumentOperand(1)); __ ret(2 * kPointerSize); // Tail call the full function. We do not have to patch the receiver @@ -2383,7 +2399,7 @@ Handle<Code> CallStubCompiler::CompileMathAbsCall( GenerateNameCheck(name, &miss); if (cell.is_null()) { - __ movq(rdx, args.GetArgumentOperand(argc - 1)); + __ movq(rdx, args.GetReceiverOperand()); __ JumpIfSmi(rdx, &miss); CheckPrototypes(Handle<JSObject>::cast(object), rdx, holder, rbx, rax, rdi, name, &miss); @@ -2394,7 +2410,7 @@ Handle<Code> CallStubCompiler::CompileMathAbsCall( GenerateLoadFunctionFromCell(cell, function, &miss); } // Load the (only) argument into rax. - __ movq(rax, args.GetArgumentOperand(argc)); + __ movq(rax, args.GetArgumentOperand(1)); // Check if the argument is a smi. Label not_smi; @@ -2424,7 +2440,7 @@ Handle<Code> CallStubCompiler::CompileMathAbsCall( // Check if the argument is a heap number and load its value. __ bind(¬_smi); __ CheckMap(rax, factory()->heap_number_map(), &slow, DONT_DO_SMI_CHECK); - __ movq(rbx, FieldOperand(rax, HeapNumber::kValueOffset)); + __ MoveDouble(rbx, FieldOperand(rax, HeapNumber::kValueOffset)); // Check the sign of the argument. If the argument is positive, // just return it. @@ -2442,7 +2458,7 @@ Handle<Code> CallStubCompiler::CompileMathAbsCall( __ bind(&negative_sign); __ xor_(rbx, rdi); __ AllocateHeapNumber(rax, rdx, &slow); - __ movq(FieldOperand(rax, HeapNumber::kValueOffset), rbx); + __ MoveDouble(FieldOperand(rax, HeapNumber::kValueOffset), rbx); __ ret(2 * kPointerSize); // Tail call the full function. 
We do not have to patch the receiver @@ -2508,7 +2524,7 @@ Handle<Code> CallStubCompiler::CompileFastApiCall( StackOperandForReturnAddress(kFastApiCallArguments * kPointerSize)); __ movq(StackOperandForReturnAddress(0), rax); - GenerateFastApiCall(masm(), optimization, argc); + GenerateFastApiCall(masm(), optimization, argc, false); __ bind(&miss); __ addq(rsp, Immediate(kFastApiCallArguments * kPointerSize)); @@ -3006,6 +3022,7 @@ void KeyedStoreStubCompiler::GenerateNameCheck(Handle<Name> name, void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm, + Register receiver, Handle<JSFunction> getter) { // ----------- S t a t e ------------- // -- rax : receiver @@ -3017,7 +3034,7 @@ void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm, if (!getter.is_null()) { // Call the JavaScript getter with the receiver on the stack. - __ push(rax); + __ push(receiver); ParameterCount actual(0); ParameterCount expected(getter); __ InvokeFunction(getter, expected, actual,