author | Ryan Dahl <ry@tinyclouds.org> | 2011-07-15 17:47:20 -0700
committer | Ryan Dahl <ry@tinyclouds.org> | 2011-07-15 17:47:20 -0700
commit | ef1be160d66b7da8bc2da857b1c33c6f680d86f1 (patch)
tree | a1ff31aa2841ebde94d9bff4b0f6692840ea811b /deps/v8/src
parent | e5564a3f29e0a818832a97c7c3b28d7c8b3b0460 (diff)
download | node-new-ef1be160d66b7da8bc2da857b1c33c6f680d86f1.tar.gz
Upgrade V8 to 3.4.12.1
Diffstat (limited to 'deps/v8/src')
128 files changed, 2317 insertions(+), 3190 deletions(-)
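A large share of the api.cc and codegen hunks that follow share one mechanical pattern: the ENABLE_LOGGING_AND_PROFILING and ENABLE_VMSTATE_TRACKING build guards are removed, so the profiler and VM-state entry points are always compiled in rather than collapsing to stub values (0, 0.0, NULL, String::Empty()). A minimal standalone sketch of that before/after shape, with hypothetical names standing in for V8's internals:

```cpp
#include <cstdio>

// Hypothetical stand-in for the profiler backend (i::CpuProfiler in V8).
static int profile_count = 3;

int GetProfilesCount() {
  // Before the upgrade the body was guarded (sketch):
  //   #ifdef ENABLE_LOGGING_AND_PROFILING
  //     return profile_count;  // real implementation
  //   #else
  //     return 0;              // stub when profiling was compiled out
  //   #endif
  // After: the guard and the stub branch are gone; the real body
  // always compiles.
  return profile_count;
}

int main() {
  std::printf("profiles: %d\n", GetProfilesCount());
  return 0;
}
```

The same unguarding repeats across CpuProfileNode, CpuProfile, HeapGraphEdge, HeapGraphNode, HeapSnapshot, and HeapProfiler in the hunks below.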
diff --git a/deps/v8/src/SConscript b/deps/v8/src/SConscript index 4b0ba16341..6b3059aea6 100755 --- a/deps/v8/src/SConscript +++ b/deps/v8/src/SConscript @@ -231,15 +231,11 @@ SOURCES = { PREPARSER_SOURCES = { 'all': Split(""" allocation.cc - bignum.cc - cached-powers.cc - conversions.cc hashmap.cc preparse-data.cc preparser.cc preparser-api.cc scanner-base.cc - strtod.cc token.cc unicode.cc utils.cc @@ -317,10 +313,7 @@ def ConfigureObjectFiles(): env.Replace(**context.flags['v8']) context.ApplyEnvOverrides(env) env['BUILDERS']['JS2C'] = Builder(action=js2c.JS2C) - if 'ENABLE_LOGGING_AND_PROFILING' in env['CPPDEFINES']: - env['BUILDERS']['Snapshot'] = Builder(action='$SOURCE $TARGET --logfile "$LOGFILE" --log-snapshot-positions') - else: - env['BUILDERS']['Snapshot'] = Builder(action='$SOURCE $TARGET') + env['BUILDERS']['Snapshot'] = Builder(action='$SOURCE $TARGET --logfile "$LOGFILE" --log-snapshot-positions') def BuildJS2CEnv(type): js2c_env = { 'TYPE': type, 'COMPRESSION': 'off' } diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc index 71a715c1bb..dc1f90c0e2 100644 --- a/deps/v8/src/api.cc +++ b/deps/v8/src/api.cc @@ -54,16 +54,11 @@ #define LOG_API(isolate, expr) LOG(isolate, ApiEntryCall(expr)) -#ifdef ENABLE_VMSTATE_TRACKING #define ENTER_V8(isolate) \ ASSERT((isolate)->IsInitialized()); \ i::VMState __state__((isolate), i::OTHER) #define LEAVE_V8(isolate) \ i::VMState __state__((isolate), i::EXTERNAL) -#else -#define ENTER_V8(isolate) ((void) 0) -#define LEAVE_V8(isolate) ((void) 0) -#endif namespace v8 { @@ -114,9 +109,7 @@ namespace v8 { static void DefaultFatalErrorHandler(const char* location, const char* message) { -#ifdef ENABLE_VMSTATE_TRACKING i::VMState __state__(i::Isolate::Current(), i::OTHER); -#endif API_Fatal(location, message); } @@ -4832,37 +4825,20 @@ void V8::RemoveMemoryAllocationCallback(MemoryAllocationCallback callback) { void V8::PauseProfiler() { -#ifdef ENABLE_LOGGING_AND_PROFILING i::Isolate* isolate = i::Isolate::Current(); isolate->logger()->PauseProfiler(); -#endif } void V8::ResumeProfiler() { -#ifdef ENABLE_LOGGING_AND_PROFILING i::Isolate* isolate = i::Isolate::Current(); isolate->logger()->ResumeProfiler(); -#endif } bool V8::IsProfilerPaused() { -#ifdef ENABLE_LOGGING_AND_PROFILING i::Isolate* isolate = i::Isolate::Current(); return isolate->logger()->IsProfilerPaused(); -#else - return true; -#endif -} - - -int V8::GetLogLines(int from_pos, char* dest_buf, int max_size) { -#ifdef ENABLE_LOGGING_AND_PROFILING - ASSERT(max_size >= kMinimumSizeForLogLinesBuffer); - return LOGGER->GetLogLines(from_pos, dest_buf, max_size); -#endif - return 0; } @@ -5327,7 +5303,6 @@ Local<Context> Debug::GetDebugContext() { Handle<String> CpuProfileNode::GetFunctionName() const { -#ifdef ENABLE_LOGGING_AND_PROFILING i::Isolate* isolate = i::Isolate::Current(); IsDeadCheck(isolate, "v8::CpuProfileNode::GetFunctionName"); const i::ProfileNode* node = reinterpret_cast<const i::ProfileNode*>(this); @@ -5340,117 +5315,77 @@ Handle<String> CpuProfileNode::GetFunctionName() const { isolate->factory()->LookupAsciiSymbol(entry->name_prefix()), isolate->factory()->LookupAsciiSymbol(entry->name())))); } -#else - return v8::String::Empty(); -#endif } Handle<String> CpuProfileNode::GetScriptResourceName() const { -#ifdef ENABLE_LOGGING_AND_PROFILING i::Isolate* isolate = i::Isolate::Current(); IsDeadCheck(isolate, "v8::CpuProfileNode::GetScriptResourceName"); const i::ProfileNode* node = reinterpret_cast<const i::ProfileNode*>(this); return 
Handle<String>(ToApi<String>(isolate->factory()->LookupAsciiSymbol( node->entry()->resource_name()))); -#else - return v8::String::Empty(); -#endif } int CpuProfileNode::GetLineNumber() const { -#ifdef ENABLE_LOGGING_AND_PROFILING i::Isolate* isolate = i::Isolate::Current(); IsDeadCheck(isolate, "v8::CpuProfileNode::GetLineNumber"); return reinterpret_cast<const i::ProfileNode*>(this)->entry()->line_number(); -#else - return 0; -#endif } double CpuProfileNode::GetTotalTime() const { -#ifdef ENABLE_LOGGING_AND_PROFILING i::Isolate* isolate = i::Isolate::Current(); IsDeadCheck(isolate, "v8::CpuProfileNode::GetTotalTime"); return reinterpret_cast<const i::ProfileNode*>(this)->GetTotalMillis(); -#else - return 0.0; -#endif } double CpuProfileNode::GetSelfTime() const { -#ifdef ENABLE_LOGGING_AND_PROFILING i::Isolate* isolate = i::Isolate::Current(); IsDeadCheck(isolate, "v8::CpuProfileNode::GetSelfTime"); return reinterpret_cast<const i::ProfileNode*>(this)->GetSelfMillis(); -#else - return 0.0; -#endif } double CpuProfileNode::GetTotalSamplesCount() const { -#ifdef ENABLE_LOGGING_AND_PROFILING i::Isolate* isolate = i::Isolate::Current(); IsDeadCheck(isolate, "v8::CpuProfileNode::GetTotalSamplesCount"); return reinterpret_cast<const i::ProfileNode*>(this)->total_ticks(); -#else - return 0.0; -#endif } double CpuProfileNode::GetSelfSamplesCount() const { -#ifdef ENABLE_LOGGING_AND_PROFILING i::Isolate* isolate = i::Isolate::Current(); IsDeadCheck(isolate, "v8::CpuProfileNode::GetSelfSamplesCount"); return reinterpret_cast<const i::ProfileNode*>(this)->self_ticks(); -#else - return 0.0; -#endif } unsigned CpuProfileNode::GetCallUid() const { -#ifdef ENABLE_LOGGING_AND_PROFILING i::Isolate* isolate = i::Isolate::Current(); IsDeadCheck(isolate, "v8::CpuProfileNode::GetCallUid"); return reinterpret_cast<const i::ProfileNode*>(this)->entry()->GetCallUid(); -#else - return 0; -#endif } int CpuProfileNode::GetChildrenCount() const { -#ifdef ENABLE_LOGGING_AND_PROFILING i::Isolate* isolate = i::Isolate::Current(); IsDeadCheck(isolate, "v8::CpuProfileNode::GetChildrenCount"); return reinterpret_cast<const i::ProfileNode*>(this)->children()->length(); -#else - return 0; -#endif } const CpuProfileNode* CpuProfileNode::GetChild(int index) const { -#ifdef ENABLE_LOGGING_AND_PROFILING i::Isolate* isolate = i::Isolate::Current(); IsDeadCheck(isolate, "v8::CpuProfileNode::GetChild"); const i::ProfileNode* child = reinterpret_cast<const i::ProfileNode*>(this)->children()->at(index); return reinterpret_cast<const CpuProfileNode*>(child); -#else - return NULL; -#endif } void CpuProfile::Delete() { -#ifdef ENABLE_LOGGING_AND_PROFILING i::Isolate* isolate = i::Isolate::Current(); IsDeadCheck(isolate, "v8::CpuProfile::Delete"); i::CpuProfiler::DeleteProfile(reinterpret_cast<i::CpuProfile*>(this)); @@ -5459,153 +5394,109 @@ void CpuProfile::Delete() { // If this was the last profile, clean up all accessory data as well. 
i::CpuProfiler::DeleteAllProfiles(); } -#endif } unsigned CpuProfile::GetUid() const { -#ifdef ENABLE_LOGGING_AND_PROFILING i::Isolate* isolate = i::Isolate::Current(); IsDeadCheck(isolate, "v8::CpuProfile::GetUid"); return reinterpret_cast<const i::CpuProfile*>(this)->uid(); -#else - return 0; -#endif } Handle<String> CpuProfile::GetTitle() const { -#ifdef ENABLE_LOGGING_AND_PROFILING i::Isolate* isolate = i::Isolate::Current(); IsDeadCheck(isolate, "v8::CpuProfile::GetTitle"); const i::CpuProfile* profile = reinterpret_cast<const i::CpuProfile*>(this); return Handle<String>(ToApi<String>(isolate->factory()->LookupAsciiSymbol( profile->title()))); -#else - return v8::String::Empty(); -#endif } const CpuProfileNode* CpuProfile::GetBottomUpRoot() const { -#ifdef ENABLE_LOGGING_AND_PROFILING i::Isolate* isolate = i::Isolate::Current(); IsDeadCheck(isolate, "v8::CpuProfile::GetBottomUpRoot"); const i::CpuProfile* profile = reinterpret_cast<const i::CpuProfile*>(this); return reinterpret_cast<const CpuProfileNode*>(profile->bottom_up()->root()); -#else - return NULL; -#endif } const CpuProfileNode* CpuProfile::GetTopDownRoot() const { -#ifdef ENABLE_LOGGING_AND_PROFILING i::Isolate* isolate = i::Isolate::Current(); IsDeadCheck(isolate, "v8::CpuProfile::GetTopDownRoot"); const i::CpuProfile* profile = reinterpret_cast<const i::CpuProfile*>(this); return reinterpret_cast<const CpuProfileNode*>(profile->top_down()->root()); -#else - return NULL; -#endif } int CpuProfiler::GetProfilesCount() { -#ifdef ENABLE_LOGGING_AND_PROFILING i::Isolate* isolate = i::Isolate::Current(); IsDeadCheck(isolate, "v8::CpuProfiler::GetProfilesCount"); return i::CpuProfiler::GetProfilesCount(); -#else - return 0; -#endif } const CpuProfile* CpuProfiler::GetProfile(int index, Handle<Value> security_token) { -#ifdef ENABLE_LOGGING_AND_PROFILING i::Isolate* isolate = i::Isolate::Current(); IsDeadCheck(isolate, "v8::CpuProfiler::GetProfile"); return reinterpret_cast<const CpuProfile*>( i::CpuProfiler::GetProfile( security_token.IsEmpty() ? NULL : *Utils::OpenHandle(*security_token), index)); -#else - return NULL; -#endif } const CpuProfile* CpuProfiler::FindProfile(unsigned uid, Handle<Value> security_token) { -#ifdef ENABLE_LOGGING_AND_PROFILING i::Isolate* isolate = i::Isolate::Current(); IsDeadCheck(isolate, "v8::CpuProfiler::FindProfile"); return reinterpret_cast<const CpuProfile*>( i::CpuProfiler::FindProfile( security_token.IsEmpty() ? NULL : *Utils::OpenHandle(*security_token), uid)); -#else - return NULL; -#endif } void CpuProfiler::StartProfiling(Handle<String> title) { -#ifdef ENABLE_LOGGING_AND_PROFILING i::Isolate* isolate = i::Isolate::Current(); IsDeadCheck(isolate, "v8::CpuProfiler::StartProfiling"); i::CpuProfiler::StartProfiling(*Utils::OpenHandle(*title)); -#endif } const CpuProfile* CpuProfiler::StopProfiling(Handle<String> title, Handle<Value> security_token) { -#ifdef ENABLE_LOGGING_AND_PROFILING i::Isolate* isolate = i::Isolate::Current(); IsDeadCheck(isolate, "v8::CpuProfiler::StopProfiling"); return reinterpret_cast<const CpuProfile*>( i::CpuProfiler::StopProfiling( security_token.IsEmpty() ? 
NULL : *Utils::OpenHandle(*security_token), *Utils::OpenHandle(*title))); -#else - return NULL; -#endif } void CpuProfiler::DeleteAllProfiles() { -#ifdef ENABLE_LOGGING_AND_PROFILING i::Isolate* isolate = i::Isolate::Current(); IsDeadCheck(isolate, "v8::CpuProfiler::DeleteAllProfiles"); i::CpuProfiler::DeleteAllProfiles(); -#endif } -#ifdef ENABLE_LOGGING_AND_PROFILING static i::HeapGraphEdge* ToInternal(const HeapGraphEdge* edge) { return const_cast<i::HeapGraphEdge*>( reinterpret_cast<const i::HeapGraphEdge*>(edge)); } -#endif HeapGraphEdge::Type HeapGraphEdge::GetType() const { -#ifdef ENABLE_LOGGING_AND_PROFILING i::Isolate* isolate = i::Isolate::Current(); IsDeadCheck(isolate, "v8::HeapGraphEdge::GetType"); return static_cast<HeapGraphEdge::Type>(ToInternal(this)->type()); -#else - return static_cast<HeapGraphEdge::Type>(0); -#endif } Handle<Value> HeapGraphEdge::GetName() const { -#ifdef ENABLE_LOGGING_AND_PROFILING i::Isolate* isolate = i::Isolate::Current(); IsDeadCheck(isolate, "v8::HeapGraphEdge::GetName"); i::HeapGraphEdge* edge = ToInternal(this); @@ -5622,166 +5513,112 @@ Handle<Value> HeapGraphEdge::GetName() const { edge->index()))); default: UNREACHABLE(); } -#endif return v8::Undefined(); } const HeapGraphNode* HeapGraphEdge::GetFromNode() const { -#ifdef ENABLE_LOGGING_AND_PROFILING i::Isolate* isolate = i::Isolate::Current(); IsDeadCheck(isolate, "v8::HeapGraphEdge::GetFromNode"); const i::HeapEntry* from = ToInternal(this)->From(); return reinterpret_cast<const HeapGraphNode*>(from); -#else - return NULL; -#endif } const HeapGraphNode* HeapGraphEdge::GetToNode() const { -#ifdef ENABLE_LOGGING_AND_PROFILING i::Isolate* isolate = i::Isolate::Current(); IsDeadCheck(isolate, "v8::HeapGraphEdge::GetToNode"); const i::HeapEntry* to = ToInternal(this)->to(); return reinterpret_cast<const HeapGraphNode*>(to); -#else - return NULL; -#endif } -#ifdef ENABLE_LOGGING_AND_PROFILING static i::HeapEntry* ToInternal(const HeapGraphNode* entry) { return const_cast<i::HeapEntry*>( reinterpret_cast<const i::HeapEntry*>(entry)); } -#endif HeapGraphNode::Type HeapGraphNode::GetType() const { -#ifdef ENABLE_LOGGING_AND_PROFILING i::Isolate* isolate = i::Isolate::Current(); IsDeadCheck(isolate, "v8::HeapGraphNode::GetType"); return static_cast<HeapGraphNode::Type>(ToInternal(this)->type()); -#else - return static_cast<HeapGraphNode::Type>(0); -#endif } Handle<String> HeapGraphNode::GetName() const { -#ifdef ENABLE_LOGGING_AND_PROFILING i::Isolate* isolate = i::Isolate::Current(); IsDeadCheck(isolate, "v8::HeapGraphNode::GetName"); return Handle<String>(ToApi<String>(isolate->factory()->LookupAsciiSymbol( ToInternal(this)->name()))); -#else - return v8::String::Empty(); -#endif } uint64_t HeapGraphNode::GetId() const { -#ifdef ENABLE_LOGGING_AND_PROFILING i::Isolate* isolate = i::Isolate::Current(); IsDeadCheck(isolate, "v8::HeapGraphNode::GetId"); return ToInternal(this)->id(); -#else - return 0; -#endif } int HeapGraphNode::GetSelfSize() const { -#ifdef ENABLE_LOGGING_AND_PROFILING i::Isolate* isolate = i::Isolate::Current(); IsDeadCheck(isolate, "v8::HeapGraphNode::GetSelfSize"); return ToInternal(this)->self_size(); -#else - return 0; -#endif } int HeapGraphNode::GetRetainedSize(bool exact) const { -#ifdef ENABLE_LOGGING_AND_PROFILING i::Isolate* isolate = i::Isolate::Current(); IsDeadCheck(isolate, "v8::HeapSnapshot::GetRetainedSize"); return ToInternal(this)->RetainedSize(exact); -#else - return 0; -#endif } int HeapGraphNode::GetChildrenCount() const { -#ifdef 
ENABLE_LOGGING_AND_PROFILING i::Isolate* isolate = i::Isolate::Current(); IsDeadCheck(isolate, "v8::HeapSnapshot::GetChildrenCount"); return ToInternal(this)->children().length(); -#else - return 0; -#endif } const HeapGraphEdge* HeapGraphNode::GetChild(int index) const { -#ifdef ENABLE_LOGGING_AND_PROFILING i::Isolate* isolate = i::Isolate::Current(); IsDeadCheck(isolate, "v8::HeapSnapshot::GetChild"); return reinterpret_cast<const HeapGraphEdge*>( &ToInternal(this)->children()[index]); -#else - return NULL; -#endif } int HeapGraphNode::GetRetainersCount() const { -#ifdef ENABLE_LOGGING_AND_PROFILING i::Isolate* isolate = i::Isolate::Current(); IsDeadCheck(isolate, "v8::HeapSnapshot::GetRetainersCount"); return ToInternal(this)->retainers().length(); -#else - return 0; -#endif } const HeapGraphEdge* HeapGraphNode::GetRetainer(int index) const { -#ifdef ENABLE_LOGGING_AND_PROFILING i::Isolate* isolate = i::Isolate::Current(); IsDeadCheck(isolate, "v8::HeapSnapshot::GetRetainer"); return reinterpret_cast<const HeapGraphEdge*>( ToInternal(this)->retainers()[index]); -#else - return NULL; -#endif } const HeapGraphNode* HeapGraphNode::GetDominatorNode() const { -#ifdef ENABLE_LOGGING_AND_PROFILING i::Isolate* isolate = i::Isolate::Current(); IsDeadCheck(isolate, "v8::HeapSnapshot::GetDominatorNode"); return reinterpret_cast<const HeapGraphNode*>(ToInternal(this)->dominator()); -#else - return NULL; -#endif } -#ifdef ENABLE_LOGGING_AND_PROFILING static i::HeapSnapshot* ToInternal(const HeapSnapshot* snapshot) { return const_cast<i::HeapSnapshot*>( reinterpret_cast<const i::HeapSnapshot*>(snapshot)); } -#endif void HeapSnapshot::Delete() { -#ifdef ENABLE_LOGGING_AND_PROFILING i::Isolate* isolate = i::Isolate::Current(); IsDeadCheck(isolate, "v8::HeapSnapshot::Delete"); if (i::HeapProfiler::GetSnapshotsCount() > 1) { @@ -5790,93 +5627,63 @@ void HeapSnapshot::Delete() { // If this is the last snapshot, clean up all accessory data as well. 
i::HeapProfiler::DeleteAllSnapshots(); } -#endif } HeapSnapshot::Type HeapSnapshot::GetType() const { -#ifdef ENABLE_LOGGING_AND_PROFILING i::Isolate* isolate = i::Isolate::Current(); IsDeadCheck(isolate, "v8::HeapSnapshot::GetType"); return static_cast<HeapSnapshot::Type>(ToInternal(this)->type()); -#else - return static_cast<HeapSnapshot::Type>(0); -#endif } unsigned HeapSnapshot::GetUid() const { -#ifdef ENABLE_LOGGING_AND_PROFILING i::Isolate* isolate = i::Isolate::Current(); IsDeadCheck(isolate, "v8::HeapSnapshot::GetUid"); return ToInternal(this)->uid(); -#else - return 0; -#endif } Handle<String> HeapSnapshot::GetTitle() const { -#ifdef ENABLE_LOGGING_AND_PROFILING i::Isolate* isolate = i::Isolate::Current(); IsDeadCheck(isolate, "v8::HeapSnapshot::GetTitle"); return Handle<String>(ToApi<String>(isolate->factory()->LookupAsciiSymbol( ToInternal(this)->title()))); -#else - return v8::String::Empty(); -#endif } const HeapGraphNode* HeapSnapshot::GetRoot() const { -#ifdef ENABLE_LOGGING_AND_PROFILING i::Isolate* isolate = i::Isolate::Current(); IsDeadCheck(isolate, "v8::HeapSnapshot::GetHead"); return reinterpret_cast<const HeapGraphNode*>(ToInternal(this)->root()); -#else - return 0; -#endif } const HeapGraphNode* HeapSnapshot::GetNodeById(uint64_t id) const { -#ifdef ENABLE_LOGGING_AND_PROFILING i::Isolate* isolate = i::Isolate::Current(); IsDeadCheck(isolate, "v8::HeapSnapshot::GetNodeById"); return reinterpret_cast<const HeapGraphNode*>( ToInternal(this)->GetEntryById(id)); -#else - return NULL; -#endif } int HeapSnapshot::GetNodesCount() const { -#ifdef ENABLE_LOGGING_AND_PROFILING i::Isolate* isolate = i::Isolate::Current(); IsDeadCheck(isolate, "v8::HeapSnapshot::GetNodesCount"); return ToInternal(this)->entries()->length(); -#else - return 0; -#endif } const HeapGraphNode* HeapSnapshot::GetNode(int index) const { -#ifdef ENABLE_LOGGING_AND_PROFILING i::Isolate* isolate = i::Isolate::Current(); IsDeadCheck(isolate, "v8::HeapSnapshot::GetNode"); return reinterpret_cast<const HeapGraphNode*>( ToInternal(this)->entries()->at(index)); -#else - return 0; -#endif } void HeapSnapshot::Serialize(OutputStream* stream, HeapSnapshot::SerializationFormat format) const { -#ifdef ENABLE_LOGGING_AND_PROFILING i::Isolate* isolate = i::Isolate::Current(); IsDeadCheck(isolate, "v8::HeapSnapshot::Serialize"); ApiCheck(format == kJSON, @@ -5890,49 +5697,35 @@ void HeapSnapshot::Serialize(OutputStream* stream, "Invalid stream chunk size"); i::HeapSnapshotJSONSerializer serializer(ToInternal(this)); serializer.Serialize(stream); -#endif } int HeapProfiler::GetSnapshotsCount() { -#ifdef ENABLE_LOGGING_AND_PROFILING i::Isolate* isolate = i::Isolate::Current(); IsDeadCheck(isolate, "v8::HeapProfiler::GetSnapshotsCount"); return i::HeapProfiler::GetSnapshotsCount(); -#else - return 0; -#endif } const HeapSnapshot* HeapProfiler::GetSnapshot(int index) { -#ifdef ENABLE_LOGGING_AND_PROFILING i::Isolate* isolate = i::Isolate::Current(); IsDeadCheck(isolate, "v8::HeapProfiler::GetSnapshot"); return reinterpret_cast<const HeapSnapshot*>( i::HeapProfiler::GetSnapshot(index)); -#else - return NULL; -#endif } const HeapSnapshot* HeapProfiler::FindSnapshot(unsigned uid) { -#ifdef ENABLE_LOGGING_AND_PROFILING i::Isolate* isolate = i::Isolate::Current(); IsDeadCheck(isolate, "v8::HeapProfiler::FindSnapshot"); return reinterpret_cast<const HeapSnapshot*>( i::HeapProfiler::FindSnapshot(uid)); -#else - return NULL; -#endif } const HeapSnapshot* HeapProfiler::TakeSnapshot(Handle<String> title, HeapSnapshot::Type type, 
ActivityControl* control) { -#ifdef ENABLE_LOGGING_AND_PROFILING i::Isolate* isolate = i::Isolate::Current(); IsDeadCheck(isolate, "v8::HeapProfiler::TakeSnapshot"); i::HeapSnapshot::Type internal_type = i::HeapSnapshot::kFull; @@ -5946,27 +5739,20 @@ const HeapSnapshot* HeapProfiler::TakeSnapshot(Handle<String> title, return reinterpret_cast<const HeapSnapshot*>( i::HeapProfiler::TakeSnapshot( *Utils::OpenHandle(*title), internal_type, control)); -#else - return NULL; -#endif } void HeapProfiler::DeleteAllSnapshots() { -#ifdef ENABLE_LOGGING_AND_PROFILING i::Isolate* isolate = i::Isolate::Current(); IsDeadCheck(isolate, "v8::HeapProfiler::DeleteAllSnapshots"); i::HeapProfiler::DeleteAllSnapshots(); -#endif } void HeapProfiler::DefineWrapperClass(uint16_t class_id, WrapperInfoCallback callback) { -#ifdef ENABLE_LOGGING_AND_PROFILING i::Isolate::Current()->heap_profiler()->DefineWrapperClass(class_id, callback); -#endif } diff --git a/deps/v8/src/arm/code-stubs-arm.cc b/deps/v8/src/arm/code-stubs-arm.cc index 94572c92e9..ab7c6f247a 100644 --- a/deps/v8/src/arm/code-stubs-arm.cc +++ b/deps/v8/src/arm/code-stubs-arm.cc @@ -304,12 +304,6 @@ class ConvertToDoubleStub : public CodeStub { } void Generate(MacroAssembler* masm); - - const char* GetName() { return "ConvertToDoubleStub"; } - -#ifdef DEBUG - void Print() { PrintF("ConvertToDoubleStub\n"); } -#endif }; @@ -1689,25 +1683,17 @@ void ToBooleanStub::Generate(MacroAssembler* masm) { } -const char* UnaryOpStub::GetName() { - if (name_ != NULL) return name_; - const int kMaxNameLength = 100; - name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray( - kMaxNameLength); - if (name_ == NULL) return "OOM"; +void UnaryOpStub::PrintName(StringStream* stream) { const char* op_name = Token::Name(op_); const char* overwrite_name = NULL; // Make g++ happy. switch (mode_) { case UNARY_NO_OVERWRITE: overwrite_name = "Alloc"; break; case UNARY_OVERWRITE: overwrite_name = "Overwrite"; break; } - - OS::SNPrintF(Vector<char>(name_, kMaxNameLength), - "UnaryOpStub_%s_%s_%s", - op_name, - overwrite_name, - UnaryOpIC::GetName(operand_type_)); - return name_; + stream->Add("UnaryOpStub_%s_%s_%s", + op_name, + overwrite_name, + UnaryOpIC::GetName(operand_type_)); } @@ -2043,12 +2029,7 @@ void BinaryOpStub::Generate(MacroAssembler* masm) { } -const char* BinaryOpStub::GetName() { - if (name_ != NULL) return name_; - const int kMaxNameLength = 100; - name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray( - kMaxNameLength); - if (name_ == NULL) return "OOM"; +void BinaryOpStub::PrintName(StringStream* stream) { const char* op_name = Token::Name(op_); const char* overwrite_name; switch (mode_) { @@ -2057,13 +2038,10 @@ const char* BinaryOpStub::GetName() { case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break; default: overwrite_name = "UnknownOverwrite"; break; } - - OS::SNPrintF(Vector<char>(name_, kMaxNameLength), - "BinaryOpStub_%s_%s_%s", - op_name, - overwrite_name, - BinaryOpIC::GetName(operands_type_)); - return name_; + stream->Add("BinaryOpStub_%s_%s_%s", + op_name, + overwrite_name, + BinaryOpIC::GetName(operands_type_)); } @@ -3568,7 +3546,6 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { // Setup frame pointer for the frame to be pushed. __ add(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset)); -#ifdef ENABLE_LOGGING_AND_PROFILING // If this is the outermost JS call, set js_entry_sp value. 
Label non_outermost_js; ExternalReference js_entry_sp(Isolate::k_js_entry_sp_address, isolate); @@ -3584,7 +3561,6 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { __ mov(ip, Operand(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME))); __ bind(&cont); __ push(ip); -#endif // Call a faked try-block that does the invoke. __ bl(&invoke); @@ -3645,7 +3621,6 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { __ PopTryHandler(); __ bind(&exit); // r0 holds result -#ifdef ENABLE_LOGGING_AND_PROFILING // Check if the current stack frame is marked as the outermost JS frame. Label non_outermost_js_2; __ pop(r5); @@ -3655,7 +3630,6 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { __ mov(r5, Operand(ExternalReference(js_entry_sp))); __ str(r6, MemOperand(r5)); __ bind(&non_outermost_js_2); -#endif // Restore the top frame descriptors from the stack. __ pop(r3); @@ -4755,16 +4729,9 @@ void CallFunctionStub::Generate(MacroAssembler* masm) { // Unfortunately you have to run without snapshots to see most of these // names in the profile since most compare stubs end up in the snapshot. -const char* CompareStub::GetName() { +void CompareStub::PrintName(StringStream* stream) { ASSERT((lhs_.is(r0) && rhs_.is(r1)) || (lhs_.is(r1) && rhs_.is(r0))); - - if (name_ != NULL) return name_; - const int kMaxNameLength = 100; - name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray( - kMaxNameLength); - if (name_ == NULL) return "OOM"; - const char* cc_name; switch (cc_) { case lt: cc_name = "LT"; break; @@ -4775,40 +4742,14 @@ const char* CompareStub::GetName() { case ne: cc_name = "NE"; break; default: cc_name = "UnknownCondition"; break; } - - const char* lhs_name = lhs_.is(r0) ? "_r0" : "_r1"; - const char* rhs_name = rhs_.is(r0) ? "_r0" : "_r1"; - - const char* strict_name = ""; - if (strict_ && (cc_ == eq || cc_ == ne)) { - strict_name = "_STRICT"; - } - - const char* never_nan_nan_name = ""; - if (never_nan_nan_ && (cc_ == eq || cc_ == ne)) { - never_nan_nan_name = "_NO_NAN"; - } - - const char* include_number_compare_name = ""; - if (!include_number_compare_) { - include_number_compare_name = "_NO_NUMBER"; - } - - const char* include_smi_compare_name = ""; - if (!include_smi_compare_) { - include_smi_compare_name = "_NO_SMI"; - } - - OS::SNPrintF(Vector<char>(name_, kMaxNameLength), - "CompareStub_%s%s%s%s%s%s", - cc_name, - lhs_name, - rhs_name, - strict_name, - never_nan_nan_name, - include_number_compare_name, - include_smi_compare_name); - return name_; + bool is_equality = cc_ == eq || cc_ == ne; + stream->Add("CompareStub_%s", cc_name); + stream->Add(lhs_.is(r0) ? "_r0" : "_r1"); + stream->Add(rhs_.is(r0) ? "_r0" : "_r1"); + if (strict_ && is_equality) stream->Add("_STRICT"); + if (never_nan_nan_ && is_equality) stream->Add("_NO_NAN"); + if (!include_number_compare_) stream->Add("_NO_NUMBER"); + if (!include_smi_compare_) stream->Add("_NO_SMI"); } diff --git a/deps/v8/src/arm/code-stubs-arm.h b/deps/v8/src/arm/code-stubs-arm.h index 7427351308..557f7e6d41 100644 --- a/deps/v8/src/arm/code-stubs-arm.h +++ b/deps/v8/src/arm/code-stubs-arm.h @@ -65,8 +65,7 @@ class UnaryOpStub: public CodeStub { UnaryOpIC::TypeInfo operand_type = UnaryOpIC::UNINITIALIZED) : op_(op), mode_(mode), - operand_type_(operand_type), - name_(NULL) { + operand_type_(operand_type) { } private: @@ -76,19 +75,7 @@ class UnaryOpStub: public CodeStub { // Operand type information determined at runtime. 
UnaryOpIC::TypeInfo operand_type_; - char* name_; - - const char* GetName(); - -#ifdef DEBUG - void Print() { - PrintF("UnaryOpStub %d (op %s), (mode %d, runtime_type_info %s)\n", - MinorKey(), - Token::String(op_), - static_cast<int>(mode_), - UnaryOpIC::GetName(operand_type_)); - } -#endif + virtual void PrintName(StringStream* stream); class ModeBits: public BitField<UnaryOverwriteMode, 0, 1> {}; class OpBits: public BitField<Token::Value, 1, 7> {}; @@ -142,8 +129,7 @@ class BinaryOpStub: public CodeStub { : op_(op), mode_(mode), operands_type_(BinaryOpIC::UNINITIALIZED), - result_type_(BinaryOpIC::UNINITIALIZED), - name_(NULL) { + result_type_(BinaryOpIC::UNINITIALIZED) { use_vfp3_ = CpuFeatures::IsSupported(VFP3); ASSERT(OpBits::is_valid(Token::NUM_TOKENS)); } @@ -156,8 +142,7 @@ class BinaryOpStub: public CodeStub { mode_(ModeBits::decode(key)), use_vfp3_(VFP3Bits::decode(key)), operands_type_(operands_type), - result_type_(result_type), - name_(NULL) { } + result_type_(result_type) { } private: enum SmiCodeGenerateHeapNumberResults { @@ -173,20 +158,7 @@ class BinaryOpStub: public CodeStub { BinaryOpIC::TypeInfo operands_type_; BinaryOpIC::TypeInfo result_type_; - char* name_; - - const char* GetName(); - -#ifdef DEBUG - void Print() { - PrintF("BinaryOpStub %d (op %s), " - "(mode %d, runtime_type_info %s)\n", - MinorKey(), - Token::String(op_), - static_cast<int>(mode_), - BinaryOpIC::GetName(operands_type_)); - } -#endif + virtual void PrintName(StringStream* stream); // Minor key encoding in 16 bits RRRTTTVOOOOOOOMM. class ModeBits: public BitField<OverwriteMode, 0, 2> {}; @@ -370,12 +342,6 @@ class WriteInt32ToHeapNumberStub : public CodeStub { } void Generate(MacroAssembler* masm); - - const char* GetName() { return "WriteInt32ToHeapNumberStub"; } - -#ifdef DEBUG - void Print() { PrintF("WriteInt32ToHeapNumberStub\n"); } -#endif }; @@ -402,8 +368,6 @@ class NumberToStringStub: public CodeStub { int MinorKey() { return 0; } void Generate(MacroAssembler* masm); - - const char* GetName() { return "NumberToStringStub"; } }; @@ -421,8 +385,6 @@ class RegExpCEntryStub: public CodeStub { int MinorKey() { return 0; } bool NeedsImmovableCode() { return true; } - - const char* GetName() { return "RegExpCEntryStub"; } }; @@ -443,8 +405,6 @@ class DirectCEntryStub: public CodeStub { int MinorKey() { return 0; } bool NeedsImmovableCode() { return true; } - - const char* GetName() { return "DirectCEntryStub"; } }; @@ -627,13 +587,6 @@ class StringDictionaryLookupStub: public CodeStub { StringDictionary::kHeaderSize + StringDictionary::kElementsStartIndex * kPointerSize; - -#ifdef DEBUG - void Print() { - PrintF("StringDictionaryLookupStub\n"); - } -#endif - Major MajorKey() { return StringDictionaryNegativeLookup; } int MinorKey() { diff --git a/deps/v8/src/arm/codegen-arm.h b/deps/v8/src/arm/codegen-arm.h index 01aa8052e1..d27982abac 100644 --- a/deps/v8/src/arm/codegen-arm.h +++ b/deps/v8/src/arm/codegen-arm.h @@ -58,9 +58,7 @@ class CodeGenerator: public AstVisitor { // Print the code after compiling it. 
static void PrintCode(Handle<Code> code, CompilationInfo* info); -#ifdef ENABLE_LOGGING_AND_PROFILING static bool ShouldGenerateLog(Expression* type); -#endif static void SetFunctionInfo(Handle<JSFunction> fun, FunctionLiteral* lit, diff --git a/deps/v8/src/arm/full-codegen-arm.cc b/deps/v8/src/arm/full-codegen-arm.cc index 4b55915e91..c3440eb3ea 100644 --- a/deps/v8/src/arm/full-codegen-arm.cc +++ b/deps/v8/src/arm/full-codegen-arm.cc @@ -776,7 +776,7 @@ void FullCodeGenerator::EmitDeclaration(Variable* variable, // IDs for bailouts from optimized code. ASSERT(prop->obj()->AsVariableProxy() != NULL); { AccumulatorValueContext for_object(this); - EmitVariableLoad(prop->obj()->AsVariableProxy()->var()); + EmitVariableLoad(prop->obj()->AsVariableProxy()); } __ push(r0); @@ -1113,7 +1113,7 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info, void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) { Comment cmnt(masm_, "[ VariableProxy"); - EmitVariableLoad(expr->var()); + EmitVariableLoad(expr); } @@ -1262,7 +1262,11 @@ void FullCodeGenerator::EmitDynamicLoadFromSlotFastCase( } -void FullCodeGenerator::EmitVariableLoad(Variable* var) { +void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) { + // Record position before possible IC call. + SetSourcePosition(proxy->position()); + Variable* var = proxy->var(); + // Three cases: non-this global variables, lookup slots, and all other // types of slots. Slot* slot = var->AsSlot(); @@ -1593,7 +1597,7 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) { { AccumulatorValueContext context(this); switch (assign_type) { case VARIABLE: - EmitVariableLoad(expr->target()->AsVariableProxy()->var()); + EmitVariableLoad(expr->target()->AsVariableProxy()); PrepareForBailout(expr->target(), TOS_REG); break; case NAMED_PROPERTY: @@ -2772,13 +2776,12 @@ void FullCodeGenerator::EmitLog(ZoneList<Expression*>* args) { // with '%2s' (see Logger::LogRuntime for all the formats). // 2 (array): Arguments to the format string. ASSERT_EQ(args->length(), 3); -#ifdef ENABLE_LOGGING_AND_PROFILING if (CodeGenerator::ShouldGenerateLog(args->at(0))) { VisitForStackValue(args->at(1)); VisitForStackValue(args->at(2)); __ CallRuntime(Runtime::kLog, 2); } -#endif + // Finally, we're expected to leave a value on the top of the stack. __ LoadRoot(r0, Heap::kUndefinedValueRootIndex); context()->Plug(r0); @@ -3816,7 +3819,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) { if (assign_type == VARIABLE) { ASSERT(expr->expression()->AsVariableProxy()->var() != NULL); AccumulatorValueContext context(this); - EmitVariableLoad(expr->expression()->AsVariableProxy()->var()); + EmitVariableLoad(expr->expression()->AsVariableProxy()); } else { // Reserve space for result of postfix operation. if (expr->is_postfix() && !context()->IsEffect()) { diff --git a/deps/v8/src/arm/ic-arm.cc b/deps/v8/src/arm/ic-arm.cc index dea875bad4..6038153a1a 100644 --- a/deps/v8/src/arm/ic-arm.cc +++ b/deps/v8/src/arm/ic-arm.cc @@ -212,101 +212,6 @@ static void GenerateDictionaryStore(MacroAssembler* masm, } -static void GenerateNumberDictionaryLoad(MacroAssembler* masm, - Label* miss, - Register elements, - Register key, - Register result, - Register t0, - Register t1, - Register t2) { - // Register use: - // - // elements - holds the slow-case elements of the receiver on entry. - // Unchanged unless 'result' is the same register. - // - // key - holds the smi key on entry. - // Unchanged unless 'result' is the same register. 
- // - // result - holds the result on exit if the load succeeded. - // Allowed to be the same as 'key' or 'result'. - // Unchanged on bailout so 'key' or 'result' can be used - // in further computation. - // - // Scratch registers: - // - // t0 - holds the untagged key on entry and holds the hash once computed. - // - // t1 - used to hold the capacity mask of the dictionary - // - // t2 - used for the index into the dictionary. - Label done; - - // Compute the hash code from the untagged key. This must be kept in sync - // with ComputeIntegerHash in utils.h. - // - // hash = ~hash + (hash << 15); - __ mvn(t1, Operand(t0)); - __ add(t0, t1, Operand(t0, LSL, 15)); - // hash = hash ^ (hash >> 12); - __ eor(t0, t0, Operand(t0, LSR, 12)); - // hash = hash + (hash << 2); - __ add(t0, t0, Operand(t0, LSL, 2)); - // hash = hash ^ (hash >> 4); - __ eor(t0, t0, Operand(t0, LSR, 4)); - // hash = hash * 2057; - __ mov(t1, Operand(2057)); - __ mul(t0, t0, t1); - // hash = hash ^ (hash >> 16); - __ eor(t0, t0, Operand(t0, LSR, 16)); - - // Compute the capacity mask. - __ ldr(t1, FieldMemOperand(elements, NumberDictionary::kCapacityOffset)); - __ mov(t1, Operand(t1, ASR, kSmiTagSize)); // convert smi to int - __ sub(t1, t1, Operand(1)); - - // Generate an unrolled loop that performs a few probes before giving up. - static const int kProbes = 4; - for (int i = 0; i < kProbes; i++) { - // Use t2 for index calculations and keep the hash intact in t0. - __ mov(t2, t0); - // Compute the masked index: (hash + i + i * i) & mask. - if (i > 0) { - __ add(t2, t2, Operand(NumberDictionary::GetProbeOffset(i))); - } - __ and_(t2, t2, Operand(t1)); - - // Scale the index by multiplying by the element size. - ASSERT(NumberDictionary::kEntrySize == 3); - __ add(t2, t2, Operand(t2, LSL, 1)); // t2 = t2 * 3 - - // Check if the key is identical to the name. - __ add(t2, elements, Operand(t2, LSL, kPointerSizeLog2)); - __ ldr(ip, FieldMemOperand(t2, NumberDictionary::kElementsStartOffset)); - __ cmp(key, Operand(ip)); - if (i != kProbes - 1) { - __ b(eq, &done); - } else { - __ b(ne, miss); - } - } - - __ bind(&done); - // Check that the value is a normal property. - // t2: elements + (index * kPointerSize) - const int kDetailsOffset = - NumberDictionary::kElementsStartOffset + 2 * kPointerSize; - __ ldr(t1, FieldMemOperand(t2, kDetailsOffset)); - __ tst(t1, Operand(Smi::FromInt(PropertyDetails::TypeField::mask()))); - __ b(ne, miss); - - // Get the value at the masked, scaled index and return. - const int kValueOffset = - NumberDictionary::kElementsStartOffset + kPointerSize; - __ ldr(result, FieldMemOperand(t2, kValueOffset)); -} - - void LoadIC::GenerateArrayLength(MacroAssembler* masm) { // ----------- S t a t e ------------- // -- r2 : name @@ -738,7 +643,7 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) { __ b(ne, &slow_load); __ mov(r0, Operand(r2, ASR, kSmiTagSize)); // r0: untagged index - GenerateNumberDictionaryLoad(masm, &slow_load, r4, r2, r1, r0, r3, r5); + __ LoadFromNumberDictionary(&slow_load, r4, r2, r1, r0, r3, r5); __ IncrementCounter(counters->keyed_call_generic_smi_dict(), 1, r0, r3); __ jmp(&do_call); @@ -1127,7 +1032,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { __ cmp(r3, ip); __ b(ne, &slow); __ mov(r2, Operand(r0, ASR, kSmiTagSize)); - GenerateNumberDictionaryLoad(masm, &slow, r4, r0, r0, r2, r3, r5); + __ LoadFromNumberDictionary(&slow, r4, r0, r0, r2, r3, r5); __ Ret(); // Slow case, key and receiver still in r0 and r1. 
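The ic-arm.cc hunks above delete GenerateNumberDictionaryLoad; it resurfaces below as MacroAssembler::LoadFromNumberDictionary in macro-assembler-arm.cc, so both the keyed ICs and the new dictionary-element stub can share it. The assembly's comments spell out the hash it computes and insist it stay in sync with ComputeIntegerHash in utils.h. A plain C++ rendering of exactly those documented steps, with an illustrative (non-V8) driver for the four-probe lookup sequence:

```cpp
#include <cstdint>
#include <cstdio>

// The integer hash the ARM code computes instruction by instruction,
// transcribed from its own comments; per those comments it must stay
// in sync with ComputeIntegerHash in utils.h.
uint32_t ComputeIntegerHash(uint32_t hash) {
  hash = ~hash + (hash << 15);
  hash = hash ^ (hash >> 12);
  hash = hash + (hash << 2);
  hash = hash ^ (hash >> 4);
  hash = hash * 2057;
  hash = hash ^ (hash >> 16);
  return hash;
}

int main() {
  // The stub unrolls kProbes = 4 attempts, masking each probe to the
  // dictionary capacity: "(hash + i + i * i) & mask" per the comments.
  const uint32_t mask = 64 - 1;                  // illustrative capacity, not from V8
  const uint32_t hash = ComputeIntegerHash(42);  // untagged smi key 42
  for (int i = 0; i < 4; ++i) {
    std::printf("probe %d -> slot %u\n", i,
                static_cast<unsigned>((hash + i + i * i) & mask));
  }
  return 0;
}
```

Each probed slot holds a (key, value, details) triple (hence the ASSERT that NumberDictionary::kEntrySize == 3), which is why the assembly scales the masked index by three before indexing into the elements array.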
diff --git a/deps/v8/src/arm/lithium-codegen-arm.cc b/deps/v8/src/arm/lithium-codegen-arm.cc index ee36314209..dc93aea346 100644 --- a/deps/v8/src/arm/lithium-codegen-arm.cc +++ b/deps/v8/src/arm/lithium-codegen-arm.cc @@ -551,6 +551,13 @@ void LCodeGen::CallCodeGeneric(Handle<Code> code, RecordPosition(pointers->position()); __ Call(code, mode); RegisterLazyDeoptimization(instr, safepoint_mode); + + // Signal that we don't inline smi code before these stubs in the + // optimizing code generator. + if (code->kind() == Code::BINARY_OP_IC || + code->kind() == Code::COMPARE_IC) { + __ nop(); + } } @@ -1506,6 +1513,7 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) { BinaryOpStub stub(instr->op(), NO_OVERWRITE); CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); + __ nop(); // Signals no inlined code. } diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc index 08a1cb9453..320879a627 100644 --- a/deps/v8/src/arm/macro-assembler-arm.cc +++ b/deps/v8/src/arm/macro-assembler-arm.cc @@ -1343,6 +1343,100 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg, } +void MacroAssembler::LoadFromNumberDictionary(Label* miss, + Register elements, + Register key, + Register result, + Register t0, + Register t1, + Register t2) { + // Register use: + // + // elements - holds the slow-case elements of the receiver on entry. + // Unchanged unless 'result' is the same register. + // + // key - holds the smi key on entry. + // Unchanged unless 'result' is the same register. + // + // result - holds the result on exit if the load succeeded. + // Allowed to be the same as 'key' or 'result'. + // Unchanged on bailout so 'key' or 'result' can be used + // in further computation. + // + // Scratch registers: + // + // t0 - holds the untagged key on entry and holds the hash once computed. + // + // t1 - used to hold the capacity mask of the dictionary + // + // t2 - used for the index into the dictionary. + Label done; + + // Compute the hash code from the untagged key. This must be kept in sync + // with ComputeIntegerHash in utils.h. + // + // hash = ~hash + (hash << 15); + mvn(t1, Operand(t0)); + add(t0, t1, Operand(t0, LSL, 15)); + // hash = hash ^ (hash >> 12); + eor(t0, t0, Operand(t0, LSR, 12)); + // hash = hash + (hash << 2); + add(t0, t0, Operand(t0, LSL, 2)); + // hash = hash ^ (hash >> 4); + eor(t0, t0, Operand(t0, LSR, 4)); + // hash = hash * 2057; + mov(t1, Operand(2057)); + mul(t0, t0, t1); + // hash = hash ^ (hash >> 16); + eor(t0, t0, Operand(t0, LSR, 16)); + + // Compute the capacity mask. + ldr(t1, FieldMemOperand(elements, NumberDictionary::kCapacityOffset)); + mov(t1, Operand(t1, ASR, kSmiTagSize)); // convert smi to int + sub(t1, t1, Operand(1)); + + // Generate an unrolled loop that performs a few probes before giving up. + static const int kProbes = 4; + for (int i = 0; i < kProbes; i++) { + // Use t2 for index calculations and keep the hash intact in t0. + mov(t2, t0); + // Compute the masked index: (hash + i + i * i) & mask. + if (i > 0) { + add(t2, t2, Operand(NumberDictionary::GetProbeOffset(i))); + } + and_(t2, t2, Operand(t1)); + + // Scale the index by multiplying by the element size. + ASSERT(NumberDictionary::kEntrySize == 3); + add(t2, t2, Operand(t2, LSL, 1)); // t2 = t2 * 3 + + // Check if the key is identical to the name. 
+ add(t2, elements, Operand(t2, LSL, kPointerSizeLog2)); + ldr(ip, FieldMemOperand(t2, NumberDictionary::kElementsStartOffset)); + cmp(key, Operand(ip)); + if (i != kProbes - 1) { + b(eq, &done); + } else { + b(ne, miss); + } + } + + bind(&done); + // Check that the value is a normal property. + // t2: elements + (index * kPointerSize) + const int kDetailsOffset = + NumberDictionary::kElementsStartOffset + 2 * kPointerSize; + ldr(t1, FieldMemOperand(t2, kDetailsOffset)); + tst(t1, Operand(Smi::FromInt(PropertyDetails::TypeField::mask()))); + b(ne, miss); + + // Get the value at the masked, scaled index and return. + const int kValueOffset = + NumberDictionary::kElementsStartOffset + kPointerSize; + ldr(result, FieldMemOperand(t2, kValueOffset)); +} + + void MacroAssembler::AllocateInNewSpace(int object_size, Register result, Register scratch1, diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h index 1918858ebe..07281a7caf 100644 --- a/deps/v8/src/arm/macro-assembler-arm.h +++ b/deps/v8/src/arm/macro-assembler-arm.h @@ -433,6 +433,16 @@ class MacroAssembler: public Assembler { Register scratch, Label* miss); + + void LoadFromNumberDictionary(Label* miss, + Register elements, + Register key, + Register result, + Register t0, + Register t1, + Register t2); + + inline void MarkCode(NopMarkerTypes type) { nop(type); } diff --git a/deps/v8/src/arm/regexp-macro-assembler-arm.h b/deps/v8/src/arm/regexp-macro-assembler-arm.h index d771e4033f..0e653868b6 100644 --- a/deps/v8/src/arm/regexp-macro-assembler-arm.h +++ b/deps/v8/src/arm/regexp-macro-assembler-arm.h @@ -28,6 +28,9 @@ #ifndef V8_ARM_REGEXP_MACRO_ASSEMBLER_ARM_H_ #define V8_ARM_REGEXP_MACRO_ASSEMBLER_ARM_H_ +#include "arm/assembler-arm.h" +#include "arm/assembler-arm-inl.h" + namespace v8 { namespace internal { diff --git a/deps/v8/src/arm/stub-cache-arm.cc b/deps/v8/src/arm/stub-cache-arm.cc index caa6a0eef9..86e49716d3 100644 --- a/deps/v8/src/arm/stub-cache-arm.cc +++ b/deps/v8/src/arm/stub-cache-arm.cc @@ -3100,7 +3100,8 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadElement(Map* receiver_map) { // -- r1 : receiver // ----------------------------------- Code* stub; - MaybeObject* maybe_stub = ComputeSharedKeyedLoadElementStub(receiver_map); + JSObject::ElementsKind elements_kind = receiver_map->elements_kind(); + MaybeObject* maybe_stub = KeyedLoadElementStub(elements_kind).TryGetCode(); if (!maybe_stub->To(&stub)) return maybe_stub; __ DispatchMap(r1, r2, @@ -3193,7 +3194,10 @@ MaybeObject* KeyedStoreStubCompiler::CompileStoreElement(Map* receiver_map) { // -- r3 : scratch // ----------------------------------- Code* stub; - MaybeObject* maybe_stub = ComputeSharedKeyedStoreElementStub(receiver_map); + JSObject::ElementsKind elements_kind = receiver_map->elements_kind(); + bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE; + MaybeObject* maybe_stub = + KeyedStoreElementStub(is_js_array, elements_kind).TryGetCode(); if (!maybe_stub->To(&stub)) return maybe_stub; __ DispatchMap(r2, r3, @@ -3388,6 +3392,53 @@ MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) { #define __ ACCESS_MASM(masm) +void KeyedLoadStubCompiler::GenerateLoadDictionaryElement( + MacroAssembler* masm) { + // ---------- S t a t e -------------- + // -- lr : return address + // -- r0 : key + // -- r1 : receiver + // ----------------------------------- + Label slow, miss_force_generic; + + Register key = r0; + Register receiver = r1; + + __ JumpIfNotSmi(key, &miss_force_generic); + __ 
mov(r2, Operand(key, ASR, kSmiTagSize)); + __ ldr(r4, FieldMemOperand(receiver, JSObject::kElementsOffset)); + __ LoadFromNumberDictionary(&slow, r4, key, r0, r2, r3, r5); + __ Ret(); + + __ bind(&slow); + __ IncrementCounter( + masm->isolate()->counters()->keyed_load_external_array_slow(), + 1, r2, r3); + + // ---------- S t a t e -------------- + // -- lr : return address + // -- r0 : key + // -- r1 : receiver + // ----------------------------------- + Handle<Code> slow_ic = + masm->isolate()->builtins()->KeyedLoadIC_Slow(); + __ Jump(slow_ic, RelocInfo::CODE_TARGET); + + // Miss case, call the runtime. + __ bind(&miss_force_generic); + + // ---------- S t a t e -------------- + // -- lr : return address + // -- r0 : key + // -- r1 : receiver + // ----------------------------------- + + Handle<Code> miss_ic = + masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric(); + __ Jump(miss_ic, RelocInfo::CODE_TARGET); +} + + static bool IsElementTypeSigned(JSObject::ElementsKind elements_kind) { switch (elements_kind) { case JSObject::EXTERNAL_BYTE_ELEMENTS: diff --git a/deps/v8/src/code-stubs.cc b/deps/v8/src/code-stubs.cc index db57280f41..5c0ef5a4a5 100644 --- a/deps/v8/src/code-stubs.cc +++ b/deps/v8/src/code-stubs.cc @@ -61,21 +61,29 @@ void CodeStub::GenerateCode(MacroAssembler* masm) { } +SmartPointer<const char> CodeStub::GetName() { + char buffer[100]; + NoAllocationStringAllocator allocator(buffer, + static_cast<unsigned>(sizeof(buffer))); + StringStream stream(&allocator); + PrintName(&stream); + return stream.ToCString(); +} + + void CodeStub::RecordCodeGeneration(Code* code, MacroAssembler* masm) { code->set_major_key(MajorKey()); Isolate* isolate = masm->isolate(); - PROFILE(isolate, CodeCreateEvent(Logger::STUB_TAG, code, GetName())); - GDBJIT(AddCode(GDBJITInterface::STUB, GetName(), code)); + SmartPointer<const char> name = GetName(); + PROFILE(isolate, CodeCreateEvent(Logger::STUB_TAG, code, *name)); + GDBJIT(AddCode(GDBJITInterface::STUB, *name, code)); Counters* counters = isolate->counters(); counters->total_stubs_code_size()->Increment(code->instruction_size()); #ifdef ENABLE_DISASSEMBLER if (FLAG_print_code_stubs) { -#ifdef DEBUG - Print(); -#endif - code->Disassemble(GetName()); + code->Disassemble(*name); PrintF("\n"); } #endif @@ -170,7 +178,7 @@ MaybeObject* CodeStub::TryGetCode() { const char* CodeStub::MajorName(CodeStub::Major major_key, bool allow_unknown_keys) { switch (major_key) { -#define DEF_CASE(name) case name: return #name; +#define DEF_CASE(name) case name: return #name "Stub"; CODE_STUB_LIST(DEF_CASE) #undef DEF_CASE default: @@ -213,13 +221,7 @@ void ICCompareStub::Generate(MacroAssembler* masm) { } -const char* InstanceofStub::GetName() { - if (name_ != NULL) return name_; - const int kMaxNameLength = 100; - name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray( - kMaxNameLength); - if (name_ == NULL) return "OOM"; - +void InstanceofStub::PrintName(StringStream* stream) { const char* args = ""; if (HasArgsInRegisters()) { args = "_REGS"; @@ -235,33 +237,95 @@ const char* InstanceofStub::GetName() { return_true_false_object = "_TRUEFALSE"; } - OS::SNPrintF(Vector<char>(name_, kMaxNameLength), - "InstanceofStub%s%s%s", - args, - inline_check, - return_true_false_object); - return name_; + stream->Add("InstanceofStub%s%s%s", + args, + inline_check, + return_true_false_object); } -void KeyedLoadFastElementStub::Generate(MacroAssembler* masm) { - KeyedLoadStubCompiler::GenerateLoadFastElement(masm); +void 
KeyedLoadElementStub::Generate(MacroAssembler* masm) { + switch (elements_kind_) { + case JSObject::FAST_ELEMENTS: + KeyedLoadStubCompiler::GenerateLoadFastElement(masm); + break; + case JSObject::FAST_DOUBLE_ELEMENTS: + UNIMPLEMENTED(); + break; + case JSObject::EXTERNAL_BYTE_ELEMENTS: + case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS: + case JSObject::EXTERNAL_SHORT_ELEMENTS: + case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS: + case JSObject::EXTERNAL_INT_ELEMENTS: + case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS: + case JSObject::EXTERNAL_FLOAT_ELEMENTS: + case JSObject::EXTERNAL_DOUBLE_ELEMENTS: + case JSObject::EXTERNAL_PIXEL_ELEMENTS: + KeyedLoadStubCompiler::GenerateLoadExternalArray(masm, elements_kind_); + break; + case JSObject::DICTIONARY_ELEMENTS: + KeyedLoadStubCompiler::GenerateLoadDictionaryElement(masm); + break; + case JSObject::NON_STRICT_ARGUMENTS_ELEMENTS: + UNREACHABLE(); + break; + } } -void KeyedStoreFastElementStub::Generate(MacroAssembler* masm) { - KeyedStoreStubCompiler::GenerateStoreFastElement(masm, is_js_array_); +void KeyedStoreElementStub::Generate(MacroAssembler* masm) { + switch (elements_kind_) { + case JSObject::FAST_ELEMENTS: + KeyedStoreStubCompiler::GenerateStoreFastElement(masm, is_js_array_); + break; + case JSObject::FAST_DOUBLE_ELEMENTS: + UNIMPLEMENTED(); + break; + case JSObject::EXTERNAL_BYTE_ELEMENTS: + case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS: + case JSObject::EXTERNAL_SHORT_ELEMENTS: + case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS: + case JSObject::EXTERNAL_INT_ELEMENTS: + case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS: + case JSObject::EXTERNAL_FLOAT_ELEMENTS: + case JSObject::EXTERNAL_DOUBLE_ELEMENTS: + case JSObject::EXTERNAL_PIXEL_ELEMENTS: + KeyedStoreStubCompiler::GenerateStoreExternalArray(masm, elements_kind_); + break; + case JSObject::DICTIONARY_ELEMENTS: + KeyedStoreStubCompiler::GenerateStoreDictionaryElement(masm); + break; + case JSObject::NON_STRICT_ARGUMENTS_ELEMENTS: + UNREACHABLE(); + break; + } } -void KeyedLoadExternalArrayStub::Generate(MacroAssembler* masm) { - KeyedLoadStubCompiler::GenerateLoadExternalArray(masm, elements_kind_); +void ArgumentsAccessStub::PrintName(StringStream* stream) { + const char* type_name = NULL; // Make g++ happy. + switch (type_) { + case READ_ELEMENT: type_name = "ReadElement"; break; + case NEW_NON_STRICT_FAST: type_name = "NewNonStrictFast"; break; + case NEW_NON_STRICT_SLOW: type_name = "NewNonStrictSlow"; break; + case NEW_STRICT: type_name = "NewStrict"; break; + } + stream->Add("ArgumentsAccessStub_%s", type_name); } -void KeyedStoreExternalArrayStub::Generate(MacroAssembler* masm) { - KeyedStoreStubCompiler::GenerateStoreExternalArray(masm, elements_kind_); +void CallFunctionStub::PrintName(StringStream* stream) { + const char* in_loop_name = NULL; // Make g++ happy. + switch (in_loop_) { + case NOT_IN_LOOP: in_loop_name = ""; break; + case IN_LOOP: in_loop_name = "_InLoop"; break; + } + const char* flags_name = NULL; // Make g++ happy. 
+ switch (flags_) { + case NO_CALL_FUNCTION_FLAGS: flags_name = ""; break; + case RECEIVER_MIGHT_BE_IMPLICIT: flags_name = "_Implicit"; break; + } + stream->Add("CallFunctionStub_Args%d%s%s", argc_, in_loop_name, flags_name); } - } } // namespace v8::internal diff --git a/deps/v8/src/code-stubs.h b/deps/v8/src/code-stubs.h index 3a756585e5..17c245c80e 100644 --- a/deps/v8/src/code-stubs.h +++ b/deps/v8/src/code-stubs.h @@ -70,10 +70,8 @@ namespace internal { V(NumberToString) \ V(CEntry) \ V(JSEntry) \ - V(KeyedLoadFastElement) \ - V(KeyedStoreFastElement) \ - V(KeyedLoadExternalArray) \ - V(KeyedStoreExternalArray) \ + V(KeyedLoadElement) \ + V(KeyedStoreElement) \ V(DebuggerStatement) \ V(StringDictionaryNegativeLookup) @@ -183,16 +181,15 @@ class CodeStub BASE_EMBEDDED { } // Returns a name for logging/debugging purposes. - virtual const char* GetName() { return MajorName(MajorKey(), false); } + SmartPointer<const char> GetName(); + virtual void PrintName(StringStream* stream) { + stream->Add("%s", MajorName(MajorKey(), false)); + } // Returns whether the code generated for this stub needs to be allocated as // a fixed (non-moveable) code object. virtual bool NeedsImmovableCode() { return false; } - #ifdef DEBUG - virtual void Print() { PrintF("%s\n", GetName()); } -#endif - // Computes the key based on major and minor. uint32_t GetKey() { ASSERT(static_cast<int>(MajorKey()) < NUMBER_OF_IDS); @@ -274,8 +271,6 @@ class StackCheckStub : public CodeStub { void Generate(MacroAssembler* masm); private: - const char* GetName() { return "StackCheckStub"; } - Major MajorKey() { return StackCheck; } int MinorKey() { return 0; } }; @@ -290,7 +285,6 @@ class ToNumberStub: public CodeStub { private: Major MajorKey() { return ToNumber; } int MinorKey() { return 0; } - const char* GetName() { return "ToNumberStub"; } }; @@ -302,7 +296,6 @@ class FastNewClosureStub : public CodeStub { void Generate(MacroAssembler* masm); private: - const char* GetName() { return "FastNewClosureStub"; } Major MajorKey() { return FastNewClosure; } int MinorKey() { return strict_mode_; } @@ -323,7 +316,6 @@ class FastNewContextStub : public CodeStub { private: int slots_; - const char* GetName() { return "FastNewContextStub"; } Major MajorKey() { return FastNewContext; } int MinorKey() { return slots_; } }; @@ -352,7 +344,6 @@ class FastCloneShallowArrayStub : public CodeStub { Mode mode_; int length_; - const char* GetName() { return "FastCloneShallowArrayStub"; } Major MajorKey() { return FastCloneShallowArray; } int MinorKey() { ASSERT(mode_ == 0 || mode_ == 1); @@ -370,7 +361,7 @@ class InstanceofStub: public CodeStub { kReturnTrueFalseObject = 1 << 2 }; - explicit InstanceofStub(Flags flags) : flags_(flags), name_(NULL) { } + explicit InstanceofStub(Flags flags) : flags_(flags) { } static Register left(); static Register right(); @@ -393,10 +384,9 @@ class InstanceofStub: public CodeStub { return (flags_ & kReturnTrueFalseObject) != 0; } - const char* GetName(); + virtual void PrintName(StringStream* stream); Flags flags_; - char* name_; }; @@ -408,8 +398,6 @@ class MathPowStub: public CodeStub { private: virtual CodeStub::Major MajorKey() { return MathPow; } virtual int MinorKey() { return 0; } - - const char* GetName() { return "MathPowStub"; } }; @@ -476,8 +464,7 @@ class CompareStub: public CodeStub { include_number_compare_((flags & NO_NUMBER_COMPARE_IN_STUB) == 0), include_smi_compare_((flags & NO_SMI_COMPARE_IN_STUB) == 0), lhs_(lhs), - rhs_(rhs), - name_(NULL) { } + rhs_(rhs) { } CompareStub(Condition cc, 
bool strict, @@ -488,8 +475,7 @@ class CompareStub: public CodeStub { include_number_compare_((flags & NO_NUMBER_COMPARE_IN_STUB) == 0), include_smi_compare_((flags & NO_SMI_COMPARE_IN_STUB) == 0), lhs_(no_reg), - rhs_(no_reg), - name_(NULL) { } + rhs_(no_reg) { } void Generate(MacroAssembler* masm); @@ -543,26 +529,7 @@ class CompareStub: public CodeStub { // Unfortunately you have to run without snapshots to see most of these // names in the profile since most compare stubs end up in the snapshot. - char* name_; - const char* GetName(); -#ifdef DEBUG - void Print() { - PrintF("CompareStub (minor %d) (cc %d), (strict %s), " - "(never_nan_nan %s), (smi_compare %s) (number_compare %s) ", - MinorKey(), - static_cast<int>(cc_), - strict_ ? "true" : "false", - never_nan_nan_ ? "true" : "false", - include_smi_compare_ ? "inluded" : "not included", - include_number_compare_ ? "included" : "not included"); - - if (!lhs_.is(no_reg) && !rhs_.is(no_reg)) { - PrintF("(lhs r%d), (rhs r%d)\n", lhs_.code(), rhs_.code()); - } else { - PrintF("\n"); - } - } -#endif + virtual void PrintName(StringStream* stream); }; @@ -593,8 +560,6 @@ class CEntryStub : public CodeStub { int MinorKey(); bool NeedsImmovableCode(); - - const char* GetName() { return "CEntryStub"; } }; @@ -610,8 +575,6 @@ class JSEntryStub : public CodeStub { private: Major MajorKey() { return JSEntry; } int MinorKey() { return 0; } - - const char* GetName() { return "JSEntryStub"; } }; @@ -624,7 +587,9 @@ class JSConstructEntryStub : public JSEntryStub { private: int MinorKey() { return 1; } - const char* GetName() { return "JSConstructEntryStub"; } + virtual void PrintName(StringStream* stream) { + stream->Add("JSConstructEntryStub"); + } }; @@ -651,13 +616,7 @@ class ArgumentsAccessStub: public CodeStub { void GenerateNewNonStrictFast(MacroAssembler* masm); void GenerateNewNonStrictSlow(MacroAssembler* masm); - const char* GetName() { return "ArgumentsAccessStub"; } - -#ifdef DEBUG - void Print() { - PrintF("ArgumentsAccessStub (type %d)\n", type_); - } -#endif + virtual void PrintName(StringStream* stream); }; @@ -670,14 +629,6 @@ class RegExpExecStub: public CodeStub { int MinorKey() { return 0; } void Generate(MacroAssembler* masm); - - const char* GetName() { return "RegExpExecStub"; } - -#ifdef DEBUG - void Print() { - PrintF("RegExpExecStub\n"); - } -#endif }; @@ -690,14 +641,6 @@ class RegExpConstructResultStub: public CodeStub { int MinorKey() { return 0; } void Generate(MacroAssembler* masm); - - const char* GetName() { return "RegExpConstructResultStub"; } - -#ifdef DEBUG - void Print() { - PrintF("RegExpConstructResultStub\n"); - } -#endif }; @@ -717,14 +660,7 @@ class CallFunctionStub: public CodeStub { InLoopFlag in_loop_; CallFunctionFlags flags_; -#ifdef DEBUG - void Print() { - PrintF("CallFunctionStub (args %d, in_loop %d, flags %d)\n", - argc_, - static_cast<int>(in_loop_), - static_cast<int>(flags_)); - } -#endif + virtual void PrintName(StringStream* stream); // Minor key encoding in 32 bits with Bitfield <Type, shift, size>. 
class InLoopBits: public BitField<InLoopFlag, 0, 1> {}; @@ -921,83 +857,44 @@ class AllowStubCallsScope { DISALLOW_COPY_AND_ASSIGN(AllowStubCallsScope); }; -#ifdef DEBUG -#define DECLARE_ARRAY_STUB_PRINT(name) void Print() { PrintF(#name); } -#else -#define DECLARE_ARRAY_STUB_PRINT(name) -#endif - -class KeyedLoadFastElementStub : public CodeStub { +class KeyedLoadElementStub : public CodeStub { public: - explicit KeyedLoadFastElementStub() { - } + explicit KeyedLoadElementStub(JSObject::ElementsKind elements_kind) + : elements_kind_(elements_kind) + { } - Major MajorKey() { return KeyedLoadFastElement; } - int MinorKey() { return 0; } + Major MajorKey() { return KeyedLoadElement; } + int MinorKey() { return elements_kind_; } void Generate(MacroAssembler* masm); - const char* GetName() { return "KeyedLoadFastElementStub"; } + private: + JSObject::ElementsKind elements_kind_; - DECLARE_ARRAY_STUB_PRINT(KeyedLoadFastElementStub) + DISALLOW_COPY_AND_ASSIGN(KeyedLoadElementStub); }; -class KeyedStoreFastElementStub : public CodeStub { +class KeyedStoreElementStub : public CodeStub { public: - explicit KeyedStoreFastElementStub(bool is_js_array) - : is_js_array_(is_js_array) { } + KeyedStoreElementStub(bool is_js_array, + JSObject::ElementsKind elements_kind) + : is_js_array_(is_js_array), + elements_kind_(elements_kind) { } - Major MajorKey() { return KeyedStoreFastElement; } - int MinorKey() { return is_js_array_ ? 1 : 0; } + Major MajorKey() { return KeyedStoreElement; } + int MinorKey() { + return (is_js_array_ ? 0 : JSObject::kElementsKindCount) + elements_kind_; + } void Generate(MacroAssembler* masm); - const char* GetName() { return "KeyedStoreFastElementStub"; } - - DECLARE_ARRAY_STUB_PRINT(KeyedStoreFastElementStub) - private: bool is_js_array_; -}; - - -class KeyedLoadExternalArrayStub : public CodeStub { - public: - explicit KeyedLoadExternalArrayStub(JSObject::ElementsKind elements_kind) - : elements_kind_(elements_kind) { } - - Major MajorKey() { return KeyedLoadExternalArray; } - int MinorKey() { return elements_kind_; } - - void Generate(MacroAssembler* masm); - - const char* GetName() { return "KeyedLoadExternalArrayStub"; } - - DECLARE_ARRAY_STUB_PRINT(KeyedLoadExternalArrayStub) - - protected: JSObject::ElementsKind elements_kind_; -}; - - -class KeyedStoreExternalArrayStub : public CodeStub { - public: - explicit KeyedStoreExternalArrayStub(JSObject::ElementsKind elements_kind) - : elements_kind_(elements_kind) { } - - Major MajorKey() { return KeyedStoreExternalArray; } - int MinorKey() { return elements_kind_; } - - void Generate(MacroAssembler* masm); - - const char* GetName() { return "KeyedStoreExternalArrayStub"; } - DECLARE_ARRAY_STUB_PRINT(KeyedStoreExternalArrayStub) - - protected: - JSObject::ElementsKind elements_kind_; + DISALLOW_COPY_AND_ASSIGN(KeyedStoreElementStub); }; diff --git a/deps/v8/src/codegen.cc b/deps/v8/src/codegen.cc index 4e5c781361..fb723a3bcc 100644 --- a/deps/v8/src/codegen.cc +++ b/deps/v8/src/codegen.cc @@ -169,8 +169,6 @@ void CodeGenerator::PrintCode(Handle<Code> code, CompilationInfo* info) { #endif // ENABLE_DISASSEMBLER } -#ifdef ENABLE_LOGGING_AND_PROFILING - static Vector<const char> kRegexp = CStrVector("regexp"); bool CodeGenerator::ShouldGenerateLog(Expression* type) { @@ -187,8 +185,6 @@ bool CodeGenerator::ShouldGenerateLog(Expression* type) { return false; } -#endif - bool CodeGenerator::RecordPositions(MacroAssembler* masm, int pos, diff --git a/deps/v8/src/conversions-inl.h b/deps/v8/src/conversions-inl.h index 
bb24a9c2b7..f1f526ffc0 100644 --- a/deps/v8/src/conversions-inl.h +++ b/deps/v8/src/conversions-inl.h @@ -43,6 +43,11 @@ namespace v8 { namespace internal { +static inline double JunkStringValue() { + return std::numeric_limits<double>::quiet_NaN(); +} + + // The fast double-to-unsigned-int conversion routine does not guarantee // rounding towards zero, or any reasonable value if the argument is larger // than what fits in an unsigned 32-bit integer. @@ -151,7 +156,7 @@ static double InternalStringToIntDouble(UnicodeCache* unicode_cache, !AdvanceToNonspace(unicode_cache, &current, end)) { break; } else { - return JUNK_STRING_VALUE; + return JunkStringValue(); } } @@ -181,7 +186,7 @@ static double InternalStringToIntDouble(UnicodeCache* unicode_cache, if (!allow_trailing_junk && AdvanceToNonspace(unicode_cache, &current, end)) { - return JUNK_STRING_VALUE; + return JunkStringValue(); } int middle_value = (1 << (overflow_bits_count - 1)); @@ -229,7 +234,7 @@ static double InternalStringToInt(UnicodeCache* unicode_cache, EndMark end, int radix) { const bool allow_trailing_junk = true; - const double empty_string_val = JUNK_STRING_VALUE; + const double empty_string_val = JunkStringValue(); if (!AdvanceToNonspace(unicode_cache, &current, end)) { return empty_string_val; @@ -242,12 +247,12 @@ static double InternalStringToInt(UnicodeCache* unicode_cache, // Ignore leading sign; skip following spaces. ++current; if (current == end) { - return JUNK_STRING_VALUE; + return JunkStringValue(); } } else if (*current == '-') { ++current; if (current == end) { - return JUNK_STRING_VALUE; + return JunkStringValue(); } negative = true; } @@ -260,7 +265,7 @@ static double InternalStringToInt(UnicodeCache* unicode_cache, if (*current == 'x' || *current == 'X') { radix = 16; ++current; - if (current == end) return JUNK_STRING_VALUE; + if (current == end) return JunkStringValue(); } else { radix = 8; leading_zero = true; @@ -275,14 +280,14 @@ static double InternalStringToInt(UnicodeCache* unicode_cache, if (current == end) return SignedZero(negative); if (*current == 'x' || *current == 'X') { ++current; - if (current == end) return JUNK_STRING_VALUE; + if (current == end) return JunkStringValue(); } else { leading_zero = true; } } } - if (radix < 2 || radix > 36) return JUNK_STRING_VALUE; + if (radix < 2 || radix > 36) return JunkStringValue(); // Skip leading zeros. while (*current == '0') { @@ -292,7 +297,7 @@ static double InternalStringToInt(UnicodeCache* unicode_cache, } if (!leading_zero && !isDigit(*current, radix)) { - return JUNK_STRING_VALUE; + return JunkStringValue(); } if (IsPowerOf2(radix)) { @@ -340,7 +345,7 @@ static double InternalStringToInt(UnicodeCache* unicode_cache, if (!allow_trailing_junk && AdvanceToNonspace(unicode_cache, &current, end)) { - return JUNK_STRING_VALUE; + return JunkStringValue(); } ASSERT(buffer_pos < kBufferSize); @@ -406,7 +411,7 @@ static double InternalStringToInt(UnicodeCache* unicode_cache, if (!allow_trailing_junk && AdvanceToNonspace(unicode_cache, &current, end)) { - return JUNK_STRING_VALUE; + return JunkStringValue(); } return negative ? -v : v; @@ -456,22 +461,22 @@ static double InternalStringToDouble(UnicodeCache* unicode_cache, if (*current == '+') { // Ignore leading sign.
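// Everywhere in these conversion hunks the namespace-scope constant
// JUNK_STRING_VALUE gives way to the inline helper added at the top of
// this file, presumably to avoid a static initializer for the NaN:
//
//   static inline double JunkStringValue() {
//     return std::numeric_limits<double>::quiet_NaN();
//   }
//
// so every rejected ("junk") input, such as a bare "0x" or a dangling
// exponent, now returns a quiet NaN through a function call instead.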
++current; - if (current == end) return JUNK_STRING_VALUE; + if (current == end) return JunkStringValue(); } else if (*current == '-') { ++current; - if (current == end) return JUNK_STRING_VALUE; + if (current == end) return JunkStringValue(); negative = true; } static const char kInfinitySymbol[] = "Infinity"; if (*current == kInfinitySymbol[0]) { if (!SubStringEquals(&current, end, kInfinitySymbol)) { - return JUNK_STRING_VALUE; + return JunkStringValue(); } if (!allow_trailing_junk && AdvanceToNonspace(unicode_cache, &current, end)) { - return JUNK_STRING_VALUE; + return JunkStringValue(); } ASSERT(buffer_pos == 0); @@ -489,7 +494,7 @@ static double InternalStringToDouble(UnicodeCache* unicode_cache, if ((flags & ALLOW_HEX) && (*current == 'x' || *current == 'X')) { ++current; if (current == end || !isDigit(*current, 16)) { - return JUNK_STRING_VALUE; // "0x". + return JunkStringValue(); // "0x". } return InternalStringToIntDouble<4>(unicode_cache, @@ -529,13 +534,13 @@ static double InternalStringToDouble(UnicodeCache* unicode_cache, } if (*current == '.') { - if (octal && !allow_trailing_junk) return JUNK_STRING_VALUE; + if (octal && !allow_trailing_junk) return JunkStringValue(); if (octal) goto parsing_done; ++current; if (current == end) { if (significant_digits == 0 && !leading_zero) { - return JUNK_STRING_VALUE; + return JunkStringValue(); } else { goto parsing_done; } @@ -576,18 +581,18 @@ static double InternalStringToDouble(UnicodeCache* unicode_cache, // If exponent < 0 then string was [+-]\.0*... // If significant_digits != 0 the string is not equal to 0. // Otherwise there are no digits in the string. - return JUNK_STRING_VALUE; + return JunkStringValue(); } // Parse exponential part. if (*current == 'e' || *current == 'E') { - if (octal) return JUNK_STRING_VALUE; + if (octal) return JunkStringValue(); ++current; if (current == end) { if (allow_trailing_junk) { goto parsing_done; } else { - return JUNK_STRING_VALUE; + return JunkStringValue(); } } char sign = '+'; @@ -598,7 +603,7 @@ static double InternalStringToDouble(UnicodeCache* unicode_cache, if (allow_trailing_junk) { goto parsing_done; } else { - return JUNK_STRING_VALUE; + return JunkStringValue(); } } } @@ -607,7 +612,7 @@ static double InternalStringToDouble(UnicodeCache* unicode_cache, if (allow_trailing_junk) { goto parsing_done; } else { - return JUNK_STRING_VALUE; + return JunkStringValue(); } } @@ -631,7 +636,7 @@ static double InternalStringToDouble(UnicodeCache* unicode_cache, if (!allow_trailing_junk && AdvanceToNonspace(unicode_cache, &current, end)) { - return JUNK_STRING_VALUE; + return JunkStringValue(); } parsing_done: diff --git a/deps/v8/src/conversions.cc b/deps/v8/src/conversions.cc index 232eda08c9..c34fe519c4 100644 --- a/deps/v8/src/conversions.cc +++ b/deps/v8/src/conversions.cc @@ -430,24 +430,4 @@ char* DoubleToRadixCString(double value, int radix) { return builder.Finalize(); } - -static Mutex* dtoa_lock_one = OS::CreateMutex(); -static Mutex* dtoa_lock_zero = OS::CreateMutex(); - - } } // namespace v8::internal - - -extern "C" { -void ACQUIRE_DTOA_LOCK(int n) { - ASSERT(n == 0 || n == 1); - (n == 0 ? v8::internal::dtoa_lock_zero : v8::internal::dtoa_lock_one)->Lock(); -} - - -void FREE_DTOA_LOCK(int n) { - ASSERT(n == 0 || n == 1); - (n == 0 ?
v8::internal::dtoa_lock_zero : v8::internal::dtoa_lock_one)-> - Unlock(); -} -} diff --git a/deps/v8/src/conversions.h b/deps/v8/src/conversions.h index c3e27b2025..7b02c47f6a 100644 --- a/deps/v8/src/conversions.h +++ b/deps/v8/src/conversions.h @@ -44,8 +44,6 @@ namespace internal { // we don't need to preserve all the digits. const int kMaxSignificantDigits = 772; -static const double JUNK_STRING_VALUE = - std::numeric_limits<double>::quiet_NaN(); static bool isDigit(int x, int radix) { return (x >= '0' && x <= '9' && x < '0' + radix) diff --git a/deps/v8/src/cpu-profiler-inl.h b/deps/v8/src/cpu-profiler-inl.h index d7a23a518a..938b632214 100644 --- a/deps/v8/src/cpu-profiler-inl.h +++ b/deps/v8/src/cpu-profiler-inl.h @@ -30,8 +30,6 @@ #include "cpu-profiler.h" -#ifdef ENABLE_LOGGING_AND_PROFILING - #include <new> #include "circular-queue-inl.h" #include "profile-generator-inl.h" @@ -83,6 +81,4 @@ bool ProfilerEventsProcessor::FilterOutCodeCreateEvent( } } // namespace v8::internal -#endif // ENABLE_LOGGING_AND_PROFILING - #endif // V8_CPU_PROFILER_INL_H_ diff --git a/deps/v8/src/cpu-profiler.cc b/deps/v8/src/cpu-profiler.cc index 8b10e8188c..bb480fc345 100644 --- a/deps/v8/src/cpu-profiler.cc +++ b/deps/v8/src/cpu-profiler.cc @@ -29,8 +29,6 @@ #include "cpu-profiler-inl.h" -#ifdef ENABLE_LOGGING_AND_PROFILING - #include "frames-inl.h" #include "hashmap.h" #include "log-inl.h" @@ -574,31 +572,21 @@ void CpuProfiler::StopProcessor() { logger->logging_nesting_ = saved_logging_nesting_; } -} } // namespace v8::internal - -#endif // ENABLE_LOGGING_AND_PROFILING - -namespace v8 { -namespace internal { void CpuProfiler::Setup() { -#ifdef ENABLE_LOGGING_AND_PROFILING Isolate* isolate = Isolate::Current(); if (isolate->cpu_profiler() == NULL) { isolate->set_cpu_profiler(new CpuProfiler()); } -#endif } void CpuProfiler::TearDown() { -#ifdef ENABLE_LOGGING_AND_PROFILING Isolate* isolate = Isolate::Current(); if (isolate->cpu_profiler() != NULL) { delete isolate->cpu_profiler(); } isolate->set_cpu_profiler(NULL); -#endif } } } // namespace v8::internal diff --git a/deps/v8/src/cpu-profiler.h b/deps/v8/src/cpu-profiler.h index 42d79a578e..4175e8f680 100644 --- a/deps/v8/src/cpu-profiler.h +++ b/deps/v8/src/cpu-profiler.h @@ -28,8 +28,6 @@ #ifndef V8_CPU_PROFILER_H_ #define V8_CPU_PROFILER_H_ -#ifdef ENABLE_LOGGING_AND_PROFILING - #include "allocation.h" #include "atomicops.h" #include "circular-queue.h" @@ -206,9 +204,6 @@ class ProfilerEventsProcessor : public Thread { v8::internal::CpuProfiler::Call; \ } \ } while (false) -#else -#define PROFILE(isolate, Call) LOG(isolate, Call) -#endif // ENABLE_LOGGING_AND_PROFILING namespace v8 { @@ -221,7 +216,6 @@ class CpuProfiler { static void Setup(); static void TearDown(); -#ifdef ENABLE_LOGGING_AND_PROFILING static void StartProfiling(const char* title); static void StartProfiling(String* title); static CpuProfile* StopProfiling(const char* title); @@ -289,10 +283,6 @@ class CpuProfiler { bool need_to_stop_sampler_; Atomic32 is_profiling_; -#else - static INLINE(bool is_profiling(Isolate* isolate)) { return false; } -#endif // ENABLE_LOGGING_AND_PROFILING - private: DISALLOW_COPY_AND_ASSIGN(CpuProfiler); }; diff --git a/deps/v8/src/d8.cc b/deps/v8/src/d8.cc index 6f948c6e5c..5f57350093 100644 --- a/deps/v8/src/d8.cc +++ b/deps/v8/src/d8.cc @@ -41,6 +41,9 @@ #include "natives.h" #include "platform.h" +#if !defined(_WIN32) && !defined(_WIN64) +#include <unistd.h> // NOLINT +#endif namespace v8 { @@ -97,6 +100,8 @@ CounterCollection 
Shell::local_counters_; CounterCollection* Shell::counters_ = &local_counters_; Persistent<Context> Shell::utility_context_; Persistent<Context> Shell::evaluation_context_; +i::Mutex* Shell::context_mutex_(i::OS::CreateMutex()); +ShellOptions Shell::options; bool CounterMap::Match(void* key1, void* key2) { @@ -119,6 +124,7 @@ bool Shell::ExecuteString(Handle<String> source, bool report_exceptions) { HandleScope handle_scope; TryCatch try_catch; + options.script_executed = true; if (i::FLAG_debugger) { // When debugging make exceptions appear to be uncaught. try_catch.SetVerbose(true); @@ -238,7 +244,7 @@ Handle<Value> Shell::CreateExternalArray(const Arguments& args, if (raw_length < 0) { return ThrowException(String::New("Array length must not be negative.")); } - if (raw_length > v8::internal::ExternalArray::kMaxLength) { + if (raw_length > i::ExternalArray::kMaxLength) { return ThrowException( String::New("Array length exceeds maximum length.")); } @@ -246,7 +252,7 @@ Handle<Value> Shell::CreateExternalArray(const Arguments& args, } else { return ThrowException(String::New("Array length must be a number.")); } - if (length > static_cast<size_t>(internal::ExternalArray::kMaxLength)) { + if (length > static_cast<size_t>(i::ExternalArray::kMaxLength)) { return ThrowException(String::New("Array length exceeds maximum length.")); } void* data = calloc(length, element_size); @@ -540,7 +546,6 @@ void Shell::InstallUtilityScript() { shell_source_name.length()); Handle<Script> script = Script::Compile(source, name); script->Run(); - // Mark the d8 shell script as native to avoid it showing up as normal source // in the debugger. i::Handle<i::Object> compiled_script = Utils::OpenHandle(*script); @@ -550,6 +555,13 @@ void Shell::InstallUtilityScript() { : i::Handle<i::Script>(i::Script::cast( i::SharedFunctionInfo::cast(*compiled_script)->script())); script_object->set_type(i::Smi::FromInt(i::Script::TYPE_NATIVE)); + +#ifdef ENABLE_DEBUGGER_SUPPORT + // Start the in-process debugger if requested. + if (i::FLAG_debugger && !i::FLAG_debugger_agent) { + v8::Debug::SetDebugEventListener(HandleDebugEvent); + } +#endif } @@ -625,7 +637,7 @@ Handle<ObjectTemplate> Shell::CreateGlobalTemplate() { } -void Shell::Initialize(bool test_shell) { +void Shell::Initialize() { #ifdef COMPRESS_STARTUP_DATA_BZ2 BZip2Decompressor startup_data_decompressor; int bz2_result = startup_data_decompressor.Decompress(); @@ -645,7 +657,7 @@ void Shell::Initialize(bool test_shell) { V8::SetAddHistogramSampleFunction(AddHistogramSample); } - if (test_shell) return; + if (options.test_shell) return; Locker lock; HandleScope scope; @@ -657,26 +669,17 @@ void Shell::Initialize(bool test_shell) { if (i::FLAG_debugger_agent) { v8::Debug::EnableAgent("d8 shell", i::FLAG_debugger_port, true); } - - // Start the in-process debugger if requested. 
- if (i::FLAG_debugger && !i::FLAG_debugger_agent) { - v8::Debug::SetDebugEventListener(HandleDebugEvent); - } #endif } -void Shell::RenewEvaluationContext() { +Persistent<Context> Shell::CreateEvaluationContext() { + // This needs to be a critical section since this is not thread-safe + i::ScopedLock lock(context_mutex_); // Initialize the global objects - HandleScope scope; Handle<ObjectTemplate> global_template = CreateGlobalTemplate(); - - // (Re-)create the evaluation context - if (!evaluation_context_.IsEmpty()) { - evaluation_context_.Dispose(); - } - evaluation_context_ = Context::New(NULL, global_template); - Context::Scope utility_scope(evaluation_context_); + Persistent<Context> context = Context::New(NULL, global_template); + Context::Scope scope(context); i::JSArguments js_args = i::FLAG_js_arguments; i::Handle<i::FixedArray> arguments_array = @@ -688,28 +691,27 @@ void Shell::RenewEvaluationContext() { } i::Handle<i::JSArray> arguments_jsarray = FACTORY->NewJSArrayWithElements(arguments_array); - evaluation_context_->Global()->Set(String::New("arguments"), + context->Global()->Set(String::New("arguments"), Utils::ToLocal(arguments_jsarray)); + return context; } void Shell::OnExit() { if (i::FLAG_dump_counters) { - ::printf("+----------------------------------------+-------------+\n"); - ::printf("| Name | Value |\n"); - ::printf("+----------------------------------------+-------------+\n"); + printf("+----------------------------------------+-------------+\n"); + printf("| Name | Value |\n"); + printf("+----------------------------------------+-------------+\n"); for (CounterMap::Iterator i(counter_map_); i.More(); i.Next()) { Counter* counter = i.CurrentValue(); if (counter->is_histogram()) { - ::printf("| c:%-36s | %11i |\n", i.CurrentKey(), counter->count()); - ::printf("| t:%-36s | %11i |\n", - i.CurrentKey(), - counter->sample_total()); + printf("| c:%-36s | %11i |\n", i.CurrentKey(), counter->count()); + printf("| t:%-36s | %11i |\n", i.CurrentKey(), counter->sample_total()); } else { - ::printf("| %-38s | %11i |\n", i.CurrentKey(), counter->count()); + printf("| %-38s | %11i |\n", i.CurrentKey(), counter->count()); } } - ::printf("+----------------------------------------+-------------+\n"); + printf("+----------------------------------------+-------------+\n"); } if (counters_file_ != NULL) delete counters_file_; @@ -717,7 +719,8 @@ void Shell::OnExit() { static char* ReadChars(const char* name, int* size_out) { - v8::Unlocker unlocker; // Release the V8 lock while reading files. + // Release the V8 lock while reading files. + v8::Unlocker unlocker(Isolate::GetCurrent()); FILE* file = i::OS::FOpen(name, "rb"); if (file == NULL) return NULL; @@ -806,11 +809,6 @@ class ShellThread : public i::Thread { void ShellThread::Run() { - // Prepare the context for this thread. - Locker locker; - HandleScope scope; - Handle<ObjectTemplate> global_template = Shell::CreateGlobalTemplate(); - char* ptr = const_cast<char*>(files_.start()); while ((ptr != NULL) && (*ptr != '\0')) { // For each newline-separated line. @@ -822,7 +820,10 @@ void ShellThread::Run() { continue; } - Persistent<Context> thread_context = Context::New(NULL, global_template); + // Prepare the context for this thread. 
+ Locker locker; + HandleScope scope; + Persistent<Context> thread_context = Shell::CreateEvaluationContext(); Context::Scope context_scope(thread_context); while ((ptr != NULL) && (*ptr != '\0')) { @@ -848,153 +849,296 @@ void ShellThread::Run() { } } -int Shell::RunMain(int argc, char* argv[], bool* executed) { - // Default use preemption if threads are created. - bool use_preemption = true; - // Default to use lowest possible thread preemption interval to test as many - // edgecases as possible. - int preemption_interval = 1; +void SourceGroup::ExitShell(int exit_code) { + // Use _exit instead of exit to avoid races between isolate + // threads and static destructors. + fflush(stdout); + fflush(stderr); + _exit(exit_code); +} - i::List<i::Thread*> threads(1); - { - // Since the thread below may spawn new threads accessing V8 holding the - // V8 lock here is mandatory. - Locker locker; - RenewEvaluationContext(); - Context::Scope context_scope(evaluation_context_); - for (int i = 1; i < argc; i++) { - char* str = argv[i]; - if (strcmp(str, "--preemption") == 0) { - use_preemption = true; - } else if (strcmp(str, "--no-preemption") == 0) { - use_preemption = false; - } else if (strcmp(str, "--preemption-interval") == 0) { - if (i + 1 < argc) { - char* end = NULL; - preemption_interval = strtol(argv[++i], &end, 10); // NOLINT - if (preemption_interval <= 0 || *end != '\0' || errno == ERANGE) { - printf("Invalid value for --preemption-interval '%s'\n", argv[i]); - return 1; - } - } else { - printf("Missing value for --preemption-interval\n"); - return 1; - } - } else if (strcmp(str, "-f") == 0) { - // Ignore any -f flags for compatibility with other stand-alone - // JavaScript engines. - continue; - } else if (strncmp(str, "--", 2) == 0) { - printf("Warning: unknown flag %s.\nTry --help for options\n", str); - } else if (strcmp(str, "-e") == 0 && i + 1 < argc) { - // Execute argument given to -e option directly. - v8::HandleScope handle_scope; - v8::Handle<v8::String> file_name = v8::String::New("unnamed"); - v8::Handle<v8::String> source = v8::String::New(argv[++i]); - (*executed) = true; - if (!ExecuteString(source, file_name, false, true)) { - OnExit(); - return 1; - } - } else if (strcmp(str, "-p") == 0 && i + 1 < argc) { - int size = 0; - const char* files = ReadChars(argv[++i], &size); - if (files == NULL) return 1; - ShellThread* thread = - new ShellThread(threads.length(), - i::Vector<const char>(files, size)); - thread->Start(); - threads.Add(thread); - (*executed) = true; - } else { - // Use all other arguments as names of files to load and run. - HandleScope handle_scope; - Handle<String> file_name = v8::String::New(str); - Handle<String> source = ReadFile(str); - (*executed) = true; - if (source.IsEmpty()) { - printf("Error reading '%s'\n", str); - return 1; - } - if (!ExecuteString(source, file_name, false, true)) { - OnExit(); - return 1; - } +void SourceGroup::Execute() { + for (int i = begin_offset_; i < end_offset_; ++i) { + const char* arg = argv_[i]; + if (strcmp(arg, "-e") == 0 && i + 1 < end_offset_) { + // Execute argument given to -e option directly. + HandleScope handle_scope; + Handle<String> file_name = String::New("unnamed"); + Handle<String> source = String::New(argv_[i + 1]); + if (!Shell::ExecuteString(source, file_name, false, true)) { + ExitShell(1); + return; + } + ++i; + } else if (arg[0] == '-') { + // Ignore other options. They have been parsed already. + } else { + // Use all other arguments as names of files to load and run. 
+ HandleScope handle_scope; + Handle<String> file_name = String::New(arg); + Handle<String> source = ReadFile(arg); + if (source.IsEmpty()) { + printf("Error reading '%s'\n", arg); + ExitShell(1); + return; + } + if (!Shell::ExecuteString(source, file_name, false, true)) { + ExitShell(1); + return; } } + } +} - // Start preemption if threads have been created and preemption is enabled. - if (threads.length() > 0 && use_preemption) { - Locker::StartPreemption(preemption_interval); - } + +Handle<String> SourceGroup::ReadFile(const char* name) { + FILE* file = fopen(name, "rb"); + if (file == NULL) return Handle<String>(); + + fseek(file, 0, SEEK_END); + int size = ftell(file); + rewind(file); + + char* chars = new char[size + 1]; + chars[size] = '\0'; + for (int i = 0; i < size;) { + int read = fread(&chars[i], 1, size - i, file); + i += read; } + fclose(file); + Handle<String> result = String::New(chars, size); + delete[] chars; + return result; +} - for (int i = 0; i < threads.length(); i++) { - i::Thread* thread = threads[i]; - thread->Join(); - delete thread; + +i::Thread::Options SourceGroup::GetThreadOptions() { + i::Thread::Options options; + options.name = "IsolateThread"; + // On some systems (OSX 10.6) the stack size default is 0.5Mb or less + // which is not enough to parse the big literal expressions used in tests. + // The stack size should be at least StackGuard::kLimitSize + some + // OS-specific padding for thread startup code. + options.stack_size = 2 << 20; // 2 Mb seems to be enough + return options; +} + + +void SourceGroup::ExecuteInThread() { + Isolate* isolate = Isolate::New(); + do { + if (next_semaphore_ != NULL) next_semaphore_->Wait(); + { + Isolate::Scope iscope(isolate); + Locker lock(isolate); + HandleScope scope; + Persistent<Context> context = Shell::CreateEvaluationContext(); + { + Context::Scope cscope(context); + Execute(); + } + context.Dispose(); + } + if (done_semaphore_ != NULL) done_semaphore_->Signal(); + } while (!Shell::options.last_run); + isolate->Dispose(); +} + + +void SourceGroup::StartExecuteInThread() { + if (thread_ == NULL) { + thread_ = new IsolateThread(this); + thread_->Start(); } - OnExit(); - return 0; + next_semaphore_->Signal(); } -int Shell::Main(int argc, char* argv[]) { - // Figure out if we're requested to stress the optimization - // infrastructure by running tests multiple times and forcing - // optimization in the last run. - bool FLAG_stress_opt = false; - bool FLAG_stress_deopt = false; - bool FLAG_interactive_shell = false; - bool FLAG_test_shell = false; - bool script_executed = false; +void SourceGroup::WaitForThread() { + if (thread_ == NULL) return; + if (Shell::options.last_run) { + thread_->Join(); + thread_ = NULL; + } else { + done_semaphore_->Wait(); + } +} + +bool Shell::SetOptions(int argc, char* argv[]) { for (int i = 0; i < argc; i++) { if (strcmp(argv[i], "--stress-opt") == 0) { - FLAG_stress_opt = true; + options.stress_opt = true; argv[i] = NULL; } else if (strcmp(argv[i], "--stress-deopt") == 0) { - FLAG_stress_deopt = true; + options.stress_deopt = true; argv[i] = NULL; } else if (strcmp(argv[i], "--noalways-opt") == 0) { // No support for stressing if we can't use --always-opt. 
- FLAG_stress_opt = false; - FLAG_stress_deopt = false; + options.stress_opt = false; + options.stress_deopt = false; } else if (strcmp(argv[i], "--shell") == 0) { - FLAG_interactive_shell = true; + options.interactive_shell = true; argv[i] = NULL; } else if (strcmp(argv[i], "--test") == 0) { - FLAG_test_shell = true; + options.test_shell = true; + argv[i] = NULL; + } else if (strcmp(argv[i], "--preemption") == 0) { + options.use_preemption = true; argv[i] = NULL; + } else if (strcmp(argv[i], "--no-preemption") == 0) { + options.use_preemption = false; + argv[i] = NULL; + } else if (strcmp(argv[i], "--preemption-interval") == 0) { + if (++i < argc) { + argv[i-1] = NULL; + char* end = NULL; + options.preemption_interval = strtol(argv[i], &end, 10); // NOLINT + if (options.preemption_interval <= 0 + || *end != '\0' + || errno == ERANGE) { + printf("Invalid value for --preemption-interval '%s'\n", argv[i]); + return false; + } + argv[i] = NULL; + } else { + printf("Missing value for --preemption-interval\n"); + return false; + } + } else if (strcmp(argv[i], "-f") == 0) { + // Ignore any -f flags for compatibility with other stand-alone + // JavaScript engines. + continue; + } else if (strcmp(argv[i], "--isolate") == 0) { + options.num_isolates++; + } + } + + // Run parallel threads if we are not using --isolate + for (int i = 1; i < argc; i++) { + if (argv[i] == NULL) continue; + if (strcmp(argv[i], "-p") == 0 && i + 1 < argc) { + if (options.num_isolates > 1) { + printf("-p is not compatible with --isolate\n"); + return false; + } + argv[i] = NULL; + if (options.parallel_files == NULL) { + options.parallel_files = new i::List<i::Vector<const char> >(); + } + int size = 0; + const char* files = ReadChars(argv[++i], &size); + if (files == NULL) { + printf("-p option incomplete\n"); + return false; + } + argv[i] = NULL; + options.parallel_files->Add(i::Vector<const char>(files, size)); } } v8::V8::SetFlagsFromCommandLine(&argc, argv, true); - Initialize(FLAG_test_shell); + // set up isolated source groups + options.isolate_sources = new SourceGroup[options.num_isolates]; + SourceGroup* current = options.isolate_sources; + current->Begin(argv, 1); + for (int i = 1; i < argc; i++) { + const char* str = argv[i]; + if (strcmp(str, "--isolate") == 0) { + current->End(i); + current++; + current->Begin(argv, i + 1); + } else if (strncmp(argv[i], "--", 2) == 0) { + printf("Warning: unknown flag %s.\nTry --help for options\n", argv[i]); + } + } + current->End(argc); + + return true; +} + + +int Shell::RunMain(int argc, char* argv[]) { + i::List<i::Thread*> threads(1); + + { + if (options.parallel_files != NULL) + for (int i = 0; i < options.parallel_files->length(); i++) { + i::Vector<const char> files = options.parallel_files->at(i); + ShellThread* thread = new ShellThread(threads.length(), files); + thread->Start(); + threads.Add(thread); + } + + for (int i = 1; i < options.num_isolates; ++i) { + options.isolate_sources[i].StartExecuteInThread(); + } + + Locker lock; + HandleScope scope; + Persistent<Context> context = CreateEvaluationContext(); + { + Context::Scope cscope(context); + options.isolate_sources[0].Execute(); + } + if (options.last_run) { + // Keep using the same context in the interactive shell + evaluation_context_ = context; + } else { + context.Dispose(); + } + // Start preemption if threads have been created and preemption is enabled. 
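// Given the SetOptions() parsing above, argv is partitioned into
// SourceGroups at each "--isolate" flag; RunMain() below runs group 0 on
// the main thread and the remaining groups on their own threads, each in
// a fresh v8::Isolate (see SourceGroup::ExecuteInThread). A plausible
// invocation, with hypothetical file names:
//
//   d8 first.js second.js --isolate third.js
//
// first.js and second.js share the default isolate while third.js runs
// concurrently in a second one.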
+ if (options.parallel_files != NULL + && threads.length() > 0 + && options.use_preemption) { + Locker::StartPreemption(options.preemption_interval); + } + } + + for (int i = 1; i < options.num_isolates; ++i) { + options.isolate_sources[i].WaitForThread(); + } + + if (options.parallel_files != NULL) + for (int i = 0; i < threads.length(); i++) { + i::Thread* thread = threads[i]; + thread->Join(); + delete thread; + } + + OnExit(); + return 0; +} + + +int Shell::Main(int argc, char* argv[]) { + if (!SetOptions(argc, argv)) return 1; + Initialize(); int result = 0; - if (FLAG_stress_opt || FLAG_stress_deopt) { - v8::Testing::SetStressRunType( - FLAG_stress_opt ? v8::Testing::kStressTypeOpt - : v8::Testing::kStressTypeDeopt); - int stress_runs = v8::Testing::GetStressRuns(); + if (options.stress_opt || options.stress_deopt) { + Testing::SetStressRunType( + options.stress_opt ? Testing::kStressTypeOpt + : Testing::kStressTypeDeopt); + int stress_runs = Testing::GetStressRuns(); for (int i = 0; i < stress_runs && result == 0; i++) { printf("============ Stress %d/%d ============\n", i + 1, stress_runs); - v8::Testing::PrepareStressRun(i); - result = RunMain(argc, argv, &script_executed); + Testing::PrepareStressRun(i); + options.last_run = (i == stress_runs - 1); + result = RunMain(argc, argv); } printf("======== Full Deoptimization =======\n"); - v8::Testing::DeoptimizeAll(); + Testing::DeoptimizeAll(); } else { - result = RunMain(argc, argv, &script_executed); + result = RunMain(argc, argv); } #ifdef ENABLE_DEBUGGER_SUPPORT // Run remote debugger if requested, but never on --test - if (i::FLAG_remote_debugger && !FLAG_test_shell) { + if (i::FLAG_remote_debugger && !options.test_shell) { InstallUtilityScript(); RunRemoteDebugger(i::FLAG_debugger_port); return 0; @@ -1003,12 +1147,15 @@ int Shell::Main(int argc, char* argv[]) { // Run interactive shell if explicitly requested or if no script has been // executed, but never on --test - if ((FLAG_interactive_shell || !script_executed) && !FLAG_test_shell) { + + if (( options.interactive_shell + || !options.script_executed ) + && !options.test_shell ) { InstallUtilityScript(); RunShell(); } - v8::V8::Dispose(); + V8::Dispose(); return result; } diff --git a/deps/v8/src/d8.gyp b/deps/v8/src/d8.gyp index 8b52ed9a9a..85914ec672 100644 --- a/deps/v8/src/d8.gyp +++ b/deps/v8/src/d8.gyp @@ -38,10 +38,7 @@ '../src', ], 'defines': [ - 'ENABLE_LOGGING_AND_PROFILING', 'ENABLE_DEBUGGER_SUPPORT', - 'ENABLE_VMSTATE_TRACKING', - 'V8_FAST_TLS', ], 'sources': [ 'd8.cc', diff --git a/deps/v8/src/d8.h b/deps/v8/src/d8.h index e225469993..7f0272710b 100644 --- a/deps/v8/src/d8.h +++ b/deps/v8/src/d8.h @@ -112,6 +112,87 @@ class CounterMap { }; +class SourceGroup { + public: + SourceGroup() + : next_semaphore_(v8::internal::OS::CreateSemaphore(0)), + done_semaphore_(v8::internal::OS::CreateSemaphore(0)), + thread_(NULL), + argv_(NULL), + begin_offset_(0), + end_offset_(0) { } + + void Begin(char** argv, int offset) { + argv_ = const_cast<const char**>(argv); + begin_offset_ = offset; + } + + void End(int offset) { end_offset_ = offset; } + + void Execute(); + + void StartExecuteInThread(); + void WaitForThread(); + + private: + class IsolateThread : public i::Thread { + public: + explicit IsolateThread(SourceGroup* group) + : i::Thread(GetThreadOptions()), group_(group) {} + + virtual void Run() { + group_->ExecuteInThread(); + } + + private: + SourceGroup* group_; + }; + + static i::Thread::Options GetThreadOptions(); + void ExecuteInThread(); + + i::Semaphore* 
next_semaphore_; + i::Semaphore* done_semaphore_; + i::Thread* thread_; + + void ExitShell(int exit_code); + Handle<String> ReadFile(const char* name); + + const char** argv_; + int begin_offset_; + int end_offset_; +}; + + +class ShellOptions { + public: + ShellOptions() + : script_executed(false), + last_run(true), + stress_opt(false), + stress_deopt(false), + interactive_shell(false), + test_shell(false), + use_preemption(true), + preemption_interval(10), + num_isolates(1), + isolate_sources(NULL), + parallel_files(NULL) { } + + bool script_executed; + bool last_run; + bool stress_opt; + bool stress_deopt; + bool interactive_shell; + bool test_shell; + bool use_preemption; + int preemption_interval; + int num_isolates; + SourceGroup* isolate_sources; + i::List< i::Vector<const char> >* parallel_files; +}; + + class Shell: public i::AllStatic { public: static bool ExecuteString(Handle<String> source, @@ -129,12 +210,13 @@ class Shell: public i::AllStatic { static void AddHistogramSample(void* histogram, int sample); static void MapCounters(const char* name); static Handle<String> ReadFile(const char* name); - static void Initialize(bool test_shell); - static void RenewEvaluationContext(); + static void Initialize(); + static Persistent<Context> CreateEvaluationContext(); static void InstallUtilityScript(); static void RunShell(); + static bool SetOptions(int argc, char* argv[]); static int RunScript(char* filename); - static int RunMain(int argc, char* argv[], bool* executed); + static int RunMain(int argc, char* argv[]); static int Main(int argc, char* argv[]); static Handle<ObjectTemplate> CreateGlobalTemplate(); static Handle<Array> GetCompletions(Handle<String> text, @@ -205,6 +287,8 @@ class Shell: public i::AllStatic { static const char* kHistoryFileName; static const char* kPrompt; + static ShellOptions options; + private: static Persistent<Context> utility_context_; static Persistent<Context> evaluation_context_; @@ -214,6 +298,7 @@ class Shell: public i::AllStatic { static CounterCollection local_counters_; static CounterCollection* counters_; static i::OS::MemoryMappedFile* counters_file_; + static i::Mutex* context_mutex_; static Counter* GetCounter(const char* name, bool is_histogram); static Handle<Value> CreateExternalArray(const Arguments& args, ExternalArrayType type, diff --git a/deps/v8/src/debug.cc b/deps/v8/src/debug.cc index f341fc6f1f..c48e514ab8 100644 --- a/deps/v8/src/debug.cc +++ b/deps/v8/src/debug.cc @@ -1821,6 +1821,13 @@ void Debug::SetAfterBreakTarget(JavaScriptFrame* frame) { bool Debug::IsBreakAtReturn(JavaScriptFrame* frame) { HandleScope scope(isolate_); + // If there are no break points this cannot be break at return, as + // the debugger statement and stack guard bebug break cannot be at + // return. + if (!has_break_points_) { + return false; + } + // Get the executing function in which the debug break occurred. Handle<SharedFunctionInfo> shared = Handle<SharedFunctionInfo>(JSFunction::cast(frame->function())->shared()); diff --git a/deps/v8/src/debug.h b/deps/v8/src/debug.h index c4d3c7e373..c614844ab5 100644 --- a/deps/v8/src/debug.h +++ b/deps/v8/src/debug.h @@ -1,4 +1,4 @@ -// Copyright 2006-2008 the V8 project authors. All rights reserved. +// Copyright 2011 the V8 project authors. All rights reserved. 
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -35,6 +35,7 @@ #include "execution.h" #include "factory.h" #include "flags.h" +#include "frames-inl.h" #include "hashmap.h" #include "platform.h" #include "string-stream.h" diff --git a/deps/v8/src/deoptimizer.cc b/deps/v8/src/deoptimizer.cc index e8c659718f..175ee6e1fb 100644 --- a/deps/v8/src/deoptimizer.cc +++ b/deps/v8/src/deoptimizer.cc @@ -161,8 +161,7 @@ DeoptimizedFrameInfo* Deoptimizer::DebuggerInspectableFrame( // Get the "simulated" top and size for the requested frame. Address top = reinterpret_cast<Address>(deoptimizer->output_[frame_index]->GetTop()); - unsigned size = - deoptimizer->output_[frame_index]->GetFrameSize() / kPointerSize; + uint32_t size = deoptimizer->output_[frame_index]->GetFrameSize(); // Done with the GC-unsafe frame descriptions. This re-enables allocation. deoptimizer->DeleteFrameDescriptions(); @@ -547,7 +546,7 @@ void Deoptimizer::MaterializeHeapNumbers() { #ifdef ENABLE_DEBUGGER_SUPPORT void Deoptimizer::MaterializeHeapNumbersForDebuggerInspectableFrame( - Address top, intptr_t size, DeoptimizedFrameInfo* info) { + Address top, uint32_t size, DeoptimizedFrameInfo* info) { ASSERT_EQ(DEBUGGER, bailout_type_); for (int i = 0; i < deferred_heap_numbers_.length(); i++) { HeapNumberMaterializationDescriptor d = deferred_heap_numbers_[i]; @@ -557,17 +556,29 @@ void Deoptimizer::MaterializeHeapNumbersForDebuggerInspectableFrame( Address slot = d.slot_address(); if (top <= slot && slot < top + size) { Handle<Object> num = isolate_->factory()->NewNumber(d.value()); - int expression_index = static_cast<int>( + // Calculate the index with the botton of the expression stack + // at index 0, and the fixed part (including incoming arguments) + // at negative indexes. + int index = static_cast<int>( info->expression_count_ - (slot - top) / kPointerSize - 1); if (FLAG_trace_deopt) { PrintF("Materializing a new heap number %p [%e] in slot %p" - "for expression stack index %d\n", + "for stack index %d\n", reinterpret_cast<void*>(*num), d.value(), d.slot_address(), - expression_index); + index); + } + if (index >=0) { + info->SetExpression(index, *num); + } else { + // Calculate parameter index subtracting one for the receiver. + int parameter_index = + index + + static_cast<int>(size) / kPointerSize - + info->expression_count_ - 1; + info->SetParameter(parameter_index, *num); } - info->SetExpression(expression_index, *num); } } } @@ -1126,6 +1137,22 @@ unsigned FrameDescription::GetOffsetFromSlotIndex(Deoptimizer* deoptimizer, } +int FrameDescription::ComputeParametersCount() { + return function_->shared()->formal_parameter_count(); +} + + +Object* FrameDescription::GetParameter(Deoptimizer* deoptimizer, int index) { + ASSERT_EQ(Code::FUNCTION, kind_); + ASSERT(index >= 0); + ASSERT(index < ComputeParametersCount()); + // The slot indexes for incoming arguments are negative. 
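// A worked instance of the index arithmetic above, with illustrative
// numbers only: for expression_count_ == 2, size == 40 and
// kPointerSize == 4 (ten slots), a slot with
// (slot - top) / kPointerSize == 5 yields
//
//   index = 2 - 5 - 1 = -4
//
// which is negative, so the value belongs to an incoming argument rather
// than the expression stack, and
//
//   parameter_index = -4 + 40 / 4 - 2 - 1 = 3
//
// routes the materialized number to SetParameter(3, *num).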
+ unsigned offset = GetOffsetFromSlotIndex(deoptimizer, + index - ComputeParametersCount()); + return reinterpret_cast<Object*>(*GetFrameSlotPointer(offset)); +} + + unsigned FrameDescription::GetExpressionCount(Deoptimizer* deoptimizer) { ASSERT_EQ(Code::FUNCTION, kind_); unsigned size = GetFrameSize() - deoptimizer->ComputeFixedSize(GetFunction()); @@ -1415,7 +1442,13 @@ void SlotRef::ComputeSlotMappingForArguments(JavaScriptFrame* frame, DeoptimizedFrameInfo::DeoptimizedFrameInfo( Deoptimizer* deoptimizer, int frame_index) { FrameDescription* output_frame = deoptimizer->output_[frame_index]; + SetFunction(output_frame->GetFunction()); expression_count_ = output_frame->GetExpressionCount(deoptimizer); + parameters_count_ = output_frame->ComputeParametersCount(); + parameters_ = new Object*[parameters_count_]; + for (int i = 0; i < parameters_count_; i++) { + SetParameter(i, output_frame->GetParameter(deoptimizer, i)); + } expression_stack_ = new Object*[expression_count_]; for (int i = 0; i < expression_count_; i++) { SetExpression(i, output_frame->GetExpression(deoptimizer, i)); @@ -1424,10 +1457,13 @@ DeoptimizedFrameInfo::DeoptimizedFrameInfo( DeoptimizedFrameInfo::~DeoptimizedFrameInfo() { - delete expression_stack_; + delete[] expression_stack_; + delete[] parameters_; } void DeoptimizedFrameInfo::Iterate(ObjectVisitor* v) { + v->VisitPointer(reinterpret_cast<Object**>(&function_)); + v->VisitPointers(parameters_, parameters_ + parameters_count_); v->VisitPointers(expression_stack_, expression_stack_ + expression_count_); } diff --git a/deps/v8/src/deoptimizer.h b/deps/v8/src/deoptimizer.h index 64823183a5..9265905366 100644 --- a/deps/v8/src/deoptimizer.h +++ b/deps/v8/src/deoptimizer.h @@ -194,7 +194,7 @@ class Deoptimizer : public Malloced { void MaterializeHeapNumbers(); #ifdef ENABLE_DEBUGGER_SUPPORT void MaterializeHeapNumbersForDebuggerInspectableFrame( - Address top, intptr_t size, DeoptimizedFrameInfo* info); + Address top, uint32_t size, DeoptimizedFrameInfo* info); #endif static void ComputeOutputFrames(Deoptimizer* deoptimizer); @@ -400,6 +400,12 @@ class FrameDescription { void SetKind(Code::Kind kind) { kind_ = kind; } #endif + // Get the incoming arguments count. + int ComputeParametersCount(); + + // Get a parameter value for an unoptimized frame. + Object* GetParameter(Deoptimizer* deoptimizer, int index); + // Get the expression stack height for a unoptimized frame. unsigned GetExpressionCount(Deoptimizer* deoptimizer); @@ -662,9 +668,23 @@ class DeoptimizedFrameInfo : public Malloced { // GC support. void Iterate(ObjectVisitor* v); + // Return the number of incoming arguments. + int parameters_count() { return parameters_count_; } + // Return the height of the expression stack. int expression_count() { return expression_count_; } + // Get the frame function. + JSFunction* GetFunction() { + return function_; + } + + // Get an incoming argument. + Object* GetParameter(int index) { + ASSERT(0 <= index && index < parameters_count()); + return parameters_[index]; + } + // Get an expression from the expression stack. Object* GetExpression(int index) { ASSERT(0 <= index && index < expression_count()); @@ -672,13 +692,27 @@ class DeoptimizedFrameInfo : public Malloced { } private: + // Set the frame function. + void SetFunction(JSFunction* function) { + function_ = function; + } + + // Set an incoming argument. 
+ void SetParameter(int index, Object* obj) { + ASSERT(0 <= index && index < parameters_count()); + parameters_[index] = obj; + } + // Set an expression on the expression stack. void SetExpression(int index, Object* obj) { ASSERT(0 <= index && index < expression_count()); expression_stack_[index] = obj; } + JSFunction* function_; + int parameters_count_; int expression_count_; + Object** parameters_; Object** expression_stack_; friend class Deoptimizer; diff --git a/deps/v8/src/flag-definitions.h b/deps/v8/src/flag-definitions.h index 6e13dd2d0a..2db44c3067 100644 --- a/deps/v8/src/flag-definitions.h +++ b/deps/v8/src/flag-definitions.h @@ -452,14 +452,10 @@ DEFINE_bool(trace_regexp_assembler, "trace regexp macro assembler calls.") // -// Logging and profiling only flags +// Logging and profiling flags // #undef FLAG -#ifdef ENABLE_LOGGING_AND_PROFILING #define FLAG FLAG_FULL -#else -#define FLAG FLAG_READONLY -#endif // log.cc DEFINE_bool(log, false, @@ -491,19 +487,6 @@ DEFINE_string(logfile, "v8.log", "Specify the name of the log file.") DEFINE_bool(ll_prof, false, "Enable low-level linux profiler.") // -// Heap protection flags -// Using heap protection requires ENABLE_LOGGING_AND_PROFILING as well. -// -#ifdef ENABLE_HEAP_PROTECTION -#undef FLAG -#define FLAG FLAG_FULL - -DEFINE_bool(protect_heap, false, - "Protect/unprotect V8's heap when leaving/entring the VM.") - -#endif - -// // Disassembler only flags // #undef FLAG diff --git a/deps/v8/src/frames.cc b/deps/v8/src/frames.cc index 4e67463f1f..eaf09ebaf5 100644 --- a/deps/v8/src/frames.cc +++ b/deps/v8/src/frames.cc @@ -36,6 +36,8 @@ #include "scopeinfo.h" #include "string-stream.h" +#include "allocation-inl.h" + namespace v8 { namespace internal { @@ -346,7 +348,6 @@ void SafeStackFrameIterator::Reset() { // ------------------------------------------------------------------------- -#ifdef ENABLE_LOGGING_AND_PROFILING SafeStackTraceFrameIterator::SafeStackTraceFrameIterator( Isolate* isolate, Address fp, Address sp, Address low_bound, Address high_bound) : @@ -362,7 +363,6 @@ void SafeStackTraceFrameIterator::Advance() { if (frame()->is_java_script()) return; } } -#endif Code* StackFrame::GetSafepointData(Isolate* isolate, diff --git a/deps/v8/src/frames.h b/deps/v8/src/frames.h index 9e93daef3d..f542a92d96 100644 --- a/deps/v8/src/frames.h +++ b/deps/v8/src/frames.h @@ -843,7 +843,6 @@ class SafeStackFrameIterator BASE_EMBEDDED { }; -#ifdef ENABLE_LOGGING_AND_PROFILING typedef JavaScriptFrameIteratorTemp<SafeStackFrameIterator> SafeJavaScriptFrameIterator; @@ -855,7 +854,6 @@ class SafeStackTraceFrameIterator: public SafeJavaScriptFrameIterator { Address low_bound, Address high_bound); void Advance(); }; -#endif class StackFrameLocator BASE_EMBEDDED { diff --git a/deps/v8/src/full-codegen.h b/deps/v8/src/full-codegen.h index d25ca490f6..6b174f7427 100644 --- a/deps/v8/src/full-codegen.h +++ b/deps/v8/src/full-codegen.h @@ -444,7 +444,7 @@ class FullCodeGenerator: public AstVisitor { TypeofState typeof_state, Label* slow, Label* done); - void EmitVariableLoad(Variable* expr); + void EmitVariableLoad(VariableProxy* proxy); enum ResolveEvalFlag { SKIP_CONTEXT_LOOKUP, diff --git a/deps/v8/src/handles.cc b/deps/v8/src/handles.cc index d8cc742576..d73aaf0fca 100644 --- a/deps/v8/src/handles.cc +++ b/deps/v8/src/handles.cc @@ -543,11 +543,6 @@ Handle<Object> SetAccessor(Handle<JSObject> obj, Handle<AccessorInfo> info) { // associated with the wrapper and get rid of both the wrapper and the // handle. 
static void ClearWrapperCache(Persistent<v8::Value> handle, void*) { -#ifdef ENABLE_HEAP_PROTECTION - // Weak reference callbacks are called as if from outside V8. We - // need to reeenter to unprotect the heap. - VMState state(OTHER); -#endif Handle<Object> cache = Utils::OpenHandle(*handle); JSValue* wrapper = JSValue::cast(*cache); Foreign* foreign = Script::cast(wrapper->value())->wrapper(); diff --git a/deps/v8/src/heap-profiler.cc b/deps/v8/src/heap-profiler.cc index fb1ea8a641..7e613e9173 100644 --- a/deps/v8/src/heap-profiler.cc +++ b/deps/v8/src/heap-profiler.cc @@ -34,7 +34,6 @@ namespace v8 { namespace internal { -#ifdef ENABLE_LOGGING_AND_PROFILING HeapProfiler::HeapProfiler() : snapshots_(new HeapSnapshotsCollection()), next_snapshot_uid_(1) { @@ -52,29 +51,21 @@ void HeapProfiler::ResetSnapshots() { } -#endif // ENABLE_LOGGING_AND_PROFILING - void HeapProfiler::Setup() { -#ifdef ENABLE_LOGGING_AND_PROFILING Isolate* isolate = Isolate::Current(); if (isolate->heap_profiler() == NULL) { isolate->set_heap_profiler(new HeapProfiler()); } -#endif } void HeapProfiler::TearDown() { -#ifdef ENABLE_LOGGING_AND_PROFILING Isolate* isolate = Isolate::Current(); delete isolate->heap_profiler(); isolate->set_heap_profiler(NULL); -#endif } -#ifdef ENABLE_LOGGING_AND_PROFILING - HeapSnapshot* HeapProfiler::TakeSnapshot(const char* name, int type, v8::ActivityControl* control) { @@ -179,7 +170,5 @@ void HeapProfiler::ObjectMoveEvent(Address from, Address to) { snapshots_->ObjectMoveEvent(from, to); } -#endif // ENABLE_LOGGING_AND_PROFILING - } } // namespace v8::internal diff --git a/deps/v8/src/heap-profiler.h b/deps/v8/src/heap-profiler.h index c32f4c425f..b1bc91c307 100644 --- a/deps/v8/src/heap-profiler.h +++ b/deps/v8/src/heap-profiler.h @@ -33,8 +33,6 @@ namespace v8 { namespace internal { -#ifdef ENABLE_LOGGING_AND_PROFILING - class HeapSnapshot; class HeapSnapshotsCollection; @@ -45,9 +43,6 @@ class HeapSnapshotsCollection; profiler->call; \ } \ } while (false) -#else -#define HEAP_PROFILE(heap, call) ((void) 0) -#endif // ENABLE_LOGGING_AND_PROFILING // The HeapProfiler writes data to the log files, which can be postprocessed // to generate .hp files for use by the GHC/Valgrind tool hp2ps. @@ -56,7 +51,6 @@ class HeapProfiler { static void Setup(); static void TearDown(); -#ifdef ENABLE_LOGGING_AND_PROFILING static HeapSnapshot* TakeSnapshot(const char* name, int type, v8::ActivityControl* control); @@ -93,8 +87,6 @@ class HeapProfiler { HeapSnapshotsCollection* snapshots_; unsigned next_snapshot_uid_; List<v8::HeapProfiler::WrapperInfoCallback> wrapper_callbacks_; - -#endif // ENABLE_LOGGING_AND_PROFILING }; } } // namespace v8::internal diff --git a/deps/v8/src/heap.cc b/deps/v8/src/heap.cc index 508bdf3c49..98a2d3374b 100644 --- a/deps/v8/src/heap.cc +++ b/deps/v8/src/heap.cc @@ -293,12 +293,11 @@ GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space) { // TODO(1238405): Combine the infrastructure for --heap-stats and // --log-gc to avoid the complicated preprocessor and flag testing. -#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) void Heap::ReportStatisticsBeforeGC() { // Heap::ReportHeapStatistics will also log NewSpace statistics when - // compiled with ENABLE_LOGGING_AND_PROFILING and --log-gc is set. The - // following logic is used to avoid double logging. -#if defined(DEBUG) && defined(ENABLE_LOGGING_AND_PROFILING) + // compiled --log-gc is set. The following logic is used to avoid + // double logging. 
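// With ENABLE_LOGGING_AND_PROFILING gone, the old three-way preprocessor
// split (DEBUG, logging-only, both) collapses to a plain DEBUG/release
// pair. Condensing the hunk that follows, a release build is left with
// just:
//
//   if (FLAG_log_gc) {
//     new_space_.CollectStatistics();
//     new_space_.ReportStatistics();
//     new_space_.ClearHistograms();
//   }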
+#ifdef DEBUG if (FLAG_heap_stats || FLAG_log_gc) new_space_.CollectStatistics(); if (FLAG_heap_stats) { ReportHeapStatistics("Before GC"); @@ -306,23 +305,16 @@ void Heap::ReportStatisticsBeforeGC() { new_space_.ReportStatistics(); } if (FLAG_heap_stats || FLAG_log_gc) new_space_.ClearHistograms(); -#elif defined(DEBUG) - if (FLAG_heap_stats) { - new_space_.CollectStatistics(); - ReportHeapStatistics("Before GC"); - new_space_.ClearHistograms(); - } -#elif defined(ENABLE_LOGGING_AND_PROFILING) +#else if (FLAG_log_gc) { new_space_.CollectStatistics(); new_space_.ReportStatistics(); new_space_.ClearHistograms(); } -#endif +#endif // DEBUG } -#if defined(ENABLE_LOGGING_AND_PROFILING) void Heap::PrintShortHeapStatistics() { if (!FLAG_trace_gc_verbose) return; PrintF("Memory allocator, used: %8" V8_PTR_PREFIX "d" @@ -368,7 +360,6 @@ void Heap::PrintShortHeapStatistics() { lo_space_->Size(), lo_space_->Available()); } -#endif // TODO(1238405): Combine the infrastructure for --heap-stats and @@ -376,20 +367,17 @@ void Heap::PrintShortHeapStatistics() { void Heap::ReportStatisticsAfterGC() { // Similar to the before GC, we use some complicated logic to ensure that // NewSpace statistics are logged exactly once when --log-gc is turned on. -#if defined(DEBUG) && defined(ENABLE_LOGGING_AND_PROFILING) +#if defined(DEBUG) if (FLAG_heap_stats) { new_space_.CollectStatistics(); ReportHeapStatistics("After GC"); } else if (FLAG_log_gc) { new_space_.ReportStatistics(); } -#elif defined(DEBUG) - if (FLAG_heap_stats) ReportHeapStatistics("After GC"); -#elif defined(ENABLE_LOGGING_AND_PROFILING) +#else if (FLAG_log_gc) new_space_.ReportStatistics(); -#endif +#endif // DEBUG } -#endif // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) void Heap::GarbageCollectionPrologue() { @@ -406,11 +394,11 @@ void Heap::GarbageCollectionPrologue() { } if (FLAG_gc_verbose) Print(); -#endif +#endif // DEBUG -#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) +#if defined(DEBUG) ReportStatisticsBeforeGC(); -#endif +#endif // DEBUG LiveObjectList::GCPrologue(); } @@ -447,12 +435,10 @@ void Heap::GarbageCollectionEpilogue() { symbol_table()->Capacity()); isolate_->counters()->number_of_symbols()->Set( symbol_table()->NumberOfElements()); -#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) +#if defined(DEBUG) ReportStatisticsAfterGC(); -#endif -#ifdef ENABLE_DEBUGGER_SUPPORT +#endif // DEBUG isolate_->debug()->AfterGarbageCollection(); -#endif } @@ -1335,15 +1321,12 @@ class ScavengingVisitor : public StaticVisitorBase { enum ObjectContents { DATA_OBJECT, POINTER_OBJECT }; enum SizeRestriction { SMALL, UNKNOWN_SIZE }; -#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) static void RecordCopiedObject(Heap* heap, HeapObject* obj) { bool should_record = false; #ifdef DEBUG should_record = FLAG_heap_stats; #endif -#ifdef ENABLE_LOGGING_AND_PROFILING should_record = should_record || FLAG_log_gc; -#endif if (should_record) { if (heap->new_space()->Contains(obj)) { heap->new_space()->RecordAllocation(obj); @@ -1352,7 +1335,6 @@ class ScavengingVisitor : public StaticVisitorBase { } } } -#endif // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) // Helper function used by CopyObject to copy a source object to an // allocated target object and update the forwarding pointer in the source @@ -1368,12 +1350,9 @@ class ScavengingVisitor : public StaticVisitorBase { source->set_map_word(MapWord::FromForwardingAddress(target)); if (logging_and_profiling_mode == LOGGING_AND_PROFILING_ENABLED) { -#if 
defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) // Update NewSpace stats if necessary. RecordCopiedObject(heap, target); -#endif HEAP_PROFILE(heap, ObjectMoveEvent(source->address(), target->address())); -#if defined(ENABLE_LOGGING_AND_PROFILING) Isolate* isolate = heap->isolate(); if (isolate->logger()->is_logging() || CpuProfiler::is_profiling(isolate)) { @@ -1382,7 +1361,6 @@ class ScavengingVisitor : public StaticVisitorBase { source->address(), target->address())); } } -#endif } return target; @@ -1558,7 +1536,6 @@ static void InitializeScavengingVisitorsTables() { void Heap::SwitchScavengingVisitorsTableIfProfilingWasEnabled() { -#ifdef ENABLE_LOGGING_AND_PROFILING if (scavenging_visitors_table_mode_ == LOGGING_AND_PROFILING_ENABLED) { // Table was already updated by some isolate. return; @@ -1584,7 +1561,6 @@ void Heap::SwitchScavengingVisitorsTableIfProfilingWasEnabled() { Release_Store(&scavenging_visitors_table_mode_, LOGGING_AND_PROFILING_ENABLED); } -#endif } @@ -5213,28 +5189,6 @@ void Heap::Shrink() { } -#ifdef ENABLE_HEAP_PROTECTION - -void Heap::Protect() { - if (HasBeenSetup()) { - AllSpaces spaces; - for (Space* space = spaces.next(); space != NULL; space = spaces.next()) - space->Protect(); - } -} - - -void Heap::Unprotect() { - if (HasBeenSetup()) { - AllSpaces spaces; - for (Space* space = spaces.next(); space != NULL; space = spaces.next()) - space->Unprotect(); - } -} - -#endif - - void Heap::AddGCPrologueCallback(GCPrologueCallback callback, GCType gc_type) { ASSERT(callback != NULL); GCPrologueCallbackPair pair(callback, gc_type); @@ -5930,9 +5884,7 @@ GCTracer::~GCTracer() { PrintF("\n"); } -#if defined(ENABLE_LOGGING_AND_PROFILING) heap_->PrintShortHeapStatistics(); -#endif } diff --git a/deps/v8/src/heap.h b/deps/v8/src/heap.h index 5aba05d9e7..d90a681d43 100644 --- a/deps/v8/src/heap.h +++ b/deps/v8/src/heap.h @@ -409,12 +409,6 @@ class Heap { // Uncommit unused semi space. bool UncommitFromSpace() { return new_space_.UncommitFromSpace(); } -#ifdef ENABLE_HEAP_PROTECTION - // Protect/unprotect the heap by marking all spaces read-only/writable. - void Protect(); - void Unprotect(); -#endif - // Allocates and initializes a new JavaScript object based on a // constructor. // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation @@ -1052,10 +1046,8 @@ class Heap { void ZapFromSpace(); #endif -#if defined(ENABLE_LOGGING_AND_PROFILING) // Print short heap statistics. void PrintShortHeapStatistics(); -#endif // Makes a new symbol object // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation @@ -1514,11 +1506,9 @@ class Heap { // around a GC). inline void CompletelyClearInstanceofCache(); -#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) // Record statistics before and after garbage collection. void ReportStatisticsBeforeGC(); void ReportStatisticsAfterGC(); -#endif // Slow part of scavenge object. 
static void ScavengeObjectSlow(HeapObject** p, HeapObject* object); diff --git a/deps/v8/src/hydrogen-instructions.cc b/deps/v8/src/hydrogen-instructions.cc index e28b9efd8f..50f2f6df6c 100644 --- a/deps/v8/src/hydrogen-instructions.cc +++ b/deps/v8/src/hydrogen-instructions.cc @@ -784,6 +784,21 @@ void HChange::PrintDataTo(StringStream* stream) { } +HValue* HCheckInstanceType::Canonicalize() { + if (check_ == IS_STRING && + !value()->type().IsUninitialized() && + value()->type().IsString()) { + return NULL; + } + if (check_ == IS_SYMBOL && + value()->IsConstant() && + HConstant::cast(value())->handle()->IsSymbol()) { + return NULL; + } + return this; +} + + void HCheckInstanceType::GetCheckInterval(InstanceType* first, InstanceType* last) { ASSERT(is_interval_check()); diff --git a/deps/v8/src/hydrogen-instructions.h b/deps/v8/src/hydrogen-instructions.h index cc32b93831..401c2e4a03 100644 --- a/deps/v8/src/hydrogen-instructions.h +++ b/deps/v8/src/hydrogen-instructions.h @@ -2003,14 +2003,7 @@ class HCheckInstanceType: public HUnaryOperation { virtual void Verify(); #endif - virtual HValue* Canonicalize() { - if (!value()->type().IsUninitialized() && - value()->type().IsString() && - check_ == IS_STRING) { - return NULL; - } - return this; - } + virtual HValue* Canonicalize(); bool is_interval_check() const { return check_ <= LAST_INTERVAL_CHECK; } void GetCheckInterval(InstanceType* first, InstanceType* last); @@ -3362,8 +3355,9 @@ class HLoadContextSlot: public HUnaryOperation { static inline bool StoringValueNeedsWriteBarrier(HValue* value) { - return !value->type().IsSmi() && - !(value->IsConstant() && HConstant::cast(value)->InOldSpace()); + return !value->type().IsBoolean() + && !value->type().IsSmi() + && !(value->IsConstant() && HConstant::cast(value)->InOldSpace()); } diff --git a/deps/v8/src/ia32/code-stubs-ia32.cc b/deps/v8/src/ia32/code-stubs-ia32.cc index e3eb122bc6..48bd8b1a57 100644 --- a/deps/v8/src/ia32/code-stubs-ia32.cc +++ b/deps/v8/src/ia32/code-stubs-ia32.cc @@ -511,25 +511,17 @@ static void IntegerConvert(MacroAssembler* masm, } -const char* UnaryOpStub::GetName() { - if (name_ != NULL) return name_; - const int kMaxNameLength = 100; - name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray( - kMaxNameLength); - if (name_ == NULL) return "OOM"; +void UnaryOpStub::PrintName(StringStream* stream) { const char* op_name = Token::Name(op_); const char* overwrite_name = NULL; // Make g++ happy. 
switch (mode_) { case UNARY_NO_OVERWRITE: overwrite_name = "Alloc"; break; case UNARY_OVERWRITE: overwrite_name = "Overwrite"; break; } - - OS::SNPrintF(Vector<char>(name_, kMaxNameLength), - "UnaryOpStub_%s_%s_%s", - op_name, - overwrite_name, - UnaryOpIC::GetName(operand_type_)); - return name_; + stream->Add("UnaryOpStub_%s_%s_%s", + op_name, + overwrite_name, + UnaryOpIC::GetName(operand_type_)); } @@ -914,12 +906,7 @@ void BinaryOpStub::Generate(MacroAssembler* masm) { } -const char* BinaryOpStub::GetName() { - if (name_ != NULL) return name_; - const int kMaxNameLength = 100; - name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray( - kMaxNameLength); - if (name_ == NULL) return "OOM"; +void BinaryOpStub::PrintName(StringStream* stream) { const char* op_name = Token::Name(op_); const char* overwrite_name; switch (mode_) { @@ -928,13 +915,10 @@ const char* BinaryOpStub::GetName() { case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break; default: overwrite_name = "UnknownOverwrite"; break; } - - OS::SNPrintF(Vector<char>(name_, kMaxNameLength), - "BinaryOpStub_%s_%s_%s", - op_name, - overwrite_name, - BinaryOpIC::GetName(operands_type_)); - return name_; + stream->Add("BinaryOpStub_%s_%s_%s", + op_name, + overwrite_name, + BinaryOpIC::GetName(operands_type_)); } @@ -4380,9 +4364,7 @@ void CEntryStub::Generate(MacroAssembler* masm) { void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { Label invoke, exit; -#ifdef ENABLE_LOGGING_AND_PROFILING Label not_outermost_js, not_outermost_js_2; -#endif // Setup frame. __ push(ebp); @@ -4401,7 +4383,6 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { ExternalReference c_entry_fp(Isolate::k_c_entry_fp_address, masm->isolate()); __ push(Operand::StaticVariable(c_entry_fp)); -#ifdef ENABLE_LOGGING_AND_PROFILING // If this is the outermost JS call, set js_entry_sp value. ExternalReference js_entry_sp(Isolate::k_js_entry_sp_address, masm->isolate()); @@ -4414,7 +4395,6 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { __ bind(¬_outermost_js); __ push(Immediate(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME))); __ bind(&cont); -#endif // Call a faked try-block that does the invoke. __ call(&invoke); @@ -4462,7 +4442,6 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { __ PopTryHandler(); __ bind(&exit); -#ifdef ENABLE_LOGGING_AND_PROFILING // Check if the current stack frame is marked as the outermost JS frame. __ pop(ebx); __ cmp(Operand(ebx), @@ -4470,7 +4449,6 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { __ j(not_equal, ¬_outermost_js_2); __ mov(Operand::StaticVariable(js_entry_sp), Immediate(0)); __ bind(¬_outermost_js_2); -#endif // Restore the top frame descriptor from the stack. __ pop(Operand::StaticVariable(ExternalReference( @@ -4732,15 +4710,8 @@ int CompareStub::MinorKey() { // Unfortunately you have to run without snapshots to see most of these // names in the profile since most compare stubs end up in the snapshot. 
-const char* CompareStub::GetName() { +void CompareStub::PrintName(StringStream* stream) { ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg)); - - if (name_ != NULL) return name_; - const int kMaxNameLength = 100; - name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray( - kMaxNameLength); - if (name_ == NULL) return "OOM"; - const char* cc_name; switch (cc_) { case less: cc_name = "LT"; break; @@ -4751,35 +4722,12 @@ const char* CompareStub::GetName() { case not_equal: cc_name = "NE"; break; default: cc_name = "UnknownCondition"; break; } - - const char* strict_name = ""; - if (strict_ && (cc_ == equal || cc_ == not_equal)) { - strict_name = "_STRICT"; - } - - const char* never_nan_nan_name = ""; - if (never_nan_nan_ && (cc_ == equal || cc_ == not_equal)) { - never_nan_nan_name = "_NO_NAN"; - } - - const char* include_number_compare_name = ""; - if (!include_number_compare_) { - include_number_compare_name = "_NO_NUMBER"; - } - - const char* include_smi_compare_name = ""; - if (!include_smi_compare_) { - include_smi_compare_name = "_NO_SMI"; - } - - OS::SNPrintF(Vector<char>(name_, kMaxNameLength), - "CompareStub_%s%s%s%s%s", - cc_name, - strict_name, - never_nan_nan_name, - include_number_compare_name, - include_smi_compare_name); - return name_; + bool is_equality = cc_ == equal || cc_ == not_equal; + stream->Add("CompareStub_%s", cc_name); + if (strict_ && is_equality) stream->Add("_STRICT"); + if (never_nan_nan_ && is_equality) stream->Add("_NO_NAN"); + if (!include_number_compare_) stream->Add("_NO_NUMBER"); + if (!include_smi_compare_) stream->Add("_NO_SMI"); } diff --git a/deps/v8/src/ia32/code-stubs-ia32.h b/deps/v8/src/ia32/code-stubs-ia32.h index d02aa01d7b..fa255da1fd 100644 --- a/deps/v8/src/ia32/code-stubs-ia32.h +++ b/deps/v8/src/ia32/code-stubs-ia32.h @@ -67,8 +67,7 @@ class UnaryOpStub: public CodeStub { UnaryOpIC::TypeInfo operand_type = UnaryOpIC::UNINITIALIZED) : op_(op), mode_(mode), - operand_type_(operand_type), - name_(NULL) { + operand_type_(operand_type) { } private: @@ -78,19 +77,7 @@ class UnaryOpStub: public CodeStub { // Operand type information determined at runtime. 
UnaryOpIC::TypeInfo operand_type_; - char* name_; - - const char* GetName(); - -#ifdef DEBUG - void Print() { - PrintF("UnaryOpStub %d (op %s), (mode %d, runtime_type_info %s)\n", - MinorKey(), - Token::String(op_), - static_cast<int>(mode_), - UnaryOpIC::GetName(operand_type_)); - } -#endif + virtual void PrintName(StringStream* stream); class ModeBits: public BitField<UnaryOverwriteMode, 0, 1> {}; class OpBits: public BitField<Token::Value, 1, 7> {}; @@ -153,8 +140,7 @@ class BinaryOpStub: public CodeStub { : op_(op), mode_(mode), operands_type_(BinaryOpIC::UNINITIALIZED), - result_type_(BinaryOpIC::UNINITIALIZED), - name_(NULL) { + result_type_(BinaryOpIC::UNINITIALIZED) { use_sse3_ = CpuFeatures::IsSupported(SSE3); ASSERT(OpBits::is_valid(Token::NUM_TOKENS)); } @@ -167,8 +153,7 @@ class BinaryOpStub: public CodeStub { mode_(ModeBits::decode(key)), use_sse3_(SSE3Bits::decode(key)), operands_type_(operands_type), - result_type_(result_type), - name_(NULL) { } + result_type_(result_type) { } private: enum SmiCodeGenerateHeapNumberResults { @@ -184,20 +169,7 @@ class BinaryOpStub: public CodeStub { BinaryOpIC::TypeInfo operands_type_; BinaryOpIC::TypeInfo result_type_; - char* name_; - - const char* GetName(); - -#ifdef DEBUG - void Print() { - PrintF("BinaryOpStub %d (op %s), " - "(mode %d, runtime_type_info %s)\n", - MinorKey(), - Token::String(op_), - static_cast<int>(mode_), - BinaryOpIC::GetName(operands_type_)); - } -#endif + virtual void PrintName(StringStream* stream); // Minor key encoding in 16 bits RRRTTTSOOOOOOOMM. class ModeBits: public BitField<OverwriteMode, 0, 2> {}; @@ -415,14 +387,6 @@ class NumberToStringStub: public CodeStub { int MinorKey() { return 0; } void Generate(MacroAssembler* masm); - - const char* GetName() { return "NumberToStringStub"; } - -#ifdef DEBUG - void Print() { - PrintF("NumberToStringStub\n"); - } -#endif }; @@ -466,13 +430,6 @@ class StringDictionaryLookupStub: public CodeStub { StringDictionary::kHeaderSize + StringDictionary::kElementsStartIndex * kPointerSize; - -#ifdef DEBUG - void Print() { - PrintF("StringDictionaryLookupStub\n"); - } -#endif - Major MajorKey() { return StringDictionaryNegativeLookup; } int MinorKey() { diff --git a/deps/v8/src/ia32/codegen-ia32.h b/deps/v8/src/ia32/codegen-ia32.h index 8f090b124e..c85fa83e9e 100644 --- a/deps/v8/src/ia32/codegen-ia32.h +++ b/deps/v8/src/ia32/codegen-ia32.h @@ -53,9 +53,7 @@ class CodeGenerator { // Print the code after compiling it. static void PrintCode(Handle<Code> code, CompilationInfo* info); -#ifdef ENABLE_LOGGING_AND_PROFILING static bool ShouldGenerateLog(Expression* type); -#endif static bool RecordPositions(MacroAssembler* masm, int pos, diff --git a/deps/v8/src/ia32/full-codegen-ia32.cc b/deps/v8/src/ia32/full-codegen-ia32.cc index 75cc4b8608..f9f63a70ed 100644 --- a/deps/v8/src/ia32/full-codegen-ia32.cc +++ b/deps/v8/src/ia32/full-codegen-ia32.cc @@ -744,7 +744,7 @@ void FullCodeGenerator::EmitDeclaration(Variable* variable, // IDs for bailouts from optimized code. 
ASSERT(prop->obj()->AsVariableProxy() != NULL); { AccumulatorValueContext for_object(this); - EmitVariableLoad(prop->obj()->AsVariableProxy()->var()); + EmitVariableLoad(prop->obj()->AsVariableProxy()); } __ push(eax); @@ -1064,7 +1064,7 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info, void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) { Comment cmnt(masm_, "[ VariableProxy"); - EmitVariableLoad(expr->var()); + EmitVariableLoad(expr); } @@ -1214,7 +1214,11 @@ void FullCodeGenerator::EmitDynamicLoadFromSlotFastCase( } -void FullCodeGenerator::EmitVariableLoad(Variable* var) { +void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) { + // Record position before possible IC call. + SetSourcePosition(proxy->position()); + Variable* var = proxy->var(); + // Three cases: non-this global variables, lookup slots, and all other // types of slots. Slot* slot = var->AsSlot(); @@ -1540,7 +1544,7 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) { { AccumulatorValueContext context(this); switch (assign_type) { case VARIABLE: - EmitVariableLoad(expr->target()->AsVariableProxy()->var()); + EmitVariableLoad(expr->target()->AsVariableProxy()); PrepareForBailout(expr->target(), TOS_REG); break; case NAMED_PROPERTY: @@ -1769,7 +1773,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) { ASSERT(prop->obj()->AsVariableProxy() != NULL); ASSERT(prop->key()->AsLiteral() != NULL); { AccumulatorValueContext for_object(this); - EmitVariableLoad(prop->obj()->AsVariableProxy()->var()); + EmitVariableLoad(prop->obj()->AsVariableProxy()); } __ mov(edx, eax); __ SafeSet(ecx, Immediate(prop->key()->AsLiteral()->handle())); @@ -2701,13 +2705,11 @@ void FullCodeGenerator::EmitLog(ZoneList<Expression*>* args) { // with '%2s' (see Logger::LogRuntime for all the formats). // 2 (array): Arguments to the format string. ASSERT_EQ(args->length(), 3); -#ifdef ENABLE_LOGGING_AND_PROFILING if (CodeGenerator::ShouldGenerateLog(args->at(0))) { VisitForStackValue(args->at(1)); VisitForStackValue(args->at(2)); __ CallRuntime(Runtime::kLog, 2); } -#endif // Finally, we're expected to leave a value on the top of the stack. __ mov(eax, isolate()->factory()->undefined_value()); context()->Plug(eax); @@ -3768,7 +3770,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) { if (assign_type == VARIABLE) { ASSERT(expr->expression()->AsVariableProxy()->var() != NULL); AccumulatorValueContext context(this); - EmitVariableLoad(expr->expression()->AsVariableProxy()->var()); + EmitVariableLoad(expr->expression()->AsVariableProxy()); } else { // Reserve space for result of postfix operation. if (expr->is_postfix() && !context()->IsEffect()) { diff --git a/deps/v8/src/ia32/ic-ia32.cc b/deps/v8/src/ia32/ic-ia32.cc index be5910a124..5f143b104f 100644 --- a/deps/v8/src/ia32/ic-ia32.cc +++ b/deps/v8/src/ia32/ic-ia32.cc @@ -216,105 +216,6 @@ static void GenerateDictionaryStore(MacroAssembler* masm, } -static void GenerateNumberDictionaryLoad(MacroAssembler* masm, - Label* miss, - Register elements, - Register key, - Register r0, - Register r1, - Register r2, - Register result) { - // Register use: - // - // elements - holds the slow-case elements of the receiver and is unchanged. - // - // key - holds the smi key on entry and is unchanged. - // - // Scratch registers: - // - // r0 - holds the untagged key on entry and holds the hash once computed. 
- // - // r1 - used to hold the capacity mask of the dictionary - // - // r2 - used for the index into the dictionary. - // - // result - holds the result on exit if the load succeeds and we fall through. - - Label done; - - // Compute the hash code from the untagged key. This must be kept in sync - // with ComputeIntegerHash in utils.h. - // - // hash = ~hash + (hash << 15); - __ mov(r1, r0); - __ not_(r0); - __ shl(r1, 15); - __ add(r0, Operand(r1)); - // hash = hash ^ (hash >> 12); - __ mov(r1, r0); - __ shr(r1, 12); - __ xor_(r0, Operand(r1)); - // hash = hash + (hash << 2); - __ lea(r0, Operand(r0, r0, times_4, 0)); - // hash = hash ^ (hash >> 4); - __ mov(r1, r0); - __ shr(r1, 4); - __ xor_(r0, Operand(r1)); - // hash = hash * 2057; - __ imul(r0, r0, 2057); - // hash = hash ^ (hash >> 16); - __ mov(r1, r0); - __ shr(r1, 16); - __ xor_(r0, Operand(r1)); - - // Compute capacity mask. - __ mov(r1, FieldOperand(elements, NumberDictionary::kCapacityOffset)); - __ shr(r1, kSmiTagSize); // convert smi to int - __ dec(r1); - - // Generate an unrolled loop that performs a few probes before giving up. - const int kProbes = 4; - for (int i = 0; i < kProbes; i++) { - // Use r2 for index calculations and keep the hash intact in r0. - __ mov(r2, r0); - // Compute the masked index: (hash + i + i * i) & mask. - if (i > 0) { - __ add(Operand(r2), Immediate(NumberDictionary::GetProbeOffset(i))); - } - __ and_(r2, Operand(r1)); - - // Scale the index by multiplying by the entry size. - ASSERT(NumberDictionary::kEntrySize == 3); - __ lea(r2, Operand(r2, r2, times_2, 0)); // r2 = r2 * 3 - - // Check if the key matches. - __ cmp(key, FieldOperand(elements, - r2, - times_pointer_size, - NumberDictionary::kElementsStartOffset)); - if (i != (kProbes - 1)) { - __ j(equal, &done); - } else { - __ j(not_equal, miss); - } - } - - __ bind(&done); - // Check that the value is a normal property. - const int kDetailsOffset = - NumberDictionary::kElementsStartOffset + 2 * kPointerSize; - ASSERT_EQ(NORMAL, 0); - __ test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset), - Immediate(PropertyDetails::TypeField::mask() << kSmiTagSize)); - __ j(not_zero, miss); - - // Get the value at the masked, scaled index. - const int kValueOffset = - NumberDictionary::kElementsStartOffset + kPointerSize; - __ mov(result, FieldOperand(elements, r2, times_pointer_size, kValueOffset)); -} - - void LoadIC::GenerateArrayLength(MacroAssembler* masm) { // ----------- S t a t e ------------- // -- eax : receiver @@ -591,14 +492,13 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { // Push receiver on the stack to free up a register for the dictionary // probing. __ push(edx); - GenerateNumberDictionaryLoad(masm, - &slow_pop_receiver, - ecx, - eax, - ebx, - edx, - edi, - eax); + __ LoadFromNumberDictionary(&slow_pop_receiver, + ecx, + eax, + ebx, + edx, + edi, + eax); // Pop receiver before returning. __ pop(edx); __ ret(0); @@ -1200,8 +1100,8 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) { __ SmiUntag(ebx); // ebx: untagged index // Receiver in edx will be clobbered; reload it on miss.
- GenerateNumberDictionaryLoad( - masm, &slow_reload_receiver, eax, ecx, ebx, edx, edi, edi); + __ LoadFromNumberDictionary( + &slow_reload_receiver, eax, ecx, ebx, edx, edi, edi); __ IncrementCounter(counters->keyed_call_generic_smi_dict(), 1); __ jmp(&do_call); diff --git a/deps/v8/src/ia32/lithium-codegen-ia32.cc b/deps/v8/src/ia32/lithium-codegen-ia32.cc index 2f1b88e789..6293718f62 100644 --- a/deps/v8/src/ia32/lithium-codegen-ia32.cc +++ b/deps/v8/src/ia32/lithium-codegen-ia32.cc @@ -1345,6 +1345,7 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) { BinaryOpStub stub(instr->op(), NO_OVERWRITE); CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); + __ nop(); // Signals no inlined code. } diff --git a/deps/v8/src/ia32/lithium-gap-resolver-ia32.cc b/deps/v8/src/ia32/lithium-gap-resolver-ia32.cc index 9d91c61840..fcf1f91378 100644 --- a/deps/v8/src/ia32/lithium-gap-resolver-ia32.cc +++ b/deps/v8/src/ia32/lithium-gap-resolver-ia32.cc @@ -305,8 +305,13 @@ void LGapResolver::EmitMove(int index) { } else if (source->IsConstantOperand()) { ASSERT(destination->IsRegister() || destination->IsStackSlot()); Immediate src = cgen_->ToImmediate(source); - Operand dst = cgen_->ToOperand(destination); - __ mov(dst, src); + if (destination->IsRegister()) { + Register dst = cgen_->ToRegister(destination); + __ Set(dst, src); + } else { + Operand dst = cgen_->ToOperand(destination); + __ Set(dst, src); + } } else if (source->IsDoubleRegister()) { XMMRegister src = cgen_->ToDoubleRegister(source); diff --git a/deps/v8/src/ia32/macro-assembler-ia32.cc b/deps/v8/src/ia32/macro-assembler-ia32.cc index 020acded7d..136b24c981 100644 --- a/deps/v8/src/ia32/macro-assembler-ia32.cc +++ b/deps/v8/src/ia32/macro-assembler-ia32.cc @@ -734,6 +734,104 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg, } +void MacroAssembler::LoadFromNumberDictionary(Label* miss, + Register elements, + Register key, + Register r0, + Register r1, + Register r2, + Register result) { + // Register use: + // + // elements - holds the slow-case elements of the receiver and is unchanged. + // + // key - holds the smi key on entry and is unchanged. + // + // Scratch registers: + // + // r0 - holds the untagged key on entry and holds the hash once computed. + // + // r1 - used to hold the capacity mask of the dictionary + // + // r2 - used for the index into the dictionary. + // + // result - holds the result on exit if the load succeeds and we fall through. + + Label done; + + // Compute the hash code from the untagged key. This must be kept in sync + // with ComputeIntegerHash in utils.h. + // + // hash = ~hash + (hash << 15); + mov(r1, r0); + not_(r0); + shl(r1, 15); + add(r0, Operand(r1)); + // hash = hash ^ (hash >> 12); + mov(r1, r0); + shr(r1, 12); + xor_(r0, Operand(r1)); + // hash = hash + (hash << 2); + lea(r0, Operand(r0, r0, times_4, 0)); + // hash = hash ^ (hash >> 4); + mov(r1, r0); + shr(r1, 4); + xor_(r0, Operand(r1)); + // hash = hash * 2057; + imul(r0, r0, 2057); + // hash = hash ^ (hash >> 16); + mov(r1, r0); + shr(r1, 16); + xor_(r0, Operand(r1)); + + // Compute capacity mask. + mov(r1, FieldOperand(elements, NumberDictionary::kCapacityOffset)); + shr(r1, kSmiTagSize); // convert smi to int + dec(r1); + + // Generate an unrolled loop that performs a few probes before giving up. + const int kProbes = 4; + for (int i = 0; i < kProbes; i++) { + // Use r2 for index calculations and keep the hash intact in r0. + mov(r2, r0); + // Compute the masked index: (hash + i + i * i) & mask. 
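// For reference, a C++ sketch of the integer hash this assembly mirrors,
// reconstructed from the step comments above (the exact ComputeIntegerHash
// declaration lives in utils.h and is assumed, not shown in this diff):
//
//   static uint32_t ComputeIntegerHash(uint32_t key) {
//     uint32_t hash = key;
//     hash = ~hash + (hash << 15);  // the mov/not_/shl/add sequence above
//     hash = hash ^ (hash >> 12);
//     hash = hash + (hash << 2);    // the lea with times_4
//     hash = hash ^ (hash >> 4);
//     hash = hash * 2057;           // the imul
//     hash = hash ^ (hash >> 16);
//     return hash;
//   }
//
// The unrolled loop then probes entry (hash + i + i * i) & mask for
// i = 0..kProbes-1, jumping to the miss label if the last probe mismatches.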
+ if (i > 0) { + add(Operand(r2), Immediate(NumberDictionary::GetProbeOffset(i))); + } + and_(r2, Operand(r1)); + + // Scale the index by multiplying by the entry size. + ASSERT(NumberDictionary::kEntrySize == 3); + lea(r2, Operand(r2, r2, times_2, 0)); // r2 = r2 * 3 + + // Check if the key matches. + cmp(key, FieldOperand(elements, + r2, + times_pointer_size, + NumberDictionary::kElementsStartOffset)); + if (i != (kProbes - 1)) { + j(equal, &done); + } else { + j(not_equal, miss); + } + } + + bind(&done); + // Check that the value is a normal property. + const int kDetailsOffset = + NumberDictionary::kElementsStartOffset + 2 * kPointerSize; + ASSERT_EQ(NORMAL, 0); + test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset), + Immediate(PropertyDetails::TypeField::mask() << kSmiTagSize)); + j(not_zero, miss); + + // Get the value at the masked, scaled index. + const int kValueOffset = + NumberDictionary::kElementsStartOffset + kPointerSize; + mov(result, FieldOperand(elements, r2, times_pointer_size, kValueOffset)); +} + + void MacroAssembler::LoadAllocationTopHelper(Register result, Register scratch, AllocationFlags flags) { diff --git a/deps/v8/src/ia32/macro-assembler-ia32.h b/deps/v8/src/ia32/macro-assembler-ia32.h index 837c500e9a..dac22731a9 100644 --- a/deps/v8/src/ia32/macro-assembler-ia32.h +++ b/deps/v8/src/ia32/macro-assembler-ia32.h @@ -352,6 +352,15 @@ class MacroAssembler: public Assembler { Label* miss); + void LoadFromNumberDictionary(Label* miss, + Register elements, + Register key, + Register r0, + Register r1, + Register r2, + Register result); + + // --------------------------------------------------------------------------- // Allocation support diff --git a/deps/v8/src/ia32/regexp-macro-assembler-ia32.h b/deps/v8/src/ia32/regexp-macro-assembler-ia32.h index 21c86d050a..d504470280 100644 --- a/deps/v8/src/ia32/regexp-macro-assembler-ia32.h +++ b/deps/v8/src/ia32/regexp-macro-assembler-ia32.h @@ -28,6 +28,9 @@ #ifndef V8_IA32_REGEXP_MACRO_ASSEMBLER_IA32_H_ #define V8_IA32_REGEXP_MACRO_ASSEMBLER_IA32_H_ +#include "ia32/assembler-ia32.h" +#include "ia32/assembler-ia32-inl.h" + namespace v8 { namespace internal { diff --git a/deps/v8/src/ia32/stub-cache-ia32.cc b/deps/v8/src/ia32/stub-cache-ia32.cc index e53cc0839b..2660850889 100644 --- a/deps/v8/src/ia32/stub-cache-ia32.cc +++ b/deps/v8/src/ia32/stub-cache-ia32.cc @@ -2679,7 +2679,10 @@ MaybeObject* KeyedStoreStubCompiler::CompileStoreElement(Map* receiver_map) { // -- esp[0] : return address // ----------------------------------- Code* stub; - MaybeObject* maybe_stub = ComputeSharedKeyedStoreElementStub(receiver_map); + JSObject::ElementsKind elements_kind = receiver_map->elements_kind(); + bool is_jsarray = receiver_map->instance_type() == JS_ARRAY_TYPE; + MaybeObject* maybe_stub = + KeyedStoreElementStub(is_jsarray, elements_kind).TryGetCode(); if (!maybe_stub->To(&stub)) return maybe_stub; __ DispatchMap(edx, Handle<Map>(receiver_map), @@ -3137,7 +3140,8 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadElement(Map* receiver_map) { // -- esp[0] : return address // ----------------------------------- Code* stub; - MaybeObject* maybe_stub = ComputeSharedKeyedLoadElementStub(receiver_map); + JSObject::ElementsKind elements_kind = receiver_map->elements_kind(); + MaybeObject* maybe_stub = KeyedLoadElementStub(elements_kind).TryGetCode(); if (!maybe_stub->To(&stub)) return maybe_stub; __ DispatchMap(edx, Handle<Map>(receiver_map), @@ -3321,6 +3325,64 @@ MaybeObject*
ConstructStubCompiler::CompileConstructStub(JSFunction* function) { #define __ ACCESS_MASM(masm) +void KeyedLoadStubCompiler::GenerateLoadDictionaryElement( + MacroAssembler* masm) { + // ----------- S t a t e ------------- + // -- eax : key + // -- edx : receiver + // -- esp[0] : return address + // ----------------------------------- + Label slow, miss_force_generic; + + // This stub is meant to be tail-jumped to; the receiver must already + // have been verified by the caller to not be a smi. + __ JumpIfNotSmi(eax, &miss_force_generic); + __ mov(ebx, eax); + __ SmiUntag(ebx); + __ mov(ecx, FieldOperand(edx, JSObject::kElementsOffset)); + + // Push receiver on the stack to free up a register for the dictionary + // probing. + __ push(edx); + __ LoadFromNumberDictionary(&slow, + ecx, + eax, + ebx, + edx, + edi, + eax); + // Pop receiver before returning. + __ pop(edx); + __ ret(0); + + __ bind(&slow); + __ pop(edx); + + // ----------- S t a t e ------------- + // -- eax : value + // -- ecx : key + // -- edx : receiver + // -- esp[0] : return address + // ----------------------------------- + + Handle<Code> slow_ic = + masm->isolate()->builtins()->KeyedLoadIC_Slow(); + __ jmp(slow_ic, RelocInfo::CODE_TARGET); + + __ bind(&miss_force_generic); + // ----------- S t a t e ------------- + // -- eax : value + // -- ecx : key + // -- edx : receiver + // -- esp[0] : return address + // ----------------------------------- + + Handle<Code> miss_force_generic_ic = + masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric(); + __ jmp(miss_force_generic_ic, RelocInfo::CODE_TARGET); +} + + void KeyedLoadStubCompiler::GenerateLoadExternalArray( MacroAssembler* masm, JSObject::ElementsKind elements_kind) { @@ -3731,7 +3793,8 @@ void KeyedLoadStubCompiler::GenerateLoadFastElement(MacroAssembler* masm) { void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm, bool is_js_array) { // ----------- S t a t e ------------- - // -- eax : key + // -- eax : value + // -- ecx : key // -- edx : receiver // -- esp[0] : return address // ----------------------------------- diff --git a/deps/v8/src/ic.cc b/deps/v8/src/ic.cc index eb0f12a394..f70f75a7f6 100644 --- a/deps/v8/src/ic.cc +++ b/deps/v8/src/ic.cc @@ -1097,15 +1097,10 @@ void LoadIC::UpdateCaches(LookupResult* lookup, } -MaybeObject* KeyedLoadIC::GetFastElementStubWithoutMapCheck( - bool is_js_array) { - return KeyedLoadFastElementStub().TryGetCode(); -} - - -MaybeObject* KeyedLoadIC::GetExternalArrayStubWithoutMapCheck( +MaybeObject* KeyedLoadIC::GetElementStubWithoutMapCheck( + bool is_js_array, JSObject::ElementsKind elements_kind) { - return KeyedLoadExternalArrayStub(elements_kind).TryGetCode(); + return KeyedLoadElementStub(elements_kind).TryGetCode(); } @@ -1675,7 +1670,7 @@ MaybeObject* KeyedIC::ComputeStub(JSObject* receiver, for (int i = 0; i < target_receiver_maps.length(); ++i) { Map* receiver_map(target_receiver_maps.at(i)); MaybeObject* maybe_cached_stub = ComputeMonomorphicStubWithoutMapCheck( - receiver_map, strict_mode, generic_stub); + receiver_map, strict_mode); Code* cached_stub; if (!maybe_cached_stub->To(&cached_stub)) return maybe_cached_stub; handler_ics.Add(cached_stub); @@ -1694,18 +1689,18 @@ MaybeObject* KeyedIC::ComputeMonomorphicStubWithoutMapCheck( Map* receiver_map, - StrictModeFlag strict_mode, - Code* generic_stub) { + StrictModeFlag strict_mode) { if ((receiver_map->instance_type() & kNotStringTag) == 0) { ASSERT(string_stub() != NULL); return
string_stub(); - } else if (receiver_map->has_external_array_elements()) { - return GetExternalArrayStubWithoutMapCheck(receiver_map->elements_kind()); - } else if (receiver_map->has_fast_elements()) { - bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE; - return GetFastElementStubWithoutMapCheck(is_js_array); } else { - return generic_stub; + ASSERT(receiver_map->has_dictionary_elements() || + receiver_map->has_fast_elements() || + receiver_map->has_fast_double_elements() || + receiver_map->has_external_array_elements()); + bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE; + return GetElementStubWithoutMapCheck(is_js_array, + receiver_map->elements_kind()); } } @@ -1717,6 +1712,7 @@ MaybeObject* KeyedIC::ComputeMonomorphicStub(JSObject* receiver, Code* result = NULL; if (receiver->HasFastElements() || receiver->HasExternalArrayElements() || + receiver->HasFastDoubleElements() || receiver->HasDictionaryElements()) { MaybeObject* maybe_stub = isolate()->stub_cache()->ComputeKeyedLoadOrStoreElement( @@ -1729,15 +1725,10 @@ MaybeObject* KeyedIC::ComputeMonomorphicStub(JSObject* receiver, } -MaybeObject* KeyedStoreIC::GetFastElementStubWithoutMapCheck( - bool is_js_array) { - return KeyedStoreFastElementStub(is_js_array).TryGetCode(); -} - - -MaybeObject* KeyedStoreIC::GetExternalArrayStubWithoutMapCheck( +MaybeObject* KeyedStoreIC::GetElementStubWithoutMapCheck( + bool is_js_array, JSObject::ElementsKind elements_kind) { - return KeyedStoreExternalArrayStub(elements_kind).TryGetCode(); + return KeyedStoreElementStub(is_js_array, elements_kind).TryGetCode(); } diff --git a/deps/v8/src/ic.h b/deps/v8/src/ic.h index 9a663ba6aa..11c2e3af45 100644 --- a/deps/v8/src/ic.h +++ b/deps/v8/src/ic.h @@ -345,10 +345,8 @@ class KeyedIC: public IC { explicit KeyedIC(Isolate* isolate) : IC(NO_EXTRA_FRAME, isolate) {} virtual ~KeyedIC() {} - virtual MaybeObject* GetFastElementStubWithoutMapCheck( - bool is_js_array) = 0; - - virtual MaybeObject* GetExternalArrayStubWithoutMapCheck( + virtual MaybeObject* GetElementStubWithoutMapCheck( + bool is_js_array, JSObject::ElementsKind elements_kind) = 0; protected: @@ -373,8 +371,7 @@ class KeyedIC: public IC { MaybeObject* ComputeMonomorphicStubWithoutMapCheck( Map* receiver_map, - StrictModeFlag strict_mode, - Code* generic_stub); + StrictModeFlag strict_mode); MaybeObject* ComputeMonomorphicStub(JSObject* receiver, bool is_store, @@ -415,10 +412,8 @@ class KeyedLoadIC: public KeyedIC { static const int kSlowCaseBitFieldMask = (1 << Map::kIsAccessCheckNeeded) | (1 << Map::kHasIndexedInterceptor); - virtual MaybeObject* GetFastElementStubWithoutMapCheck( - bool is_js_array); - - virtual MaybeObject* GetExternalArrayStubWithoutMapCheck( + virtual MaybeObject* GetElementStubWithoutMapCheck( + bool is_js_array, JSObject::ElementsKind elements_kind); protected: @@ -568,10 +563,8 @@ class KeyedStoreIC: public KeyedIC { static void GenerateGeneric(MacroAssembler* masm, StrictModeFlag strict_mode); static void GenerateNonStrictArguments(MacroAssembler* masm); - virtual MaybeObject* GetFastElementStubWithoutMapCheck( - bool is_js_array); - - virtual MaybeObject* GetExternalArrayStubWithoutMapCheck( + virtual MaybeObject* GetElementStubWithoutMapCheck( + bool is_js_array, JSObject::ElementsKind elements_kind); protected: diff --git a/deps/v8/src/isolate.cc b/deps/v8/src/isolate.cc index 7423274a1e..8a30e7924f 100644 --- a/deps/v8/src/isolate.cc +++ b/deps/v8/src/isolate.cc @@ -85,13 +85,9 @@ void ThreadLocalTop::InitializeInternal() { #ifdef 
USE_SIMULATOR simulator_ = NULL; #endif -#ifdef ENABLE_LOGGING_AND_PROFILING js_entry_sp_ = NULL; external_callback_ = NULL; -#endif -#ifdef ENABLE_VMSTATE_TRACKING current_vm_state_ = EXTERNAL; -#endif try_catch_handler_address_ = NULL; context_ = NULL; thread_id_ = ThreadId::Invalid(); @@ -1279,11 +1275,9 @@ Handle<Context> Isolate::GetCallingGlobalContext() { char* Isolate::ArchiveThread(char* to) { -#ifdef ENABLE_LOGGING_AND_PROFILING if (RuntimeProfiler::IsEnabled() && current_vm_state() == JS) { RuntimeProfiler::IsolateExitedJS(this); } -#endif memcpy(to, reinterpret_cast<char*>(thread_local_top()), sizeof(ThreadLocalTop)); InitializeThreadLocal(); @@ -1303,12 +1297,10 @@ char* Isolate::RestoreThread(char* from) { thread_local_top()->simulator_ = Simulator::current(this); #endif #endif -#ifdef ENABLE_LOGGING_AND_PROFILING if (RuntimeProfiler::IsEnabled() && current_vm_state() == JS) { RuntimeProfiler::IsolateEnteredJS(this); } ASSERT(context() == NULL || context()->IsContext()); -#endif return from + sizeof(ThreadLocalTop); } @@ -1627,7 +1619,6 @@ bool Isolate::PreInit() { #define C(name) isolate_addresses_[Isolate::k_##name] = \ reinterpret_cast<Address>(name()); ISOLATE_ADDRESS_LIST(C) - ISOLATE_ADDRESS_LIST_PROF(C) #undef C string_tracker_ = new StringTracker(); diff --git a/deps/v8/src/isolate.h b/deps/v8/src/isolate.h index a4af1362a3..f2281aa418 100644 --- a/deps/v8/src/isolate.h +++ b/deps/v8/src/isolate.h @@ -125,14 +125,8 @@ typedef ZoneList<Handle<Object> > ZoneObjectList; C(c_entry_fp_address) \ C(context_address) \ C(pending_exception_address) \ - C(external_caught_exception_address) - -#ifdef ENABLE_LOGGING_AND_PROFILING -#define ISOLATE_ADDRESS_LIST_PROF(C) \ + C(external_caught_exception_address) \ C(js_entry_sp_address) -#else -#define ISOLATE_ADDRESS_LIST_PROF(C) -#endif // Platform-independent, reliable thread identifier. @@ -252,14 +246,9 @@ class ThreadLocalTop BASE_EMBEDDED { #endif #endif // USE_SIMULATOR -#ifdef ENABLE_LOGGING_AND_PROFILING Address js_entry_sp_; // the stack pointer of the bottom js entry frame Address external_callback_; // the external callback we're currently in -#endif - -#ifdef ENABLE_VMSTATE_TRACKING StateTag current_vm_state_; -#endif // Generated code scratch locations. int32_t formal_count_; @@ -313,18 +302,6 @@ class HashMap; #endif -#ifdef ENABLE_LOGGING_AND_PROFILING - -#define ISOLATE_LOGGING_INIT_LIST(V) \ - V(CpuProfiler*, cpu_profiler, NULL) \ - V(HeapProfiler*, heap_profiler, NULL) - -#else - -#define ISOLATE_LOGGING_INIT_LIST(V) - -#endif - #define ISOLATE_INIT_ARRAY_LIST(V) \ /* SerializerDeserializer state. */ \ V(Object*, serialize_partial_snapshot_cache, kPartialSnapshotCacheCapacity) \ @@ -373,8 +350,9 @@ typedef List<HeapObject*, PreallocatedStorage> DebugObjectCache; /* SafeStackFrameIterator activations count. */ \ V(int, safe_stack_iterator_counter, 0) \ V(uint64_t, enabled_cpu_features, 0) \ + V(CpuProfiler*, cpu_profiler, NULL) \ + V(HeapProfiler*, heap_profiler, NULL) \ ISOLATE_PLATFORM_INIT_LIST(V) \ - ISOLATE_LOGGING_INIT_LIST(V) \ ISOLATE_DEBUGGER_INIT_LIST(V) class Isolate { @@ -445,7 +423,6 @@ class Isolate { enum AddressId { #define C(name) k_##name, ISOLATE_ADDRESS_LIST(C) - ISOLATE_ADDRESS_LIST_PROF(C) #undef C k_isolate_address_count }; @@ -620,7 +597,6 @@ class Isolate { } inline Address* handler_address() { return &thread_local_top_.handler_; } -#ifdef ENABLE_LOGGING_AND_PROFILING // Bottom JS entry (see StackTracer::Trace in log.cc). 
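// With the ENABLE_LOGGING_AND_PROFILING guards gone this slot is maintained
// unconditionally: JSEntryStub::GenerateBody records the outermost JS entry
// here and zeroes the slot again on the matching exit (see the
// code-stubs-ia32.cc hunk above), so the sampling profiler can always tell
// where the JS portion of the stack ends.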
static Address js_entry_sp(ThreadLocalTop* thread) { return thread->js_entry_sp_; @@ -628,7 +604,6 @@ class Isolate { inline Address* js_entry_sp_address() { return &thread_local_top_.js_entry_sp_; } -#endif // Generated code scratch locations. void* formal_count_address() { return &thread_local_top_.formal_count_; } @@ -945,16 +920,13 @@ class Isolate { static const int kJSRegexpStaticOffsetsVectorSize = 50; -#ifdef ENABLE_LOGGING_AND_PROFILING Address external_callback() { return thread_local_top_.external_callback_; } void set_external_callback(Address callback) { thread_local_top_.external_callback_ = callback; } -#endif -#ifdef ENABLE_VMSTATE_TRACKING StateTag current_vm_state() { return thread_local_top_.current_vm_state_; } @@ -980,7 +952,6 @@ class Isolate { } thread_local_top_.current_vm_state_ = state; } -#endif void SetData(void* data) { embedder_data_ = data; } void* GetData() { return embedder_data_; } @@ -1356,10 +1327,4 @@ inline void Context::mark_out_of_memory() { } } // namespace v8::internal -// TODO(isolates): Get rid of these -inl.h includes and place them only where -// they're needed. -#include "allocation-inl.h" -#include "zone-inl.h" -#include "frames-inl.h" - #endif // V8_ISOLATE_H_ diff --git a/deps/v8/src/jsregexp.cc b/deps/v8/src/jsregexp.cc index 3b521f648f..45a39ffbc2 100644 --- a/deps/v8/src/jsregexp.cc +++ b/deps/v8/src/jsregexp.cc @@ -491,6 +491,7 @@ RegExpImpl::IrregexpResult RegExpImpl::IrregexpExecOnce( ASSERT(output.length() >= (IrregexpNumberOfCaptures(*irregexp) + 1) * 2); do { bool is_ascii = subject->IsAsciiRepresentation(); + EnsureCompiledIrregexp(regexp, is_ascii); Handle<Code> code(IrregexpNativeCode(*irregexp, is_ascii), isolate); NativeRegExpMacroAssembler::Result res = NativeRegExpMacroAssembler::Match(code, diff --git a/deps/v8/src/jsregexp.h b/deps/v8/src/jsregexp.h index 58958d8513..13f9e2ea06 100644 --- a/deps/v8/src/jsregexp.h +++ b/deps/v8/src/jsregexp.h @@ -29,7 +29,6 @@ #define V8_JSREGEXP_H_ #include "allocation.h" -#include "macro-assembler.h" #include "zone-inl.h" namespace v8 { diff --git a/deps/v8/src/lithium-allocator.cc b/deps/v8/src/lithium-allocator.cc index dcdc5d9b7d..466110678a 100644 --- a/deps/v8/src/lithium-allocator.cc +++ b/deps/v8/src/lithium-allocator.cc @@ -1024,7 +1024,7 @@ void LAllocator::ResolvePhis(HBasicBlock* block) { operand = chunk_->DefineConstantOperand(constant); } else { ASSERT(!op->EmitAtUses()); - LUnallocated* unalloc = new LUnallocated(LUnallocated::NONE); + LUnallocated* unalloc = new LUnallocated(LUnallocated::ANY); unalloc->set_virtual_register(op->id()); operand = unalloc; } diff --git a/deps/v8/src/log-inl.h b/deps/v8/src/log-inl.h index 02238fe921..8aebbc7dde 100644 --- a/deps/v8/src/log-inl.h +++ b/deps/v8/src/log-inl.h @@ -34,8 +34,6 @@ namespace v8 { namespace internal { -#ifdef ENABLE_LOGGING_AND_PROFILING - Logger::LogEventsAndTags Logger::ToNativeByScript(Logger::LogEventsAndTags tag, Script* script) { if ((tag == FUNCTION_TAG || tag == LAZY_COMPILE_TAG || tag == SCRIPT_TAG) @@ -51,8 +49,6 @@ Logger::LogEventsAndTags Logger::ToNativeByScript(Logger::LogEventsAndTags tag, } } -#endif // ENABLE_LOGGING_AND_PROFILING - } } // namespace v8::internal diff --git a/deps/v8/src/log-utils.cc b/deps/v8/src/log-utils.cc index 1bba7cd54e..2d1ce23dc0 100644 --- a/deps/v8/src/log-utils.cc +++ b/deps/v8/src/log-utils.cc @@ -33,101 +33,14 @@ namespace v8 { namespace internal { -#ifdef ENABLE_LOGGING_AND_PROFILING - -LogDynamicBuffer::LogDynamicBuffer( - int block_size, int max_size, const char* seal, 
int seal_size) - : block_size_(block_size), - max_size_(max_size - (max_size % block_size_)), - seal_(seal), - seal_size_(seal_size), - blocks_(max_size_ / block_size_ + 1), - write_pos_(0), block_index_(0), block_write_pos_(0), is_sealed_(false) { - ASSERT(BlocksCount() > 0); - AllocateBlock(0); - for (int i = 1; i < BlocksCount(); ++i) { - blocks_[i] = NULL; - } -} - -LogDynamicBuffer::~LogDynamicBuffer() { - for (int i = 0; i < BlocksCount(); ++i) { - DeleteArray(blocks_[i]); - } -} +const char* Log::kLogToTemporaryFile = "&"; -int LogDynamicBuffer::Read(int from_pos, char* dest_buf, int buf_size) { - if (buf_size == 0) return 0; - int read_pos = from_pos; - int block_read_index = BlockIndex(from_pos); - int block_read_pos = PosInBlock(from_pos); - int dest_buf_pos = 0; - // Read until dest_buf is filled, or write_pos_ encountered. - while (read_pos < write_pos_ && dest_buf_pos < buf_size) { - const int read_size = Min(write_pos_ - read_pos, - Min(buf_size - dest_buf_pos, block_size_ - block_read_pos)); - memcpy(dest_buf + dest_buf_pos, - blocks_[block_read_index] + block_read_pos, read_size); - block_read_pos += read_size; - dest_buf_pos += read_size; - read_pos += read_size; - if (block_read_pos == block_size_) { - block_read_pos = 0; - ++block_read_index; - } - } - return dest_buf_pos; -} - - -int LogDynamicBuffer::Seal() { - WriteInternal(seal_, seal_size_); - is_sealed_ = true; - return 0; -} - - -int LogDynamicBuffer::Write(const char* data, int data_size) { - if (is_sealed_) { - return 0; - } - if ((write_pos_ + data_size) <= (max_size_ - seal_size_)) { - return WriteInternal(data, data_size); - } else { - return Seal(); - } -} - - -int LogDynamicBuffer::WriteInternal(const char* data, int data_size) { - int data_pos = 0; - while (data_pos < data_size) { - const int write_size = - Min(data_size - data_pos, block_size_ - block_write_pos_); - memcpy(blocks_[block_index_] + block_write_pos_, data + data_pos, - write_size); - block_write_pos_ += write_size; - data_pos += write_size; - if (block_write_pos_ == block_size_) { - block_write_pos_ = 0; - AllocateBlock(++block_index_); - } - } - write_pos_ += data_size; - return data_size; -} - -// Must be the same message as in Logger::PauseProfiler. -const char* const Log::kDynamicBufferSeal = "profiler,\"pause\"\n"; - Log::Log(Logger* logger) - : write_to_file_(false), - is_stopped_(false), + : is_stopped_(false), output_handle_(NULL), ll_output_handle_(NULL), - output_buffer_(NULL), mutex_(NULL), message_buffer_(NULL), logger_(logger) { @@ -142,7 +55,6 @@ static void AddIsolateIdIfNeeded(StringStream* stream) { void Log::Initialize() { -#ifdef ENABLE_LOGGING_AND_PROFILING mutex_ = OS::CreateMutex(); message_buffer_ = NewArray<char>(kMessageBufferSize); @@ -166,19 +78,19 @@ void Log::Initialize() { FLAG_prof_auto = false; } - bool start_logging = FLAG_log || FLAG_log_runtime || FLAG_log_api + bool open_log_file = FLAG_log || FLAG_log_runtime || FLAG_log_api || FLAG_log_code || FLAG_log_gc || FLAG_log_handles || FLAG_log_suspect || FLAG_log_regexp || FLAG_log_state_changes || FLAG_ll_prof; - bool open_log_file = start_logging || FLAG_prof_lazy; - // If we're logging anything, we need to open the log file. if (open_log_file) { if (strcmp(FLAG_logfile, "-") == 0) { OpenStdout(); } else if (strcmp(FLAG_logfile, "*") == 0) { - OpenMemoryBuffer(); - } else { + // Does nothing for now. Will be removed. 
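// The "&" destination handled in the next branch (Log::kLogToTemporaryFile)
// exists for tests only: output goes to an unnamed temporary file whose
// still-open FILE* is handed back by Logger::TearDown(). A hypothetical
// test-side use, with flag and macro names as they appear elsewhere in this
// diff:
//
//   i::FLAG_logfile = i::Log::kLogToTemporaryFile;
//   LOGGER->Setup();
//   // ... run the code under test ...
//   FILE* log = LOGGER->TearDown();  // still open; deleted once fclose'd
//   rewind(log);
//   // ... fread() the contents and check the emitted lines ...
//   fclose(log);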
+ } else if (strcmp(FLAG_logfile, kLogToTemporaryFile) == 0) { + OpenTemporaryFile(); + } else { if (strchr(FLAG_logfile, '%') != NULL || !Isolate::Current()->IsDefaultIsolate()) { // If there's a '%' in the log file name we have to expand @@ -222,14 +134,18 @@ void Log::Initialize() { } } } -#endif } void Log::OpenStdout() { ASSERT(!IsEnabled()); output_handle_ = stdout; - write_to_file_ = true; +} + + +void Log::OpenTemporaryFile() { + ASSERT(!IsEnabled()); + output_handle_ = i::OS::OpenTemporaryFile(); } @@ -244,7 +160,6 @@ static const int kLowLevelLogBufferSize = 2 * MB; void Log::OpenFile(const char* name) { ASSERT(!IsEnabled()); output_handle_ = OS::FOpen(name, OS::LogFileOpenMode); - write_to_file_ = true; if (FLAG_ll_prof) { // Open the low-level log file. size_t len = strlen(name); @@ -257,25 +172,18 @@ void Log::OpenFile(const char* name) { } -void Log::OpenMemoryBuffer() { - ASSERT(!IsEnabled()); - output_buffer_ = new LogDynamicBuffer( - kDynamicBufferBlockSize, kMaxDynamicBufferSize, - kDynamicBufferSeal, StrLength(kDynamicBufferSeal)); - write_to_file_ = false; -} - - -void Log::Close() { - if (write_to_file_) { - if (output_handle_ != NULL) fclose(output_handle_); - output_handle_ = NULL; - if (ll_output_handle_ != NULL) fclose(ll_output_handle_); - ll_output_handle_ = NULL; - } else { - delete output_buffer_; - output_buffer_ = NULL; +FILE* Log::Close() { + FILE* result = NULL; + if (output_handle_ != NULL) { + if (strcmp(FLAG_logfile, kLogToTemporaryFile) != 0) { + fclose(output_handle_); + } else { + result = output_handle_; + } } + output_handle_ = NULL; + if (ll_output_handle_ != NULL) fclose(ll_output_handle_); + ll_output_handle_ = NULL; DeleteArray(message_buffer_); message_buffer_ = NULL; @@ -284,27 +192,7 @@ void Log::Close() { mutex_ = NULL; is_stopped_ = false; -} - - -int Log::GetLogLines(int from_pos, char* dest_buf, int max_size) { - if (write_to_file_) return 0; - ASSERT(output_buffer_ != NULL); - ASSERT(from_pos >= 0); - ASSERT(max_size >= 0); - int actual_size = output_buffer_->Read(from_pos, dest_buf, max_size); - ASSERT(actual_size <= max_size); - if (actual_size == 0) return 0; - - // Find previous log line boundary. - char* end_pos = dest_buf + actual_size - 1; - while (end_pos >= dest_buf && *end_pos != '\n') --end_pos; - actual_size = static_cast<int>(end_pos - dest_buf + 1); - // If the assertion below is hit, it means that there was no line end - // found --- something wrong has happened. - ASSERT(actual_size > 0); - ASSERT(actual_size <= max_size); - return actual_size; + return result; } @@ -413,9 +301,7 @@ void LogMessageBuilder::AppendStringPart(const char* str, int len) { void LogMessageBuilder::WriteToLogFile() { ASSERT(pos_ <= Log::kMessageBufferSize); - const int written = log_->write_to_file_ ? - log_->WriteToFile(log_->message_buffer_, pos_) : - log_->WriteToMemory(log_->message_buffer_, pos_); + const int written = log_->WriteToFile(log_->message_buffer_, pos_); if (written != pos_) { log_->stop(); log_->logger_->LogFailure(); @@ -423,6 +309,4 @@ void LogMessageBuilder::WriteToLogFile() { } -#endif // ENABLE_LOGGING_AND_PROFILING - } } // namespace v8::internal diff --git a/deps/v8/src/log-utils.h b/deps/v8/src/log-utils.h index 81bbf779f3..d336d714b9 100644 --- a/deps/v8/src/log-utils.h +++ b/deps/v8/src/log-utils.h @@ -33,69 +33,11 @@ namespace v8 { namespace internal { -#ifdef ENABLE_LOGGING_AND_PROFILING - class Logger; -// A memory buffer that increments its size as you write in it. 
Size -// is incremented with 'block_size' steps, never exceeding 'max_size'. -// During growth, memory contents are never copied. At the end of the -// buffer an amount of memory specified in 'seal_size' is reserved. -// When writing position reaches max_size - seal_size, buffer auto-seals -// itself with 'seal' and allows no further writes. Data pointed by -// 'seal' must be available during entire LogDynamicBuffer lifetime. -// -// An instance of this class is created dynamically by Log. -class LogDynamicBuffer { - public: - LogDynamicBuffer( - int block_size, int max_size, const char* seal, int seal_size); - - ~LogDynamicBuffer(); - - // Reads contents of the buffer starting from 'from_pos'. Upon - // return, 'dest_buf' is filled with the data. Actual amount of data - // filled is returned, it is <= 'buf_size'. - int Read(int from_pos, char* dest_buf, int buf_size); - - // Writes 'data' to the buffer, making it larger if necessary. If - // data is too big to fit in the buffer, it doesn't get written at - // all. In that case, buffer auto-seals itself and stops to accept - // any incoming writes. Returns amount of data written (it is either - // 'data_size', or 0, if 'data' is too big). - int Write(const char* data, int data_size); - - private: - void AllocateBlock(int index) { - blocks_[index] = NewArray<char>(block_size_); - } - - int BlockIndex(int pos) const { return pos / block_size_; } - - int BlocksCount() const { return BlockIndex(max_size_) + 1; } - - int PosInBlock(int pos) const { return pos % block_size_; } - - int Seal(); - - int WriteInternal(const char* data, int data_size); - - const int block_size_; - const int max_size_; - const char* seal_; - const int seal_size_; - ScopedVector<char*> blocks_; - int write_pos_; - int block_index_; - int block_write_pos_; - bool is_sealed_; -}; - - // Functions and data for performing output of log messages. class Log { public: - // Performs process-wide initialization. void Initialize(); @@ -103,18 +45,21 @@ class Log { void stop() { is_stopped_ = true; } // Frees all resources acquired in Initialize and Open... functions. - void Close(); - - // See description in include/v8.h. - int GetLogLines(int from_pos, char* dest_buf, int max_size); + // When a temporary file is used for the log, returns its stream descriptor, + // leaving the file open. + FILE* Close(); // Returns whether logging is enabled. bool IsEnabled() { - return !is_stopped_ && (output_handle_ != NULL || output_buffer_ != NULL); + return !is_stopped_ && output_handle_ != NULL; } // Size of buffer used for formatting log messages. - static const int kMessageBufferSize = v8::V8::kMinimumSizeForLogLinesBuffer; + static const int kMessageBufferSize = 2048; + + // This mode is only used in tests, as temporary files are automatically + // deleted on close and thus can't be accessed afterwards. + static const char* kLogToTemporaryFile; private: explicit Log(Logger* logger); @@ -125,8 +70,8 @@ class Log { // Opens file for logging. void OpenFile(const char* name); - // Opens memory buffer for logging. - void OpenMemoryBuffer(); + // Opens a temporary file for logging. + void OpenTemporaryFile(); // Implementation of writing to a log file. int WriteToFile(const char* msg, int length) { @@ -138,38 +83,16 @@ class Log { return length; } - // Implementation of writing to a memory buffer. - int WriteToMemory(const char* msg, int length) { - ASSERT(output_buffer_ != NULL); - return output_buffer_->Write(msg, length); - } - - bool write_to_file_; - // Whether logging is stopped (e.g. 
due to insufficient resources). bool is_stopped_; - // When logging is active, either output_handle_ or output_buffer_ is used - // to store a pointer to log destination. If logging was opened via OpenStdout - // or OpenFile, then output_handle_ is used. If logging was opened - // via OpenMemoryBuffer, then output_buffer_ is used. - // mutex_ should be acquired before using output_handle_ or output_buffer_. + // When logging is active output_handle_ is used to store a pointer to log + // destination. mutex_ should be acquired before using output_handle_. FILE* output_handle_; // Used when low-level profiling is active. FILE* ll_output_handle_; - LogDynamicBuffer* output_buffer_; - - // Size of dynamic buffer block (and dynamic buffer initial size). - static const int kDynamicBufferBlockSize = 65536; - - // Maximum size of dynamic buffer. - static const int kMaxDynamicBufferSize = 50 * 1024 * 1024; - - // Message to "seal" dynamic buffer with. - static const char* const kDynamicBufferSeal; - // mutex_ is a Mutex used for enforcing exclusive // access to the formatting buffer and the log file or log memory buffer. Mutex* mutex_; @@ -224,8 +147,6 @@ class LogMessageBuilder BASE_EMBEDDED { int pos_; }; -#endif // ENABLE_LOGGING_AND_PROFILING - } } // namespace v8::internal #endif // V8_LOG_UTILS_H_ diff --git a/deps/v8/src/log.cc b/deps/v8/src/log.cc index 004e21a650..04fd22ef5c 100644 --- a/deps/v8/src/log.cc +++ b/deps/v8/src/log.cc @@ -43,8 +43,6 @@ namespace v8 { namespace internal { -#ifdef ENABLE_LOGGING_AND_PROFILING - // // Sliding state window. Updates counters to keep track of the last // window of kBufferSize states. This is useful to track where we @@ -554,71 +552,54 @@ void Logger::ProfilerBeginEvent() { msg.WriteToLogFile(); } -#endif // ENABLE_LOGGING_AND_PROFILING - void Logger::StringEvent(const char* name, const char* value) { -#ifdef ENABLE_LOGGING_AND_PROFILING if (FLAG_log) UncheckedStringEvent(name, value); -#endif } -#ifdef ENABLE_LOGGING_AND_PROFILING void Logger::UncheckedStringEvent(const char* name, const char* value) { if (!log_->IsEnabled()) return; LogMessageBuilder msg(this); msg.Append("%s,\"%s\"\n", name, value); msg.WriteToLogFile(); } -#endif void Logger::IntEvent(const char* name, int value) { -#ifdef ENABLE_LOGGING_AND_PROFILING if (FLAG_log) UncheckedIntEvent(name, value); -#endif } void Logger::IntPtrTEvent(const char* name, intptr_t value) { -#ifdef ENABLE_LOGGING_AND_PROFILING if (FLAG_log) UncheckedIntPtrTEvent(name, value); -#endif } -#ifdef ENABLE_LOGGING_AND_PROFILING void Logger::UncheckedIntEvent(const char* name, int value) { if (!log_->IsEnabled()) return; LogMessageBuilder msg(this); msg.Append("%s,%d\n", name, value); msg.WriteToLogFile(); } -#endif -#ifdef ENABLE_LOGGING_AND_PROFILING void Logger::UncheckedIntPtrTEvent(const char* name, intptr_t value) { if (!log_->IsEnabled()) return; LogMessageBuilder msg(this); msg.Append("%s,%" V8_PTR_PREFIX "d\n", name, value); msg.WriteToLogFile(); } -#endif void Logger::HandleEvent(const char* name, Object** location) { -#ifdef ENABLE_LOGGING_AND_PROFILING if (!log_->IsEnabled() || !FLAG_log_handles) return; LogMessageBuilder msg(this); msg.Append("%s,0x%" V8PRIxPTR "\n", name, location); msg.WriteToLogFile(); -#endif } -#ifdef ENABLE_LOGGING_AND_PROFILING // ApiEvent is private so all the calls come from the Logger class. It is the // caller's responsibility to ensure that log is enabled and that // FLAG_log_api is true. @@ -631,11 +612,9 @@ void Logger::ApiEvent(const char* format, ...) 
{ va_end(ap); msg.WriteToLogFile(); } -#endif void Logger::ApiNamedSecurityCheck(Object* key) { -#ifdef ENABLE_LOGGING_AND_PROFILING if (!log_->IsEnabled() || !FLAG_log_api) return; if (key->IsString()) { SmartPointer<char> str = @@ -646,14 +625,12 @@ void Logger::ApiNamedSecurityCheck(Object* key) { } else { ApiEvent("api,check-security,['no-name']\n"); } -#endif } void Logger::SharedLibraryEvent(const char* library_path, uintptr_t start, uintptr_t end) { -#ifdef ENABLE_LOGGING_AND_PROFILING if (!log_->IsEnabled() || !FLAG_prof) return; LogMessageBuilder msg(this); msg.Append("shared-library,\"%s\",0x%08" V8PRIxPTR ",0x%08" V8PRIxPTR "\n", @@ -661,14 +638,12 @@ void Logger::SharedLibraryEvent(const char* library_path, start, end); msg.WriteToLogFile(); -#endif } void Logger::SharedLibraryEvent(const wchar_t* library_path, uintptr_t start, uintptr_t end) { -#ifdef ENABLE_LOGGING_AND_PROFILING if (!log_->IsEnabled() || !FLAG_prof) return; LogMessageBuilder msg(this); msg.Append("shared-library,\"%ls\",0x%08" V8PRIxPTR ",0x%08" V8PRIxPTR "\n", @@ -676,11 +651,9 @@ void Logger::SharedLibraryEvent(const wchar_t* library_path, start, end); msg.WriteToLogFile(); -#endif } -#ifdef ENABLE_LOGGING_AND_PROFILING void Logger::LogRegExpSource(Handle<JSRegExp> regexp) { // Prints "/" + re.source + "/" + // (re.global?"g":"") + (re.ignorecase?"i":"") + (re.multiline?"m":"") @@ -721,23 +694,19 @@ void Logger::LogRegExpSource(Handle<JSRegExp> regexp) { msg.WriteToLogFile(); } -#endif // ENABLE_LOGGING_AND_PROFILING void Logger::RegExpCompileEvent(Handle<JSRegExp> regexp, bool in_cache) { -#ifdef ENABLE_LOGGING_AND_PROFILING if (!log_->IsEnabled() || !FLAG_log_regexp) return; LogMessageBuilder msg(this); msg.Append("regexp-compile,"); LogRegExpSource(regexp); msg.Append(in_cache ? 
",hit\n" : ",miss\n"); msg.WriteToLogFile(); -#endif } void Logger::LogRuntime(Vector<const char> format, JSArray* args) { -#ifdef ENABLE_LOGGING_AND_PROFILING if (!log_->IsEnabled() || !FLAG_log_runtime) return; HandleScope scope; LogMessageBuilder msg(this); @@ -778,22 +747,18 @@ void Logger::LogRuntime(Vector<const char> format, JSArray* args) { } msg.Append('\n'); msg.WriteToLogFile(); -#endif } void Logger::ApiIndexedSecurityCheck(uint32_t index) { -#ifdef ENABLE_LOGGING_AND_PROFILING if (!log_->IsEnabled() || !FLAG_log_api) return; ApiEvent("api,check-security,%u\n", index); -#endif } void Logger::ApiNamedPropertyAccess(const char* tag, JSObject* holder, Object* name) { -#ifdef ENABLE_LOGGING_AND_PROFILING ASSERT(name->IsString()); if (!log_->IsEnabled() || !FLAG_log_api) return; String* class_name_obj = holder->class_name(); @@ -802,58 +767,47 @@ void Logger::ApiNamedPropertyAccess(const char* tag, SmartPointer<char> property_name = String::cast(name)->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL); ApiEvent("api,%s,\"%s\",\"%s\"\n", tag, *class_name, *property_name); -#endif } void Logger::ApiIndexedPropertyAccess(const char* tag, JSObject* holder, uint32_t index) { -#ifdef ENABLE_LOGGING_AND_PROFILING if (!log_->IsEnabled() || !FLAG_log_api) return; String* class_name_obj = holder->class_name(); SmartPointer<char> class_name = class_name_obj->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL); ApiEvent("api,%s,\"%s\",%u\n", tag, *class_name, index); -#endif } void Logger::ApiObjectAccess(const char* tag, JSObject* object) { -#ifdef ENABLE_LOGGING_AND_PROFILING if (!log_->IsEnabled() || !FLAG_log_api) return; String* class_name_obj = object->class_name(); SmartPointer<char> class_name = class_name_obj->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL); ApiEvent("api,%s,\"%s\"\n", tag, *class_name); -#endif } void Logger::ApiEntryCall(const char* name) { -#ifdef ENABLE_LOGGING_AND_PROFILING if (!log_->IsEnabled() || !FLAG_log_api) return; ApiEvent("api,%s\n", name); -#endif } void Logger::NewEvent(const char* name, void* object, size_t size) { -#ifdef ENABLE_LOGGING_AND_PROFILING if (!log_->IsEnabled() || !FLAG_log) return; LogMessageBuilder msg(this); msg.Append("new,%s,0x%" V8PRIxPTR ",%u\n", name, object, static_cast<unsigned int>(size)); msg.WriteToLogFile(); -#endif } void Logger::DeleteEvent(const char* name, void* object) { -#ifdef ENABLE_LOGGING_AND_PROFILING if (!log_->IsEnabled() || !FLAG_log) return; LogMessageBuilder msg(this); msg.Append("delete,%s,0x%" V8PRIxPTR "\n", name, object); msg.WriteToLogFile(); -#endif } @@ -866,7 +820,6 @@ void Logger::DeleteEventStatic(const char* name, void* object) { LOGGER->DeleteEvent(name, object); } -#ifdef ENABLE_LOGGING_AND_PROFILING void Logger::CallbackEventInternal(const char* prefix, const char* name, Address entry_point) { if (!log_->IsEnabled() || !FLAG_log_code) return; @@ -879,43 +832,35 @@ void Logger::CallbackEventInternal(const char* prefix, const char* name, msg.Append('\n'); msg.WriteToLogFile(); } -#endif void Logger::CallbackEvent(String* name, Address entry_point) { -#ifdef ENABLE_LOGGING_AND_PROFILING if (!log_->IsEnabled() || !FLAG_log_code) return; SmartPointer<char> str = name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL); CallbackEventInternal("", *str, entry_point); -#endif } void Logger::GetterCallbackEvent(String* name, Address entry_point) { -#ifdef ENABLE_LOGGING_AND_PROFILING if (!log_->IsEnabled() || !FLAG_log_code) return; SmartPointer<char> str = name->ToCString(DISALLOW_NULLS, 
ROBUST_STRING_TRAVERSAL); CallbackEventInternal("get ", *str, entry_point); -#endif } void Logger::SetterCallbackEvent(String* name, Address entry_point) { -#ifdef ENABLE_LOGGING_AND_PROFILING if (!log_->IsEnabled() || !FLAG_log_code) return; SmartPointer<char> str = name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL); CallbackEventInternal("set ", *str, entry_point); -#endif } void Logger::CodeCreateEvent(LogEventsAndTags tag, Code* code, const char* comment) { -#ifdef ENABLE_LOGGING_AND_PROFILING if (!log_->IsEnabled()) return; if (FLAG_ll_prof || Serializer::enabled()) { name_buffer_->Reset(); @@ -945,14 +890,12 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag, msg.Append('"'); msg.Append('\n'); msg.WriteToLogFile(); -#endif } void Logger::CodeCreateEvent(LogEventsAndTags tag, Code* code, String* name) { -#ifdef ENABLE_LOGGING_AND_PROFILING if (!log_->IsEnabled()) return; if (FLAG_ll_prof || Serializer::enabled()) { name_buffer_->Reset(); @@ -977,11 +920,9 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag, msg.Append('"'); msg.Append('\n'); msg.WriteToLogFile(); -#endif } -#ifdef ENABLE_LOGGING_AND_PROFILING // ComputeMarker must only be used when SharedFunctionInfo is known. static const char* ComputeMarker(Code* code) { switch (code->kind()) { @@ -990,14 +931,12 @@ static const char* ComputeMarker(Code* code) { default: return ""; } } -#endif void Logger::CodeCreateEvent(LogEventsAndTags tag, Code* code, SharedFunctionInfo* shared, String* name) { -#ifdef ENABLE_LOGGING_AND_PROFILING if (!log_->IsEnabled()) return; if (FLAG_ll_prof || Serializer::enabled()) { name_buffer_->Reset(); @@ -1029,7 +968,6 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag, msg.Append(",%s", ComputeMarker(code)); msg.Append('\n'); msg.WriteToLogFile(); -#endif } @@ -1040,7 +978,6 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag, Code* code, SharedFunctionInfo* shared, String* source, int line) { -#ifdef ENABLE_LOGGING_AND_PROFILING if (!log_->IsEnabled()) return; if (FLAG_ll_prof || Serializer::enabled()) { name_buffer_->Reset(); @@ -1078,12 +1015,10 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag, msg.Append(",%s", ComputeMarker(code)); msg.Append('\n'); msg.WriteToLogFile(); -#endif } void Logger::CodeCreateEvent(LogEventsAndTags tag, Code* code, int args_count) { -#ifdef ENABLE_LOGGING_AND_PROFILING if (!log_->IsEnabled()) return; if (FLAG_ll_prof || Serializer::enabled()) { name_buffer_->Reset(); @@ -1106,21 +1041,17 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag, Code* code, int args_count) { msg.Append(",%d,\"args_count: %d\"", code->ExecutableSize(), args_count); msg.Append('\n'); msg.WriteToLogFile(); -#endif } void Logger::CodeMovingGCEvent() { -#ifdef ENABLE_LOGGING_AND_PROFILING if (!log_->IsEnabled() || !FLAG_ll_prof) return; LowLevelLogWriteBytes(&kCodeMovingGCTag, sizeof(kCodeMovingGCTag)); OS::SignalCodeMovingGC(); -#endif } void Logger::RegExpCodeCreateEvent(Code* code, String* source) { -#ifdef ENABLE_LOGGING_AND_PROFILING if (!log_->IsEnabled()) return; if (FLAG_ll_prof || Serializer::enabled()) { name_buffer_->Reset(); @@ -1145,36 +1076,30 @@ void Logger::RegExpCodeCreateEvent(Code* code, String* source) { msg.Append('\"'); msg.Append('\n'); msg.WriteToLogFile(); -#endif } void Logger::CodeMoveEvent(Address from, Address to) { -#ifdef ENABLE_LOGGING_AND_PROFILING if (!log_->IsEnabled()) return; if (FLAG_ll_prof) LowLevelCodeMoveEvent(from, to); if (Serializer::enabled() && address_to_name_map_ != NULL) { address_to_name_map_->Move(from, to); } 
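// MoveEventInternal below writes the textual record; judging by the event
// name table in log.h it comes out roughly as (addresses illustrative):
//
//   code-move,0xf7c3a100,0xf7c3b240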
MoveEventInternal(CODE_MOVE_EVENT, from, to); -#endif } void Logger::CodeDeleteEvent(Address from) { -#ifdef ENABLE_LOGGING_AND_PROFILING if (!log_->IsEnabled()) return; if (FLAG_ll_prof) LowLevelCodeDeleteEvent(from); if (Serializer::enabled() && address_to_name_map_ != NULL) { address_to_name_map_->Remove(from); } DeleteEventInternal(CODE_DELETE_EVENT, from); -#endif } void Logger::SnapshotPositionEvent(Address addr, int pos) { -#ifdef ENABLE_LOGGING_AND_PROFILING if (!log_->IsEnabled()) return; if (FLAG_ll_prof) LowLevelSnapshotPositionEvent(addr, pos); if (Serializer::enabled() && address_to_name_map_ != NULL) { @@ -1196,18 +1121,14 @@ void Logger::SnapshotPositionEvent(Address addr, int pos) { msg.Append(",%d", pos); msg.Append('\n'); msg.WriteToLogFile(); -#endif } void Logger::SharedFunctionInfoMoveEvent(Address from, Address to) { -#ifdef ENABLE_LOGGING_AND_PROFILING MoveEventInternal(SHARED_FUNC_MOVE_EVENT, from, to); -#endif } -#ifdef ENABLE_LOGGING_AND_PROFILING void Logger::MoveEventInternal(LogEventsAndTags event, Address from, Address to) { @@ -1220,10 +1141,8 @@ void Logger::MoveEventInternal(LogEventsAndTags event, msg.Append('\n'); msg.WriteToLogFile(); } -#endif -#ifdef ENABLE_LOGGING_AND_PROFILING void Logger::DeleteEventInternal(LogEventsAndTags event, Address from) { if (!log_->IsEnabled() || !FLAG_log_code) return; LogMessageBuilder msg(this); @@ -1232,11 +1151,9 @@ void Logger::DeleteEventInternal(LogEventsAndTags event, Address from) { msg.Append('\n'); msg.WriteToLogFile(); } -#endif void Logger::ResourceEvent(const char* name, const char* tag) { -#ifdef ENABLE_LOGGING_AND_PROFILING if (!log_->IsEnabled() || !FLAG_log) return; LogMessageBuilder msg(this); msg.Append("%s,%s,", name, tag); @@ -1249,12 +1166,10 @@ void Logger::ResourceEvent(const char* name, const char* tag) { msg.Append('\n'); msg.WriteToLogFile(); -#endif } void Logger::SuspectReadEvent(String* name, Object* obj) { -#ifdef ENABLE_LOGGING_AND_PROFILING if (!log_->IsEnabled() || !FLAG_log_suspect) return; LogMessageBuilder msg(this); String* class_name = obj->IsJSObject() @@ -1268,12 +1183,10 @@ void Logger::SuspectReadEvent(String* name, Object* obj) { msg.Append('"'); msg.Append('\n'); msg.WriteToLogFile(); -#endif } void Logger::HeapSampleBeginEvent(const char* space, const char* kind) { -#ifdef ENABLE_LOGGING_AND_PROFILING if (!log_->IsEnabled() || !FLAG_log_gc) return; LogMessageBuilder msg(this); // Using non-relative system time in order to be able to synchronize with @@ -1281,42 +1194,34 @@ void Logger::HeapSampleBeginEvent(const char* space, const char* kind) { msg.Append("heap-sample-begin,\"%s\",\"%s\",%.0f\n", space, kind, OS::TimeCurrentMillis()); msg.WriteToLogFile(); -#endif } void Logger::HeapSampleEndEvent(const char* space, const char* kind) { -#ifdef ENABLE_LOGGING_AND_PROFILING if (!log_->IsEnabled() || !FLAG_log_gc) return; LogMessageBuilder msg(this); msg.Append("heap-sample-end,\"%s\",\"%s\"\n", space, kind); msg.WriteToLogFile(); -#endif } void Logger::HeapSampleItemEvent(const char* type, int number, int bytes) { -#ifdef ENABLE_LOGGING_AND_PROFILING if (!log_->IsEnabled() || !FLAG_log_gc) return; LogMessageBuilder msg(this); msg.Append("heap-sample-item,%s,%d,%d\n", type, number, bytes); msg.WriteToLogFile(); -#endif } void Logger::DebugTag(const char* call_site_tag) { -#ifdef ENABLE_LOGGING_AND_PROFILING if (!log_->IsEnabled() || !FLAG_log) return; LogMessageBuilder msg(this); msg.Append("debug-tag,%s\n", call_site_tag); msg.WriteToLogFile(); -#endif } void 
Logger::DebugEvent(const char* event_type, Vector<uint16_t> parameter) { -#ifdef ENABLE_LOGGING_AND_PROFILING if (!log_->IsEnabled() || !FLAG_log) return; StringBuilder s(parameter.length() + 1); for (int i = 0; i < parameter.length(); ++i) { @@ -1330,11 +1235,9 @@ void Logger::DebugEvent(const char* event_type, Vector<uint16_t> parameter) { parameter_string); DeleteArray(parameter_string); msg.WriteToLogFile(); -#endif } -#ifdef ENABLE_LOGGING_AND_PROFILING void Logger::TickEvent(TickSample* sample, bool overflow) { if (!log_->IsEnabled() || !FLAG_prof) return; LogMessageBuilder msg(this); @@ -1378,7 +1281,6 @@ void Logger::PauseProfiler() { ticker_->Stop(); } FLAG_log_code = false; - // Must be the same message as Log::kDynamicBufferSeal. LOG(ISOLATE, UncheckedStringEvent("profiler", "pause")); } --logging_nesting_; @@ -1420,11 +1322,6 @@ bool Logger::IsProfilerSamplerActive() { } -int Logger::GetLogLines(int from_pos, char* dest_buf, int max_size) { - return log_->GetLogLines(from_pos, dest_buf, max_size); -} - - class EnumerateOptimizedFunctionsVisitor: public OptimizedFunctionVisitor { public: EnumerateOptimizedFunctionsVisitor(Handle<SharedFunctionInfo>* sfis, @@ -1545,7 +1442,6 @@ void Logger::LogCodeObject(Object* object) { void Logger::LogCodeInfo() { -#ifdef ENABLE_LOGGING_AND_PROFILING if (!log_->IsEnabled() || !FLAG_ll_prof) return; #if V8_TARGET_ARCH_IA32 const char arch[] = "ia32"; @@ -1557,7 +1453,6 @@ void Logger::LogCodeInfo() { const char arch[] = "unknown"; #endif LowLevelLogWriteBytes(arch, sizeof(arch)); -#endif // ENABLE_LOGGING_AND_PROFILING } @@ -1710,11 +1605,8 @@ void Logger::LogAccessorCallbacks() { } } -#endif - bool Logger::Setup() { -#ifdef ENABLE_LOGGING_AND_PROFILING // Tests and EnsureInitialize() can call this twice in a row. It's harmless. if (is_initialized_) return true; is_initialized_ = true; @@ -1766,40 +1658,27 @@ bool Logger::Setup() { } return true; - -#else - return false; -#endif } Sampler* Logger::sampler() { -#ifdef ENABLE_LOGGING_AND_PROFILING return ticker_; -#else - return NULL; -#endif } void Logger::EnsureTickerStarted() { -#ifdef ENABLE_LOGGING_AND_PROFILING ASSERT(ticker_ != NULL); if (!ticker_->IsActive()) ticker_->Start(); -#endif } void Logger::EnsureTickerStopped() { -#ifdef ENABLE_LOGGING_AND_PROFILING if (ticker_ != NULL && ticker_->IsActive()) ticker_->Stop(); -#endif } -void Logger::TearDown() { -#ifdef ENABLE_LOGGING_AND_PROFILING - if (!is_initialized_) return; +FILE* Logger::TearDown() { + if (!is_initialized_) return NULL; is_initialized_ = false; // Stop the profiler before closing the file. @@ -1815,13 +1694,11 @@ void Logger::TearDown() { delete ticker_; ticker_ = NULL; - log_->Close(); -#endif + return log_->Close(); } void Logger::EnableSlidingStateWindow() { -#ifdef ENABLE_LOGGING_AND_PROFILING // If the ticker is NULL, Logger::Setup has not been called yet. 
In // that case, we set the sliding_state_window flag so that the // sliding window computation will be started when Logger::Setup is @@ -1835,7 +1712,6 @@ void Logger::EnableSlidingStateWindow() { if (sliding_state_window_ == NULL) { sliding_state_window_ = new SlidingStateWindow(Isolate::Current()); } -#endif } @@ -1855,10 +1731,8 @@ bool SamplerRegistry::IterateActiveSamplers(VisitSampler func, void* param) { static void ComputeCpuProfiling(Sampler* sampler, void* flag_ptr) { -#ifdef ENABLE_LOGGING_AND_PROFILING bool* flag = reinterpret_cast<bool*>(flag_ptr); *flag |= sampler->IsProfiling(); -#endif } diff --git a/deps/v8/src/log.h b/deps/v8/src/log.h index 6ffd18c61b..02250595f8 100644 --- a/deps/v8/src/log.h +++ b/deps/v8/src/log.h @@ -78,7 +78,6 @@ class SlidingStateWindow; class Ticker; #undef LOG -#ifdef ENABLE_LOGGING_AND_PROFILING #define LOG(isolate, Call) \ do { \ v8::internal::Logger* logger = \ @@ -86,9 +85,6 @@ class Ticker; if (logger->is_logging()) \ logger->Call; \ } while (false) -#else -#define LOG(isolate, Call) ((void) 0) -#endif #define LOG_EVENTS_AND_TAGS_LIST(V) \ V(CODE_CREATION_EVENT, "code-creation") \ @@ -161,7 +157,9 @@ class Logger { Sampler* sampler(); // Frees resources acquired in Setup. - void TearDown(); + // When a temporary file is used for the log, returns its stream descriptor, + // leaving the file open. + FILE* TearDown(); // Enable the computation of a sliding window of states. void EnableSlidingStateWindow(); @@ -272,7 +270,6 @@ class Logger { // Log an event reported from generated code void LogRuntime(Vector<const char> format, JSArray* args); -#ifdef ENABLE_LOGGING_AND_PROFILING bool is_logging() { return logging_nesting_ > 0; } @@ -284,10 +281,6 @@ class Logger { void ResumeProfiler(); bool IsProfilerPaused(); - // If logging is performed into a memory buffer, allows to - // retrieve previously written messages. See v8.h. - int GetLogLines(int from_pos, char* dest_buf, int max_size); - // Logs all compiled functions found in the heap. void LogCompiledFunctions(); // Logs all accessor callbacks found in the heap. @@ -424,9 +417,6 @@ class Logger { Address prev_code_; friend class CpuProfiler; -#else - bool is_logging() { return false; } -#endif }; diff --git a/deps/v8/src/mark-compact.cc b/deps/v8/src/mark-compact.cc index 5792f6c403..0bf82863d7 100644 --- a/deps/v8/src/mark-compact.cc +++ b/deps/v8/src/mark-compact.cc @@ -1661,31 +1661,34 @@ void MarkCompactCollector::ClearNonLiveTransitions() { // Clear dead prototype transitions. 
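
The log.h change above (void TearDown() becoming FILE* TearDown()) hands ownership of a temporary log file back to the embedder instead of closing it unconditionally. A toy restatement of the new contract, simplified and not V8's actual shutdown sequence:

    #include <cstdio>

    // If initialization never happened there is nothing to hand back;
    // otherwise shut the logger down but leave the stream open so the
    // caller (e.g. an embedder reading a temp-file log) can consume it.
    FILE* TearDownLog(bool initialized, FILE* log_stream) {
      if (!initialized) return NULL;
      // ... stop profiler, delete ticker (elided) ...
      return log_stream;  // the caller now owns and closes the stream
    }
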
int number_of_transitions = map->NumberOfProtoTransitions(); - FixedArray* prototype_transitions = map->unchecked_prototype_transitions(); - int new_number_of_transitions = 0; - const int header = Map::kProtoTransitionHeaderSize; - const int proto_offset = - header + Map::kProtoTransitionPrototypeOffset; - const int map_offset = header + Map::kProtoTransitionMapOffset; - const int step = Map::kProtoTransitionElementsPerEntry; - for (int i = 0; i < number_of_transitions; i++) { - Object* prototype = prototype_transitions->get(proto_offset + i * step); - Object* cached_map = prototype_transitions->get(map_offset + i * step); - if (HeapObject::cast(prototype)->IsMarked() && - HeapObject::cast(cached_map)->IsMarked()) { - if (new_number_of_transitions != i) { - prototype_transitions->set_unchecked( - heap_, - proto_offset + new_number_of_transitions * step, - prototype, - UPDATE_WRITE_BARRIER); - prototype_transitions->set_unchecked( - heap_, - map_offset + new_number_of_transitions * step, - cached_map, - SKIP_WRITE_BARRIER); + if (number_of_transitions > 0) { + FixedArray* prototype_transitions = + map->unchecked_prototype_transitions(); + int new_number_of_transitions = 0; + const int header = Map::kProtoTransitionHeaderSize; + const int proto_offset = + header + Map::kProtoTransitionPrototypeOffset; + const int map_offset = header + Map::kProtoTransitionMapOffset; + const int step = Map::kProtoTransitionElementsPerEntry; + for (int i = 0; i < number_of_transitions; i++) { + Object* prototype = prototype_transitions->get(proto_offset + i * step); + Object* cached_map = prototype_transitions->get(map_offset + i * step); + if (HeapObject::cast(prototype)->IsMarked() && + HeapObject::cast(cached_map)->IsMarked()) { + if (new_number_of_transitions != i) { + prototype_transitions->set_unchecked( + heap_, + proto_offset + new_number_of_transitions * step, + prototype, + UPDATE_WRITE_BARRIER); + prototype_transitions->set_unchecked( + heap_, + map_offset + new_number_of_transitions * step, + cached_map, + SKIP_WRITE_BARRIER); + } + new_number_of_transitions++; } - new_number_of_transitions++; } // Fill slots that became free with undefined value. 
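
The rewritten loop above wraps the prototype-transition scan in a number_of_transitions > 0 guard but keeps the same in-place compaction idiom. A self-contained restatement with illustrative names (V8 operates on a FixedArray of prototype/map pairs rather than a vector of ints):

    #include <vector>

    // Live entries slide left over dead ones; a write happens only when the
    // destination index differs from the source. V8 then fills the freed
    // tail slots with undefined rather than shrinking the array.
    int CompactLive(std::vector<int>& entries, bool (*is_live)(int)) {
      int live = 0;
      for (int i = 0; i < static_cast<int>(entries.size()); i++) {
        if (is_live(entries[i])) {
          if (live != i) entries[live] = entries[i];
          live++;
        }
      }
      return live;  // entries [live, size) are now dead slots
    }
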
@@ -3255,11 +3258,9 @@ void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj, GDBJITInterface::RemoveCode(reinterpret_cast<Code*>(obj)); } #endif -#ifdef ENABLE_LOGGING_AND_PROFILING if (obj->IsCode()) { PROFILE(isolate, CodeDeleteEvent(obj->address())); } -#endif } diff --git a/deps/v8/src/messages.js b/deps/v8/src/messages.js index 841c5180a3..b9281070a7 100644 --- a/deps/v8/src/messages.js +++ b/deps/v8/src/messages.js @@ -195,6 +195,7 @@ function FormatMessage(message) { non_extensible_proto: ["%0", " is not extensible"], handler_non_object: ["Proxy.", "%0", " called with non-object as handler"], handler_trap_missing: ["Proxy handler ", "%0", " has no '", "%1", "' trap"], + handler_failed: ["Proxy handler ", "%0", " returned false for '", "%1", "' trap"], proxy_prop_not_configurable: ["Trap ", "%1", " of proxy handler ", "%0", " returned non-configurable descriptor for property ", "%2"], proxy_non_object_prop_names: ["Trap ", "%1", " returned non-object ", "%0"], proxy_repeated_prop_name: ["Trap ", "%1", " returned repeated property name ", "%2"], diff --git a/deps/v8/src/mips/assembler-mips.h b/deps/v8/src/mips/assembler-mips.h index 92c958b962..f3730d6f31 100644 --- a/deps/v8/src/mips/assembler-mips.h +++ b/deps/v8/src/mips/assembler-mips.h @@ -779,8 +779,13 @@ class Assembler : public AssemblerBase { void fcmp(FPURegister src1, const double src2, FPUCondition cond); // Check the code size generated from label to here. - int InstructionsGeneratedSince(Label* l) { - return (pc_offset() - l->pos()) / kInstrSize; + int SizeOfCodeGeneratedSince(Label* label) { + return pc_offset() - label->pos(); + } + + // Check the number of instructions generated from label to here. + int InstructionsGeneratedSince(Label* label) { + return SizeOfCodeGeneratedSince(label) / kInstrSize; } // Class for scoping postponing the trampoline pool generation. diff --git a/deps/v8/src/mips/builtins-mips.cc b/deps/v8/src/mips/builtins-mips.cc index 4bb1d8cba7..1555653f0a 100644 --- a/deps/v8/src/mips/builtins-mips.cc +++ b/deps/v8/src/mips/builtins-mips.cc @@ -634,7 +634,7 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) { __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset)); __ lw(a2, FieldMemOperand(a2, SharedFunctionInfo::kConstructStubOffset)); __ Addu(t9, a2, Operand(Code::kHeaderSize - kHeapObjectTag)); - __ Jump(Operand(t9)); + __ Jump(t9); // a0: number of arguments // a1: called object @@ -1075,8 +1075,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm, // Invoke the code and pass argc as a0. 
__ mov(a0, a3); if (is_construct) { - __ Call(masm->isolate()->builtins()->JSConstructCall(), - RelocInfo::CODE_TARGET); + __ Call(masm->isolate()->builtins()->JSConstructCall()); } else { ParameterCount actual(a0); __ InvokeFunction(a1, actual, CALL_FUNCTION, diff --git a/deps/v8/src/mips/code-stubs-mips.cc b/deps/v8/src/mips/code-stubs-mips.cc index d7fac867f3..d03443f272 100644 --- a/deps/v8/src/mips/code-stubs-mips.cc +++ b/deps/v8/src/mips/code-stubs-mips.cc @@ -305,12 +305,6 @@ class ConvertToDoubleStub : public CodeStub { } void Generate(MacroAssembler* masm); - - const char* GetName() { return "ConvertToDoubleStub"; } - -#ifdef DEBUG - void Print() { PrintF("ConvertToDoubleStub\n"); } -#endif }; @@ -396,11 +390,11 @@ void FloatingPointHelper::LoadSmis(MacroAssembler* masm, __ mov(scratch1, a0); ConvertToDoubleStub stub1(a3, a2, scratch1, scratch2); __ push(ra); - __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET); + __ Call(stub1.GetCode()); // Write Smi from a1 to a1 and a0 in double format. __ mov(scratch1, a1); ConvertToDoubleStub stub2(a1, a0, scratch1, scratch2); - __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET); + __ Call(stub2.GetCode()); __ pop(ra); } } @@ -482,7 +476,7 @@ void FloatingPointHelper::LoadNumber(MacroAssembler* masm, __ mov(scratch1, object); ConvertToDoubleStub stub(dst2, dst1, scratch1, scratch2); __ push(ra); - __ Call(stub.GetCode(), RelocInfo::CODE_TARGET); + __ Call(stub.GetCode()); __ pop(ra); } @@ -1107,7 +1101,7 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm, __ mov(t6, rhs); ConvertToDoubleStub stub1(a1, a0, t6, t5); __ push(ra); - __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET); + __ Call(stub1.GetCode()); __ pop(ra); } @@ -1142,7 +1136,7 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm, __ mov(t6, lhs); ConvertToDoubleStub stub2(a3, a2, t6, t5); __ push(ra); - __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET); + __ Call(stub2.GetCode()); __ pop(ra); // Load rhs to a double in a1, a0. if (rhs.is(a0)) { @@ -1803,25 +1797,17 @@ void ToBooleanStub::Generate(MacroAssembler* masm) { } -const char* UnaryOpStub::GetName() { - if (name_ != NULL) return name_; - const int kMaxNameLength = 100; - name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray( - kMaxNameLength); - if (name_ == NULL) return "OOM"; +void UnaryOpStub::PrintName(StringStream* stream) { const char* op_name = Token::Name(op_); const char* overwrite_name = NULL; // Make g++ happy. 
switch (mode_) { case UNARY_NO_OVERWRITE: overwrite_name = "Alloc"; break; case UNARY_OVERWRITE: overwrite_name = "Overwrite"; break; } - - OS::SNPrintF(Vector<char>(name_, kMaxNameLength), - "UnaryOpStub_%s_%s_%s", - op_name, - overwrite_name, - UnaryOpIC::GetName(operand_type_)); - return name_; + stream->Add("UnaryOpStub_%s_%s_%s", + op_name, + overwrite_name, + UnaryOpIC::GetName(operand_type_)); } @@ -2160,12 +2146,7 @@ void BinaryOpStub::Generate(MacroAssembler* masm) { } -const char* BinaryOpStub::GetName() { - if (name_ != NULL) return name_; - const int kMaxNameLength = 100; - name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray( - kMaxNameLength); - if (name_ == NULL) return "OOM"; +void BinaryOpStub::PrintName(StringStream* stream) { const char* op_name = Token::Name(op_); const char* overwrite_name; switch (mode_) { @@ -2174,13 +2155,10 @@ const char* BinaryOpStub::GetName() { case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break; default: overwrite_name = "UnknownOverwrite"; break; } - - OS::SNPrintF(Vector<char>(name_, kMaxNameLength), - "BinaryOpStub_%s_%s_%s", - op_name, - overwrite_name, - BinaryOpIC::GetName(operands_type_)); - return name_; + stream->Add("BinaryOpStub_%s_%s_%s", + op_name, + overwrite_name, + BinaryOpIC::GetName(operands_type_)); } @@ -3749,24 +3727,22 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { // 4 args slots // args - #ifdef ENABLE_LOGGING_AND_PROFILING - // If this is the outermost JS call, set js_entry_sp value. - Label non_outermost_js; - ExternalReference js_entry_sp(Isolate::k_js_entry_sp_address, - masm->isolate()); - __ li(t1, Operand(ExternalReference(js_entry_sp))); - __ lw(t2, MemOperand(t1)); - __ Branch(&non_outermost_js, ne, t2, Operand(zero_reg)); - __ sw(fp, MemOperand(t1)); - __ li(t0, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME))); - Label cont; - __ b(&cont); - __ nop(); // Branch delay slot nop. - __ bind(&non_outermost_js); - __ li(t0, Operand(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME))); - __ bind(&cont); - __ push(t0); - #endif + // If this is the outermost JS call, set js_entry_sp value. + Label non_outermost_js; + ExternalReference js_entry_sp(Isolate::k_js_entry_sp_address, + masm->isolate()); + __ li(t1, Operand(ExternalReference(js_entry_sp))); + __ lw(t2, MemOperand(t1)); + __ Branch(&non_outermost_js, ne, t2, Operand(zero_reg)); + __ sw(fp, MemOperand(t1)); + __ li(t0, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME))); + Label cont; + __ b(&cont); + __ nop(); // Branch delay slot nop. + __ bind(&non_outermost_js); + __ li(t0, Operand(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME))); + __ bind(&cont); + __ push(t0); // Call a faked try-block that does the invoke. __ bal(&invoke); // bal exposes branch delay slot. @@ -3835,16 +3811,14 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { __ PopTryHandler(); __ bind(&exit); // v0 holds result - #ifdef ENABLE_LOGGING_AND_PROFILING - // Check if the current stack frame is marked as the outermost JS frame. - Label non_outermost_js_2; - __ pop(t1); - __ Branch(&non_outermost_js_2, ne, t1, - Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME))); - __ li(t1, Operand(ExternalReference(js_entry_sp))); - __ sw(zero_reg, MemOperand(t1)); - __ bind(&non_outermost_js_2); - #endif + // Check if the current stack frame is marked as the outermost JS frame. 
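
The JSEntryStub hunks above and just below make the js_entry_sp bookkeeping unconditional: the entry side records the outermost frame, and the exit side clears it. A stand-alone restatement of the protocol with simplified names:

    #include <cstdint>

    // The outermost JS entry records its frame pointer in js_entry_sp and
    // is marked OUTERMOST; nested entries are marked INNER; only the
    // outermost clears js_entry_sp on the way out.
    static uintptr_t js_entry_sp = 0;

    enum FrameMarker { kInnerJSEntryFrame, kOutermostJSEntryFrame };

    FrameMarker EnterJS(uintptr_t fp) {
      if (js_entry_sp != 0) return kInnerJSEntryFrame;
      js_entry_sp = fp;
      return kOutermostJSEntryFrame;
    }

    void LeaveJS(FrameMarker marker) {
      if (marker == kOutermostJSEntryFrame) js_entry_sp = 0;
    }

In the generated code the marker is a Smi pushed on the stack at entry and popped and compared at exit, which is what the pushed StackFrame::OUTERMOST_JSENTRY_FRAME / INNER_JSENTRY_FRAME values implement.
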
+ Label non_outermost_js_2; + __ pop(t1); + __ Branch(&non_outermost_js_2, ne, t1, + Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME))); + __ li(t1, Operand(ExternalReference(js_entry_sp))); + __ sw(zero_reg, MemOperand(t1)); + __ bind(&non_outermost_js_2); // Restore the top frame descriptors from the stack. __ pop(t1); @@ -4592,10 +4566,9 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { __ movz(t9, t0, a0); // If UC16 (a0 is 0), replace t9 w/kDataUC16CodeOffset. // Check that the irregexp code has been generated for the actual string - // encoding. If it has, the field contains a code object otherwise it - // contains the hole. - __ GetObjectType(t9, a0, a0); - __ Branch(&runtime, ne, a0, Operand(CODE_TYPE)); + // encoding. If it has, the field contains a code object otherwise it contains + // a smi (code flushing support). + __ JumpIfSmi(t9, &runtime); // a3: encoding of subject string (1 if ASCII, 0 if two_byte); // t9: code @@ -4947,16 +4920,9 @@ void CallFunctionStub::Generate(MacroAssembler* masm) { // Unfortunately you have to run without snapshots to see most of these // names in the profile since most compare stubs end up in the snapshot. -const char* CompareStub::GetName() { +void CompareStub::PrintName(StringStream* stream) { ASSERT((lhs_.is(a0) && rhs_.is(a1)) || (lhs_.is(a1) && rhs_.is(a0))); - - if (name_ != NULL) return name_; - const int kMaxNameLength = 100; - name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray( - kMaxNameLength); - if (name_ == NULL) return "OOM"; - const char* cc_name; switch (cc_) { case lt: cc_name = "LT"; break; @@ -4967,40 +4933,14 @@ const char* CompareStub::GetName() { case ne: cc_name = "NE"; break; default: cc_name = "UnknownCondition"; break; } - - const char* lhs_name = lhs_.is(a0) ? "_a0" : "_a1"; - const char* rhs_name = rhs_.is(a0) ? "_a0" : "_a1"; - - const char* strict_name = ""; - if (strict_ && (cc_ == eq || cc_ == ne)) { - strict_name = "_STRICT"; - } - - const char* never_nan_nan_name = ""; - if (never_nan_nan_ && (cc_ == eq || cc_ == ne)) { - never_nan_nan_name = "_NO_NAN"; - } - - const char* include_number_compare_name = ""; - if (!include_number_compare_) { - include_number_compare_name = "_NO_NUMBER"; - } - - const char* include_smi_compare_name = ""; - if (!include_smi_compare_) { - include_smi_compare_name = "_NO_SMI"; - } - - OS::SNPrintF(Vector<char>(name_, kMaxNameLength), - "CompareStub_%s%s%s%s%s%s", - cc_name, - lhs_name, - rhs_name, - strict_name, - never_nan_nan_name, - include_number_compare_name, - include_smi_compare_name); - return name_; + bool is_equality = cc_ == eq || cc_ == ne; + stream->Add("CompareStub_%s", cc_name); + stream->Add(lhs_.is(a0) ? "_a0" : "_a1"); + stream->Add(rhs_.is(a0) ? "_a0" : "_a1"); + if (strict_ && is_equality) stream->Add("_STRICT"); + if (never_nan_nan_ && is_equality) stream->Add("_NO_NAN"); + if (!include_number_compare_) stream->Add("_NO_NUMBER"); + if (!include_smi_compare_) stream->Add("_NO_SMI"); } diff --git a/deps/v8/src/mips/code-stubs-mips.h b/deps/v8/src/mips/code-stubs-mips.h index 6c70bdd70a..aa224bcfa6 100644 --- a/deps/v8/src/mips/code-stubs-mips.h +++ b/deps/v8/src/mips/code-stubs-mips.h @@ -66,8 +66,7 @@ class UnaryOpStub: public CodeStub { UnaryOpIC::TypeInfo operand_type = UnaryOpIC::UNINITIALIZED) : op_(op), mode_(mode), - operand_type_(operand_type), - name_(NULL) { + operand_type_(operand_type) { } private: @@ -77,19 +76,7 @@ class UnaryOpStub: public CodeStub { // Operand type information determined at runtime. 
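
Across the stubs above (UnaryOpStub, BinaryOpStub, CompareStub), the lazily allocated GetName() buffer, with its bootstrapper allocation and "OOM" fallback, gives way to a virtual PrintName(StringStream*) that appends fragments to a caller-owned stream; the header changes that continue below declare that virtual. A toy analogue of the refactor (this StringStream is a plain wrapper, not V8's printf-style class):

    #include <string>

    struct StringStream {
      std::string buffer;
      void Add(const char* s) { buffer += s; }
    };

    struct Stub {
      virtual ~Stub() {}
      virtual void PrintName(StringStream* stream) { stream->Add("Stub"); }
    };

    struct CompareStubLike : Stub {
      bool strict;
      explicit CompareStubLike(bool s) : strict(s) {}
      virtual void PrintName(StringStream* stream) {
        stream->Add("CompareStub");
        if (strict) stream->Add("_STRICT");  // flags appended conditionally
      }
    };

The stream-based form removes the per-stub name_ field, the fixed 100-byte buffer, and the out-of-memory special case.
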
UnaryOpIC::TypeInfo operand_type_; - char* name_; - - const char* GetName(); - -#ifdef DEBUG - void Print() { - PrintF("UnaryOpStub %d (op %s), (mode %d, runtime_type_info %s)\n", - MinorKey(), - Token::String(op_), - static_cast<int>(mode_), - UnaryOpIC::GetName(operand_type_)); - } -#endif + virtual void PrintName(StringStream* stream); class ModeBits: public BitField<UnaryOverwriteMode, 0, 1> {}; class OpBits: public BitField<Token::Value, 1, 7> {}; @@ -143,8 +130,7 @@ class BinaryOpStub: public CodeStub { : op_(op), mode_(mode), operands_type_(BinaryOpIC::UNINITIALIZED), - result_type_(BinaryOpIC::UNINITIALIZED), - name_(NULL) { + result_type_(BinaryOpIC::UNINITIALIZED) { use_fpu_ = CpuFeatures::IsSupported(FPU); ASSERT(OpBits::is_valid(Token::NUM_TOKENS)); } @@ -157,8 +143,7 @@ class BinaryOpStub: public CodeStub { mode_(ModeBits::decode(key)), use_fpu_(FPUBits::decode(key)), operands_type_(operands_type), - result_type_(result_type), - name_(NULL) { } + result_type_(result_type) { } private: enum SmiCodeGenerateHeapNumberResults { @@ -174,20 +159,7 @@ class BinaryOpStub: public CodeStub { BinaryOpIC::TypeInfo operands_type_; BinaryOpIC::TypeInfo result_type_; - char* name_; - - const char* GetName(); - -#ifdef DEBUG - void Print() { - PrintF("BinaryOpStub %d (op %s), " - "(mode %d, runtime_type_info %s)\n", - MinorKey(), - Token::String(op_), - static_cast<int>(mode_), - BinaryOpIC::GetName(operands_type_)); - } -#endif + virtual void PrintName(StringStream* stream); // Minor key encoding in 16 bits RRRTTTVOOOOOOOMM. class ModeBits: public BitField<OverwriteMode, 0, 2> {}; @@ -374,12 +346,6 @@ class WriteInt32ToHeapNumberStub : public CodeStub { } void Generate(MacroAssembler* masm); - - const char* GetName() { return "WriteInt32ToHeapNumberStub"; } - -#ifdef DEBUG - void Print() { PrintF("WriteInt32ToHeapNumberStub\n"); } -#endif }; @@ -406,14 +372,6 @@ class NumberToStringStub: public CodeStub { int MinorKey() { return 0; } void Generate(MacroAssembler* masm); - - const char* GetName() { return "NumberToStringStub"; } - -#ifdef DEBUG - void Print() { - PrintF("NumberToStringStub\n"); - } -#endif }; @@ -431,8 +389,6 @@ class RegExpCEntryStub: public CodeStub { int MinorKey() { return 0; } bool NeedsImmovableCode() { return true; } - - const char* GetName() { return "RegExpCEntryStub"; } }; // Trampoline stub to call into native code. To call safely into native code @@ -453,8 +409,6 @@ class DirectCEntryStub: public CodeStub { int MinorKey() { return 0; } bool NeedsImmovableCode() { return true; } - - const char* GetName() { return "DirectCEntryStub"; } }; class FloatingPointHelper : public AllStatic { @@ -636,13 +590,6 @@ class StringDictionaryLookupStub: public CodeStub { StringDictionary::kHeaderSize + StringDictionary::kElementsStartIndex * kPointerSize; - -#ifdef DEBUG - void Print() { - PrintF("StringDictionaryLookupStub\n"); - } -#endif - Major MajorKey() { return StringDictionaryNegativeLookup; } int MinorKey() { diff --git a/deps/v8/src/mips/codegen-mips.h b/deps/v8/src/mips/codegen-mips.h index fecd321fad..a8de9c8610 100644 --- a/deps/v8/src/mips/codegen-mips.h +++ b/deps/v8/src/mips/codegen-mips.h @@ -60,9 +60,7 @@ class CodeGenerator: public AstVisitor { // Print the code after compiling it. 
static void PrintCode(Handle<Code> code, CompilationInfo* info); -#ifdef ENABLE_LOGGING_AND_PROFILING static bool ShouldGenerateLog(Expression* type); -#endif static void SetFunctionInfo(Handle<JSFunction> fun, FunctionLiteral* lit, diff --git a/deps/v8/src/mips/full-codegen-mips.cc b/deps/v8/src/mips/full-codegen-mips.cc index 5b9bbb5789..3f5ea7b914 100644 --- a/deps/v8/src/mips/full-codegen-mips.cc +++ b/deps/v8/src/mips/full-codegen-mips.cc @@ -783,7 +783,7 @@ void FullCodeGenerator::EmitDeclaration(Variable* variable, // IDs for bailouts from optimized code. ASSERT(prop->obj()->AsVariableProxy() != NULL); { AccumulatorValueContext for_object(this); - EmitVariableLoad(prop->obj()->AsVariableProxy()->var()); + EmitVariableLoad(prop->obj()->AsVariableProxy()); } __ push(result_register()); @@ -798,7 +798,7 @@ void FullCodeGenerator::EmitDeclaration(Variable* variable, Handle<Code> ic = is_strict_mode() ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict() : isolate()->builtins()->KeyedStoreIC_Initialize(); - __ CallWithAstId(ic); + __ Call(ic); // Value in v0 is ignored (declarations are statements). } } @@ -873,7 +873,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) { // Record position before stub call for type feedback. SetSourcePosition(clause->position()); Handle<Code> ic = CompareIC::GetUninitialized(Token::EQ_STRICT); - __ CallWithAstId(ic, RelocInfo::CODE_TARGET, clause->CompareId()); + __ Call(ic, RelocInfo::CODE_TARGET, clause->CompareId()); patch_site.EmitPatchInfo(); __ Branch(&next_test, ne, v0, Operand(zero_reg)); @@ -1117,7 +1117,7 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info, void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) { Comment cmnt(masm_, "[ VariableProxy"); - EmitVariableLoad(expr->var()); + EmitVariableLoad(expr); } @@ -1173,7 +1173,7 @@ void FullCodeGenerator::EmitLoadGlobalSlotCheckExtensions( ? RelocInfo::CODE_TARGET : RelocInfo::CODE_TARGET_CONTEXT; Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize(); - __ CallWithAstId(ic, mode); + __ Call(ic, mode); } @@ -1253,7 +1253,7 @@ void FullCodeGenerator::EmitDynamicLoadFromSlotFastCase( __ li(a0, Operand(key_literal->handle())); Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize(); - __ CallWithAstId(ic, RelocInfo::CODE_TARGET, GetPropertyId(property)); + __ Call(ic, RelocInfo::CODE_TARGET, GetPropertyId(property)); __ Branch(done); } } @@ -1262,7 +1262,11 @@ void FullCodeGenerator::EmitDynamicLoadFromSlotFastCase( } -void FullCodeGenerator::EmitVariableLoad(Variable* var) { +void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) { + // Record position before possible IC call. + SetSourcePosition(proxy->position()); + Variable* var = proxy->var(); + // Three cases: non-this global variables, lookup slots, and all other // types of slots. Slot* slot = var->AsSlot(); @@ -1275,7 +1279,7 @@ void FullCodeGenerator::EmitVariableLoad(Variable* var) { __ lw(a0, GlobalObjectOperand()); __ li(a2, Operand(var->name())); Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize(); - __ CallWithAstId(ic, RelocInfo::CODE_TARGET_CONTEXT); + __ Call(ic, RelocInfo::CODE_TARGET_CONTEXT); context()->Plug(v0); } else if (slot->type() == Slot::LOOKUP) { @@ -1421,7 +1425,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) { Handle<Code> ic = is_strict_mode() ? 
isolate()->builtins()->StoreIC_Initialize_Strict() : isolate()->builtins()->StoreIC_Initialize(); - __ CallWithAstId(ic, RelocInfo::CODE_TARGET, key->id()); + __ Call(ic, RelocInfo::CODE_TARGET, key->id()); PrepareForBailoutForId(key->id(), NO_REGISTERS); } else { VisitForEffect(value); @@ -1598,7 +1602,7 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) { { AccumulatorValueContext context(this); switch (assign_type) { case VARIABLE: - EmitVariableLoad(expr->target()->AsVariableProxy()->var()); + EmitVariableLoad(expr->target()->AsVariableProxy()); PrepareForBailout(expr->target(), TOS_REG); break; case NAMED_PROPERTY: @@ -1665,7 +1669,7 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) { __ li(a2, Operand(key->handle())); // Call load IC. It has arguments receiver and property name a0 and a2. Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize(); - __ CallWithAstId(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop)); + __ Call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop)); } @@ -1674,7 +1678,7 @@ void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) { __ mov(a0, result_register()); // Call keyed load IC. It has arguments key and receiver in a0 and a1. Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize(); - __ CallWithAstId(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop)); + __ Call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop)); } @@ -1702,7 +1706,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr, __ bind(&stub_call); BinaryOpStub stub(op, mode); - __ CallWithAstId(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id()); + __ Call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id()); patch_site.EmitPatchInfo(); __ jmp(&done); @@ -1785,7 +1789,7 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, __ pop(a1); BinaryOpStub stub(op, mode); JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code. - __ CallWithAstId(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id()); + __ Call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id()); patch_site.EmitPatchInfo(); context()->Plug(v0); } @@ -1826,7 +1830,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) { Handle<Code> ic = is_strict_mode() ? isolate()->builtins()->StoreIC_Initialize_Strict() : isolate()->builtins()->StoreIC_Initialize(); - __ CallWithAstId(ic); + __ Call(ic); break; } case KEYED_PROPERTY: { @@ -1839,7 +1843,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) { Handle<Code> ic = is_strict_mode() ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict() : isolate()->builtins()->KeyedStoreIC_Initialize(); - __ CallWithAstId(ic); + __ Call(ic); break; } } @@ -1864,7 +1868,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Handle<Code> ic = is_strict_mode() ? isolate()->builtins()->StoreIC_Initialize_Strict() : isolate()->builtins()->StoreIC_Initialize(); - __ CallWithAstId(ic, RelocInfo::CODE_TARGET_CONTEXT); + __ Call(ic, RelocInfo::CODE_TARGET_CONTEXT); } else if (op == Token::INIT_CONST) { // Like var declarations, const declarations are hoisted to function @@ -1962,7 +1966,7 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) { Handle<Code> ic = is_strict_mode() ? 
isolate()->builtins()->StoreIC_Initialize_Strict() : isolate()->builtins()->StoreIC_Initialize(); - __ CallWithAstId(ic, RelocInfo::CODE_TARGET, expr->id()); + __ Call(ic, RelocInfo::CODE_TARGET, expr->id()); // If the assignment ends an initialization block, revert to fast case. if (expr->ends_initialization_block()) { @@ -2014,7 +2018,7 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) { Handle<Code> ic = is_strict_mode() ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict() : isolate()->builtins()->KeyedStoreIC_Initialize(); - __ CallWithAstId(ic, RelocInfo::CODE_TARGET, expr->id()); + __ Call(ic, RelocInfo::CODE_TARGET, expr->id()); // If the assignment ends an initialization block, revert to fast case. if (expr->ends_initialization_block()) { @@ -2067,7 +2071,7 @@ void FullCodeGenerator::EmitCallWithIC(Call* expr, InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP; Handle<Code> ic = isolate()->stub_cache()->ComputeCallInitialize(arg_count, in_loop, mode); - __ CallWithAstId(ic, mode, expr->id()); + __ Call(ic, mode, expr->id()); RecordJSReturnSite(expr); // Restore context register. __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); @@ -2101,7 +2105,7 @@ void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr, Handle<Code> ic = isolate()->stub_cache()->ComputeKeyedCallInitialize(arg_count, in_loop); __ lw(a2, MemOperand(sp, (arg_count + 1) * kPointerSize)); // Key. - __ CallWithAstId(ic, RelocInfo::CODE_TARGET, expr->id()); + __ Call(ic, RelocInfo::CODE_TARGET, expr->id()); RecordJSReturnSite(expr); // Restore context register. __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); @@ -2301,7 +2305,7 @@ void FullCodeGenerator::VisitCall(Call* expr) { SetSourcePosition(prop->position()); Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize(); - __ CallWithAstId(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop)); + __ Call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop)); __ lw(a1, GlobalObjectOperand()); __ lw(a1, FieldMemOperand(a1, GlobalObject::kGlobalReceiverOffset)); __ Push(v0, a1); // Function, receiver. @@ -2780,13 +2784,12 @@ void FullCodeGenerator::EmitLog(ZoneList<Expression*>* args) { // with '%2s' (see Logger::LogRuntime for all the formats). // 2 (array): Arguments to the format string. ASSERT_EQ(args->length(), 3); -#ifdef ENABLE_LOGGING_AND_PROFILING if (CodeGenerator::ShouldGenerateLog(args->at(0))) { VisitForStackValue(args->at(1)); VisitForStackValue(args->at(2)); __ CallRuntime(Runtime::kLog, 2); } -#endif + // Finally, we're expected to leave a value on the top of the stack. __ LoadRoot(v0, Heap::kUndefinedValueRootIndex); context()->Plug(v0); @@ -3664,7 +3667,7 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) { isolate()->stub_cache()->ComputeCallInitialize(arg_count, NOT_IN_LOOP, mode); - __ CallWithAstId(ic, mode, expr->id()); + __ Call(ic, mode, expr->id()); // Restore context register. 
__ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); } else { @@ -3807,7 +3810,7 @@ void FullCodeGenerator::EmitUnaryOperation(UnaryOperation* expr, VisitForAccumulatorValue(expr->expression()); SetSourcePosition(expr->position()); __ mov(a0, result_register()); - __ CallWithAstId(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id()); + __ Call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id()); context()->Plug(v0); } @@ -3839,7 +3842,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) { if (assign_type == VARIABLE) { ASSERT(expr->expression()->AsVariableProxy()->var() != NULL); AccumulatorValueContext context(this); - EmitVariableLoad(expr->expression()->AsVariableProxy()->var()); + EmitVariableLoad(expr->expression()->AsVariableProxy()); } else { // Reserve space for result of postfix operation. if (expr->is_postfix() && !context()->IsEffect()) { @@ -3918,7 +3921,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) { SetSourcePosition(expr->position()); BinaryOpStub stub(Token::ADD, NO_OVERWRITE); - __ CallWithAstId(stub.GetCode(), RelocInfo::CODE_TARGET, expr->CountId()); + __ Call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->CountId()); patch_site.EmitPatchInfo(); __ bind(&done); @@ -3951,7 +3954,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) { Handle<Code> ic = is_strict_mode() ? isolate()->builtins()->StoreIC_Initialize_Strict() : isolate()->builtins()->StoreIC_Initialize(); - __ CallWithAstId(ic, RelocInfo::CODE_TARGET, expr->id()); + __ Call(ic, RelocInfo::CODE_TARGET, expr->id()); PrepareForBailoutForId(expr->AssignmentId(), TOS_REG); if (expr->is_postfix()) { if (!context()->IsEffect()) { @@ -3969,7 +3972,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) { Handle<Code> ic = is_strict_mode() ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict() : isolate()->builtins()->KeyedStoreIC_Initialize(); - __ CallWithAstId(ic, RelocInfo::CODE_TARGET, expr->id()); + __ Call(ic, RelocInfo::CODE_TARGET, expr->id()); PrepareForBailoutForId(expr->AssignmentId(), TOS_REG); if (expr->is_postfix()) { if (!context()->IsEffect()) { @@ -3993,7 +3996,7 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) { Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize(); // Use a regular load, not a contextual load, to avoid a reference // error. - __ CallWithAstId(ic); + __ Call(ic); PrepareForBailout(expr, TOS_REG); context()->Plug(v0); } else if (proxy != NULL && @@ -4190,7 +4193,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) { // Record position and call the compare IC. SetSourcePosition(expr->position()); Handle<Code> ic = CompareIC::GetUninitialized(op); - __ CallWithAstId(ic, RelocInfo::CODE_TARGET, expr->id()); + __ Call(ic, RelocInfo::CODE_TARGET, expr->id()); patch_site.EmitPatchInfo(); PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false); Split(cc, v0, Operand(zero_reg), if_true, if_false, fall_through); diff --git a/deps/v8/src/mips/ic-mips.cc b/deps/v8/src/mips/ic-mips.cc index cbae8e46e6..da39962691 100644 --- a/deps/v8/src/mips/ic-mips.cc +++ b/deps/v8/src/mips/ic-mips.cc @@ -214,115 +214,6 @@ static void GenerateDictionaryStore(MacroAssembler* masm, } -static void GenerateNumberDictionaryLoad(MacroAssembler* masm, - Label* miss, - Register elements, - Register key, - Register result, - Register reg0, - Register reg1, - Register reg2) { - // Register use: - // - // elements - holds the slow-case elements of the receiver on entry. 
- // Unchanged unless 'result' is the same register. - // - // key - holds the smi key on entry. - // Unchanged unless 'result' is the same register. - // - // - // result - holds the result on exit if the load succeeded. - // Allowed to be the same as 'key' or 'result'. - // Unchanged on bailout so 'key' or 'result' can be used - // in further computation. - // - // Scratch registers: - // - // reg0 - holds the untagged key on entry and holds the hash once computed. - // - // reg1 - Used to hold the capacity mask of the dictionary. - // - // reg2 - Used for the index into the dictionary. - // at - Temporary (avoid MacroAssembler instructions also using 'at'). - Label done; - - // Compute the hash code from the untagged key. This must be kept in sync - // with ComputeIntegerHash in utils.h. - // - // hash = ~hash + (hash << 15); - __ nor(reg1, reg0, zero_reg); - __ sll(at, reg0, 15); - __ addu(reg0, reg1, at); - - // hash = hash ^ (hash >> 12); - __ srl(at, reg0, 12); - __ xor_(reg0, reg0, at); - - // hash = hash + (hash << 2); - __ sll(at, reg0, 2); - __ addu(reg0, reg0, at); - - // hash = hash ^ (hash >> 4); - __ srl(at, reg0, 4); - __ xor_(reg0, reg0, at); - - // hash = hash * 2057; - __ li(reg1, Operand(2057)); - __ mul(reg0, reg0, reg1); - - // hash = hash ^ (hash >> 16); - __ srl(at, reg0, 16); - __ xor_(reg0, reg0, at); - - // Compute the capacity mask. - __ lw(reg1, FieldMemOperand(elements, NumberDictionary::kCapacityOffset)); - __ sra(reg1, reg1, kSmiTagSize); - __ Subu(reg1, reg1, Operand(1)); - - // Generate an unrolled loop that performs a few probes before giving up. - static const int kProbes = 4; - for (int i = 0; i < kProbes; i++) { - // Use reg2 for index calculations and keep the hash intact in reg0. - __ mov(reg2, reg0); - // Compute the masked index: (hash + i + i * i) & mask. - if (i > 0) { - __ Addu(reg2, reg2, Operand(NumberDictionary::GetProbeOffset(i))); - } - __ and_(reg2, reg2, reg1); - - // Scale the index by multiplying by the element size. - ASSERT(NumberDictionary::kEntrySize == 3); - __ sll(at, reg2, 1); // 2x. - __ addu(reg2, reg2, at); // reg2 = reg2 * 3. - - // Check if the key is identical to the name. - __ sll(at, reg2, kPointerSizeLog2); - __ addu(reg2, elements, at); - - __ lw(at, FieldMemOperand(reg2, NumberDictionary::kElementsStartOffset)); - if (i != kProbes - 1) { - __ Branch(&done, eq, key, Operand(at)); - } else { - __ Branch(miss, ne, key, Operand(at)); - } - } - - __ bind(&done); - // Check that the value is a normal property. - // reg2: elements + (index * kPointerSize). - const int kDetailsOffset = - NumberDictionary::kElementsStartOffset + 2 * kPointerSize; - __ lw(reg1, FieldMemOperand(reg2, kDetailsOffset)); - __ And(at, reg1, Operand(Smi::FromInt(PropertyDetails::TypeField::mask()))); - __ Branch(miss, ne, at, Operand(zero_reg)); - - // Get the value at the masked, scaled index and return. 
- const int kValueOffset = - NumberDictionary::kElementsStartOffset + kPointerSize; - __ lw(result, FieldMemOperand(reg2, kValueOffset)); -} - - void LoadIC::GenerateArrayLength(MacroAssembler* masm) { // ----------- S t a t e ------------- // -- a2 : name @@ -751,7 +642,7 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) { __ Branch(&slow_load, ne, a3, Operand(at)); __ sra(a0, a2, kSmiTagSize); // a0: untagged index - GenerateNumberDictionaryLoad(masm, &slow_load, t0, a2, a1, a0, a3, t1); + __ LoadFromNumberDictionary(&slow_load, t0, a2, a1, a0, a3, t1); __ IncrementCounter(counters->keyed_call_generic_smi_dict(), 1, a0, a3); __ jmp(&do_call); @@ -963,6 +854,9 @@ static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm, const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize; Register backing_store = parameter_map; __ lw(backing_store, FieldMemOperand(parameter_map, kBackingStoreOffset)); + Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map()); + __ CheckMap(backing_store, scratch, fixed_array_map, slow_case, + DONT_DO_SMI_CHECK); __ lw(scratch, FieldMemOperand(backing_store, FixedArray::kLengthOffset)); __ Branch(slow_case, Ugreater_equal, key, Operand(scratch)); __ li(scratch, Operand(kPointerSize >> 1)); @@ -1136,7 +1030,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { __ LoadRoot(at, Heap::kHashTableMapRootIndex); __ Branch(&slow, ne, a3, Operand(at)); __ sra(a2, a0, kSmiTagSize); - GenerateNumberDictionaryLoad(masm, &slow, t0, a0, v0, a2, a3, t1); + __ LoadFromNumberDictionary(&slow, t0, a0, v0, a2, a3, t1); __ Ret(); // Slow case, key and receiver still in a0 and a1. diff --git a/deps/v8/src/mips/macro-assembler-mips.cc b/deps/v8/src/mips/macro-assembler-mips.cc index 7c085baac0..712ceec957 100644 --- a/deps/v8/src/mips/macro-assembler-mips.cc +++ b/deps/v8/src/mips/macro-assembler-mips.cc @@ -50,87 +50,6 @@ MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size) } -// Arguments macros. 
-#define COND_TYPED_ARGS Condition cond, Register r1, const Operand& r2 -#define COND_ARGS cond, r1, r2 - -#define REGISTER_TARGET_BODY(Name) \ -void MacroAssembler::Name(Register target, \ - BranchDelaySlot bd) { \ - Name(Operand(target), bd); \ -} \ -void MacroAssembler::Name(Register target, COND_TYPED_ARGS, \ - BranchDelaySlot bd) { \ - Name(Operand(target), COND_ARGS, bd); \ -} - - -#define INT_PTR_TARGET_BODY(Name) \ -void MacroAssembler::Name(intptr_t target, RelocInfo::Mode rmode, \ - BranchDelaySlot bd) { \ - Name(Operand(target, rmode), bd); \ -} \ -void MacroAssembler::Name(intptr_t target, \ - RelocInfo::Mode rmode, \ - COND_TYPED_ARGS, \ - BranchDelaySlot bd) { \ - Name(Operand(target, rmode), COND_ARGS, bd); \ -} - - -#define BYTE_PTR_TARGET_BODY(Name) \ -void MacroAssembler::Name(byte* target, RelocInfo::Mode rmode, \ - BranchDelaySlot bd) { \ - Name(reinterpret_cast<intptr_t>(target), rmode, bd); \ -} \ -void MacroAssembler::Name(byte* target, \ - RelocInfo::Mode rmode, \ - COND_TYPED_ARGS, \ - BranchDelaySlot bd) { \ - Name(reinterpret_cast<intptr_t>(target), rmode, COND_ARGS, bd); \ -} - - -#define CODE_TARGET_BODY(Name) \ -void MacroAssembler::Name(Handle<Code> target, RelocInfo::Mode rmode, \ - BranchDelaySlot bd) { \ - Name(reinterpret_cast<intptr_t>(target.location()), rmode, bd); \ -} \ -void MacroAssembler::Name(Handle<Code> target, \ - RelocInfo::Mode rmode, \ - COND_TYPED_ARGS, \ - BranchDelaySlot bd) { \ - Name(reinterpret_cast<intptr_t>(target.location()), rmode, COND_ARGS, bd); \ -} - - -REGISTER_TARGET_BODY(Jump) -REGISTER_TARGET_BODY(Call) -INT_PTR_TARGET_BODY(Jump) -INT_PTR_TARGET_BODY(Call) -BYTE_PTR_TARGET_BODY(Jump) -BYTE_PTR_TARGET_BODY(Call) -CODE_TARGET_BODY(Jump) -CODE_TARGET_BODY(Call) - -#undef COND_TYPED_ARGS -#undef COND_ARGS -#undef REGISTER_TARGET_BODY -#undef BYTE_PTR_TARGET_BODY -#undef CODE_TARGET_BODY - - -void MacroAssembler::Ret(BranchDelaySlot bd) { - Jump(Operand(ra), bd); -} - - -void MacroAssembler::Ret(Condition cond, Register r1, const Operand& r2, - BranchDelaySlot bd) { - Jump(Operand(ra), cond, r1, r2, bd); -} - - void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index) { lw(destination, MemOperand(s6, index << kPointerSizeLog2)); @@ -424,6 +343,114 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg, } +void MacroAssembler::LoadFromNumberDictionary(Label* miss, + Register elements, + Register key, + Register result, + Register reg0, + Register reg1, + Register reg2) { + // Register use: + // + // elements - holds the slow-case elements of the receiver on entry. + // Unchanged unless 'result' is the same register. + // + // key - holds the smi key on entry. + // Unchanged unless 'result' is the same register. + // + // + // result - holds the result on exit if the load succeeded. + // Allowed to be the same as 'key' or 'result'. + // Unchanged on bailout so 'key' or 'result' can be used + // in further computation. + // + // Scratch registers: + // + // reg0 - holds the untagged key on entry and holds the hash once computed. + // + // reg1 - Used to hold the capacity mask of the dictionary. + // + // reg2 - Used for the index into the dictionary. + // at - Temporary (avoid MacroAssembler instructions also using 'at'). + Label done; + + // Compute the hash code from the untagged key. This must be kept in sync + // with ComputeIntegerHash in utils.h. 
+ // + // hash = ~hash + (hash << 15); + nor(reg1, reg0, zero_reg); + sll(at, reg0, 15); + addu(reg0, reg1, at); + + // hash = hash ^ (hash >> 12); + srl(at, reg0, 12); + xor_(reg0, reg0, at); + + // hash = hash + (hash << 2); + sll(at, reg0, 2); + addu(reg0, reg0, at); + + // hash = hash ^ (hash >> 4); + srl(at, reg0, 4); + xor_(reg0, reg0, at); + + // hash = hash * 2057; + li(reg1, Operand(2057)); + mul(reg0, reg0, reg1); + + // hash = hash ^ (hash >> 16); + srl(at, reg0, 16); + xor_(reg0, reg0, at); + + // Compute the capacity mask. + lw(reg1, FieldMemOperand(elements, NumberDictionary::kCapacityOffset)); + sra(reg1, reg1, kSmiTagSize); + Subu(reg1, reg1, Operand(1)); + + // Generate an unrolled loop that performs a few probes before giving up. + static const int kProbes = 4; + for (int i = 0; i < kProbes; i++) { + // Use reg2 for index calculations and keep the hash intact in reg0. + mov(reg2, reg0); + // Compute the masked index: (hash + i + i * i) & mask. + if (i > 0) { + Addu(reg2, reg2, Operand(NumberDictionary::GetProbeOffset(i))); + } + and_(reg2, reg2, reg1); + + // Scale the index by multiplying by the element size. + ASSERT(NumberDictionary::kEntrySize == 3); + sll(at, reg2, 1); // 2x. + addu(reg2, reg2, at); // reg2 = reg2 * 3. + + // Check if the key is identical to the name. + sll(at, reg2, kPointerSizeLog2); + addu(reg2, elements, at); + + lw(at, FieldMemOperand(reg2, NumberDictionary::kElementsStartOffset)); + if (i != kProbes - 1) { + Branch(&done, eq, key, Operand(at)); + } else { + Branch(miss, ne, key, Operand(at)); + } + } + + bind(&done); + // Check that the value is a normal property. + // reg2: elements + (index * kPointerSize). + const int kDetailsOffset = + NumberDictionary::kElementsStartOffset + 2 * kPointerSize; + lw(reg1, FieldMemOperand(reg2, kDetailsOffset)); + And(at, reg1, Operand(Smi::FromInt(PropertyDetails::TypeField::mask()))); + Branch(miss, ne, at, Operand(zero_reg)); + + // Get the value at the masked, scaled index and return. + const int kValueOffset = + NumberDictionary::kElementsStartOffset + kPointerSize; + lw(result, FieldMemOperand(reg2, kValueOffset)); +} + + // --------------------------------------------------------------------------- // Instruction macros. @@ -1901,6 +1928,176 @@ void MacroAssembler::BranchAndLinkShort(Label* L, Condition cond, Register rs, } +void MacroAssembler::Jump(Register target, + Condition cond, + Register rs, + const Operand& rt, + BranchDelaySlot bd) { + BlockTrampolinePoolScope block_trampoline_pool(this); + if (cond == cc_always) { + jr(target); + } else { + BRANCH_ARGS_CHECK(cond, rs, rt); + Branch(2, NegateCondition(cond), rs, rt); + jr(target); + } + // Emit a nop in the branch delay slot if required. 
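
The LoadFromNumberDictionary body above computes the integer hash one MIPS instruction at a time; per its own comment it must stay in sync with ComputeIntegerHash in utils.h. The same sequence as plain C++, transcribed directly from the step comments in the diff:

    #include <stdint.h>

    uint32_t ComputeIntegerHash(uint32_t hash) {
      hash = ~hash + (hash << 15);
      hash = hash ^ (hash >> 12);
      hash = hash + (hash << 2);
      hash = hash ^ (hash >> 4);
      hash = hash * 2057;
      hash = hash ^ (hash >> 16);
      return hash;
    }

The probe loop that follows it is quadratic probing unrolled four times: probe i inspects index (hash + i + i*i) & mask, scaled by the three-pointer entry size, before falling through to the miss label.
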
+ if (bd == PROTECT) + nop(); +} + + +void MacroAssembler::Jump(intptr_t target, + RelocInfo::Mode rmode, + Condition cond, + Register rs, + const Operand& rt, + BranchDelaySlot bd) { + li(t9, Operand(target, rmode)); + Jump(t9, cond, rs, rt, bd); +} + + +void MacroAssembler::Jump(Address target, + RelocInfo::Mode rmode, + Condition cond, + Register rs, + const Operand& rt, + BranchDelaySlot bd) { + ASSERT(!RelocInfo::IsCodeTarget(rmode)); + Jump(reinterpret_cast<intptr_t>(target), rmode, cond, rs, rt, bd); +} + + +void MacroAssembler::Jump(Handle<Code> code, + RelocInfo::Mode rmode, + Condition cond, + Register rs, + const Operand& rt, + BranchDelaySlot bd) { + ASSERT(RelocInfo::IsCodeTarget(rmode)); + Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond, rs, rt, bd); +} + + +int MacroAssembler::CallSize(Register target, + Condition cond, + Register rs, + const Operand& rt, + BranchDelaySlot bd) { + int size = 0; + + if (cond == cc_always) { + size += 1; + } else { + size += 3; + } + + if (bd == PROTECT) + size += 1; + + return size * kInstrSize; +} + + +// Note: To call gcc-compiled C code on mips, you must call thru t9. +void MacroAssembler::Call(Register target, + Condition cond, + Register rs, + const Operand& rt, + BranchDelaySlot bd) { + BlockTrampolinePoolScope block_trampoline_pool(this); + Label start; + bind(&start); + if (cond == cc_always) { + jalr(target); + } else { + BRANCH_ARGS_CHECK(cond, rs, rt); + Branch(2, NegateCondition(cond), rs, rt); + jalr(target); + } + // Emit a nop in the branch delay slot if required. + if (bd == PROTECT) + nop(); + + ASSERT_EQ(CallSize(target, cond, rs, rt, bd), + SizeOfCodeGeneratedSince(&start)); +} + + +int MacroAssembler::CallSize(Address target, + RelocInfo::Mode rmode, + Condition cond, + Register rs, + const Operand& rt, + BranchDelaySlot bd) { + int size = CallSize(t9, cond, rs, rt, bd); + return size + 2 * kInstrSize; +} + + +void MacroAssembler::Call(Address target, + RelocInfo::Mode rmode, + Condition cond, + Register rs, + const Operand& rt, + BranchDelaySlot bd) { + BlockTrampolinePoolScope block_trampoline_pool(this); + Label start; + bind(&start); + int32_t target_int = reinterpret_cast<int32_t>(target); + // Must record previous source positions before the + // li() generates a new code target. 
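
The CallSize helpers above encode the exact sizes that the new Call implementations assert against with SizeOfCodeGeneratedSince. Restated as arithmetic (kInstrSize is 4 bytes on MIPS32):

    // An unconditional call is a single jalr; a conditional one is a
    // branch-over plus its delay-slot nop plus the jalr (3 instructions);
    // a PROTECT delay slot appends one more nop after the jalr.
    const int kInstrSize = 4;

    int CallSizeInBytes(bool conditional, bool protect_delay_slot) {
      int n = conditional ? 3 : 1;
      if (protect_delay_slot) n += 1;
      return n * kInstrSize;
    }

CallSize(Address) adds 2 * kInstrSize on top of this because li(t9, target) materializes a 32-bit address as a lui/ori pair before the register call.
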
+ positions_recorder()->WriteRecordedPositions(); + li(t9, Operand(target_int, rmode), true); + Call(t9, cond, rs, rt, bd); + ASSERT_EQ(CallSize(target, rmode, cond, rs, rt, bd), + SizeOfCodeGeneratedSince(&start)); +} + + +int MacroAssembler::CallSize(Handle<Code> code, + RelocInfo::Mode rmode, + unsigned ast_id, + Condition cond, + Register rs, + const Operand& rt, + BranchDelaySlot bd) { + return CallSize(reinterpret_cast<Address>(code.location()), + rmode, cond, rs, rt, bd); +} + + +void MacroAssembler::Call(Handle<Code> code, + RelocInfo::Mode rmode, + unsigned ast_id, + Condition cond, + Register rs, + const Operand& rt, + BranchDelaySlot bd) { + BlockTrampolinePoolScope block_trampoline_pool(this); + Label start; + bind(&start); + ASSERT(RelocInfo::IsCodeTarget(rmode)); + if (rmode == RelocInfo::CODE_TARGET && ast_id != kNoASTId) { + ASSERT(ast_id_for_reloc_info_ == kNoASTId); + ast_id_for_reloc_info_ = ast_id; + rmode = RelocInfo::CODE_TARGET_WITH_ID; + } + Call(reinterpret_cast<Address>(code.location()), rmode, cond, rs, rt, bd); + ASSERT_EQ(CallSize(code, rmode, ast_id, cond, rs, rt), + SizeOfCodeGeneratedSince(&start)); +} + + +void MacroAssembler::Ret(Condition cond, + Register rs, + const Operand& rt, + BranchDelaySlot bd) { + Jump(ra, cond, rs, rt, bd); +} + + void MacroAssembler::J(Label* L, BranchDelaySlot bdslot) { BlockTrampolinePoolScope block_trampoline_pool(this); @@ -1959,142 +2156,24 @@ void MacroAssembler::Jalr(Label* L, BranchDelaySlot bdslot) { } -void MacroAssembler::Jump(const Operand& target, BranchDelaySlot bdslot) { - BlockTrampolinePoolScope block_trampoline_pool(this); - if (target.is_reg()) { - jr(target.rm()); - } else { - if (!MustUseReg(target.rmode_)) { - j(target.imm32_); - } else { - li(t9, target); - jr(t9); - } - } - // Emit a nop in the branch delay slot if required. - if (bdslot == PROTECT) - nop(); -} - - -void MacroAssembler::Jump(const Operand& target, - Condition cond, Register rs, const Operand& rt, - BranchDelaySlot bdslot) { - BlockTrampolinePoolScope block_trampoline_pool(this); - BRANCH_ARGS_CHECK(cond, rs, rt); - if (target.is_reg()) { - if (cond == cc_always) { - jr(target.rm()); - } else { - Branch(2, NegateCondition(cond), rs, rt); - jr(target.rm()); - } - } else { // Not register target. - if (!MustUseReg(target.rmode_)) { - if (cond == cc_always) { - j(target.imm32_); - } else { - Branch(2, NegateCondition(cond), rs, rt); - j(target.imm32_); // Will generate only one instruction. - } - } else { // MustUseReg(target). - li(t9, target); - if (cond == cc_always) { - jr(t9); - } else { - Branch(2, NegateCondition(cond), rs, rt); - jr(t9); // Will generate only one instruction. - } - } - } - // Emit a nop in the branch delay slot if required. - if (bdslot == PROTECT) - nop(); -} - - -int MacroAssembler::CallSize(Handle<Code> code, RelocInfo::Mode rmode) { - return 4 * kInstrSize; -} - - -int MacroAssembler::CallSize(Register reg) { - return 2 * kInstrSize; -} - - -// Note: To call gcc-compiled C code on mips, you must call thru t9. -void MacroAssembler::Call(const Operand& target, BranchDelaySlot bdslot) { - BlockTrampolinePoolScope block_trampoline_pool(this); - if (target.is_reg()) { - jalr(target.rm()); - } else { // !target.is_reg(). - if (!MustUseReg(target.rmode_)) { - jal(target.imm32_); - } else { // MustUseReg(target). - // Must record previous source positions before the - // li() generates a new code target. 
- positions_recorder()->WriteRecordedPositions(); - li(t9, target); - jalr(t9); - } - } - // Emit a nop in the branch delay slot if required. - if (bdslot == PROTECT) - nop(); -} - - -// Note: To call gcc-compiled C code on mips, you must call thru t9. -void MacroAssembler::Call(const Operand& target, - Condition cond, Register rs, const Operand& rt, - BranchDelaySlot bdslot) { - BlockTrampolinePoolScope block_trampoline_pool(this); - BRANCH_ARGS_CHECK(cond, rs, rt); - if (target.is_reg()) { - if (cond == cc_always) { - jalr(target.rm()); - } else { - Branch(2, NegateCondition(cond), rs, rt); - jalr(target.rm()); - } - } else { // !target.is_reg(). - if (!MustUseReg(target.rmode_)) { - if (cond == cc_always) { - jal(target.imm32_); - } else { - Branch(2, NegateCondition(cond), rs, rt); - jal(target.imm32_); // Will generate only one instruction. - } - } else { // MustUseReg(target) - li(t9, target); - if (cond == cc_always) { - jalr(t9); - } else { - Branch(2, NegateCondition(cond), rs, rt); - jalr(t9); // Will generate only one instruction. - } - } +void MacroAssembler::DropAndRet(int drop, + Condition cond, + Register r1, + const Operand& r2) { + // This is a workaround to make sure only one branch instruction is + // generated. It relies on Drop and Ret not creating branches if + // cond == cc_always. + Label skip; + if (cond != cc_always) { + Branch(&skip, NegateCondition(cond), r1, r2); } - // Emit a nop in the branch delay slot if required. - if (bdslot == PROTECT) - nop(); -} + Drop(drop); + Ret(); -void MacroAssembler::CallWithAstId(Handle<Code> code, - RelocInfo::Mode rmode, - unsigned ast_id, - Condition cond, - Register r1, - const Operand& r2) { - ASSERT(RelocInfo::IsCodeTarget(rmode)); - if (rmode == RelocInfo::CODE_TARGET && ast_id != kNoASTId) { - ASSERT(ast_id_for_reloc_info_ == kNoASTId); - ast_id_for_reloc_info_ = ast_id; - rmode = RelocInfo::CODE_TARGET_WITH_ID; + if (cond != cc_always) { + bind(&skip); } - Call(reinterpret_cast<intptr_t>(code.location()), rmode, cond, r1, r2); } @@ -2109,12 +2188,10 @@ void MacroAssembler::Drop(int count, Label skip; if (cond != al) { - Branch(&skip, NegateCondition(cond), reg, op); + Branch(&skip, NegateCondition(cond), reg, op); } - if (count > 0) { - addiu(sp, sp, count * kPointerSize); - } + addiu(sp, sp, count * kPointerSize); if (cond != al) { bind(&skip); @@ -2122,26 +2199,6 @@ void MacroAssembler::Drop(int count, } -void MacroAssembler::DropAndRet(int drop, - Condition cond, - Register r1, - const Operand& r2) { - // This is a workaround to make sure only one branch instruction is - // generated. It relies on Drop and Ret not creating branches if - // cond == cc_always. 
- Label skip; - if (cond != cc_always) { - Branch(&skip, NegateCondition(cond), r1, r2); - } - - Drop(drop); - Ret(); - - if (cond != cc_always) { - bind(&skip); - } -} - void MacroAssembler::Swap(Register reg1, Register reg2, @@ -2804,7 +2861,7 @@ void MacroAssembler::CheckFastElements(Register map, Register scratch, Label* fail) { STATIC_ASSERT(JSObject::FAST_ELEMENTS == 0); - lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset)); + lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset)); Branch(fail, hi, scratch, Operand(Map::kMaximumBitField2FastElementValue)); } @@ -2979,9 +3036,9 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected, Handle<Code> adaptor = isolate()->builtins()->ArgumentsAdaptorTrampoline(); if (flag == CALL_FUNCTION) { - call_wrapper.BeforeCall(CallSize(adaptor, RelocInfo::CODE_TARGET)); + call_wrapper.BeforeCall(CallSize(adaptor)); SetCallKind(t1, call_kind); - Call(adaptor, RelocInfo::CODE_TARGET); + Call(adaptor); call_wrapper.AfterCall(); jmp(done); } else { @@ -3178,7 +3235,7 @@ void MacroAssembler::GetObjectType(Register object, void MacroAssembler::CallStub(CodeStub* stub, Condition cond, Register r1, const Operand& r2) { ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs. - Call(stub->GetCode(), RelocInfo::CODE_TARGET, cond, r1, r2); + Call(stub->GetCode(), RelocInfo::CODE_TARGET, kNoASTId, cond, r1, r2); } @@ -3189,7 +3246,8 @@ MaybeObject* MacroAssembler::TryCallStub(CodeStub* stub, Condition cond, { MaybeObject* maybe_result = stub->TryGetCode(); if (!maybe_result->ToObject(&result)) return maybe_result; } - Call(Handle<Code>(Code::cast(result)), RelocInfo::CODE_TARGET, cond, r1, r2); + Call(Handle<Code>(Code::cast(result)), RelocInfo::CODE_TARGET, + kNoASTId, cond, r1, r2); return result; } diff --git a/deps/v8/src/mips/macro-assembler-mips.h b/deps/v8/src/mips/macro-assembler-mips.h index 985ef0c830..4994516ea7 100644 --- a/deps/v8/src/mips/macro-assembler-mips.h +++ b/deps/v8/src/mips/macro-assembler-mips.h @@ -99,44 +99,11 @@ class MacroAssembler: public Assembler { // macro assembler. MacroAssembler(Isolate* isolate, void* buffer, int size); -// Arguments macros. + // Arguments macros. #define COND_TYPED_ARGS Condition cond, Register r1, const Operand& r2 #define COND_ARGS cond, r1, r2 -// Prototypes. - -// Prototypes for functions with no target (eg Ret()). -#define DECLARE_NOTARGET_PROTOTYPE(Name) \ - void Name(BranchDelaySlot bd = PROTECT); \ - void Name(COND_TYPED_ARGS, BranchDelaySlot bd = PROTECT); \ - inline void Name(BranchDelaySlot bd, COND_TYPED_ARGS) { \ - Name(COND_ARGS, bd); \ - } - -// Prototypes for functions with a target. - -// Cases when relocation may be needed. -#define DECLARE_RELOC_PROTOTYPE(Name, target_type) \ - void Name(target_type target, \ - RelocInfo::Mode rmode, \ - BranchDelaySlot bd = PROTECT); \ - inline void Name(BranchDelaySlot bd, \ - target_type target, \ - RelocInfo::Mode rmode) { \ - Name(target, rmode, bd); \ - } \ - void Name(target_type target, \ - RelocInfo::Mode rmode, \ - COND_TYPED_ARGS, \ - BranchDelaySlot bd = PROTECT); \ - inline void Name(BranchDelaySlot bd, \ - target_type target, \ - RelocInfo::Mode rmode, \ - COND_TYPED_ARGS) { \ - Name(target, rmode, COND_ARGS, bd); \ - } - -// Cases when relocation is not needed. + // Cases when relocation is not needed. 
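
One subtle bug fix sits in the CheckFastElements hunk above: the check compares against Map::kMaximumBitField2FastElementValue, so it must load the map's bit_field2 byte (which encodes the elements kind), not the instance-type byte the old code read. A stand-alone restatement with the field layout simplified:

    #include <stdint.h>

    struct MapFields {
      uint8_t instance_type;  // the byte the old code wrongly tested
      uint8_t bit_field2;     // encodes the elements kind
    };

    bool HasFastElements(const MapFields& map, uint8_t max_fast_value) {
      return map.bit_field2 <= max_fast_value;  // now reads kBitField2Offset
    }
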
#define DECLARE_NORELOC_PROTOTYPE(Name, target_type) \ void Name(target_type target, BranchDelaySlot bd = PROTECT); \ inline void Name(BranchDelaySlot bd, target_type target) { \ @@ -151,44 +118,44 @@ class MacroAssembler: public Assembler { Name(target, COND_ARGS, bd); \ } -// Target prototypes. - -#define DECLARE_JUMP_CALL_PROTOTYPES(Name) \ - DECLARE_NORELOC_PROTOTYPE(Name, Register) \ - DECLARE_NORELOC_PROTOTYPE(Name, const Operand&) \ - DECLARE_RELOC_PROTOTYPE(Name, byte*) \ - DECLARE_RELOC_PROTOTYPE(Name, Handle<Code>) - #define DECLARE_BRANCH_PROTOTYPES(Name) \ DECLARE_NORELOC_PROTOTYPE(Name, Label*) \ DECLARE_NORELOC_PROTOTYPE(Name, int16_t) + DECLARE_BRANCH_PROTOTYPES(Branch) + DECLARE_BRANCH_PROTOTYPES(BranchAndLink) -DECLARE_JUMP_CALL_PROTOTYPES(Jump) -DECLARE_JUMP_CALL_PROTOTYPES(Call) - -DECLARE_BRANCH_PROTOTYPES(Branch) -DECLARE_BRANCH_PROTOTYPES(BranchAndLink) - -DECLARE_NOTARGET_PROTOTYPE(Ret) - +#undef DECLARE_BRANCH_PROTOTYPES #undef COND_TYPED_ARGS #undef COND_ARGS -#undef DECLARE_NOTARGET_PROTOTYPE -#undef DECLARE_NORELOC_PROTOTYPE -#undef DECLARE_RELOC_PROTOTYPE -#undef DECLARE_JUMP_CALL_PROTOTYPES -#undef DECLARE_BRANCH_PROTOTYPES - void CallWithAstId(Handle<Code> code, - RelocInfo::Mode rmode = RelocInfo::CODE_TARGET, - unsigned ast_id = kNoASTId, - Condition cond = al, - Register r1 = zero_reg, - const Operand& r2 = Operand(zero_reg)); - int CallSize(Register reg); - int CallSize(Handle<Code> code, RelocInfo::Mode rmode); + // Jump, Call, and Ret pseudo instructions implementing inter-working. +#define COND_ARGS Condition cond = al, Register rs = zero_reg, \ + const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT + + void Jump(Register target, COND_ARGS); + void Jump(intptr_t target, RelocInfo::Mode rmode, COND_ARGS); + void Jump(Address target, RelocInfo::Mode rmode, COND_ARGS); + void Jump(Handle<Code> code, RelocInfo::Mode rmode, COND_ARGS); + int CallSize(Register target, COND_ARGS); + void Call(Register target, COND_ARGS); + int CallSize(Address target, RelocInfo::Mode rmode, COND_ARGS); + void Call(Address target, RelocInfo::Mode rmode, COND_ARGS); + int CallSize(Handle<Code> code, + RelocInfo::Mode rmode = RelocInfo::CODE_TARGET, + unsigned ast_id = kNoASTId, + COND_ARGS); + void Call(Handle<Code> code, + RelocInfo::Mode rmode = RelocInfo::CODE_TARGET, + unsigned ast_id = kNoASTId, + COND_ARGS); + void Ret(COND_ARGS); + inline void Ret(BranchDelaySlot bd) { + Ret(al, zero_reg, Operand(zero_reg), bd); + } + +#undef COND_ARGS // Emit code to discard a non-negative number of pointer-sized elements // from the stack, clobbering only the sp register. 
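// A sketch of the defaulted-parameter pattern the new Jump/Call/Ret
// declarations above use: a single COND_ARGS macro supplies the trailing
// condition/delay-slot parameters with defaults, so one overload covers both
// the unconditional and the conditional form. Types are simplified stand-ins
// (the real macro also defaults a register and an operand).
#include <iostream>

enum Condition { al, eq };
enum BranchDelaySlot { PROTECT, USE_DELAY_SLOT };

#define COND_ARGS Condition cond = al, BranchDelaySlot bd = PROTECT

struct AssemblerSketch {
  void Jump(int target, COND_ARGS) {
    std::cout << "jump " << target << " cond=" << cond << " bd=" << bd << "\n";
  }
  void Ret(COND_ARGS) {
    std::cout << "ret cond=" << cond << " bd=" << bd << "\n";
  }
};

#undef COND_ARGS

int main() {
  AssemblerSketch masm;
  masm.Jump(100);                // all defaults: al, PROTECT
  masm.Ret(eq, USE_DELAY_SLOT);  // fully conditional form
}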
@@ -299,6 +266,16 @@ DECLARE_NOTARGET_PROTOTYPE(Ret) Register scratch, Label* miss); + + void LoadFromNumberDictionary(Label* miss, + Register elements, + Register key, + Register result, + Register reg0, + Register reg1, + Register reg2); + + inline void MarkCode(NopMarkerTypes type) { nop(type); } @@ -1125,17 +1102,6 @@ DECLARE_NOTARGET_PROTOTYPE(Ret) void Jr(Label* L, BranchDelaySlot bdslot); void Jalr(Label* L, BranchDelaySlot bdslot); - void Jump(intptr_t target, RelocInfo::Mode rmode, - BranchDelaySlot bd = PROTECT); - void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = cc_always, - Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg), - BranchDelaySlot bd = PROTECT); - void Call(intptr_t target, RelocInfo::Mode rmode, - BranchDelaySlot bd = PROTECT); - void Call(intptr_t target, RelocInfo::Mode rmode, Condition cond = cc_always, - Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg), - BranchDelaySlot bd = PROTECT); - // Helper functions for generating invokes. void InvokePrologue(const ParameterCount& expected, const ParameterCount& actual, diff --git a/deps/v8/src/mips/regexp-macro-assembler-mips.cc b/deps/v8/src/mips/regexp-macro-assembler-mips.cc index cfc8f651c7..9935ef9b5b 100644 --- a/deps/v8/src/mips/regexp-macro-assembler-mips.cc +++ b/deps/v8/src/mips/regexp-macro-assembler-mips.cc @@ -179,7 +179,7 @@ void RegExpMacroAssemblerMIPS::Backtrack() { // Pop Code* offset from backtrack stack, add Code* and jump to location. Pop(a0); __ Addu(a0, a0, code_pointer()); - __ Jump(Operand(a0)); + __ Jump(a0); } @@ -1238,7 +1238,7 @@ void RegExpCEntryStub::Generate(MacroAssembler* masm_) { __ Call(t9); __ lw(ra, MemOperand(sp, 0)); __ Addu(sp, sp, Operand(stack_alignment)); - __ Jump(Operand(ra)); + __ Jump(ra); } diff --git a/deps/v8/src/mips/regexp-macro-assembler-mips.h b/deps/v8/src/mips/regexp-macro-assembler-mips.h index ad7ada5473..7fe0c8865e 100644 --- a/deps/v8/src/mips/regexp-macro-assembler-mips.h +++ b/deps/v8/src/mips/regexp-macro-assembler-mips.h @@ -29,6 +29,12 @@ #ifndef V8_MIPS_REGEXP_MACRO_ASSEMBLER_MIPS_H_ #define V8_MIPS_REGEXP_MACRO_ASSEMBLER_MIPS_H_ +#include "mips/assembler-mips.h" +#include "mips/assembler-mips-inl.h" +#include "macro-assembler.h" +#include "code.h" +#include "mips/macro-assembler-mips.h" + namespace v8 { namespace internal { @@ -249,4 +255,3 @@ class RegExpMacroAssemblerMIPS: public NativeRegExpMacroAssembler { }} // namespace v8::internal #endif // V8_MIPS_REGEXP_MACRO_ASSEMBLER_MIPS_H_ - diff --git a/deps/v8/src/mips/stub-cache-mips.cc b/deps/v8/src/mips/stub-cache-mips.cc index 3e5a0091ca..919bdc40c2 100644 --- a/deps/v8/src/mips/stub-cache-mips.cc +++ b/deps/v8/src/mips/stub-cache-mips.cc @@ -3099,7 +3099,8 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadElement(Map* receiver_map) { // -- a1 : receiver // ----------------------------------- Code* stub; - MaybeObject* maybe_stub = ComputeSharedKeyedLoadElementStub(receiver_map); + JSObject::ElementsKind elements_kind = receiver_map->elements_kind(); + MaybeObject* maybe_stub = KeyedLoadElementStub(elements_kind).TryGetCode(); if (!maybe_stub->To(&stub)) return maybe_stub; __ DispatchMap(a1, a2, @@ -3190,7 +3191,10 @@ MaybeObject* KeyedStoreStubCompiler::CompileStoreElement(Map* receiver_map) { // -- a3 : scratch // ----------------------------------- Code* stub; - MaybeObject* maybe_stub = ComputeSharedKeyedStoreElementStub(receiver_map); + JSObject::ElementsKind elements_kind = receiver_map->elements_kind(); + bool is_js_array = 
receiver_map->instance_type() == JS_ARRAY_TYPE; + MaybeObject* maybe_stub = + KeyedStoreElementStub(is_js_array, elements_kind).TryGetCode(); if (!maybe_stub->To(&stub)) return maybe_stub; __ DispatchMap(a2, a3, @@ -3390,6 +3394,54 @@ MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) { #define __ ACCESS_MASM(masm) +void KeyedLoadStubCompiler::GenerateLoadDictionaryElement( + MacroAssembler* masm) { + // ---------- S t a t e -------------- + // -- ra : return address + // -- a0 : key + // -- a1 : receiver + // ----------------------------------- + Label slow, miss_force_generic; + + Register key = a0; + Register receiver = a1; + + __ JumpIfNotSmi(key, &miss_force_generic); + __ lw(t0, FieldMemOperand(receiver, JSObject::kElementsOffset)); + __ sra(a2, a0, kSmiTagSize); + __ LoadFromNumberDictionary(&slow, t0, a0, v0, a2, a3, t1); + __ Ret(); + + // Slow case, key and receiver still in a0 and a1. + __ bind(&slow); + __ IncrementCounter( + masm->isolate()->counters()->keyed_load_external_array_slow(), + 1, a2, a3); + // Entry registers are intact. + // ---------- S t a t e -------------- + // -- ra : return address + // -- a0 : key + // -- a1 : receiver + // ----------------------------------- + Handle<Code> slow_ic = + masm->isolate()->builtins()->KeyedLoadIC_Slow(); + __ Jump(slow_ic, RelocInfo::CODE_TARGET); + + // Miss case, call the runtime. + __ bind(&miss_force_generic); + + // ---------- S t a t e -------------- + // -- ra : return address + // -- a0 : key + // -- a1 : receiver + // ----------------------------------- + + Handle<Code> miss_ic = + masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric(); + __ Jump(miss_ic, RelocInfo::CODE_TARGET); +} + + static bool IsElementTypeSigned(JSObject::ElementsKind elements_kind) { switch (elements_kind) { case JSObject::EXTERNAL_BYTE_ELEMENTS: @@ -4201,7 +4253,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm, // have been verified by the caller to not be a smi. // Check that the key is a smi. - __ JumpIfNotSmi(a0, &miss_force_generic); + __ JumpIfNotSmi(key_reg, &miss_force_generic); // Get the elements array and make sure it is a fast element array, not 'cow'. __ lw(elements_reg, diff --git a/deps/v8/src/mksnapshot.cc b/deps/v8/src/mksnapshot.cc index 1ed610341e..c5ce12f0ec 100644 --- a/deps/v8/src/mksnapshot.cc +++ b/deps/v8/src/mksnapshot.cc @@ -296,10 +296,9 @@ class BZip2Decompressor : public StartupDataDecompressor { int main(int argc, char** argv) { -#ifdef ENABLE_LOGGING_AND_PROFILING // By default, log code create information in the snapshot. i::FLAG_log_code = true; -#endif + // Print the usage if an error occurs when parsing the command line // flags or if the help flag is set. int result = i::FlagList::SetFlagsFromCommandLine(&argc, argv, true); diff --git a/deps/v8/src/objects-visiting.h b/deps/v8/src/objects-visiting.h index a2b1c4fc24..cc64763b5f 100644 --- a/deps/v8/src/objects-visiting.h +++ b/deps/v8/src/objects-visiting.h @@ -30,6 +30,22 @@ #include "allocation.h" +#if V8_TARGET_ARCH_IA32 +#include "ia32/assembler-ia32.h" +#include "ia32/assembler-ia32-inl.h" +#elif V8_TARGET_ARCH_X64 +#include "x64/assembler-x64.h" +#include "x64/assembler-x64-inl.h" +#elif V8_TARGET_ARCH_ARM +#include "arm/assembler-arm.h" +#include "arm/assembler-arm-inl.h" +#elif V8_TARGET_ARCH_MIPS +#include "mips/assembler-mips.h" +#include "mips/assembler-mips-inl.h" +#else +#error Unsupported target architecture. 
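// A rough sketch of what LoadFromNumberDictionary, used by the new
// GenerateLoadDictionaryElement above, does: probe an open-addressed hash
// table keyed by uint32 and fall through to a slow path on a miss. The
// probing sequence below is illustrative; V8's exact scheme differs.
#include <cstdint>
#include <optional>
#include <vector>

struct DictEntry {
  bool used = false;
  uint32_t key = 0;
  int value = 0;
};

// `table.size()` must be a nonzero power of two so the mask replaces modulo.
static std::optional<int> NumberDictionaryProbe(
    const std::vector<DictEntry>& table, uint32_t key) {
  const uint32_t mask = static_cast<uint32_t>(table.size()) - 1;
  uint32_t index = key & mask;
  for (uint32_t count = 1; count <= table.size(); ++count) {
    const DictEntry& entry = table[index];
    if (!entry.used) return std::nullopt;  // empty slot: miss -> slow path
    if (entry.key == key) return entry.value;
    index = (index + count) & mask;        // next probe
  }
  return std::nullopt;
}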
+#endif + // This file provides base classes and auxiliary methods for defining // static object visitors used during GC. // Visiting HeapObject body with a normal ObjectVisitor requires performing diff --git a/deps/v8/src/objects.cc b/deps/v8/src/objects.cc index 6242198ec3..ca780dbe0e 100644 --- a/deps/v8/src/objects.cc +++ b/deps/v8/src/objects.cc @@ -1883,13 +1883,9 @@ void JSObject::LookupCallbackSetterInPrototypes(String* name, pt = pt->GetPrototype()) { JSObject::cast(pt)->LocalLookupRealNamedProperty(name, result); if (result->IsProperty()) { - if (result->IsReadOnly()) { - result->NotFound(); - return; - } - if (result->type() == CALLBACKS) { - return; - } + if (result->type() == CALLBACKS && !result->IsReadOnly()) return; + // Found non-callback or read-only callback, stop looking. + break; } } result->NotFound(); @@ -2273,10 +2269,10 @@ MUST_USE_RESULT PropertyAttributes JSProxy::GetPropertyAttributeWithHandler( MaybeObject* JSObject::SetPropertyForResult(LookupResult* result, - String* name, - Object* value, - PropertyAttributes attributes, - StrictModeFlag strict_mode) { + String* name, + Object* value, + PropertyAttributes attributes, + StrictModeFlag strict_mode) { Heap* heap = GetHeap(); // Make sure that the top context does not change when doing callbacks or // interceptor calls. @@ -3068,7 +3064,9 @@ MaybeObject* JSObject::DeleteDictionaryElement(uint32_t index, Isolate* isolate = GetIsolate(); Heap* heap = isolate->heap(); FixedArray* backing_store = FixedArray::cast(elements()); - if (backing_store->map() == heap->non_strict_arguments_elements_map()) { + bool is_arguments = + (GetElementsKind() == JSObject::NON_STRICT_ARGUMENTS_ELEMENTS); + if (is_arguments) { backing_store = FixedArray::cast(backing_store->get(1)); } NumberDictionary* dictionary = NumberDictionary::cast(backing_store); @@ -3081,7 +3079,11 @@ MaybeObject* JSObject::DeleteDictionaryElement(uint32_t index, if (!maybe_elements->To(&new_elements)) { return maybe_elements; } - set_elements(new_elements); + if (is_arguments) { + FixedArray::cast(elements())->set(1, new_elements); + } else { + set_elements(new_elements); + } } if (mode == STRICT_DELETION && result == heap->false_value()) { // In strict mode, attempting to delete a non-configurable property @@ -3375,23 +3377,22 @@ MaybeObject* JSObject::PreventExtensions() { } // If there are fast elements we normalize. - if (HasFastElements()) { - MaybeObject* result = NormalizeElements(); - if (result->IsFailure()) return result; + NumberDictionary* dictionary = NULL; + { MaybeObject* maybe = NormalizeElements(); + if (!maybe->To<NumberDictionary>(&dictionary)) return maybe; } - // TODO(kmillikin): Handle arguments object with dictionary elements. - ASSERT(HasDictionaryElements()); + ASSERT(HasDictionaryElements() || HasDictionaryArgumentsElements()); // Make sure that we never go back to fast case. - element_dictionary()->set_requires_slow_elements(); + dictionary->set_requires_slow_elements(); // Do a map transition, other objects with this map may still // be extensible. 
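// A simplified model of the DeleteDictionaryElement fix in the objects.cc
// hunk above: for a non-strict arguments object the dictionary lives in
// slot 1 of the parameter map, so a shrunken dictionary must be written back
// into that slot rather than replacing the object's elements array wholesale.
// The types are stand-ins for V8's FixedArray/NumberDictionary.
#include <memory>
#include <vector>

struct DictSketch {
  std::vector<int> entries;
};

struct ElementsSketch {
  bool is_arguments;
  std::shared_ptr<DictSketch> elements;  // direct dictionary elements
  std::vector<std::shared_ptr<DictSketch>> parameter_map;  // slot 1 = dict

  void ReplaceAfterShrink(std::shared_ptr<DictSketch> shrunk) {
    if (is_arguments) {
      parameter_map[1] = shrunk;  // keep the parameter map itself in place
    } else {
      elements = shrunk;          // set_elements(new_elements)
    }
  }
};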
- Object* new_map; - { MaybeObject* maybe_new_map = map()->CopyDropTransitions(); - if (!maybe_new_map->ToObject(&new_map)) return maybe_new_map; + Map* new_map; + { MaybeObject* maybe = map()->CopyDropTransitions(); + if (!maybe->To<Map>(&new_map)) return maybe; } - Map::cast(new_map)->set_is_extensible(false); - set_map(Map::cast(new_map)); + new_map->set_is_extensible(false); + set_map(new_map); ASSERT(!map()->is_extensible()); return new_map; } @@ -4117,6 +4118,8 @@ void Map::TraverseTransitionTree(TraverseCallback callback, void* data) { } } if (!map_done) continue; + } else { + map_or_index_field = NULL; } // That was the regular transitions, now for the prototype transitions. FixedArray* prototype_transitions = @@ -9428,7 +9431,7 @@ void JSObject::GetLocalPropertyNames(FixedArray* storage, int index) { } ASSERT(storage->length() >= index); } else { - property_dictionary()->CopyKeysTo(storage); + property_dictionary()->CopyKeysTo(storage, StringDictionary::UNSORTED); } } @@ -9505,33 +9508,49 @@ int JSObject::GetLocalElementKeys(FixedArray* storage, break; case DICTIONARY_ELEMENTS: { if (storage != NULL) { - element_dictionary()->CopyKeysTo(storage, filter); + element_dictionary()->CopyKeysTo(storage, + filter, + NumberDictionary::SORTED); } counter += element_dictionary()->NumberOfElementsFilterAttributes(filter); break; } case NON_STRICT_ARGUMENTS_ELEMENTS: { FixedArray* parameter_map = FixedArray::cast(elements()); - int length = parameter_map->length(); - for (int i = 2; i < length; ++i) { - if (!parameter_map->get(i)->IsTheHole()) { - if (storage != NULL) storage->set(i - 2, Smi::FromInt(i - 2)); - ++counter; - } - } + int mapped_length = parameter_map->length() - 2; FixedArray* arguments = FixedArray::cast(parameter_map->get(1)); if (arguments->IsDictionary()) { + // Copy the keys from arguments first, because Dictionary::CopyKeysTo + // will insert in storage starting at index 0. 
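// A sketch of the SortMode parameter this commit adds to
// Dictionary::CopyKeysTo (see the objects.cc/objects.h hunks below): the
// caller decides whether the copied keys get sorted, because the
// arguments-object path appends mapped parameter indices after the
// dictionary keys and sorts the combined list once at the end.
#include <algorithm>
#include <vector>

enum SortMode { UNSORTED, SORTED };

static void CopyKeysToSketch(const std::vector<int>& dict_keys,
                             std::vector<int>* storage, SortMode sort_mode) {
  storage->insert(storage->end(), dict_keys.begin(), dict_keys.end());
  if (sort_mode == SORTED) {
    std::sort(storage->begin(), storage->end());  // SortPairs(storage, index)
  }
}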
NumberDictionary* dictionary = NumberDictionary::cast(arguments); - if (storage != NULL) dictionary->CopyKeysTo(storage, filter); + if (storage != NULL) { + dictionary->CopyKeysTo(storage, filter, NumberDictionary::UNSORTED); + } counter += dictionary->NumberOfElementsFilterAttributes(filter); + for (int i = 0; i < mapped_length; ++i) { + if (!parameter_map->get(i + 2)->IsTheHole()) { + if (storage != NULL) storage->set(counter, Smi::FromInt(i)); + ++counter; + } + } + if (storage != NULL) storage->SortPairs(storage, counter); + } else { - int length = arguments->length(); - for (int i = 0; i < length; ++i) { - if (!arguments->get(i)->IsTheHole()) { - if (storage != NULL) storage->set(i, Smi::FromInt(i)); + int backing_length = arguments->length(); + int i = 0; + for (; i < mapped_length; ++i) { + if (!parameter_map->get(i + 2)->IsTheHole()) { + if (storage != NULL) storage->set(counter, Smi::FromInt(i)); + ++counter; + } else if (i < backing_length && !arguments->get(i)->IsTheHole()) { + if (storage != NULL) storage->set(counter, Smi::FromInt(i)); ++counter; } } + for (; i < backing_length; ++i) { + if (storage != NULL) storage->set(counter, Smi::FromInt(i)); + ++counter; + } } break; } @@ -10132,7 +10151,9 @@ template Object* Dictionary<StringDictionaryShape, String*>::SlowReverseLookup( Object*); template void Dictionary<NumberDictionaryShape, uint32_t>::CopyKeysTo( - FixedArray*, PropertyAttributes); + FixedArray*, + PropertyAttributes, + Dictionary<NumberDictionaryShape, uint32_t>::SortMode); template Object* Dictionary<StringDictionaryShape, String*>::DeleteProperty( int, JSObject::DeleteMode); @@ -10147,7 +10168,8 @@ template MaybeObject* Dictionary<NumberDictionaryShape, uint32_t>::Shrink( uint32_t); template void Dictionary<StringDictionaryShape, String*>::CopyKeysTo( - FixedArray*); + FixedArray*, + Dictionary<StringDictionaryShape, String*>::SortMode); template int Dictionary<StringDictionaryShape, String*>::NumberOfElementsFilterAttributes( @@ -11199,8 +11221,10 @@ int Dictionary<Shape, Key>::NumberOfEnumElements() { template<typename Shape, typename Key> -void Dictionary<Shape, Key>::CopyKeysTo(FixedArray* storage, - PropertyAttributes filter) { +void Dictionary<Shape, Key>::CopyKeysTo( + FixedArray* storage, + PropertyAttributes filter, + typename Dictionary<Shape, Key>::SortMode sort_mode) { ASSERT(storage->length() >= NumberOfEnumElements()); int capacity = HashTable<Shape, Key>::Capacity(); int index = 0; @@ -11213,7 +11237,9 @@ void Dictionary<Shape, Key>::CopyKeysTo(FixedArray* storage, if ((attr & filter) == 0) storage->set(index++, k); } } - storage->SortPairs(storage, index); + if (sort_mode == Dictionary<Shape, Key>::SORTED) { + storage->SortPairs(storage, index); + } ASSERT(storage->length() >= index); } @@ -11239,7 +11265,9 @@ void StringDictionary::CopyEnumKeysTo(FixedArray* storage, template<typename Shape, typename Key> -void Dictionary<Shape, Key>::CopyKeysTo(FixedArray* storage) { +void Dictionary<Shape, Key>::CopyKeysTo( + FixedArray* storage, + typename Dictionary<Shape, Key>::SortMode sort_mode) { ASSERT(storage->length() >= NumberOfElementsFilterAttributes( static_cast<PropertyAttributes>(NONE))); int capacity = HashTable<Shape, Key>::Capacity(); @@ -11252,6 +11280,9 @@ void Dictionary<Shape, Key>::CopyKeysTo(FixedArray* storage) { storage->set(index++, k); } } + if (sort_mode == Dictionary<Shape, Key>::SORTED) { + storage->SortPairs(storage, index); + } ASSERT(storage->length() >= index); } diff --git a/deps/v8/src/objects.h b/deps/v8/src/objects.h index 
c34efdd4c7..9765fe2a0a 100644 --- a/deps/v8/src/objects.h +++ b/deps/v8/src/objects.h @@ -2770,10 +2770,13 @@ class Dictionary: public HashTable<Shape, Key> { // Returns the number of enumerable elements in the dictionary. int NumberOfEnumElements(); + enum SortMode { UNSORTED, SORTED }; // Copies keys to preallocated fixed array. - void CopyKeysTo(FixedArray* storage, PropertyAttributes filter); + void CopyKeysTo(FixedArray* storage, + PropertyAttributes filter, + SortMode sort_mode); // Fill in details for properties into storage. - void CopyKeysTo(FixedArray* storage); + void CopyKeysTo(FixedArray* storage, SortMode sort_mode); // Accessors for next enumeration index. void SetNextEnumerationIndex(int index) { diff --git a/deps/v8/src/parser.cc b/deps/v8/src/parser.cc index 184f0a2a27..3085ef86be 100644 --- a/deps/v8/src/parser.cc +++ b/deps/v8/src/parser.cc @@ -823,14 +823,24 @@ class ParserFinder { // form expr.a = ...; expr.b = ...; etc. class InitializationBlockFinder : public ParserFinder { public: - InitializationBlockFinder() - : first_in_block_(NULL), last_in_block_(NULL), block_size_(0) {} + // We find and mark the initialization blocks in top level + // non-looping code only. This is because the optimization prevents + // reuse of the map transitions, so it should be used only for code + // that will only be run once. + InitializationBlockFinder(Scope* top_scope, Target* target) + : enabled_(top_scope->DeclarationScope()->is_global_scope() && + !IsLoopTarget(target)), + first_in_block_(NULL), + last_in_block_(NULL), + block_size_(0) {} ~InitializationBlockFinder() { + if (!enabled_) return; if (InBlock()) EndBlock(); } void Update(Statement* stat) { + if (!enabled_) return; Assignment* assignment = AsAssignment(stat); if (InBlock()) { if (BlockContinues(assignment)) { @@ -851,6 +861,14 @@ class InitializationBlockFinder : public ParserFinder { // the overhead exceeds the savings below this limit. static const int kMinInitializationBlock = 3; + static bool IsLoopTarget(Target* target) { + while (target != NULL) { + if (target->node()->AsIterationStatement() != NULL) return true; + target = target->previous(); + } + return false; + } + // Returns true if the expressions appear to denote the same object. // In the context of initialization blocks, we only consider expressions // of the form 'expr.x' or expr["x"]. @@ -913,6 +931,7 @@ class InitializationBlockFinder : public ParserFinder { bool InBlock() { return first_in_block_ != NULL; } + const bool enabled_; Assignment* first_in_block_; Assignment* last_in_block_; int block_size_; @@ -1078,7 +1097,7 @@ void* Parser::ParseSourceElements(ZoneList<Statement*>* processor, TargetScope scope(&this->target_stack_); ASSERT(processor != NULL); - InitializationBlockFinder block_finder; + InitializationBlockFinder block_finder(top_scope_, target_stack_); ThisNamedPropertyAssigmentFinder this_property_assignment_finder(isolate()); bool directive_prologue = true; // Parsing directive prologue. @@ -1133,12 +1152,7 @@ void* Parser::ParseSourceElements(ZoneList<Statement*>* processor, } } - // We find and mark the initialization blocks on top level code only. - // This is because the optimization prevents reuse of the map transitions, - // so it should be used only for code that will only be run once. 
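// A condensed restatement of the IsLoopTarget helper added above: walk the
// chain of enclosing break/continue targets and report whether any of them
// wraps an iteration statement, which is how the parser now disables
// initialization-block tracking inside loops.
struct NodeSketch {
  bool is_iteration_statement;
};

struct TargetSketch {
  NodeSketch* node;
  TargetSketch* previous;
};

static bool IsLoopTargetSketch(TargetSketch* target) {
  for (; target != nullptr; target = target->previous) {
    if (target->node->is_iteration_statement) return true;
  }
  return false;
}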
- if (top_scope_->is_global_scope()) { - block_finder.Update(stat); - } + block_finder.Update(stat); // Find and mark all assignments to named properties in this (this.x =) if (top_scope_->is_function_scope()) { this_property_assignment_finder.Update(top_scope_, stat); @@ -1478,9 +1492,13 @@ Block* Parser::ParseBlock(ZoneStringList* labels, bool* ok) { Block* result = new(zone()) Block(labels, 16, false); Target target(&this->target_stack_, result); Expect(Token::LBRACE, CHECK_OK); + InitializationBlockFinder block_finder(top_scope_, target_stack_); while (peek() != Token::RBRACE) { Statement* stat = ParseStatement(NULL, CHECK_OK); - if (stat && !stat->IsEmpty()) result->AddStatement(stat); + if (stat && !stat->IsEmpty()) { + result->AddStatement(stat); + block_finder.Update(stat); + } } Expect(Token::RBRACE, CHECK_OK); return result; diff --git a/deps/v8/src/platform-cygwin.cc b/deps/v8/src/platform-cygwin.cc index 0242f7b2bd..5f283c3571 100644 --- a/deps/v8/src/platform-cygwin.cc +++ b/deps/v8/src/platform-cygwin.cc @@ -166,23 +166,6 @@ void OS::Free(void* address, const size_t size) { } -#ifdef ENABLE_HEAP_PROTECTION - -void OS::Protect(void* address, size_t size) { - // TODO(1240712): mprotect has a return value which is ignored here. - mprotect(address, size, PROT_READ); -} - - -void OS::Unprotect(void* address, size_t size, bool is_executable) { - // TODO(1240712): mprotect has a return value which is ignored here. - int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0); - mprotect(address, size, prot); -} - -#endif - - void OS::Sleep(int milliseconds) { unsigned int ms = static_cast<unsigned int>(milliseconds); usleep(1000 * ms); @@ -249,7 +232,6 @@ PosixMemoryMappedFile::~PosixMemoryMappedFile() { void OS::LogSharedLibraryAddresses() { -#ifdef ENABLE_LOGGING_AND_PROFILING // This function assumes that the layout of the file is as follows: // hex_start_addr-hex_end_addr rwxp <unused data> [binary_file_name] // If we encounter an unexpected situation we abort scanning further entries. @@ -306,7 +288,6 @@ void OS::LogSharedLibraryAddresses() { } free(lib_name); fclose(fp); -#endif } @@ -591,8 +572,6 @@ Semaphore* OS::CreateSemaphore(int count) { } -#ifdef ENABLE_LOGGING_AND_PROFILING - // ---------------------------------------------------------------------------- // Cygwin profiler support. 
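// For reference, the OS::Protect/Unprotect pair deleted across the platform
// files here was a thin mprotect(2) wrapper; as the old TODO(1240712) noted,
// the return value was ignored. A standalone equivalent of the deleted code:
#include <cstddef>
#include <sys/mman.h>

static void ProtectSketch(void* address, size_t size) {
  mprotect(address, size, PROT_READ);  // return value ignored, as before
}

static void UnprotectSketch(void* address, size_t size, bool is_executable) {
  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
  mprotect(address, size, prot);       // return value ignored, as before
}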
// @@ -769,7 +748,5 @@ void Sampler::Stop() { SetActive(false); } -#endif // ENABLE_LOGGING_AND_PROFILING } } // namespace v8::internal - diff --git a/deps/v8/src/platform-freebsd.cc b/deps/v8/src/platform-freebsd.cc index 755475a8a0..9d9f1b795a 100644 --- a/deps/v8/src/platform-freebsd.cc +++ b/deps/v8/src/platform-freebsd.cc @@ -181,20 +181,6 @@ void OS::Free(void* buf, const size_t length) { } -#ifdef ENABLE_HEAP_PROTECTION - -void OS::Protect(void* address, size_t size) { - UNIMPLEMENTED(); -} - - -void OS::Unprotect(void* address, size_t size, bool is_executable) { - UNIMPLEMENTED(); -} - -#endif - - void OS::Sleep(int milliseconds) { unsigned int ms = static_cast<unsigned int>(milliseconds); usleep(1000 * ms); @@ -266,15 +252,12 @@ PosixMemoryMappedFile::~PosixMemoryMappedFile() { } -#ifdef ENABLE_LOGGING_AND_PROFILING static unsigned StringToLong(char* buffer) { return static_cast<unsigned>(strtol(buffer, NULL, 16)); // NOLINT } -#endif void OS::LogSharedLibraryAddresses() { -#ifdef ENABLE_LOGGING_AND_PROFILING static const int MAP_LENGTH = 1024; int fd = open("/proc/self/maps", O_RDONLY); if (fd < 0) return; @@ -311,7 +294,6 @@ void OS::LogSharedLibraryAddresses() { LOG(i::Isolate::Current(), SharedLibraryEvent(start_of_path, start, end)); } close(fd); -#endif } @@ -588,8 +570,6 @@ Semaphore* OS::CreateSemaphore(int count) { } -#ifdef ENABLE_LOGGING_AND_PROFILING - static pthread_t GetThreadID() { pthread_t thread_id = pthread_self(); return thread_id; @@ -817,6 +797,5 @@ void Sampler::Stop() { SetActive(false); } -#endif // ENABLE_LOGGING_AND_PROFILING } } // namespace v8::internal diff --git a/deps/v8/src/platform-linux.cc b/deps/v8/src/platform-linux.cc index d2866cae45..ab22a79cd4 100644 --- a/deps/v8/src/platform-linux.cc +++ b/deps/v8/src/platform-linux.cc @@ -390,23 +390,6 @@ void OS::Free(void* address, const size_t size) { } -#ifdef ENABLE_HEAP_PROTECTION - -void OS::Protect(void* address, size_t size) { - // TODO(1240712): mprotect has a return value which is ignored here. - mprotect(address, size, PROT_READ); -} - - -void OS::Unprotect(void* address, size_t size, bool is_executable) { - // TODO(1240712): mprotect has a return value which is ignored here. - int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0); - mprotect(address, size, prot); -} - -#endif - - void OS::Sleep(int milliseconds) { unsigned int ms = static_cast<unsigned int>(milliseconds); usleep(1000 * ms); @@ -483,7 +466,6 @@ PosixMemoryMappedFile::~PosixMemoryMappedFile() { void OS::LogSharedLibraryAddresses() { -#ifdef ENABLE_LOGGING_AND_PROFILING // This function assumes that the layout of the file is as follows: // hex_start_addr-hex_end_addr rwxp <unused data> [binary_file_name] // If we encounter an unexpected situation we abort scanning further entries. @@ -540,7 +522,6 @@ void OS::LogSharedLibraryAddresses() { } free(lib_name); fclose(fp); -#endif } @@ -548,7 +529,6 @@ static const char kGCFakeMmap[] = "/tmp/__v8_gc__"; void OS::SignalCodeMovingGC() { -#ifdef ENABLE_LOGGING_AND_PROFILING // Support for ll_prof.py. 
// // The Linux profiler built into the kernel logs all mmap's with @@ -564,7 +544,6 @@ void OS::SignalCodeMovingGC() { ASSERT(addr != MAP_FAILED); munmap(addr, size); fclose(f); -#endif } @@ -859,8 +838,6 @@ Semaphore* OS::CreateSemaphore(int count) { } -#ifdef ENABLE_LOGGING_AND_PROFILING - #if !defined(__GLIBC__) && (defined(__arm__) || defined(__thumb__)) // Android runs a fairly new Linux kernel, so signal info is there, // but the C library doesn't have the structs defined. @@ -1148,6 +1125,5 @@ void Sampler::Stop() { SetActive(false); } -#endif // ENABLE_LOGGING_AND_PROFILING } } // namespace v8::internal diff --git a/deps/v8/src/platform-macos.cc b/deps/v8/src/platform-macos.cc index 104729af5d..be6e1572dc 100644 --- a/deps/v8/src/platform-macos.cc +++ b/deps/v8/src/platform-macos.cc @@ -169,20 +169,6 @@ void OS::Free(void* address, const size_t size) { } -#ifdef ENABLE_HEAP_PROTECTION - -void OS::Protect(void* address, size_t size) { - UNIMPLEMENTED(); -} - - -void OS::Unprotect(void* address, size_t size, bool is_executable) { - UNIMPLEMENTED(); -} - -#endif - - void OS::Sleep(int milliseconds) { usleep(1000 * milliseconds); } @@ -248,7 +234,6 @@ PosixMemoryMappedFile::~PosixMemoryMappedFile() { void OS::LogSharedLibraryAddresses() { -#ifdef ENABLE_LOGGING_AND_PROFILING unsigned int images_count = _dyld_image_count(); for (unsigned int i = 0; i < images_count; ++i) { const mach_header* header = _dyld_get_image_header(i); @@ -270,7 +255,6 @@ void OS::LogSharedLibraryAddresses() { LOG(Isolate::Current(), SharedLibraryEvent(_dyld_get_image_name(i), start, start + size)); } -#endif // ENABLE_LOGGING_AND_PROFILING } @@ -644,8 +628,6 @@ Semaphore* OS::CreateSemaphore(int count) { } -#ifdef ENABLE_LOGGING_AND_PROFILING - class Sampler::PlatformData : public Malloced { public: PlatformData() : profiled_thread_(mach_thread_self()) {} @@ -821,6 +803,5 @@ void Sampler::Stop() { SetActive(false); } -#endif // ENABLE_LOGGING_AND_PROFILING } } // namespace v8::internal diff --git a/deps/v8/src/platform-nullos.cc b/deps/v8/src/platform-nullos.cc index d309806ec7..08480ca800 100644 --- a/deps/v8/src/platform-nullos.cc +++ b/deps/v8/src/platform-nullos.cc @@ -217,20 +217,6 @@ void OS::Free(void* buf, const size_t length) { } -#ifdef ENABLE_HEAP_PROTECTION - -void OS::Protect(void* address, size_t size) { - UNIMPLEMENTED(); -} - - -void OS::Unprotect(void* address, size_t size, bool is_executable) { - UNIMPLEMENTED(); -} - -#endif - - void OS::Sleep(int milliseconds) { UNIMPLEMENTED(); } @@ -437,7 +423,6 @@ Semaphore* OS::CreateSemaphore(int count) { return new NullSemaphore(count); } -#ifdef ENABLE_LOGGING_AND_PROFILING class ProfileSampler::PlatformData : public Malloced { public: @@ -472,6 +457,5 @@ void ProfileSampler::Stop() { UNIMPLEMENTED(); } -#endif // ENABLE_LOGGING_AND_PROFILING } } // namespace v8::internal diff --git a/deps/v8/src/platform-openbsd.cc b/deps/v8/src/platform-openbsd.cc index ceabb51f10..973329b9b1 100644 --- a/deps/v8/src/platform-openbsd.cc +++ b/deps/v8/src/platform-openbsd.cc @@ -179,20 +179,6 @@ void OS::Free(void* buf, const size_t length) { } -#ifdef ENABLE_HEAP_PROTECTION - -void OS::Protect(void* address, size_t size) { - UNIMPLEMENTED(); -} - - -void OS::Unprotect(void* address, size_t size, bool is_executable) { - UNIMPLEMENTED(); -} - -#endif - - void OS::Sleep(int milliseconds) { unsigned int ms = static_cast<unsigned int>(milliseconds); usleep(1000 * ms); @@ -264,15 +250,12 @@ PosixMemoryMappedFile::~PosixMemoryMappedFile() { } -#ifdef 
ENABLE_LOGGING_AND_PROFILING static unsigned StringToLong(char* buffer) { return static_cast<unsigned>(strtol(buffer, NULL, 16)); // NOLINT } -#endif void OS::LogSharedLibraryAddresses() { -#ifdef ENABLE_LOGGING_AND_PROFILING static const int MAP_LENGTH = 1024; int fd = open("/proc/self/maps", O_RDONLY); if (fd < 0) return; @@ -309,7 +292,6 @@ void OS::LogSharedLibraryAddresses() { LOG(i::Isolate::Current(), SharedLibraryEvent(start_of_path, start, end)); } close(fd); -#endif } @@ -590,8 +572,6 @@ Semaphore* OS::CreateSemaphore(int count) { } -#ifdef ENABLE_LOGGING_AND_PROFILING - static pthread_t GetThreadID() { pthread_t thread_id = pthread_self(); return thread_id; @@ -818,6 +798,5 @@ void Sampler::Stop() { SetActive(false); } -#endif // ENABLE_LOGGING_AND_PROFILING } } // namespace v8::internal diff --git a/deps/v8/src/platform-posix.cc b/deps/v8/src/platform-posix.cc index 83f6c8112e..5be305af92 100644 --- a/deps/v8/src/platform-posix.cc +++ b/deps/v8/src/platform-posix.cc @@ -37,6 +37,7 @@ #include <sys/resource.h> #include <sys/time.h> #include <sys/types.h> +#include <sys/stat.h> #include <arpa/inet.h> #include <netinet/in.h> @@ -130,7 +131,14 @@ int OS::GetLastError() { // FILE* OS::FOpen(const char* path, const char* mode) { - return fopen(path, mode); + FILE* file = fopen(path, mode); + if (file == NULL) return NULL; + struct stat file_stat; + if (fstat(fileno(file), &file_stat) != 0) return NULL; + bool is_regular_file = ((file_stat.st_mode & S_IFREG) != 0); + if (is_regular_file) return file; + fclose(file); + return NULL; } @@ -139,6 +147,11 @@ bool OS::Remove(const char* path) { } +FILE* OS::OpenTemporaryFile() { + return tmpfile(); +} + + const char* const OS::LogFileOpenMode = "w"; diff --git a/deps/v8/src/platform-solaris.cc b/deps/v8/src/platform-solaris.cc index ca15b07f11..1e79f102f5 100644 --- a/deps/v8/src/platform-solaris.cc +++ b/deps/v8/src/platform-solaris.cc @@ -192,23 +192,6 @@ void OS::Free(void* address, const size_t size) { } -#ifdef ENABLE_HEAP_PROTECTION - -void OS::Protect(void* address, size_t size) { - // TODO(1240712): mprotect has a return value which is ignored here. - mprotect(address, size, PROT_READ); -} - - -void OS::Unprotect(void* address, size_t size, bool is_executable) { - // TODO(1240712): mprotect has a return value which is ignored here. - int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0); - mprotect(address, size, prot); -} - -#endif - - void OS::Sleep(int milliseconds) { useconds_t ms = static_cast<useconds_t>(milliseconds); usleep(1000 * ms); @@ -589,8 +572,6 @@ Semaphore* OS::CreateSemaphore(int count) { } -#ifdef ENABLE_LOGGING_AND_PROFILING - static pthread_t GetThreadID() { return pthread_self(); } @@ -817,6 +798,4 @@ void Sampler::Stop() { SetActive(false); } -#endif // ENABLE_LOGGING_AND_PROFILING - } } // namespace v8::internal diff --git a/deps/v8/src/platform-tls.h b/deps/v8/src/platform-tls.h index 564917540b..32516636be 100644 --- a/deps/v8/src/platform-tls.h +++ b/deps/v8/src/platform-tls.h @@ -30,7 +30,7 @@ #ifndef V8_PLATFORM_TLS_H_ #define V8_PLATFORM_TLS_H_ -#ifdef V8_FAST_TLS +#ifndef V8_NO_FAST_TLS // When fast TLS is requested we include the appropriate // implementation header. 
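// The hardened OS::FOpen in the platform-posix.cc hunk above opens the file,
// then uses fstat() to verify the descriptor refers to a regular file and
// rejects anything else. One subtlety visible in the diff: when fstat itself
// fails, the new code returns NULL without closing the FILE*. A standalone
// variant that also closes on that path:
#include <cstdio>
#include <sys/stat.h>

static FILE* FOpenRegularFile(const char* path, const char* mode) {
  FILE* file = std::fopen(path, mode);
  if (file == nullptr) return nullptr;
  struct stat file_stat;
  if (fstat(fileno(file), &file_stat) != 0 ||
      (file_stat.st_mode & S_IFREG) == 0) {
    std::fclose(file);  // close on fstat failure or non-regular file
    return nullptr;
  }
  return file;
}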
diff --git a/deps/v8/src/platform-win32.cc b/deps/v8/src/platform-win32.cc index c226e2f3f8..35b1a8eced 100644 --- a/deps/v8/src/platform-win32.cc +++ b/deps/v8/src/platform-win32.cc @@ -44,11 +44,6 @@ namespace v8 { namespace internal { -intptr_t OS::MaxVirtualMemory() { - return 0; -} - - // Test for finite value - usually defined in math.h int isfinite(double x) { return _finite(x); @@ -143,16 +138,39 @@ int fopen_s(FILE** pFile, const char* filename, const char* mode) { } +#define _TRUNCATE 0 +#define STRUNCATE 80 + int _vsnprintf_s(char* buffer, size_t sizeOfBuffer, size_t count, const char* format, va_list argptr) { + ASSERT(count == _TRUNCATE); return _vsnprintf(buffer, sizeOfBuffer, format, argptr); } -#define _TRUNCATE 0 -int strncpy_s(char* strDest, size_t numberOfElements, - const char* strSource, size_t count) { - strncpy(strDest, strSource, count); +int strncpy_s(char* dest, size_t dest_size, const char* source, size_t count) { + CHECK(source != NULL); + CHECK(dest != NULL); + CHECK_GT(dest_size, 0); + + if (count == _TRUNCATE) { + while (dest_size > 0 && *source != 0) { + *(dest++) = *(source++); + --dest_size; + } + if (dest_size == 0) { + *(dest - 1) = 0; + return STRUNCATE; + } + } else { + while (dest_size > 0 && count > 0 && *source != 0) { + *(dest++) = *(source++); + --dest_size; + --count; + } + } + CHECK_GT(dest_size, 0); + *dest = 0; return 0; } @@ -174,6 +192,11 @@ int random() { namespace v8 { namespace internal { +intptr_t OS::MaxVirtualMemory() { + return 0; +} + + double ceiling(double x) { return ceil(x); } @@ -717,6 +740,24 @@ bool OS::Remove(const char* path) { } +FILE* OS::OpenTemporaryFile() { + // tmpfile_s tries to use the root dir, don't use it. + char tempPathBuffer[MAX_PATH]; + DWORD path_result = 0; + path_result = GetTempPathA(MAX_PATH, tempPathBuffer); + if (path_result > MAX_PATH || path_result == 0) return NULL; + UINT name_result = 0; + char tempNameBuffer[MAX_PATH]; + name_result = GetTempFileNameA(tempPathBuffer, "", 0, tempNameBuffer); + if (name_result == 0) return NULL; + FILE* result = FOpen(tempNameBuffer, "w+"); // Same mode as tmpfile uses. + if (result != NULL) { + Remove(tempNameBuffer); // Delete on close. + } + return result; +} + + // Open log file in binary mode to avoid /n -> /r/n conversion. const char* const OS::LogFileOpenMode = "wb"; @@ -916,25 +957,6 @@ void OS::Free(void* address, const size_t size) { } -#ifdef ENABLE_HEAP_PROTECTION - -void OS::Protect(void* address, size_t size) { - // TODO(1240712): VirtualProtect has a return value which is ignored here. - DWORD old_protect; - VirtualProtect(address, size, PAGE_READONLY, &old_protect); -} - - -void OS::Unprotect(void* address, size_t size, bool is_executable) { - // TODO(1240712): VirtualProtect has a return value which is ignored here. - DWORD new_protect = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE; - DWORD old_protect; - VirtualProtect(address, size, new_protect, &old_protect); -} - -#endif - - void OS::Sleep(int milliseconds) { ::Sleep(milliseconds); } @@ -1835,8 +1857,6 @@ Socket* OS::CreateSocket() { } -#ifdef ENABLE_LOGGING_AND_PROFILING - // ---------------------------------------------------------------------------- // Win32 profiler support. 
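// The Win32 OS::OpenTemporaryFile above avoids tmpfile_s (which tries the
// root directory) and instead creates a named temp file, opens it, and
// removes the name immediately so the data disappears on close; the POSIX
// implementation in this commit simply returns tmpfile(). The same
// delete-on-close idiom spelled out with POSIX calls, for comparison:
#include <cstdio>
#include <cstdlib>
#include <unistd.h>

static FILE* OpenTemporaryFileSketch() {
  char name[] = "/tmp/v8_tmp_XXXXXX";
  int fd = mkstemp(name);  // create and open a uniquely named file
  if (fd < 0) return nullptr;
  FILE* file = fdopen(fd, "w+");
  if (file == nullptr) {
    close(fd);
    return nullptr;
  }
  unlink(name);  // delete on close, mirroring Remove(tempNameBuffer)
  return file;
}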
@@ -2011,6 +2031,5 @@ void Sampler::Stop() { SetActive(false); } -#endif // ENABLE_LOGGING_AND_PROFILING } } // namespace v8::internal diff --git a/deps/v8/src/platform.h b/deps/v8/src/platform.h index 06d3ca4676..c7fe984c45 100644 --- a/deps/v8/src/platform.h +++ b/deps/v8/src/platform.h @@ -177,6 +177,9 @@ class OS { static FILE* FOpen(const char* path, const char* mode); static bool Remove(const char* path); + // Opens a temporary file, the file is auto removed on close. + static FILE* OpenTemporaryFile(); + // Log file open mode is platform-dependent due to line ends issues. static const char* const LogFileOpenMode; @@ -206,12 +209,6 @@ class OS { // Get the Alignment guaranteed by Allocate(). static size_t AllocateAlignment(); -#ifdef ENABLE_HEAP_PROTECTION - // Protect/unprotect a block of memory by marking it read-only/writable. - static void Protect(void* address, size_t size); - static void Unprotect(void* address, size_t size, bool is_executable); -#endif - // Returns an indication of whether a pointer is in a space that // has been allocated by Allocate(). This method may conservatively // always return false, but giving more accurate information may @@ -603,7 +600,6 @@ class TickSample { bool has_external_callback : 1; }; -#ifdef ENABLE_LOGGING_AND_PROFILING class Sampler { public: // Initialize sampler. @@ -662,8 +658,6 @@ class Sampler { }; -#endif // ENABLE_LOGGING_AND_PROFILING - } } // namespace v8::internal #endif // V8_PLATFORM_H_ diff --git a/deps/v8/src/profile-generator-inl.h b/deps/v8/src/profile-generator-inl.h index 747e5c7271..8f4bc6c1f2 100644 --- a/deps/v8/src/profile-generator-inl.h +++ b/deps/v8/src/profile-generator-inl.h @@ -28,8 +28,6 @@ #ifndef V8_PROFILE_GENERATOR_INL_H_ #define V8_PROFILE_GENERATOR_INL_H_ -#ifdef ENABLE_LOGGING_AND_PROFILING - #include "profile-generator.h" namespace v8 { @@ -123,6 +121,4 @@ uint64_t HeapEntry::id() { } } // namespace v8::internal -#endif // ENABLE_LOGGING_AND_PROFILING - #endif // V8_PROFILE_GENERATOR_INL_H_ diff --git a/deps/v8/src/profile-generator.cc b/deps/v8/src/profile-generator.cc index 34d7aa6347..07426f2939 100644 --- a/deps/v8/src/profile-generator.cc +++ b/deps/v8/src/profile-generator.cc @@ -25,8 +25,6 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-#ifdef ENABLE_LOGGING_AND_PROFILING - #include "v8.h" #include "profile-generator-inl.h" @@ -3259,5 +3257,3 @@ String* GetConstructorNameForHeapProfile(JSObject* object) { } } } // namespace v8::internal - -#endif // ENABLE_LOGGING_AND_PROFILING diff --git a/deps/v8/src/profile-generator.h b/deps/v8/src/profile-generator.h index 3d0584b630..d1c2b3804a 100644 --- a/deps/v8/src/profile-generator.h +++ b/deps/v8/src/profile-generator.h @@ -28,8 +28,6 @@ #ifndef V8_PROFILE_GENERATOR_H_ #define V8_PROFILE_GENERATOR_H_ -#ifdef ENABLE_LOGGING_AND_PROFILING - #include "allocation.h" #include "hashmap.h" #include "../include/v8-profiler.h" @@ -1126,6 +1124,4 @@ String* GetConstructorNameForHeapProfile(JSObject* object); } } // namespace v8::internal -#endif // ENABLE_LOGGING_AND_PROFILING - #endif // V8_PROFILE_GENERATOR_H_ diff --git a/deps/v8/src/property.h b/deps/v8/src/property.h index 87f9ea3d54..ddecc92198 100644 --- a/deps/v8/src/property.h +++ b/deps/v8/src/property.h @@ -206,6 +206,7 @@ class LookupResult BASE_EMBEDDED { lookup_type_ = HANDLER_TYPE; holder_ = NULL; details_ = PropertyDetails(NONE, HANDLER); + cacheable_ = false; } void InterceptorResult(JSObject* holder) { diff --git a/deps/v8/src/proxy.js b/deps/v8/src/proxy.js index cb9c020e35..27524bd918 100644 --- a/deps/v8/src/proxy.js +++ b/deps/v8/src/proxy.js @@ -135,3 +135,15 @@ function DerivedSetTrap(receiver, name, val) { function DerivedHasTrap(name) { return !!this.getPropertyDescriptor(name) } + +function DerivedKeysTrap() { + var names = this.getOwnPropertyNames() + var enumerableNames = [] + for (var i = 0, count = 0; i < names.length; ++i) { + var name = names[i] + if (this.getOwnPropertyDescriptor(TO_STRING_INLINE(name)).enumerable) { + enumerableNames[count++] = names[i] + } + } + return enumerableNames +} diff --git a/deps/v8/src/runtime-profiler.cc b/deps/v8/src/runtime-profiler.cc index 7a0dd91f70..917f6d0d66 100644 --- a/deps/v8/src/runtime-profiler.cc +++ b/deps/v8/src/runtime-profiler.cc @@ -61,9 +61,7 @@ static const int kSizeLimit = 1500; Atomic32 RuntimeProfiler::state_ = 0; // TODO(isolates): Create the semaphore lazily and clean it up when no // longer required. -#ifdef ENABLE_LOGGING_AND_PROFILING Semaphore* RuntimeProfiler::semaphore_ = OS::CreateSemaphore(0); -#endif #ifdef DEBUG bool RuntimeProfiler::has_been_globally_setup_ = false; @@ -245,9 +243,7 @@ void RuntimeProfiler::OptimizeNow() { void RuntimeProfiler::NotifyTick() { -#ifdef ENABLE_LOGGING_AND_PROFILING isolate_->stack_guard()->RequestRuntimeProfilerTick(); -#endif } @@ -295,7 +291,6 @@ void RuntimeProfiler::UpdateSamplesAfterScavenge() { void RuntimeProfiler::HandleWakeUp(Isolate* isolate) { -#ifdef ENABLE_LOGGING_AND_PROFILING // The profiler thread must still be waiting. ASSERT(NoBarrier_Load(&state_) >= 0); // In IsolateEnteredJS we have already incremented the counter and @@ -303,7 +298,6 @@ void RuntimeProfiler::HandleWakeUp(Isolate* isolate) { // to get the right count of active isolates. NoBarrier_AtomicIncrement(&state_, 1); semaphore_->Signal(); -#endif } @@ -313,18 +307,15 @@ bool RuntimeProfiler::IsSomeIsolateInJS() { bool RuntimeProfiler::WaitForSomeIsolateToEnterJS() { -#ifdef ENABLE_LOGGING_AND_PROFILING Atomic32 old_state = NoBarrier_CompareAndSwap(&state_, 0, -1); ASSERT(old_state >= -1); if (old_state != 0) return false; semaphore_->Wait(); -#endif return true; } void RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(Thread* thread) { -#ifdef ENABLE_LOGGING_AND_PROFILING // Do a fake increment. 
If the profiler is waiting on the semaphore, // the returned state is 0, which can be left as an initial state in // case profiling is restarted later. If the profiler is not @@ -343,7 +334,6 @@ void RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(Thread* thread) { if (new_state != 0) { NoBarrier_AtomicIncrement(&state_, -1); } -#endif } @@ -365,11 +355,9 @@ void RuntimeProfiler::UpdateSamplesAfterCompact(ObjectVisitor* visitor) { bool RuntimeProfilerRateLimiter::SuspendIfNecessary() { -#ifdef ENABLE_LOGGING_AND_PROFILING if (!RuntimeProfiler::IsSomeIsolateInJS()) { return RuntimeProfiler::WaitForSomeIsolateToEnterJS(); } -#endif return false; } diff --git a/deps/v8/src/runtime.cc b/deps/v8/src/runtime.cc index 5e96ef8e1c..56507aeb42 100644 --- a/deps/v8/src/runtime.cc +++ b/deps/v8/src/runtime.cc @@ -628,11 +628,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetPrototype) { NoHandleAllocation ha; ASSERT(args.length() == 1); Object* obj = args[0]; - obj = obj->GetPrototype(); - while (obj->IsJSObject() && - JSObject::cast(obj)->map()->is_hidden_prototype()) { + do { obj = obj->GetPrototype(); - } + } while (obj->IsJSObject() && + JSObject::cast(obj)->map()->is_hidden_prototype()); return obj; } @@ -10060,8 +10059,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFrameDetails) { int position = it.frame()->LookupCode()->SourcePosition(it.frame()->pc()); - // Check for constructor frame. - bool constructor = it.frame()->IsConstructor(); + // Check for constructor frame. Inlined frames cannot be construct calls. + bool inlined_frame = + it.frame()->is_optimized() && deoptimized_frame_index != 0; + bool constructor = !inlined_frame && it.frame()->IsConstructor(); // Get scope info and read from it for local variable information. Handle<JSFunction> function(JSFunction::cast(it.frame()->function())); @@ -10151,8 +10152,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFrameDetails) { // Find the number of arguments to fill. At least fill the number of // parameters for the function and fill more if more parameters are provided. int argument_count = info.number_of_parameters(); - if (argument_count < it.frame()->ComputeParametersCount()) { - argument_count = it.frame()->ComputeParametersCount(); + if (it.frame()->is_optimized()) { + ASSERT_EQ(argument_count, deoptimized_frame->parameters_count()); + } else { + if (argument_count < it.frame()->ComputeParametersCount()) { + argument_count = it.frame()->ComputeParametersCount(); + } } // Calculate the size of the result. @@ -10165,7 +10170,13 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFrameDetails) { details->set(kFrameDetailsFrameIdIndex, *frame_id); // Add the function (same as in function frame). - details->set(kFrameDetailsFunctionIndex, it.frame()->function()); + if (it.frame()->is_optimized()) { + // Get the function from the deoptimized frame. + details->set(kFrameDetailsFunctionIndex, deoptimized_frame->GetFunction()); + } else { + // Get the function from the stack. + details->set(kFrameDetailsFunctionIndex, it.frame()->function()); + } // Add the arguments count. details->set(kFrameDetailsArgumentCountIndex, Smi::FromInt(argument_count)); @@ -10215,16 +10226,17 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFrameDetails) { details->set(details_index++, heap->undefined_value()); } - // Parameter value. If we are inspecting an optimized frame, use - // undefined as the value. - // - // TODO(3141533): We should be able to get the actual parameter - // value for optimized frames. 
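// A schematic of the parameter fix completed in the hunk continuing below:
// for an optimized frame, Runtime_GetFrameDetails now reads the value from
// the materialized deoptimized frame instead of defaulting to undefined,
// resolving the old TODO(3141533). Types are stand-ins.
#include <vector>

struct ValueSketch {
  int payload;
};

static const ValueSketch kUndefined = {-1};

static ValueSketch GetFrameParameter(bool is_optimized,
                                     const std::vector<ValueSketch>& deopt,
                                     const std::vector<ValueSketch>& stack,
                                     int i) {
  if (is_optimized) return deopt[i];  // deoptimized_frame->GetParameter(i)
  if (i < static_cast<int>(stack.size())) return stack[i];
  return kUndefined;  // more parameters requested than the frame holds
}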
- if (!it.frame()->is_optimized() && - (i < it.frame()->ComputeParametersCount())) { - details->set(details_index++, it.frame()->GetParameter(i)); + // Parameter value. + if (it.frame()->is_optimized()) { + // Get the value from the deoptimized frame. + details->set(details_index++, deoptimized_frame->GetParameter(i)); } else { - details->set(details_index++, heap->undefined_value()); + if (i < it.frame()->ComputeParametersCount()) { + // Get the value from the stack. + details->set(details_index++, it.frame()->GetParameter(i)); + } else { + details->set(details_index++, heap->undefined_value()); + } } } @@ -12133,7 +12145,6 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SummarizeLOL) { #endif // ENABLE_DEBUGGER_SUPPORT -#ifdef ENABLE_LOGGING_AND_PROFILING RUNTIME_FUNCTION(MaybeObject*, Runtime_ProfilerResume) { NoHandleAllocation ha; v8::V8::ResumeProfiler(); @@ -12147,7 +12158,6 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ProfilerPause) { return isolate->heap()->undefined_value(); } -#endif // ENABLE_LOGGING_AND_PROFILING // Finds the script object from the script data. NOTE: This operation uses // heap traversal to find the function generated for the source position diff --git a/deps/v8/src/runtime.h b/deps/v8/src/runtime.h index 0900fd3606..e59c82cd64 100644 --- a/deps/v8/src/runtime.h +++ b/deps/v8/src/runtime.h @@ -349,7 +349,10 @@ namespace internal { F(HasExternalIntElements, 1, 1) \ F(HasExternalUnsignedIntElements, 1, 1) \ F(HasExternalFloatElements, 1, 1) \ - F(HasExternalDoubleElements, 1, 1) + F(HasExternalDoubleElements, 1, 1) \ + /* profiler */ \ + F(ProfilerResume, 0, 1) \ + F(ProfilerPause, 0, 1) #ifdef ENABLE_DEBUGGER_SUPPORT @@ -427,14 +430,6 @@ namespace internal { #define RUNTIME_FUNCTION_LIST_DEBUGGER_SUPPORT(F) #endif -#ifdef ENABLE_LOGGING_AND_PROFILING -#define RUNTIME_FUNCTION_LIST_PROFILER_SUPPORT(F) \ - F(ProfilerResume, 0, 1) \ - F(ProfilerPause, 0, 1) -#else -#define RUNTIME_FUNCTION_LIST_PROFILER_SUPPORT(F) -#endif - #ifdef DEBUG #define RUNTIME_FUNCTION_LIST_DEBUG(F) \ /* Testing */ \ @@ -452,8 +447,7 @@ namespace internal { RUNTIME_FUNCTION_LIST_ALWAYS_1(F) \ RUNTIME_FUNCTION_LIST_ALWAYS_2(F) \ RUNTIME_FUNCTION_LIST_DEBUG(F) \ - RUNTIME_FUNCTION_LIST_DEBUGGER_SUPPORT(F) \ - RUNTIME_FUNCTION_LIST_PROFILER_SUPPORT(F) + RUNTIME_FUNCTION_LIST_DEBUGGER_SUPPORT(F) // ---------------------------------------------------------------------------- // INLINE_FUNCTION_LIST defines all inlined functions accessed diff --git a/deps/v8/src/scopeinfo.cc b/deps/v8/src/scopeinfo.cc index ccc2cc8200..3e18368f74 100644 --- a/deps/v8/src/scopeinfo.cc +++ b/deps/v8/src/scopeinfo.cc @@ -32,6 +32,8 @@ #include "scopeinfo.h" #include "scopes.h" +#include "allocation-inl.h" + namespace v8 { namespace internal { diff --git a/deps/v8/src/scopes.cc b/deps/v8/src/scopes.cc index 5546875c4b..c6e2a4650e 100644 --- a/deps/v8/src/scopes.cc +++ b/deps/v8/src/scopes.cc @@ -34,6 +34,8 @@ #include "prettyprinter.h" #include "scopeinfo.h" +#include "allocation-inl.h" + namespace v8 { namespace internal { diff --git a/deps/v8/src/serialize.cc b/deps/v8/src/serialize.cc index d960afde40..8cde580fbb 100644 --- a/deps/v8/src/serialize.cc +++ b/deps/v8/src/serialize.cc @@ -284,7 +284,6 @@ void ExternalReferenceTable::PopulateTable(Isolate* isolate) { const char* AddressNames[] = { #define C(name) "Isolate::" #name, ISOLATE_ADDRESS_LIST(C) - ISOLATE_ADDRESS_LIST_PROF(C) NULL #undef C }; diff --git a/deps/v8/src/spaces-inl.h b/deps/v8/src/spaces-inl.h index 070f970577..ca1177f33c 100644 --- 
a/deps/v8/src/spaces-inl.h +++ b/deps/v8/src/spaces-inl.h @@ -378,35 +378,6 @@ bool MemoryAllocator::InInitialChunk(Address address) { } -#ifdef ENABLE_HEAP_PROTECTION - -void MemoryAllocator::Protect(Address start, size_t size) { - OS::Protect(start, size); -} - - -void MemoryAllocator::Unprotect(Address start, - size_t size, - Executability executable) { - OS::Unprotect(start, size, executable); -} - - -void MemoryAllocator::ProtectChunkFromPage(Page* page) { - int id = GetChunkId(page); - OS::Protect(chunks_[id].address(), chunks_[id].size()); -} - - -void MemoryAllocator::UnprotectChunkFromPage(Page* page) { - int id = GetChunkId(page); - OS::Unprotect(chunks_[id].address(), chunks_[id].size(), - chunks_[id].owner()->executable() == EXECUTABLE); -} - -#endif - - // -------------------------------------------------------------------------- // PagedSpace diff --git a/deps/v8/src/spaces.cc b/deps/v8/src/spaces.cc index 23c87cd0c5..d41ce5589c 100644 --- a/deps/v8/src/spaces.cc +++ b/deps/v8/src/spaces.cc @@ -868,30 +868,6 @@ void PagedSpace::TearDown() { } -#ifdef ENABLE_HEAP_PROTECTION - -void PagedSpace::Protect() { - Page* page = first_page_; - while (page->is_valid()) { - Isolate::Current()->memory_allocator()->ProtectChunkFromPage(page); - page = Isolate::Current()->memory_allocator()-> - FindLastPageInSameChunk(page)->next_page(); - } -} - - -void PagedSpace::Unprotect() { - Page* page = first_page_; - while (page->is_valid()) { - Isolate::Current()->memory_allocator()->UnprotectChunkFromPage(page); - page = Isolate::Current()->memory_allocator()-> - FindLastPageInSameChunk(page)->next_page(); - } -} - -#endif - - void PagedSpace::MarkAllPagesClean() { PageIterator it(this, PageIterator::ALL_PAGES); while (it.has_next()) { @@ -1196,7 +1172,6 @@ bool NewSpace::Setup(Address start, int size) { ASSERT(IsPowerOf2(maximum_semispace_capacity)); // Allocate and setup the histogram arrays if necessary. -#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1); promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1); @@ -1204,7 +1179,6 @@ bool NewSpace::Setup(Address start, int size) { promoted_histogram_[name].set_name(#name); INSTANCE_TYPE_LIST(SET_NAME) #undef SET_NAME -#endif ASSERT(size == 2 * heap()->ReservedSemiSpaceSize()); ASSERT(IsAddressAligned(start, size, 0)); @@ -1236,7 +1210,6 @@ bool NewSpace::Setup(Address start, int size) { void NewSpace::TearDown() { -#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) if (allocated_histogram_) { DeleteArray(allocated_histogram_); allocated_histogram_ = NULL; @@ -1245,7 +1218,6 @@ void NewSpace::TearDown() { DeleteArray(promoted_histogram_); promoted_histogram_ = NULL; } -#endif start_ = NULL; allocation_info_.top = NULL; @@ -1258,24 +1230,6 @@ void NewSpace::TearDown() { } -#ifdef ENABLE_HEAP_PROTECTION - -void NewSpace::Protect() { - heap()->isolate()->memory_allocator()->Protect(ToSpaceLow(), Capacity()); - heap()->isolate()->memory_allocator()->Protect(FromSpaceLow(), Capacity()); -} - - -void NewSpace::Unprotect() { - heap()->isolate()->memory_allocator()->Unprotect(ToSpaceLow(), Capacity(), - to_space_.executable()); - heap()->isolate()->memory_allocator()->Unprotect(FromSpaceLow(), Capacity(), - from_space_.executable()); -} - -#endif - - void NewSpace::Flip() { SemiSpace tmp = from_space_; from_space_ = to_space_; @@ -1638,7 +1592,6 @@ static void ReportHistogram(bool print_spill) { // Support for statistics gathering for --heap-stats and --log-gc. 
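// The new-space histograms in the hunks below are now compiled in
// unconditionally: one (count, bytes) bar per instance type, cleared before
// collecting and reported to the log when --log-gc is set. A compact
// stand-in for the RecordAllocation/ClearHistograms/ReportStatistics trio:
#include <cstdio>

constexpr int kTypeCount = 4;  // stand-in for LAST_TYPE + 1

struct HistogramBarSketch {
  int number = 0;
  int bytes = 0;
};

struct NewSpaceStatsSketch {
  HistogramBarSketch allocated[kTypeCount];

  void Clear() {
    for (HistogramBarSketch& bar : allocated) bar = HistogramBarSketch();
  }
  void RecordAllocation(int type, int size) {
    allocated[type].number += 1;
    allocated[type].bytes += size;
  }
  void Report() const {
    for (int t = 0; t < kTypeCount; ++t) {
      if (allocated[t].number == 0) continue;
      std::printf("type %d: %d objects, %d bytes\n", t, allocated[t].number,
                  allocated[t].bytes);
    }
  }
};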
-#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) void NewSpace::ClearHistograms() { for (int i = 0; i <= LAST_TYPE; i++) { allocated_histogram_[i].clear(); @@ -1648,9 +1601,7 @@ void NewSpace::ClearHistograms() { // Because the copying collector does not touch garbage objects, we iterate // the new space before a collection to get a histogram of allocated objects. -// This only happens (1) when compiled with DEBUG and the --heap-stats flag is -// set, or when compiled with ENABLE_LOGGING_AND_PROFILING and the --log-gc -// flag is set. +// This only happens when --log-gc flag is set. void NewSpace::CollectStatistics() { ClearHistograms(); SemiSpaceIterator it(this); @@ -1659,7 +1610,6 @@ void NewSpace::CollectStatistics() { } -#ifdef ENABLE_LOGGING_AND_PROFILING static void DoReportStatistics(Isolate* isolate, HistogramInfo* info, const char* description) { LOG(isolate, HeapSampleBeginEvent("NewSpace", description)); @@ -1686,7 +1636,6 @@ static void DoReportStatistics(Isolate* isolate, } LOG(isolate, HeapSampleEndEvent("NewSpace", description)); } -#endif // ENABLE_LOGGING_AND_PROFILING void NewSpace::ReportStatistics() { @@ -1709,13 +1658,11 @@ void NewSpace::ReportStatistics() { } #endif // DEBUG -#ifdef ENABLE_LOGGING_AND_PROFILING if (FLAG_log_gc) { Isolate* isolate = ISOLATE; DoReportStatistics(isolate, allocated_histogram_, "allocated"); DoReportStatistics(isolate, promoted_histogram_, "promoted"); } -#endif // ENABLE_LOGGING_AND_PROFILING } @@ -1733,7 +1680,6 @@ void NewSpace::RecordPromotion(HeapObject* obj) { promoted_histogram_[type].increment_number(1); promoted_histogram_[type].increment_bytes(obj->Size()); } -#endif // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) // ----------------------------------------------------------------------------- @@ -2809,31 +2755,6 @@ void LargeObjectSpace::TearDown() { } -#ifdef ENABLE_HEAP_PROTECTION - -void LargeObjectSpace::Protect() { - LargeObjectChunk* chunk = first_chunk_; - while (chunk != NULL) { - heap()->isolate()->memory_allocator()->Protect(chunk->address(), - chunk->size()); - chunk = chunk->next(); - } -} - - -void LargeObjectSpace::Unprotect() { - LargeObjectChunk* chunk = first_chunk_; - while (chunk != NULL) { - bool is_code = chunk->GetObject()->IsCode(); - heap()->isolate()->memory_allocator()->Unprotect(chunk->address(), - chunk->size(), is_code ? EXECUTABLE : NOT_EXECUTABLE); - chunk = chunk->next(); - } -} - -#endif - - MaybeObject* LargeObjectSpace::AllocateRawInternal(int requested_size, int object_size, Executability executable) { diff --git a/deps/v8/src/spaces.h b/deps/v8/src/spaces.h index 4024387cd8..c554a37771 100644 --- a/deps/v8/src/spaces.h +++ b/deps/v8/src/spaces.h @@ -380,12 +380,6 @@ class Space : public Malloced { // (e.g. see LargeObjectSpace). virtual intptr_t SizeOfObjects() { return Size(); } -#ifdef ENABLE_HEAP_PROTECTION - // Protect/unprotect the space by marking it read-only/writable. - virtual void Protect() = 0; - virtual void Unprotect() = 0; -#endif - #ifdef DEBUG virtual void Print() = 0; #endif @@ -641,17 +635,6 @@ class MemoryAllocator { Page** last_page, Page** last_page_in_use); -#ifdef ENABLE_HEAP_PROTECTION - // Protect/unprotect a block of memory by marking it read-only/writable. - inline void Protect(Address start, size_t size); - inline void Unprotect(Address start, size_t size, - Executability executable); - - // Protect/unprotect a chunk given a page in the chunk. 
- inline void ProtectChunkFromPage(Page* page); - inline void UnprotectChunkFromPage(Page* page); -#endif - #ifdef DEBUG // Reports statistic info of the space. void ReportStatistics(); @@ -1157,12 +1140,6 @@ class PagedSpace : public Space { // Ensures that the capacity is at least 'capacity'. Returns false on failure. bool EnsureCapacity(int capacity); -#ifdef ENABLE_HEAP_PROTECTION - // Protect/unprotect the space by marking it read-only/writable. - void Protect(); - void Unprotect(); -#endif - #ifdef DEBUG // Print meta info and objects in this space. virtual void Print(); @@ -1270,7 +1247,6 @@ class PagedSpace : public Space { }; -#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) class NumberAndSizeInfo BASE_EMBEDDED { public: NumberAndSizeInfo() : number_(0), bytes_(0) {} @@ -1293,9 +1269,7 @@ class NumberAndSizeInfo BASE_EMBEDDED { // HistogramInfo class for recording a single "bar" of a histogram. This -// class is used for collecting statistics to print to stdout (when compiled -// with DEBUG) or to the log file (when compiled with -// ENABLE_LOGGING_AND_PROFILING). +// class is used for collecting statistics to print to the log file. class HistogramInfo: public NumberAndSizeInfo { public: HistogramInfo() : NumberAndSizeInfo() {} @@ -1306,7 +1280,6 @@ class HistogramInfo: public NumberAndSizeInfo { private: const char* name_; }; -#endif // ----------------------------------------------------------------------------- @@ -1392,12 +1365,6 @@ class SemiSpace : public Space { bool Commit(); bool Uncommit(); -#ifdef ENABLE_HEAP_PROTECTION - // Protect/unprotect the space by marking it read-only/writable. - virtual void Protect() {} - virtual void Unprotect() {} -#endif - #ifdef DEBUG virtual void Print(); virtual void Verify(); @@ -1628,12 +1595,6 @@ class NewSpace : public Space { template <typename StringType> inline void ShrinkStringAtAllocationBoundary(String* string, int len); -#ifdef ENABLE_HEAP_PROTECTION - // Protect/unprotect the space by marking it read-only/writable. - virtual void Protect(); - virtual void Unprotect(); -#endif - #ifdef DEBUG // Verify the active semispace. virtual void Verify(); @@ -1641,7 +1602,6 @@ class NewSpace : public Space { virtual void Print() { to_space_.Print(); } #endif -#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) // Iterates the active semispace to collect statistics. void CollectStatistics(); // Reports previously collected statistics of the active semispace. @@ -1654,7 +1614,6 @@ class NewSpace : public Space { // to space during a scavenge GC. void RecordAllocation(HeapObject* obj); void RecordPromotion(HeapObject* obj); -#endif // Return whether the operation succeded. bool CommitFromSpaceIfNeeded() { @@ -1683,10 +1642,8 @@ class NewSpace : public Space { AllocationInfo allocation_info_; AllocationInfo mc_forwarding_info_; -#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) HistogramInfo* allocated_histogram_; HistogramInfo* promoted_histogram_; -#endif // Implementation of AllocateRaw and MCAllocateRaw. MUST_USE_RESULT inline MaybeObject* AllocateRawInternal( @@ -2296,12 +2253,6 @@ class LargeObjectSpace : public Space { // may use some memory, leaving less for large objects. virtual bool ReserveSpace(int bytes); -#ifdef ENABLE_HEAP_PROTECTION - // Protect/unprotect the space by marking it read-only/writable. 
- void Protect(); - void Unprotect(); -#endif - #ifdef DEBUG virtual void Verify(); virtual void Print(); diff --git a/deps/v8/src/string-stream.cc b/deps/v8/src/string-stream.cc index aea142042b..9002593bdd 100644 --- a/deps/v8/src/string-stream.cc +++ b/deps/v8/src/string-stream.cc @@ -1,4 +1,4 @@ -// Copyright 2006-2008 the V8 project authors. All rights reserved. +// Copyright 2011 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -30,6 +30,8 @@ #include "factory.h" #include "string-stream.h" +#include "allocation-inl.h" + namespace v8 { namespace internal { diff --git a/deps/v8/src/stub-cache.cc b/deps/v8/src/stub-cache.cc index eb813814d4..79cd7a0d22 100644 --- a/deps/v8/src/stub-cache.cc +++ b/deps/v8/src/stub-cache.cc @@ -1686,23 +1686,6 @@ MaybeObject* KeyedLoadStubCompiler::GetCode(PropertyType type, } -MaybeObject* KeyedLoadStubCompiler::ComputeSharedKeyedLoadElementStub( - Map* receiver_map) { - MaybeObject* maybe_stub = NULL; - if (receiver_map->has_fast_elements()) { - maybe_stub = KeyedLoadFastElementStub().TryGetCode(); - } else if (receiver_map->has_external_array_elements()) { - JSObject::ElementsKind elements_kind = receiver_map->elements_kind(); - maybe_stub = KeyedLoadExternalArrayStub(elements_kind).TryGetCode(); - } else if (receiver_map->has_dictionary_elements()) { - maybe_stub = isolate()->builtins()->builtin(Builtins::kKeyedLoadIC_Slow); - } else { - UNREACHABLE(); - } - return maybe_stub; -} - - MaybeObject* StoreStubCompiler::GetCode(PropertyType type, String* name) { Code::Flags flags = Code::ComputeMonomorphicFlags( Code::STORE_IC, type, strict_mode_); @@ -1739,21 +1722,9 @@ MaybeObject* KeyedStoreStubCompiler::GetCode(PropertyType type, } -MaybeObject* KeyedStoreStubCompiler::ComputeSharedKeyedStoreElementStub( - Map* receiver_map) { - MaybeObject* maybe_stub = NULL; - if (receiver_map->has_fast_elements()) { - bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE; - maybe_stub = KeyedStoreFastElementStub(is_js_array).TryGetCode(); - } else if (receiver_map->has_external_array_elements()) { - JSObject::ElementsKind elements_kind = receiver_map->elements_kind(); - maybe_stub = KeyedStoreExternalArrayStub(elements_kind).TryGetCode(); - } else if (receiver_map->has_dictionary_elements()) { - maybe_stub = isolate()->builtins()->builtin(Builtins::kKeyedStoreIC_Slow); - } else { - UNREACHABLE(); - } - return maybe_stub; +void KeyedStoreStubCompiler::GenerateStoreDictionaryElement( + MacroAssembler* masm) { + KeyedStoreIC::GenerateSlow(masm); } diff --git a/deps/v8/src/stub-cache.h b/deps/v8/src/stub-cache.h index fa2676061d..93c50fa988 100644 --- a/deps/v8/src/stub-cache.h +++ b/deps/v8/src/stub-cache.h @@ -662,12 +662,12 @@ class KeyedLoadStubCompiler: public StubCompiler { static void GenerateLoadFastElement(MacroAssembler* masm); + static void GenerateLoadDictionaryElement(MacroAssembler* masm); + private: MaybeObject* GetCode(PropertyType type, String* name, InlineCacheState state = MONOMORPHIC); - - MaybeObject* ComputeSharedKeyedLoadElementStub(Map* receiver_map); }; @@ -720,13 +720,13 @@ class KeyedStoreStubCompiler: public StubCompiler { static void GenerateStoreExternalArray(MacroAssembler* masm, JSObject::ElementsKind elements_kind); + static void GenerateStoreDictionaryElement(MacroAssembler* masm); + private: MaybeObject* GetCode(PropertyType type, String* name, InlineCacheState state = 
MONOMORPHIC); - MaybeObject* ComputeSharedKeyedStoreElementStub(Map* receiver_map); - StrictModeFlag strict_mode_; }; diff --git a/deps/v8/src/type-info.h b/deps/v8/src/type-info.h index 75aabe8c62..0a8c935dfa 100644 --- a/deps/v8/src/type-info.h +++ b/deps/v8/src/type-info.h @@ -30,7 +30,6 @@ #include "allocation.h" #include "globals.h" -#include "zone.h" #include "zone-inl.h" namespace v8 { diff --git a/deps/v8/src/v8globals.h b/deps/v8/src/v8globals.h index a23ca194aa..d86f299f4e 100644 --- a/deps/v8/src/v8globals.h +++ b/deps/v8/src/v8globals.h @@ -395,12 +395,11 @@ struct AccessorDescriptor { }; -// Logging and profiling. -// A StateTag represents a possible state of the VM. When compiled with -// ENABLE_VMSTATE_TRACKING, the logger maintains a stack of these. -// Creating a VMState object enters a state by pushing on the stack, and -// destroying a VMState object leaves a state by popping the current state -// from the stack. +// Logging and profiling. A StateTag represents a possible state of +// the VM. The logger maintains a stack of these. Creating a VMState +// object enters a state by pushing on the stack, and destroying a +// VMState object leaves a state by popping the current state from the +// stack. #define STATE_TAG_LIST(V) \ V(JS) \ diff --git a/deps/v8/src/v8natives.js b/deps/v8/src/v8natives.js index 0afe231c8c..53a03173b3 100644 --- a/deps/v8/src/v8natives.js +++ b/deps/v8/src/v8natives.js @@ -308,6 +308,13 @@ function ObjectLookupSetter(name) { function ObjectKeys(obj) { if (!IS_SPEC_OBJECT(obj)) throw MakeTypeError("obj_ctor_property_non_object", ["keys"]); + if (%IsJSProxy(obj)) { + var handler = %GetHandler(obj); + var keys = handler.keys; + if (IS_UNDEFINED(keys)) keys = DerivedKeysTrap; + var names = %_CallFunction(handler, keys); + return ToStringArray(names); + } return %LocalKeys(obj); } @@ -315,14 +322,14 @@ function ObjectKeys(obj) { // ES5 8.10.1. function IsAccessorDescriptor(desc) { if (IS_UNDEFINED(desc)) return false; - return desc.hasGetter_ || desc.hasSetter_; + return desc.hasGetter() || desc.hasSetter(); } // ES5 8.10.2. function IsDataDescriptor(desc) { if (IS_UNDEFINED(desc)) return false; - return desc.hasValue_ || desc.hasWritable_; + return desc.hasValue() || desc.hasWritable(); } @@ -354,6 +361,19 @@ function FromPropertyDescriptor(desc) { return obj; } +// Harmony Proxies +function FromGenericPropertyDescriptor(desc) { + if (IS_UNDEFINED(desc)) return desc; + var obj = new $Object(); + if (desc.hasValue()) obj.value = desc.getValue(); + if (desc.hasWritable()) obj.writable = desc.isWritable(); + if (desc.hasGetter()) obj.get = desc.getGet(); + if (desc.hasSetter()) obj.set = desc.getSet(); + if (desc.hasEnumerable()) obj.enumerable = desc.isEnumerable(); + if (desc.hasConfigurable()) obj.configurable = desc.isConfigurable(); + return obj; +} + // ES5 8.10.5. function ToPropertyDescriptor(obj) { if (!IS_SPEC_OBJECT(obj)) { @@ -404,15 +424,15 @@ function ToPropertyDescriptor(obj) { function ToCompletePropertyDescriptor(obj) { var desc = ToPropertyDescriptor(obj) if (IsGenericDescriptor(desc) || IsDataDescriptor(desc)) { - if (!("value" in desc)) desc.value = void 0; - if (!("writable" in desc)) desc.writable = false; + if (!desc.hasValue()) desc.setValue(void 0); + if (!desc.hasWritable()) desc.setWritable(false); } else { // Is accessor descriptor. 
- if (!("get" in desc)) desc.get = void 0; - if (!("set" in desc)) desc.set = void 0; + if (!desc.hasGetter()) desc.setGet(void 0); + if (!desc.hasSetter()) desc.setSet(void 0); } - if (!("enumerable" in desc)) desc.enumerable = false; - if (!("configurable" in desc)) desc.configurable = false; + if (!desc.hasEnumerable()) desc.setEnumerable(false); + if (!desc.hasConfigurable()) desc.setConfigurable(false); return desc; } @@ -572,10 +592,10 @@ function GetProperty(obj, p) { throw MakeTypeError("handler_trap_missing", [handler, "getPropertyDescriptor"]); } - var descriptor = getProperty.call(handler, p); + var descriptor = %_CallFunction(handler, p, getProperty); if (IS_UNDEFINED(descriptor)) return descriptor; var desc = ToCompletePropertyDescriptor(descriptor); - if (!desc.configurable) { + if (!desc.isConfigurable()) { throw MakeTypeError("proxy_prop_not_configurable", [handler, "getPropertyDescriptor", p, descriptor]); } @@ -595,7 +615,7 @@ function HasProperty(obj, p) { var handler = %GetHandler(obj); var has = handler.has; if (IS_UNDEFINED(has)) has = DerivedHasTrap; - return ToBoolean(has.call(handler, obj, p)); + return ToBoolean(%_CallFunction(handler, obj, p, has)); } var desc = GetProperty(obj, p); return IS_UNDEFINED(desc) ? false : true; @@ -604,6 +624,23 @@ function HasProperty(obj, p) { // ES5 section 8.12.1. function GetOwnProperty(obj, p) { + if (%IsJSProxy(obj)) { + var handler = %GetHandler(obj); + var getOwnProperty = handler.getOwnPropertyDescriptor; + if (IS_UNDEFINED(getOwnProperty)) { + throw MakeTypeError("handler_trap_missing", + [handler, "getOwnPropertyDescriptor"]); + } + var descriptor = %_CallFunction(handler, p, getOwnProperty); + if (IS_UNDEFINED(descriptor)) return descriptor; + var desc = ToCompletePropertyDescriptor(descriptor); + if (!desc.isConfigurable()) { + throw MakeTypeError("proxy_prop_not_configurable", + [handler, "getOwnPropertyDescriptor", p, descriptor]); + } + return desc; + } + // GetOwnProperty returns an array indexed by the constants // defined in macros.py. // If p is not a property on obj undefined is returned. @@ -616,8 +653,32 @@ function GetOwnProperty(obj, p) { } +// Harmony proxies. +function DefineProxyProperty(obj, p, attributes, should_throw) { + var handler = %GetHandler(obj); + var defineProperty = handler.defineProperty; + if (IS_UNDEFINED(defineProperty)) { + throw MakeTypeError("handler_trap_missing", [handler, "defineProperty"]); + } + var result = %_CallFunction(handler, p, attributes, defineProperty); + if (!ToBoolean(result)) { + if (should_throw) { + throw MakeTypeError("handler_failed", [handler, "defineProperty"]); + } else { + return false; + } + } + return true; +} + + // ES5 8.12.9. function DefineOwnProperty(obj, p, desc, should_throw) { + if (%IsJSProxy(obj)) { + var attributes = FromGenericPropertyDescriptor(desc); + return DefineProxyProperty(obj, p, attributes, should_throw); + } + var current_or_access = %GetOwnProperty(ToObject(obj), ToString(p)); // A false value here means that access checks failed. 
if (current_or_access === false) return void 0; @@ -792,7 +853,8 @@ function ObjectGetPrototypeOf(obj) { // ES5 section 15.2.3.3 function ObjectGetOwnPropertyDescriptor(obj, p) { if (!IS_SPEC_OBJECT(obj)) - throw MakeTypeError("obj_ctor_property_non_object", ["getOwnPropertyDescriptor"]); + throw MakeTypeError("obj_ctor_property_non_object", + ["getOwnPropertyDescriptor"]); var desc = GetOwnProperty(obj, p); return FromPropertyDescriptor(desc); } @@ -831,7 +893,7 @@ function ObjectGetOwnPropertyNames(obj) { throw MakeTypeError("handler_trap_missing", [handler, "getOwnPropertyNames"]); } - var names = getOwnPropertyNames.call(handler); + var names = %_CallFunction(handler, getOwnPropertyNames); return ToStringArray(names, "getOwnPropertyNames"); } @@ -900,8 +962,37 @@ function ObjectDefineProperty(obj, p, attributes) { throw MakeTypeError("obj_ctor_property_non_object", ["defineProperty"]); } var name = ToString(p); - var desc = ToPropertyDescriptor(attributes); - DefineOwnProperty(obj, name, desc, true); + if (%IsJSProxy(obj)) { + // Clone the attributes object for protection. + // TODO(rossberg): not spec'ed yet, so not sure if this should involve + // non-own properties as it does (or non-enumerable ones, as it doesn't?). + var attributesClone = {} + for (var a in attributes) { + attributesClone[a] = attributes[a]; + } + DefineProxyProperty(obj, name, attributesClone, true); + // The following would implement the spec as in the current proposal, + // but after recent comments on es-discuss, is most likely obsolete. + /* + var defineObj = FromGenericPropertyDescriptor(desc); + var names = ObjectGetOwnPropertyNames(attributes); + var standardNames = + {value: 0, writable: 0, get: 0, set: 0, enumerable: 0, configurable: 0}; + for (var i = 0; i < names.length; i++) { + var N = names[i]; + if (!(%HasLocalProperty(standardNames, N))) { + var attr = GetOwnProperty(attributes, N); + DefineOwnProperty(descObj, N, attr, true); + } + } + // This is really confusing the types, but it is what the proxies spec + // currently requires: + desc = descObj; + */ + } else { + var desc = ToPropertyDescriptor(attributes); + DefineOwnProperty(obj, name, desc, true); + } return obj; } diff --git a/deps/v8/src/v8utils.cc b/deps/v8/src/v8utils.cc index 89f9d953ee..bf0e05d05b 100644 --- a/deps/v8/src/v8utils.cc +++ b/deps/v8/src/v8utils.cc @@ -110,11 +110,11 @@ char* ReadLine(const char* prompt) { } -char* ReadCharsFromFile(const char* filename, +char* ReadCharsFromFile(FILE* file, int* size, int extra_space, - bool verbose) { - FILE* file = OS::FOpen(filename, "rb"); + bool verbose, + const char* filename) { if (file == NULL || fseek(file, 0, SEEK_END) != 0) { if (verbose) { OS::PrintError("Cannot read from file %s.\n", filename); @@ -127,16 +127,26 @@ char* ReadCharsFromFile(const char* filename, rewind(file); char* result = NewArray<char>(*size + extra_space); - for (int i = 0; i < *size;) { + for (int i = 0; i < *size && feof(file) == 0;) { int read = static_cast<int>(fread(&result[i], 1, *size - i, file)); - if (read <= 0) { + if (read != (*size - i) && ferror(file) != 0) { fclose(file); DeleteArray(result); return NULL; } i += read; } - fclose(file); + return result; +} + + +char* ReadCharsFromFile(const char* filename, + int* size, + int extra_space, + bool verbose) { + FILE* file = OS::FOpen(filename, "rb"); + char* result = ReadCharsFromFile(file, size, extra_space, verbose, filename); + if (file != NULL) fclose(file); return result; } @@ -147,18 +157,34 @@ byte* ReadBytes(const char* filename, int* size, 
bool verbose) { } +static Vector<const char> SetVectorContents(char* chars, + int size, + bool* exists) { + if (!chars) { + *exists = false; + return Vector<const char>::empty(); + } + chars[size] = '\0'; + *exists = true; + return Vector<const char>(chars, size); +} + + Vector<const char> ReadFile(const char* filename, bool* exists, bool verbose) { int size; char* result = ReadCharsFromFile(filename, &size, 1, verbose); - if (!result) { - *exists = false; - return Vector<const char>::empty(); - } - result[size] = '\0'; - *exists = true; - return Vector<const char>(result, size); + return SetVectorContents(result, size, exists); +} + + +Vector<const char> ReadFile(FILE* file, + bool* exists, + bool verbose) { + int size; + char* result = ReadCharsFromFile(file, &size, 1, verbose, ""); + return SetVectorContents(result, size, exists); } diff --git a/deps/v8/src/v8utils.h b/deps/v8/src/v8utils.h index 498e23dc7b..aada521e4c 100644 --- a/deps/v8/src/v8utils.h +++ b/deps/v8/src/v8utils.h @@ -188,6 +188,9 @@ class AsciiStringAdapter: public v8::String::ExternalAsciiStringResource { Vector<const char> ReadFile(const char* filename, bool* exists, bool verbose = true); +Vector<const char> ReadFile(FILE* file, + bool* exists, + bool verbose = true); diff --git a/deps/v8/src/version.cc b/deps/v8/src/version.cc index f744d4e1d6..ee5411d888 100644 --- a/deps/v8/src/version.cc +++ b/deps/v8/src/version.cc @@ -34,8 +34,8 @@ // cannot be changed without changing the SCons build script. #define MAJOR_VERSION 3 #define MINOR_VERSION 4 -#define BUILD_NUMBER 10 -#define PATCH_LEVEL 0 +#define BUILD_NUMBER 12 +#define PATCH_LEVEL 1 // Use 1 for candidates and 0 otherwise. // (Boolean macro values are not supported by all preprocessors.) #define IS_CANDIDATE_VERSION 0 diff --git a/deps/v8/src/vm-state-inl.h b/deps/v8/src/vm-state-inl.h index 1f363de626..c647e56c97 100644 --- a/deps/v8/src/vm-state-inl.h +++ b/deps/v8/src/vm-state-inl.h @@ -39,7 +39,6 @@ namespace internal { // logger and partially threaded through the call stack. States are pushed by // VMState construction and popped by destruction. // -#ifdef ENABLE_VMSTATE_TRACKING inline const char* StateToString(StateTag state) { switch (state) { case JS: @@ -61,32 +60,16 @@ inline const char* StateToString(StateTag state) { VMState::VMState(Isolate* isolate, StateTag tag) : isolate_(isolate), previous_tag_(isolate->current_vm_state()) { -#ifdef ENABLE_LOGGING_AND_PROFILING if (FLAG_log_state_changes) { LOG(isolate, UncheckedStringEvent("Entering", StateToString(tag))); LOG(isolate, UncheckedStringEvent("From", StateToString(previous_tag_))); } -#endif isolate_->SetCurrentVMState(tag); - -#ifdef ENABLE_HEAP_PROTECTION - if (FLAG_protect_heap) { - if (tag == EXTERNAL) { - // We are leaving V8. - ASSERT(previous_tag_ != EXTERNAL); - isolate_->heap()->Protect(); - } else if (previous_tag_ = EXTERNAL) { - // We are entering V8. - isolate_->heap()->Unprotect(); - } - } -#endif } VMState::~VMState() { -#ifdef ENABLE_LOGGING_AND_PROFILING if (FLAG_log_state_changes) { LOG(isolate_, UncheckedStringEvent("Leaving", @@ -94,32 +77,10 @@ VMState::~VMState() { LOG(isolate_, UncheckedStringEvent("To", StateToString(previous_tag_))); } -#endif // ENABLE_LOGGING_AND_PROFILING - -#ifdef ENABLE_HEAP_PROTECTION - StateTag tag = isolate_->current_vm_state(); -#endif isolate_->SetCurrentVMState(previous_tag_); - -#ifdef ENABLE_HEAP_PROTECTION - if (FLAG_protect_heap) { - if (tag == EXTERNAL) { - // We are reentering V8. 
- ASSERT(previous_tag_ != EXTERNAL); - isolate_->heap()->Unprotect(); - } else if (previous_tag_ == EXTERNAL) { - // We are leaving V8. - isolate_->heap()->Protect(); - } - } -#endif // ENABLE_HEAP_PROTECTION } -#endif // ENABLE_VMSTATE_TRACKING - - -#ifdef ENABLE_LOGGING_AND_PROFILING ExternalCallbackScope::ExternalCallbackScope(Isolate* isolate, Address callback) : isolate_(isolate), previous_callback_(isolate->external_callback()) { @@ -130,8 +91,6 @@ ExternalCallbackScope::~ExternalCallbackScope() { isolate_->set_external_callback(previous_callback_); } -#endif // ENABLE_LOGGING_AND_PROFILING - } } // namespace v8::internal diff --git a/deps/v8/src/vm-state.h b/deps/v8/src/vm-state.h index 2062340f9b..831e2d396b 100644 --- a/deps/v8/src/vm-state.h +++ b/deps/v8/src/vm-state.h @@ -35,7 +35,6 @@ namespace v8 { namespace internal { class VMState BASE_EMBEDDED { -#ifdef ENABLE_VMSTATE_TRACKING public: inline VMState(Isolate* isolate, StateTag tag); inline ~VMState(); @@ -43,26 +42,16 @@ class VMState BASE_EMBEDDED { private: Isolate* isolate_; StateTag previous_tag_; - -#else - public: - VMState(Isolate* isolate, StateTag state) {} -#endif }; class ExternalCallbackScope BASE_EMBEDDED { -#ifdef ENABLE_LOGGING_AND_PROFILING public: inline ExternalCallbackScope(Isolate* isolate, Address callback); inline ~ExternalCallbackScope(); private: Isolate* isolate_; Address previous_callback_; -#else - public: - ExternalCallbackScope(Isolate* isolate, Address callback) {} -#endif }; } } // namespace v8::internal diff --git a/deps/v8/src/x64/code-stubs-x64.cc b/deps/v8/src/x64/code-stubs-x64.cc index 2f40251841..3cf7840d54 100644 --- a/deps/v8/src/x64/code-stubs-x64.cc +++ b/deps/v8/src/x64/code-stubs-x64.cc @@ -642,25 +642,17 @@ void UnaryOpStub::GenerateGenericCodeFallback(MacroAssembler* masm) { } -const char* UnaryOpStub::GetName() { - if (name_ != NULL) return name_; - const int kMaxNameLength = 100; - name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray( - kMaxNameLength); - if (name_ == NULL) return "OOM"; +void UnaryOpStub::PrintName(StringStream* stream) { const char* op_name = Token::Name(op_); const char* overwrite_name = NULL; // Make g++ happy. 
switch (mode_) { case UNARY_NO_OVERWRITE: overwrite_name = "Alloc"; break; case UNARY_OVERWRITE: overwrite_name = "Overwrite"; break; } - - OS::SNPrintF(Vector<char>(name_, kMaxNameLength), - "UnaryOpStub_%s_%s_%s", - op_name, - overwrite_name, - UnaryOpIC::GetName(operand_type_)); - return name_; + stream->Add("UnaryOpStub_%s_%s_%s", + op_name, + overwrite_name, + UnaryOpIC::GetName(operand_type_)); } @@ -721,12 +713,7 @@ void BinaryOpStub::Generate(MacroAssembler* masm) { } -const char* BinaryOpStub::GetName() { - if (name_ != NULL) return name_; - const int kMaxNameLength = 100; - name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray( - kMaxNameLength); - if (name_ == NULL) return "OOM"; +void BinaryOpStub::PrintName(StringStream* stream) { const char* op_name = Token::Name(op_); const char* overwrite_name; switch (mode_) { @@ -735,13 +722,10 @@ const char* BinaryOpStub::GetName() { case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break; default: overwrite_name = "UnknownOverwrite"; break; } - - OS::SNPrintF(Vector<char>(name_, kMaxNameLength), - "BinaryOpStub_%s_%s_%s", - op_name, - overwrite_name, - BinaryOpIC::GetName(operands_type_)); - return name_; + stream->Add("BinaryOpStub_%s_%s_%s", + op_name, + overwrite_name, + BinaryOpIC::GetName(operands_type_)); } @@ -3450,9 +3434,7 @@ void CEntryStub::Generate(MacroAssembler* masm) { void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { Label invoke, exit; -#ifdef ENABLE_LOGGING_AND_PROFILING Label not_outermost_js, not_outermost_js_2; -#endif { // NOLINT. Scope block confuses linter. MacroAssembler::NoRootArrayScope uninitialized_root_register(masm); // Setup frame. @@ -3497,7 +3479,6 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { __ push(c_entry_fp_operand); } -#ifdef ENABLE_LOGGING_AND_PROFILING // If this is the outermost JS call, set js_entry_sp value. ExternalReference js_entry_sp(Isolate::k_js_entry_sp_address, isolate); __ Load(rax, js_entry_sp); @@ -3511,7 +3492,6 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { __ bind(&not_outermost_js); __ Push(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)); __ bind(&cont); -#endif // Call a faked try-block that does the invoke. __ call(&invoke); @@ -3555,7 +3535,6 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { __ PopTryHandler(); __ bind(&exit); -#ifdef ENABLE_LOGGING_AND_PROFILING // Check if the current stack frame is marked as the outermost JS frame. __ pop(rbx); __ Cmp(rbx, Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)); @@ -3563,7 +3542,6 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { __ movq(kScratchRegister, js_entry_sp); __ movq(Operand(kScratchRegister, 0), Immediate(0)); __ bind(&not_outermost_js_2); -#endif // Restore the top frame descriptor from the stack. { Operand c_entry_fp_operand = masm->ExternalOperand(c_entry_fp); @@ -3772,15 +3750,8 @@ int CompareStub::MinorKey() { // Unfortunately you have to run without snapshots to see most of these // names in the profile since most compare stubs end up in the snapshot.
-const char* CompareStub::GetName() { +void CompareStub::PrintName(StringStream* stream) { ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg)); - - if (name_ != NULL) return name_; - const int kMaxNameLength = 100; - name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray( - kMaxNameLength); - if (name_ == NULL) return "OOM"; - const char* cc_name; switch (cc_) { case less: cc_name = "LT"; break; @@ -3791,35 +3762,12 @@ const char* CompareStub::GetName() { case not_equal: cc_name = "NE"; break; default: cc_name = "UnknownCondition"; break; } - - const char* strict_name = ""; - if (strict_ && (cc_ == equal || cc_ == not_equal)) { - strict_name = "_STRICT"; - } - - const char* never_nan_nan_name = ""; - if (never_nan_nan_ && (cc_ == equal || cc_ == not_equal)) { - never_nan_nan_name = "_NO_NAN"; - } - - const char* include_number_compare_name = ""; - if (!include_number_compare_) { - include_number_compare_name = "_NO_NUMBER"; - } - - const char* include_smi_compare_name = ""; - if (!include_smi_compare_) { - include_smi_compare_name = "_NO_SMI"; - } - - OS::SNPrintF(Vector<char>(name_, kMaxNameLength), - "CompareStub_%s%s%s%s", - cc_name, - strict_name, - never_nan_nan_name, - include_number_compare_name, - include_smi_compare_name); - return name_; + bool is_equality = cc_ == equal || cc_ == not_equal; + stream->Add("CompareStub_%s", cc_name); + if (strict_ && is_equality) stream->Add("_STRICT"); + if (never_nan_nan_ && is_equality) stream->Add("_NO_NAN"); + if (!include_number_compare_) stream->Add("_NO_NUMBER"); + if (!include_smi_compare_) stream->Add("_NO_SMI"); } diff --git a/deps/v8/src/x64/code-stubs-x64.h b/deps/v8/src/x64/code-stubs-x64.h index 6a07b3b84d..4058118eef 100644 --- a/deps/v8/src/x64/code-stubs-x64.h +++ b/deps/v8/src/x64/code-stubs-x64.h @@ -66,8 +66,7 @@ class UnaryOpStub: public CodeStub { UnaryOpIC::TypeInfo operand_type = UnaryOpIC::UNINITIALIZED) : op_(op), mode_(mode), - operand_type_(operand_type), - name_(NULL) { + operand_type_(operand_type) { } private: @@ -77,19 +76,7 @@ class UnaryOpStub: public CodeStub { // Operand type information determined at runtime. 
UnaryOpIC::TypeInfo operand_type_; - char* name_; - - const char* GetName(); - -#ifdef DEBUG - void Print() { - PrintF("UnaryOpStub %d (op %s), (mode %d, runtime_type_info %s)\n", - MinorKey(), - Token::String(op_), - static_cast<int>(mode_), - UnaryOpIC::GetName(operand_type_)); - } -#endif + virtual void PrintName(StringStream* stream); class ModeBits: public BitField<UnaryOverwriteMode, 0, 1> {}; class OpBits: public BitField<Token::Value, 1, 7> {}; @@ -149,8 +136,7 @@ class BinaryOpStub: public CodeStub { : op_(op), mode_(mode), operands_type_(BinaryOpIC::UNINITIALIZED), - result_type_(BinaryOpIC::UNINITIALIZED), - name_(NULL) { + result_type_(BinaryOpIC::UNINITIALIZED) { ASSERT(OpBits::is_valid(Token::NUM_TOKENS)); } @@ -161,8 +147,7 @@ class BinaryOpStub: public CodeStub { : op_(OpBits::decode(key)), mode_(ModeBits::decode(key)), operands_type_(operands_type), - result_type_(result_type), - name_(NULL) { } + result_type_(result_type) { } private: enum SmiCodeGenerateHeapNumberResults { @@ -177,20 +162,7 @@ class BinaryOpStub: public CodeStub { BinaryOpIC::TypeInfo operands_type_; BinaryOpIC::TypeInfo result_type_; - char* name_; - - const char* GetName(); - -#ifdef DEBUG - void Print() { - PrintF("BinaryOpStub %d (op %s), " - "(mode %d, runtime_type_info %s)\n", - MinorKey(), - Token::String(op_), - static_cast<int>(mode_), - BinaryOpIC::GetName(operands_type_)); - } -#endif + virtual void PrintName(StringStream* stream); // Minor key encoding in 15 bits RRRTTTOOOOOOOMM. class ModeBits: public BitField<OverwriteMode, 0, 2> {}; @@ -410,14 +382,6 @@ class NumberToStringStub: public CodeStub { int MinorKey() { return 0; } void Generate(MacroAssembler* masm); - - const char* GetName() { return "NumberToStringStub"; } - -#ifdef DEBUG - void Print() { - PrintF("NumberToStringStub\n"); - } -#endif }; @@ -461,13 +425,6 @@ class StringDictionaryLookupStub: public CodeStub { StringDictionary::kHeaderSize + StringDictionary::kElementsStartIndex * kPointerSize; - -#ifdef DEBUG - void Print() { - PrintF("StringDictionaryLookupStub\n"); - } -#endif - Major MajorKey() { return StringDictionaryNegativeLookup; } int MinorKey() { diff --git a/deps/v8/src/x64/codegen-x64.h b/deps/v8/src/x64/codegen-x64.h index 94c7850289..a0648cec64 100644 --- a/deps/v8/src/x64/codegen-x64.h +++ b/deps/v8/src/x64/codegen-x64.h @@ -58,9 +58,7 @@ class CodeGenerator: public AstVisitor { // Print the code after compiling it. static void PrintCode(Handle<Code> code, CompilationInfo* info); -#ifdef ENABLE_LOGGING_AND_PROFILING static bool ShouldGenerateLog(Expression* type); -#endif static bool RecordPositions(MacroAssembler* masm, int pos, diff --git a/deps/v8/src/x64/full-codegen-x64.cc b/deps/v8/src/x64/full-codegen-x64.cc index 6629927675..a54bff59bd 100644 --- a/deps/v8/src/x64/full-codegen-x64.cc +++ b/deps/v8/src/x64/full-codegen-x64.cc @@ -741,7 +741,7 @@ void FullCodeGenerator::EmitDeclaration(Variable* variable, // IDs for bailouts from optimized code. 
ASSERT(prop->obj()->AsVariableProxy() != NULL); { AccumulatorValueContext for_object(this); - EmitVariableLoad(prop->obj()->AsVariableProxy()->var()); + EmitVariableLoad(prop->obj()->AsVariableProxy()); } __ push(rax); VisitForAccumulatorValue(function); @@ -1071,7 +1071,7 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info, void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) { Comment cmnt(masm_, "[ VariableProxy"); - EmitVariableLoad(expr->var()); + EmitVariableLoad(expr); } @@ -1222,7 +1222,11 @@ void FullCodeGenerator::EmitDynamicLoadFromSlotFastCase( } -void FullCodeGenerator::EmitVariableLoad(Variable* var) { +void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) { + // Record position before possible IC call. + SetSourcePosition(proxy->position()); + Variable* var = proxy->var(); + // Three cases: non-this global variables, lookup slots, and all other // types of slots. Slot* slot = var->AsSlot(); @@ -1548,7 +1552,7 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) { { AccumulatorValueContext context(this); switch (assign_type) { case VARIABLE: - EmitVariableLoad(expr->target()->AsVariableProxy()->var()); + EmitVariableLoad(expr->target()->AsVariableProxy()); PrepareForBailout(expr->target(), TOS_REG); break; case NAMED_PROPERTY: @@ -2664,13 +2668,11 @@ void FullCodeGenerator::EmitLog(ZoneList<Expression*>* args) { // with '%2s' (see Logger::LogRuntime for all the formats). // 2 (array): Arguments to the format string. ASSERT_EQ(args->length(), 3); -#ifdef ENABLE_LOGGING_AND_PROFILING if (CodeGenerator::ShouldGenerateLog(args->at(0))) { VisitForStackValue(args->at(1)); VisitForStackValue(args->at(2)); __ CallRuntime(Runtime::kLog, 2); } -#endif // Finally, we're expected to leave a value on the top of the stack. __ LoadRoot(rax, Heap::kUndefinedValueRootIndex); context()->Plug(rax); @@ -3746,7 +3748,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) { if (assign_type == VARIABLE) { ASSERT(expr->expression()->AsVariableProxy()->var() != NULL); AccumulatorValueContext context(this); - EmitVariableLoad(expr->expression()->AsVariableProxy()->var()); + EmitVariableLoad(expr->expression()->AsVariableProxy()); } else { // Reserve space for result of postfix operation. if (expr->is_postfix() && !context()->IsEffect()) { diff --git a/deps/v8/src/x64/ic-x64.cc b/deps/v8/src/x64/ic-x64.cc index 342f672e64..339d2c19ce 100644 --- a/deps/v8/src/x64/ic-x64.cc +++ b/deps/v8/src/x64/ic-x64.cc @@ -225,110 +225,6 @@ static void GenerateDictionaryStore(MacroAssembler* masm, } -static void GenerateNumberDictionaryLoad(MacroAssembler* masm, - Label* miss, - Register elements, - Register key, - Register r0, - Register r1, - Register r2, - Register result) { - // Register use: - // - // elements - holds the slow-case elements of the receiver on entry. - // Unchanged unless 'result' is the same register. - // - // key - holds the smi key on entry. - // Unchanged unless 'result' is the same register. - // - // Scratch registers: - // - // r0 - holds the untagged key on entry and holds the hash once computed. - // - // r1 - used to hold the capacity mask of the dictionary - // - // r2 - used for the index into the dictionary. - // - // result - holds the result on exit if the load succeeded. - // Allowed to be the same as 'key' or 'result'. - // Unchanged on bailout so 'key' or 'result' can be used - // in further computation. - - Label done; - - // Compute the hash code from the untagged key. 
This must be kept in sync - // with ComputeIntegerHash in utils.h. - // - // hash = ~hash + (hash << 15); - __ movl(r1, r0); - __ notl(r0); - __ shll(r1, Immediate(15)); - __ addl(r0, r1); - // hash = hash ^ (hash >> 12); - __ movl(r1, r0); - __ shrl(r1, Immediate(12)); - __ xorl(r0, r1); - // hash = hash + (hash << 2); - __ leal(r0, Operand(r0, r0, times_4, 0)); - // hash = hash ^ (hash >> 4); - __ movl(r1, r0); - __ shrl(r1, Immediate(4)); - __ xorl(r0, r1); - // hash = hash * 2057; - __ imull(r0, r0, Immediate(2057)); - // hash = hash ^ (hash >> 16); - __ movl(r1, r0); - __ shrl(r1, Immediate(16)); - __ xorl(r0, r1); - - // Compute capacity mask. - __ SmiToInteger32(r1, - FieldOperand(elements, NumberDictionary::kCapacityOffset)); - __ decl(r1); - - // Generate an unrolled loop that performs a few probes before giving up. - const int kProbes = 4; - for (int i = 0; i < kProbes; i++) { - // Use r2 for index calculations and keep the hash intact in r0. - __ movq(r2, r0); - // Compute the masked index: (hash + i + i * i) & mask. - if (i > 0) { - __ addl(r2, Immediate(NumberDictionary::GetProbeOffset(i))); - } - __ and_(r2, r1); - - // Scale the index by multiplying by the entry size. - ASSERT(NumberDictionary::kEntrySize == 3); - __ lea(r2, Operand(r2, r2, times_2, 0)); // r2 = r2 * 3 - - // Check if the key matches. - __ cmpq(key, FieldOperand(elements, - r2, - times_pointer_size, - NumberDictionary::kElementsStartOffset)); - if (i != (kProbes - 1)) { - __ j(equal, &done); - } else { - __ j(not_equal, miss); - } - } - - __ bind(&done); - // Check that the value is a normal propety. - const int kDetailsOffset = - NumberDictionary::kElementsStartOffset + 2 * kPointerSize; - ASSERT_EQ(NORMAL, 0); - __ Test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset), - Smi::FromInt(PropertyDetails::TypeField::mask())); - __ j(not_zero, miss); - - // Get the value at the masked, scaled index. - const int kValueOffset = - NumberDictionary::kElementsStartOffset + kPointerSize; - __ movq(result, FieldOperand(elements, r2, times_pointer_size, kValueOffset)); -} - - void LoadIC::GenerateArrayLength(MacroAssembler* masm) { // ----------- S t a t e ------------- // -- rax : receiver @@ -535,7 +431,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { __ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset), Heap::kHashTableMapRootIndex); __ j(not_equal, &slow); - GenerateNumberDictionaryLoad(masm, &slow, rcx, rax, rbx, r9, rdi, rax); + __ LoadFromNumberDictionary(&slow, rcx, rax, rbx, r9, rdi, rax); __ ret(0); __ bind(&slow); @@ -1099,7 +995,7 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) { __ j(not_equal, &slow_load); __ SmiToInteger32(rbx, rcx); // ebx: untagged index - GenerateNumberDictionaryLoad(masm, &slow_load, rax, rcx, rbx, r9, rdi, rdi); + __ LoadFromNumberDictionary(&slow_load, rax, rcx, rbx, r9, rdi, rdi); __ IncrementCounter(counters->keyed_call_generic_smi_dict(), 1); __ jmp(&do_call); diff --git a/deps/v8/src/x64/lithium-codegen-x64.cc b/deps/v8/src/x64/lithium-codegen-x64.cc index 3ebdc7cee7..98667ce87e 100644 --- a/deps/v8/src/x64/lithium-codegen-x64.cc +++ b/deps/v8/src/x64/lithium-codegen-x64.cc @@ -1341,6 +1341,7 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) { BinaryOpStub stub(instr->op(), NO_OVERWRITE); CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); + __ nop(); // Signals no inlined code. 
} diff --git a/deps/v8/src/x64/macro-assembler-x64.cc b/deps/v8/src/x64/macro-assembler-x64.cc index dbed6e0fda..1df0228434 100644 --- a/deps/v8/src/x64/macro-assembler-x64.cc +++ b/deps/v8/src/x64/macro-assembler-x64.cc @@ -3204,6 +3204,109 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg, } +void MacroAssembler::LoadFromNumberDictionary(Label* miss, + Register elements, + Register key, + Register r0, + Register r1, + Register r2, + Register result) { + // Register use: + // + // elements - holds the slow-case elements of the receiver on entry. + // Unchanged unless 'result' is the same register. + // + // key - holds the smi key on entry. + // Unchanged unless 'result' is the same register. + // + // Scratch registers: + // + // r0 - holds the untagged key on entry and holds the hash once computed. + // + // r1 - used to hold the capacity mask of the dictionary + // + // r2 - used for the index into the dictionary. + // + // result - holds the result on exit if the load succeeded. + // Allowed to be the same as 'key' or 'result'. + // Unchanged on bailout so 'key' or 'result' can be used + // in further computation. + + Label done; + + // Compute the hash code from the untagged key. This must be kept in sync + // with ComputeIntegerHash in utils.h. + // + // hash = ~hash + (hash << 15); + movl(r1, r0); + notl(r0); + shll(r1, Immediate(15)); + addl(r0, r1); + // hash = hash ^ (hash >> 12); + movl(r1, r0); + shrl(r1, Immediate(12)); + xorl(r0, r1); + // hash = hash + (hash << 2); + leal(r0, Operand(r0, r0, times_4, 0)); + // hash = hash ^ (hash >> 4); + movl(r1, r0); + shrl(r1, Immediate(4)); + xorl(r0, r1); + // hash = hash * 2057; + imull(r0, r0, Immediate(2057)); + // hash = hash ^ (hash >> 16); + movl(r1, r0); + shrl(r1, Immediate(16)); + xorl(r0, r1); + + // Compute capacity mask. + SmiToInteger32(r1, + FieldOperand(elements, NumberDictionary::kCapacityOffset)); + decl(r1); + + // Generate an unrolled loop that performs a few probes before giving up. + const int kProbes = 4; + for (int i = 0; i < kProbes; i++) { + // Use r2 for index calculations and keep the hash intact in r0. + movq(r2, r0); + // Compute the masked index: (hash + i + i * i) & mask. + if (i > 0) { + addl(r2, Immediate(NumberDictionary::GetProbeOffset(i))); + } + and_(r2, r1); + + // Scale the index by multiplying by the entry size. + ASSERT(NumberDictionary::kEntrySize == 3); + lea(r2, Operand(r2, r2, times_2, 0)); // r2 = r2 * 3 + + // Check if the key matches. + cmpq(key, FieldOperand(elements, + r2, + times_pointer_size, + NumberDictionary::kElementsStartOffset)); + if (i != (kProbes - 1)) { + j(equal, &done); + } else { + j(not_equal, miss); + } + } + + bind(&done); + // Check that the value is a normal propety. + const int kDetailsOffset = + NumberDictionary::kElementsStartOffset + 2 * kPointerSize; + ASSERT_EQ(NORMAL, 0); + Test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset), + Smi::FromInt(PropertyDetails::TypeField::mask())); + j(not_zero, miss); + + // Get the value at the masked, scaled index. 
+ const int kValueOffset = + NumberDictionary::kElementsStartOffset + kPointerSize; + movq(result, FieldOperand(elements, r2, times_pointer_size, kValueOffset)); +} + + void MacroAssembler::LoadAllocationTopHelper(Register result, Register scratch, AllocationFlags flags) { diff --git a/deps/v8/src/x64/macro-assembler-x64.h b/deps/v8/src/x64/macro-assembler-x64.h index f09fafc202..47ce01bd0c 100644 --- a/deps/v8/src/x64/macro-assembler-x64.h +++ b/deps/v8/src/x64/macro-assembler-x64.h @@ -846,6 +846,15 @@ class MacroAssembler: public Assembler { Label* miss); + void LoadFromNumberDictionary(Label* miss, + Register elements, + Register key, + Register r0, + Register r1, + Register r2, + Register result); + + // --------------------------------------------------------------------------- // Allocation support diff --git a/deps/v8/src/x64/regexp-macro-assembler-x64.h b/deps/v8/src/x64/regexp-macro-assembler-x64.h index 02b510fa07..7102225e64 100644 --- a/deps/v8/src/x64/regexp-macro-assembler-x64.h +++ b/deps/v8/src/x64/regexp-macro-assembler-x64.h @@ -28,6 +28,12 @@ #ifndef V8_X64_REGEXP_MACRO_ASSEMBLER_X64_H_ #define V8_X64_REGEXP_MACRO_ASSEMBLER_X64_H_ +#include "x64/assembler-x64.h" +#include "x64/assembler-x64-inl.h" +#include "macro-assembler.h" +#include "code.h" +#include "x64/macro-assembler-x64.h" + namespace v8 { namespace internal { diff --git a/deps/v8/src/x64/stub-cache-x64.cc b/deps/v8/src/x64/stub-cache-x64.cc index da27fdf050..71ce856169 100644 --- a/deps/v8/src/x64/stub-cache-x64.cc +++ b/deps/v8/src/x64/stub-cache-x64.cc @@ -2538,7 +2538,10 @@ MaybeObject* KeyedStoreStubCompiler::CompileStoreElement(Map* receiver_map) { // -- rsp[0] : return address // ----------------------------------- Code* stub; - MaybeObject* maybe_stub = ComputeSharedKeyedStoreElementStub(receiver_map); + JSObject::ElementsKind elements_kind = receiver_map->elements_kind(); + bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE; + MaybeObject* maybe_stub = + KeyedStoreElementStub(is_js_array, elements_kind).TryGetCode(); if (!maybe_stub->To(&stub)) return maybe_stub; __ DispatchMap(rdx, Handle<Map>(receiver_map), @@ -2994,7 +2997,8 @@ MaybeObject* KeyedLoadStubCompiler::CompileLoadElement(Map* receiver_map) { // -- rsp[0] : return address // ----------------------------------- Code* stub; - MaybeObject* maybe_stub = ComputeSharedKeyedLoadElementStub(receiver_map); + JSObject::ElementsKind elements_kind = receiver_map->elements_kind(); + MaybeObject* maybe_stub = KeyedLoadElementStub(elements_kind).TryGetCode(); if (!maybe_stub->To(&stub)) return maybe_stub; __ DispatchMap(rdx, Handle<Map>(receiver_map), @@ -3177,6 +3181,51 @@ MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) { #define __ ACCESS_MASM(masm) +void KeyedLoadStubCompiler::GenerateLoadDictionaryElement( + MacroAssembler* masm) { + // ----------- S t a t e ------------- + // -- rax : key + // -- rdx : receiver + // -- rsp[0] : return address + // ----------------------------------- + Label slow, miss_force_generic; + + // This stub is meant to be tail-jumped to, the receiver must already + // have been verified by the caller to not be a smi. + + __ JumpIfNotSmi(rax, &miss_force_generic); + __ SmiToInteger32(rbx, rax); + __ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset)); + + // Check whether the elements is a number dictionary. 
+ // rdx: receiver + // rax: key + // rbx: key as untagged int32 + // rcx: elements + __ LoadFromNumberDictionary(&slow, rcx, rax, rbx, r9, rdi, rax); + __ ret(0); + + __ bind(&slow); + // ----------- S t a t e ------------- + // -- rax : key + // -- rdx : receiver + // -- rsp[0] : return address + // ----------------------------------- + Handle<Code> slow_ic = + masm->isolate()->builtins()->KeyedLoadIC_Slow(); + __ jmp(slow_ic, RelocInfo::CODE_TARGET); + + __ bind(&miss_force_generic); + // ----------- S t a t e ------------- + // -- rax : key + // -- rdx : receiver + // -- rsp[0] : return address + // ----------------------------------- + Handle<Code> miss_ic = + masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric(); + __ jmp(miss_ic, RelocInfo::CODE_TARGET); +} + void KeyedLoadStubCompiler::GenerateLoadExternalArray( MacroAssembler* masm, JSObject::ElementsKind elements_kind) { diff --git a/deps/v8/src/zone.h b/deps/v8/src/zone.h index a5e437f7fa..af9c916d70 100644 @@ -164,7 +164,7 @@ class AssertNoZoneAllocation { class ZoneListAllocationPolicy { public: // Allocate 'size' bytes of memory in the zone. - INLINE(static void* New(int size)); + static void* New(int size); // De-allocation attempts are silently ignored. static void Delete(void* p) { }
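
A note on the v8utils.cc hunk above: the old filename-based ReadCharsFromFile is split into a FILE*-based core plus a thin wrapper that opens and closes the handle, which is what makes the new ReadFile(FILE*, bool*, bool) overload possible. The following compressed sketch shows that shape; error handling is simplified and the *Sketch names are ours, not V8 declarations.

    #include <cstdio>
    #include <cstdlib>

    // Core reader: operates on an already-open stream and never closes it;
    // the caller owns the handle. Mirrors the shape of the new
    // ReadCharsFromFile(FILE*, ...) core, with simplified error handling.
    static char* ReadCharsSketch(FILE* file, int* size) {
      if (file == NULL || fseek(file, 0, SEEK_END) != 0) return NULL;
      long length = ftell(file);
      if (length < 0) return NULL;
      *size = static_cast<int>(length);
      rewind(file);
      // calloc so the buffer is zero-filled and already NUL-terminated.
      char* result = static_cast<char*>(calloc(*size + 1, 1));
      for (int i = 0; i < *size && feof(file) == 0;) {
        int read = static_cast<int>(fread(&result[i], 1, *size - i, file));
        if (read != (*size - i) && ferror(file) != 0) {
          free(result);
          return NULL;
        }
        i += read;
      }
      return result;
    }

    // Wrapper: opens by name, delegates, and closes the handle it opened,
    // whether or not the read succeeded.
    static char* ReadCharsSketch(const char* filename, int* size) {
      FILE* file = fopen(filename, "rb");
      char* result = ReadCharsSketch(file, size);
      if (file != NULL) fclose(file);
      return result;
    }

Keeping handle ownership in the wrapper also tidies the old early-return path: previously a failed fseek could return without an fclose, while the new wrapper closes the file unconditionally.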
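The vm-state changes read well as a pattern: with ENABLE_VMSTATE_TRACKING gone, VMState is always a real RAII scope. Below is a minimal self-contained sketch with a reduced tag set and a stand-in Isolate; the real constructor and destructor also emit the FLAG_log_state_changes events shown in the diff.

    // Reduced tag set and a stand-in Isolate so the sketch is self-contained;
    // the real types live in v8/src and carry much more.
    enum StateTag { JS, EXTERNAL, OTHER };

    class IsolateSketch {
     public:
      StateTag current_vm_state() const { return state_; }
      void SetCurrentVMState(StateTag tag) { state_ = tag; }
     private:
      StateTag state_ = OTHER;
    };

    // The RAII pattern from vm-state-inl.h: construction saves the active
    // tag and pushes the new one, destruction pops back to the saved tag.
    class VMStateSketch {
     public:
      VMStateSketch(IsolateSketch* isolate, StateTag tag)
          : isolate_(isolate), previous_tag_(isolate->current_vm_state()) {
        isolate_->SetCurrentVMState(tag);
      }
      ~VMStateSketch() {
        isolate_->SetCurrentVMState(previous_tag_);
      }
     private:
      IsolateSketch* isolate_;
      StateTag previous_tag_;
    };

Because the pop happens in the destructor, every exit path from a scope that stack-constructs one of these, including early returns, restores the previous tag.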
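The code-stubs-x64.cc hunks replace the lazy GetName() implementations, each of which formatted into a 100-byte buffer from AllocateAutoDeletedArray, with virtual PrintName(StringStream*) methods that append directly to a caller-owned stream. A sketch of the CompareStub version follows, under the assumption that only a printf-style Add is needed; StreamSketch stands in for the real StringStream.

    #include <cstdarg>
    #include <cstdio>
    #include <string>

    // Stand-in exposing only the printf-style Add the diff calls; the real
    // v8::internal::StringStream is much richer.
    class StreamSketch {
     public:
      void Add(const char* format, ...) {
        char buf[128];
        va_list args;
        va_start(args, format);
        vsnprintf(buf, sizeof(buf), format, args);
        va_end(args);
        buffer_ += buf;
      }
      const std::string& str() const { return buffer_; }
     private:
      std::string buffer_;
    };

    // Shape of the new CompareStub::PrintName: append the base name, then
    // each suffix only when it applies, instead of pre-formatting four
    // suffix strings into a fixed-size heap buffer.
    static void PrintCompareStubNameSketch(StreamSketch* stream,
                                           const char* cc_name,
                                           bool strict,
                                           bool never_nan_nan,
                                           bool include_number_compare,
                                           bool include_smi_compare,
                                           bool is_equality) {
      stream->Add("CompareStub_%s", cc_name);
      if (strict && is_equality) stream->Add("_STRICT");
      if (never_nan_nan && is_equality) stream->Add("_NO_NAN");
      if (!include_number_compare) stream->Add("_NO_NUMBER");
      if (!include_smi_compare) stream->Add("_NO_SMI");
    }

The conditional Adds replace the four intermediate suffix variables of the old OS::SNPrintF call, and the "OOM" fallback string disappears because nothing is heap-allocated per stub anymore.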
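Finally, the biggest code motion: GenerateNumberDictionaryLoad leaves ic-x64.cc and becomes MacroAssembler::LoadFromNumberDictionary, so stub-cache-x64.cc can reuse it from the new GenerateLoadDictionaryElement. Its comments require the bit mixing to stay in sync with ComputeIntegerHash in utils.h; this plain C++ rendering of those steps (the function name is ours, not the utils.h declaration) is handy when checking the assembly line by line.

    #include <stdint.h>

    // Plain C++ rendering of the mixing steps in LoadFromNumberDictionary,
    // in the same order as the assembly comments.
    static uint32_t ComputeIntegerHashSketch(uint32_t hash) {
      hash = ~hash + (hash << 15);  // movl r1,r0; notl r0; shll r1,15; addl
      hash = hash ^ (hash >> 12);
      hash = hash + (hash << 2);    // leal(r0, Operand(r0, r0, times_4, 0))
      hash = hash ^ (hash >> 4);
      hash = hash * 2057;           // imull(r0, r0, Immediate(2057))
      hash = hash ^ (hash >> 16);
      return hash;
    }

The probe loop that follows in the assembly is the diff's own comment made concrete: four unrolled probes at the masked index (hash + i + i * i) & mask, each entry three pointers wide (NumberDictionary::kEntrySize == 3), jumping to the miss label only on the final mismatch.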