author | Ben Noordhuis <info@bnoordhuis.nl> | 2013-04-29 22:35:21 +0200
---|---|---
committer | Ben Noordhuis <info@bnoordhuis.nl> | 2013-04-29 22:35:21 +0200
commit | 2f75785c015fecc33565fe5ee3a483b0d4e5cc6d (patch) |
tree | ad66b4eaba8cedfeb7cfb40b0871307ae08d25e8 /deps/v8/src |
parent | 5ddf7f4200894a7304d7c07bbbd8773fac3509d1 (diff) |
download | node-new-2f75785c015fecc33565fe5ee3a483b0d4e5cc6d.tar.gz |
deps: upgrade v8 to 3.18.4
Diffstat (limited to 'deps/v8/src')
183 files changed, 6928 insertions, 3439 deletions
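The most visible embedder-facing change in this upgrade is the reworked string-construction API: `v8::String::New`, `String::NewSymbol`, and `String::NewUndetectable` are replaced by `String::NewFromUtf8`, `String::NewFromOneByte`, and `String::NewFromTwoByte`, each taking an explicit `Isolate*` and a `NewStringType` (see the api.cc hunks below). A minimal sketch of the migration, assuming a 3.18 embedder with an entered isolate and that the enum also carries the `kNormalString` default from the 3.18 header; `CreateStrings` is a hypothetical name:

```cpp
#include <v8.h>

// Illustrative sketch (not part of the diff): the 3.18 string constructors.
// Assumes an entered isolate; error handling omitted.
void CreateStrings(v8::Isolate* isolate) {
  v8::HandleScope scope;

  // Before: v8::String::New("hello");
  // After: the isolate and string type are explicit; length -1 means
  // "NUL-terminated, measure it".
  v8::Local<v8::String> utf8 =
      v8::String::NewFromUtf8(isolate, "hello", v8::String::kNormalString, -1);

  // Internalized strings replace the old String::NewSymbol().
  v8::Local<v8::String> name = v8::String::NewFromUtf8(
      isolate, "length", v8::String::kInternalizedString, -1);

  // Two-byte (UTF-16) input goes through NewFromTwoByte.
  const uint16_t wide[] = { 'h', 'i', 0 };
  v8::Local<v8::String> two =
      v8::String::NewFromTwoByte(isolate, wide, v8::String::kNormalString, -1);

  (void)utf8; (void)name; (void)two;
}
```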
diff --git a/deps/v8/src/accessors.cc b/deps/v8/src/accessors.cc index 0b0f9b0757..64047a2847 100644 --- a/deps/v8/src/accessors.cc +++ b/deps/v8/src/accessors.cc @@ -441,6 +441,13 @@ const AccessorDescriptor Accessors::ScriptEvalFromFunctionName = { // +Handle<Object> Accessors::FunctionGetPrototype(Handle<Object> object) { + Isolate* isolate = Isolate::Current(); + CALL_HEAP_FUNCTION( + isolate, Accessors::FunctionGetPrototype(*object, 0), Object); +} + + MaybeObject* Accessors::FunctionGetPrototype(Object* object, void*) { Isolate* isolate = Isolate::Current(); JSFunction* function = FindInstanceOf<JSFunction>(isolate, object); diff --git a/deps/v8/src/accessors.h b/deps/v8/src/accessors.h index 0740d92e56..9a83ab8a85 100644 --- a/deps/v8/src/accessors.h +++ b/deps/v8/src/accessors.h @@ -79,6 +79,8 @@ class Accessors : public AllStatic { // Accessor functions called directly from the runtime system. MUST_USE_RESULT static MaybeObject* FunctionGetPrototype(Object* object, void*); + static Handle<Object> FunctionGetPrototype(Handle<Object> object); + MUST_USE_RESULT static MaybeObject* FunctionSetPrototype(JSObject* object, Object* value, void*); diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc index 15831ec6a8..2b24ab07f6 100644 --- a/deps/v8/src/api.cc +++ b/deps/v8/src/api.cc @@ -27,8 +27,8 @@ #include "api.h" -#include <math.h> // For isnan. #include <string.h> // For memcpy, strlen. +#include <cmath> // For isnan. #include "../include/v8-debug.h" #include "../include/v8-profiler.h" #include "../include/v8-testing.h" @@ -52,6 +52,7 @@ #include "profile-generator-inl.h" #include "property-details.h" #include "property.h" +#include "runtime.h" #include "runtime-profiler.h" #include "scanner-character-streams.h" #include "snapshot.h" @@ -63,11 +64,9 @@ #define LOG_API(isolate, expr) LOG(isolate, ApiEntryCall(expr)) -#define ENTER_V8(isolate) \ - ASSERT((isolate)->IsInitialized()); \ - i::VMState __state__((isolate), i::OTHER) -#define LEAVE_V8(isolate) \ - i::VMState __state__((isolate), i::EXTERNAL) +#define ENTER_V8(isolate) \ + ASSERT((isolate)->IsInitialized()); \ + i::VMState<i::OTHER> __state__((isolate)) namespace v8 { @@ -131,7 +130,7 @@ static void DefaultFatalErrorHandler(const char* location, const char* message) { i::Isolate* isolate = i::Isolate::Current(); if (isolate->IsInitialized()) { - i::VMState __state__(isolate, i::OTHER); + i::VMState<i::OTHER> state(isolate); API_Fatal(location, message); } else { API_Fatal(location, message); @@ -216,14 +215,7 @@ void i::V8::FatalProcessOutOfMemory(const char* location, bool take_snapshot) { i::V8::SetFatalError(); FatalErrorCallback callback = GetFatalErrorHandler(); const char* message = "Allocation failed - process out of memory"; - { - if (isolate->IsInitialized()) { - LEAVE_V8(isolate); - callback(location, message); - } else { - callback(location, message); - } - } + callback(location, message); // If the callback returns, we stop execution. 
UNREACHABLE(); } @@ -1909,7 +1901,8 @@ v8::TryCatch::TryCatch() is_verbose_(false), can_continue_(true), capture_message_(true), - rethrow_(false) { + rethrow_(false), + has_terminated_(false) { isolate_->RegisterTryCatchHandler(this); } @@ -1937,6 +1930,11 @@ bool v8::TryCatch::CanContinue() const { } +bool v8::TryCatch::HasTerminated() const { + return has_terminated_; +} + + v8::Handle<v8::Value> v8::TryCatch::ReThrow() { if (!HasCaught()) return v8::Local<v8::Value>(); rethrow_ = true; @@ -2748,6 +2746,15 @@ void v8::Array::CheckCast(Value* that) { } +void v8::ArrayBuffer::CheckCast(Value* that) { + if (IsDeadCheck(i::Isolate::Current(), "v8::ArrayBuffer::Cast()")) return; + i::Handle<i::Object> obj = Utils::OpenHandle(that); + ApiCheck(obj->IsJSArrayBuffer(), + "v8::ArrayBuffer::Cast()", + "Could not convert to ArrayBuffer"); +} + + void v8::Date::CheckCast(v8::Value* that) { i::Isolate* isolate = i::Isolate::Current(); if (IsDeadCheck(isolate, "v8::Date::Cast()")) return; @@ -2984,7 +2991,7 @@ bool Value::StrictEquals(Handle<Value> that) const { double x = obj->Number(); double y = other->Number(); // Must check explicitly for NaN:s on Windows, but -0 works fine. - return x == y && !isnan(x) && !isnan(y); + return x == y && !std::isnan(x) && !std::isnan(y); } else if (*obj == *other) { // Also covers Booleans. return true; } else if (obj->IsSmi()) { @@ -4048,14 +4055,6 @@ int String::Length() const { return str->length(); } -bool String::MayContainNonAscii() const { - i::Handle<i::String> str = Utils::OpenHandle(this); - if (IsDeadCheck(str->GetIsolate(), "v8::String::MayContainNonAscii()")) { - return false; - } - return !str->HasOnlyAsciiChars(); -} - bool String::IsOneByte() const { i::Handle<i::String> str = Utils::OpenHandle(this); @@ -4509,25 +4508,6 @@ int String::WriteAscii(char* buffer, FlattenString(str); // Flatten the string for efficiency. } - if (str->HasOnlyAsciiChars()) { - // WriteToFlat is faster than using the StringCharacterStream. 
- if (length == -1) length = str->length() + 1; - int len = i::Min(length, str->length() - start); - i::String::WriteToFlat(*str, - reinterpret_cast<uint8_t*>(buffer), - start, - start + len); - if (!(options & PRESERVE_ASCII_NULL)) { - for (int i = 0; i < len; i++) { - if (buffer[i] == '\0') buffer[i] = ' '; - } - } - if (!(options & NO_NULL_TERMINATION) && length > len) { - buffer[len] = '\0'; - } - return len; - } - int end = length; if ((length == -1) || (length > str->length() - start)) { end = str->length() - start; @@ -5283,78 +5263,131 @@ Local<String> v8::String::Empty() { } -Local<String> v8::String::New(const char* data, int length) { - i::Isolate* isolate = i::Isolate::Current(); - EnsureInitializedForIsolate(isolate, "v8::String::New()"); - LOG_API(isolate, "String::New(char)"); - if (length == 0) return Empty(); - ENTER_V8(isolate); - if (length == -1) length = i::StrLength(data); - i::Handle<i::String> result = - isolate->factory()->NewStringFromUtf8( - i::Vector<const char>(data, length)); - return Utils::ToLocal(result); -} - +// anonymous namespace for string creation helper functions +namespace { -Local<String> v8::String::Concat(Handle<String> left, Handle<String> right) { - i::Handle<i::String> left_string = Utils::OpenHandle(*left); - i::Isolate* isolate = left_string->GetIsolate(); - EnsureInitializedForIsolate(isolate, "v8::String::New()"); - LOG_API(isolate, "String::New(char)"); - ENTER_V8(isolate); - i::Handle<i::String> right_string = Utils::OpenHandle(*right); - i::Handle<i::String> result = isolate->factory()->NewConsString(left_string, - right_string); - return Utils::ToLocal(result); +inline int StringLength(const char* string) { + return i::StrLength(string); } -Local<String> v8::String::NewUndetectable(const char* data, int length) { - i::Isolate* isolate = i::Isolate::Current(); - EnsureInitializedForIsolate(isolate, "v8::String::NewUndetectable()"); - LOG_API(isolate, "String::NewUndetectable(char)"); - ENTER_V8(isolate); - if (length == -1) length = i::StrLength(data); - i::Handle<i::String> result = - isolate->factory()->NewStringFromUtf8( - i::Vector<const char>(data, length)); - result->MarkAsUndetectable(); - return Utils::ToLocal(result); +inline int StringLength(const uint8_t* string) { + return i::StrLength(reinterpret_cast<const char*>(string)); } -static int TwoByteStringLength(const uint16_t* data) { +inline int StringLength(const uint16_t* string) { int length = 0; - while (data[length] != '\0') length++; + while (string[length] != '\0') + length++; return length; } -Local<String> v8::String::New(const uint16_t* data, int length) { - i::Isolate* isolate = i::Isolate::Current(); - EnsureInitializedForIsolate(isolate, "v8::String::New()"); - LOG_API(isolate, "String::New(uint16_)"); - if (length == 0) return Empty(); +inline i::Handle<i::String> NewString(i::Factory* factory, + String::NewStringType type, + i::Vector<const char> string) { + if (type ==String::kInternalizedString) { + return factory->InternalizeUtf8String(string); + } + return factory->NewStringFromUtf8(string); +} + + +inline i::Handle<i::String> NewString(i::Factory* factory, + String::NewStringType type, + i::Vector<const uint8_t> string) { + if (type == String::kInternalizedString) { + return factory->InternalizeOneByteString(string); + } + return factory->NewStringFromOneByte(string); +} + + +inline i::Handle<i::String> NewString(i::Factory* factory, + String::NewStringType type, + i::Vector<const uint16_t> string) { + if (type == String::kInternalizedString) { + return 
factory->InternalizeTwoByteString(string); + } + return factory->NewStringFromTwoByte(string); +} + + +template<typename Char> +inline Local<String> NewString(Isolate* v8_isolate, + const char* location, + const char* env, + const Char* data, + String::NewStringType type, + int length) { + i::Isolate* isolate = reinterpret_cast<internal::Isolate*>(v8_isolate); + EnsureInitializedForIsolate(isolate, location); + LOG_API(isolate, env); + if (length == 0 && type != String::kUndetectableString) { + return String::Empty(); + } ENTER_V8(isolate); - if (length == -1) length = TwoByteStringLength(data); - i::Handle<i::String> result = - isolate->factory()->NewStringFromTwoByte( - i::Vector<const uint16_t>(data, length)); + if (length == -1) length = StringLength(data); + i::Handle<i::String> result = NewString( + isolate->factory(), type, i::Vector<const Char>(data, length)); + if (type == String::kUndetectableString) { + result->MarkAsUndetectable(); + } return Utils::ToLocal(result); } +} // anonymous namespace -Local<String> v8::String::NewUndetectable(const uint16_t* data, int length) { - i::Isolate* isolate = i::Isolate::Current(); - EnsureInitializedForIsolate(isolate, "v8::String::NewUndetectable()"); - LOG_API(isolate, "String::NewUndetectable(uint16_)"); + +Local<String> String::NewFromUtf8(Isolate* isolate, + const char* data, + NewStringType type, + int length) { + return NewString(isolate, + "v8::String::NewFromUtf8()", + "String::NewFromUtf8", + data, + type, + length); +} + + +Local<String> String::NewFromOneByte(Isolate* isolate, + const uint8_t* data, + NewStringType type, + int length) { + return NewString(isolate, + "v8::String::NewFromOneByte()", + "String::NewFromOneByte", + data, + type, + length); +} + + +Local<String> String::NewFromTwoByte(Isolate* isolate, + const uint16_t* data, + NewStringType type, + int length) { + return NewString(isolate, + "v8::String::NewFromTwoByte()", + "String::NewFromTwoByte", + data, + type, + length); +} + + +Local<String> v8::String::Concat(Handle<String> left, Handle<String> right) { + i::Handle<i::String> left_string = Utils::OpenHandle(*left); + i::Isolate* isolate = left_string->GetIsolate(); + EnsureInitializedForIsolate(isolate, "v8::String::New()"); + LOG_API(isolate, "String::New(char)"); ENTER_V8(isolate); - if (length == -1) length = TwoByteStringLength(data); - i::Handle<i::String> result = - isolate->factory()->NewStringFromTwoByte( - i::Vector<const uint16_t>(data, length)); - result->MarkAsUndetectable(); + i::Handle<i::String> right_string = Utils::OpenHandle(*right); + i::Handle<i::String> result = isolate->factory()->NewConsString(left_string, + right_string); return Utils::ToLocal(result); } @@ -5568,7 +5601,7 @@ Local<v8::Value> v8::Date::New(double time) { i::Isolate* isolate = i::Isolate::Current(); EnsureInitializedForIsolate(isolate, "v8::Date::New()"); LOG_API(isolate, "Date::New"); - if (isnan(time)) { + if (std::isnan(time)) { // Introduce only canonical NaN value into the VM, to avoid signaling NaNs. 
time = i::OS::nan_value(); } @@ -5733,15 +5766,43 @@ Local<Object> Array::CloneElementAt(uint32_t index) { } -Local<String> v8::String::NewSymbol(const char* data, int length) { +size_t v8::ArrayBuffer::ByteLength() const { + i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); + if (IsDeadCheck(isolate, "v8::ArrayBuffer::ByteLength()")) return 0; + i::Handle<i::JSArrayBuffer> obj = Utils::OpenHandle(this); + return static_cast<size_t>(obj->byte_length()->Number()); +} + + +void* v8::ArrayBuffer::Data() const { + i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); + if (IsDeadCheck(isolate, "v8::ArrayBuffer::Data()")) return 0; + i::Handle<i::JSArrayBuffer> obj = Utils::OpenHandle(this); + return obj->backing_store(); +} + + +Local<ArrayBuffer> v8::ArrayBuffer::New(size_t byte_length) { i::Isolate* isolate = i::Isolate::Current(); - EnsureInitializedForIsolate(isolate, "v8::String::NewSymbol()"); - LOG_API(isolate, "String::NewSymbol(char)"); + EnsureInitializedForIsolate(isolate, "v8::ArrayBuffer::New(size_t)"); + LOG_API(isolate, "v8::ArrayBuffer::New(size_t)"); ENTER_V8(isolate); - if (length == -1) length = i::StrLength(data); - i::Handle<i::String> result = isolate->factory()->InternalizeUtf8String( - i::Vector<const char>(data, length)); - return Utils::ToLocal(result); + i::Handle<i::JSArrayBuffer> obj = + isolate->factory()->NewJSArrayBuffer(); + i::Runtime::SetupArrayBufferAllocatingData(isolate, obj, byte_length); + return Utils::ToLocal(obj); +} + + +Local<ArrayBuffer> v8::ArrayBuffer::New(void* data, size_t byte_length) { + i::Isolate* isolate = i::Isolate::Current(); + EnsureInitializedForIsolate(isolate, "v8::ArrayBuffer::New(void*, size_t)"); + LOG_API(isolate, "v8::ArrayBuffer::New(void*, size_t)"); + ENTER_V8(isolate); + i::Handle<i::JSArrayBuffer> obj = + isolate->factory()->NewJSArrayBuffer(); + i::Runtime::SetupArrayBuffer(isolate, obj, data, byte_length); + return Utils::ToLocal(obj); } @@ -5772,7 +5833,7 @@ Local<Symbol> v8::Symbol::New(Isolate* isolate, const char* data, int length) { Local<Number> v8::Number::New(double value) { i::Isolate* isolate = i::Isolate::Current(); EnsureInitializedForIsolate(isolate, "v8::Number::New()"); - if (isnan(value)) { + if (std::isnan(value)) { // Introduce only canonical NaN value into the VM, to avoid signaling NaNs. 
value = i::OS::nan_value(); } @@ -5981,6 +6042,31 @@ v8::Local<v8::Context> Isolate::GetCurrentContext() { } +void Isolate::SetObjectGroupId(const Persistent<Value>& object, + UniqueId id) { + i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(this); + internal_isolate->global_handles()->SetObjectGroupId( + reinterpret_cast<i::Object**>(*object), id); +} + + +void Isolate::SetReferenceFromGroup(UniqueId id, + const Persistent<Value>& object) { + i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(this); + internal_isolate->global_handles() + ->SetReferenceFromGroup(id, reinterpret_cast<i::Object**>(*object)); +} + + +void Isolate::SetReference(const Persistent<Object>& parent, + const Persistent<Value>& child) { + i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(this); + internal_isolate->global_handles()->SetReference( + i::Handle<i::HeapObject>::cast(Utils::OpenHandle(*parent)).location(), + reinterpret_cast<i::Object**>(*child)); +} + + void V8::SetGlobalGCPrologueCallback(GCCallback callback) { i::Isolate* isolate = i::Isolate::Current(); if (IsDeadCheck(isolate, "v8::V8::SetGlobalGCPrologueCallback()")) return; @@ -6116,6 +6202,12 @@ bool V8::IsExecutionTerminating(Isolate* isolate) { } +void V8::CancelTerminateExecution(Isolate* isolate) { + i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate); + i_isolate->stack_guard()->CancelTerminateExecution(); +} + + Isolate* Isolate::GetCurrent() { i::Isolate* isolate = i::Isolate::UncheckedCurrent(); return reinterpret_cast<Isolate*>(isolate); @@ -7174,6 +7266,12 @@ size_t HeapProfiler::GetProfilerMemorySize() { } +void HeapProfiler::SetRetainedObjectInfo(UniqueId id, + RetainedObjectInfo* info) { + reinterpret_cast<i::HeapProfiler*>(this)->SetRetainedObjectInfo(id, info); +} + + v8::Testing::StressType internal::Testing::stress_type_ = v8::Testing::kStressTypeOpt; diff --git a/deps/v8/src/api.h b/deps/v8/src/api.h index 0cd16f1f01..f62541dc03 100644 --- a/deps/v8/src/api.h +++ b/deps/v8/src/api.h @@ -170,6 +170,7 @@ class RegisteredExtension { V(RegExp, JSRegExp) \ V(Object, JSObject) \ V(Array, JSArray) \ + V(ArrayBuffer, JSArrayBuffer) \ V(String, String) \ V(Symbol, Symbol) \ V(Script, Object) \ @@ -205,6 +206,8 @@ class Utils { v8::internal::Handle<v8::internal::JSObject> obj); static inline Local<Array> ToLocal( v8::internal::Handle<v8::internal::JSArray> obj); + static inline Local<ArrayBuffer> ToLocal( + v8::internal::Handle<v8::internal::JSArrayBuffer> obj); static inline Local<Message> MessageToLocal( v8::internal::Handle<v8::internal::Object> obj); static inline Local<StackTrace> StackTraceToLocal( @@ -275,6 +278,7 @@ MAKE_TO_LOCAL(ToLocal, Symbol, Symbol) MAKE_TO_LOCAL(ToLocal, JSRegExp, RegExp) MAKE_TO_LOCAL(ToLocal, JSObject, Object) MAKE_TO_LOCAL(ToLocal, JSArray, Array) +MAKE_TO_LOCAL(ToLocal, JSArrayBuffer, ArrayBuffer) MAKE_TO_LOCAL(ToLocal, FunctionTemplateInfo, FunctionTemplate) MAKE_TO_LOCAL(ToLocal, ObjectTemplateInfo, ObjectTemplate) MAKE_TO_LOCAL(ToLocal, SignatureInfo, Signature) diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc index b473c6b52b..b39d9ee122 100644 --- a/deps/v8/src/arm/assembler-arm.cc +++ b/deps/v8/src/arm/assembler-arm.cc @@ -305,16 +305,20 @@ void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) { // See assembler-arm-inl.h for inlined constructors Operand::Operand(Handle<Object> handle) { +#ifdef DEBUG + Isolate* isolate = Isolate::Current(); +#endif + ALLOW_HANDLE_DEREF(isolate, "using and embedding raw 
address"); rm_ = no_reg; // Verify all Objects referred by code are NOT in new space. Object* obj = *handle; - ASSERT(!HEAP->InNewSpace(obj)); + ASSERT(!isolate->heap()->InNewSpace(obj)); if (obj->IsHeapObject()) { imm32_ = reinterpret_cast<intptr_t>(handle.location()); rmode_ = RelocInfo::EMBEDDED_OBJECT; } else { // no relocation needed - imm32_ = reinterpret_cast<intptr_t>(obj); + imm32_ = reinterpret_cast<intptr_t>(obj); rmode_ = RelocInfo::NONE32; } } diff --git a/deps/v8/src/arm/builtins-arm.cc b/deps/v8/src/arm/builtins-arm.cc index ebb9e1235f..3cc2797e94 100644 --- a/deps/v8/src/arm/builtins-arm.cc +++ b/deps/v8/src/arm/builtins-arm.cc @@ -306,8 +306,7 @@ static void AllocateJSArray(MacroAssembler* masm, // entering the generic code. In both cases argc in r0 needs to be preserved. // Both registers are preserved by this code so no need to differentiate between // construct call and normal call. -static void ArrayNativeCode(MacroAssembler* masm, - Label* call_generic_code) { +void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code) { Counters* counters = masm->isolate()->counters(); Label argc_one_or_more, argc_two_or_more, not_empty_array, empty_array, has_non_smi_element, finish, cant_transition_map, not_double; @@ -532,7 +531,7 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) { } -void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) { +void Builtins::Generate_CommonArrayConstructCode(MacroAssembler* masm) { // ----------- S t a t e ------------- // -- r0 : number of arguments // -- r1 : constructor function @@ -550,51 +549,17 @@ void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) { __ Assert(ne, "Unexpected initial map for Array function"); __ CompareObjectType(r3, r3, r4, MAP_TYPE); __ Assert(eq, "Unexpected initial map for Array function"); - - if (FLAG_optimize_constructed_arrays) { - // We should either have undefined in r2 or a valid jsglobalpropertycell - Label okay_here; - Handle<Object> undefined_sentinel( - masm->isolate()->heap()->undefined_value(), masm->isolate()); - Handle<Map> global_property_cell_map( - masm->isolate()->heap()->global_property_cell_map()); - __ cmp(r2, Operand(undefined_sentinel)); - __ b(eq, &okay_here); - __ ldr(r3, FieldMemOperand(r2, 0)); - __ cmp(r3, Operand(global_property_cell_map)); - __ Assert(eq, "Expected property cell in register ebx"); - __ bind(&okay_here); - } - } - - if (FLAG_optimize_constructed_arrays) { - Label not_zero_case, not_one_case; - __ tst(r0, r0); - __ b(ne, ¬_zero_case); - ArrayNoArgumentConstructorStub no_argument_stub; - __ TailCallStub(&no_argument_stub); - - __ bind(¬_zero_case); - __ cmp(r0, Operand(1)); - __ b(gt, ¬_one_case); - ArraySingleArgumentConstructorStub single_argument_stub; - __ TailCallStub(&single_argument_stub); - - __ bind(¬_one_case); - ArrayNArgumentsConstructorStub n_argument_stub; - __ TailCallStub(&n_argument_stub); - } else { - Label generic_constructor; - // Run the native code for the Array function called as a constructor. - ArrayNativeCode(masm, &generic_constructor); - - // Jump to the generic construct code in case the specialized code cannot - // handle the construction. - __ bind(&generic_constructor); - Handle<Code> generic_construct_stub = - masm->isolate()->builtins()->JSConstructStubGeneric(); - __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET); } + Label generic_constructor; + // Run the native code for the Array function called as a constructor. 
+ ArrayNativeCode(masm, &generic_constructor); + + // Jump to the generic construct code in case the specialized code cannot + // handle the construction. + __ bind(&generic_constructor); + Handle<Code> generic_construct_stub = + masm->isolate()->builtins()->JSConstructStubGeneric(); + __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET); } diff --git a/deps/v8/src/arm/code-stubs-arm.cc b/deps/v8/src/arm/code-stubs-arm.cc index ef2dbb3892..cc6caca3d8 100644 --- a/deps/v8/src/arm/code-stubs-arm.cc +++ b/deps/v8/src/arm/code-stubs-arm.cc @@ -96,16 +96,33 @@ void TransitionElementsKindStub::InitializeInterfaceDescriptor( } -static void InitializeArrayConstructorDescriptor(Isolate* isolate, +void CompareNilICStub::InitializeInterfaceDescriptor( + Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { + static Register registers[] = { r0 }; + descriptor->register_param_count_ = 1; + descriptor->register_params_ = registers; + descriptor->deoptimization_handler_ = + FUNCTION_ADDR(CompareNilIC_Miss); + descriptor->miss_handler_ = + ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate); +} + + +static void InitializeArrayConstructorDescriptor( + Isolate* isolate, + CodeStubInterfaceDescriptor* descriptor, + int constant_stack_parameter_count) { // register state - // r1 -- constructor function + // r0 -- number of arguments // r2 -- type info cell with elements kind - // r0 -- number of arguments to the constructor function - static Register registers[] = { r1, r2 }; - descriptor->register_param_count_ = 2; - // stack param count needs (constructor pointer, and single argument) - descriptor->stack_parameter_count_ = &r0; + static Register registers[] = { r2 }; + descriptor->register_param_count_ = 1; + if (constant_stack_parameter_count != 0) { + // stack param count needs (constructor pointer, and single argument) + descriptor->stack_parameter_count_ = &r0; + } + descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count; descriptor->register_params_ = registers; descriptor->function_mode_ = JS_FUNCTION_STUB_MODE; descriptor->deoptimization_handler_ = @@ -116,21 +133,21 @@ static void InitializeArrayConstructorDescriptor(Isolate* isolate, void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor( Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { - InitializeArrayConstructorDescriptor(isolate, descriptor); + InitializeArrayConstructorDescriptor(isolate, descriptor, 0); } void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor( Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { - InitializeArrayConstructorDescriptor(isolate, descriptor); + InitializeArrayConstructorDescriptor(isolate, descriptor, 1); } void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor( Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { - InitializeArrayConstructorDescriptor(isolate, descriptor); + InitializeArrayConstructorDescriptor(isolate, descriptor, -1); } @@ -161,6 +178,30 @@ static void EmitCheckForHeapNumber(MacroAssembler* masm, Register operand, } +void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) { + // Update the static counter each time a new code stub is generated. + Isolate* isolate = masm->isolate(); + isolate->counters()->code_stubs()->Increment(); + + CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor(isolate); + int param_count = descriptor->register_param_count_; + { + // Call the runtime system in a fresh internal frame. 
+ FrameScope scope(masm, StackFrame::INTERNAL); + ASSERT(descriptor->register_param_count_ == 0 || + r0.is(descriptor->register_params_[param_count - 1])); + // Push arguments + for (int i = 0; i < param_count; ++i) { + __ push(descriptor->register_params_[i]); + } + ExternalReference miss = descriptor->miss_handler_; + __ CallExternalReference(miss, descriptor->register_param_count_); + } + + __ Ret(); +} + + void ToNumberStub::Generate(MacroAssembler* masm) { // The ToNumber stub takes one argument in eax. Label check_heap_number, call_builtin; @@ -506,318 +547,6 @@ void ConvertToDoubleStub::Generate(MacroAssembler* masm) { } -void FloatingPointHelper::LoadSmis(MacroAssembler* masm, - FloatingPointHelper::Destination destination, - Register scratch1, - Register scratch2) { - __ mov(scratch1, Operand(r0, ASR, kSmiTagSize)); - __ vmov(d7.high(), scratch1); - __ vcvt_f64_s32(d7, d7.high()); - __ mov(scratch1, Operand(r1, ASR, kSmiTagSize)); - __ vmov(d6.high(), scratch1); - __ vcvt_f64_s32(d6, d6.high()); - if (destination == kCoreRegisters) { - __ vmov(r2, r3, d7); - __ vmov(r0, r1, d6); - } -} - - -void FloatingPointHelper::LoadNumber(MacroAssembler* masm, - Destination destination, - Register object, - DwVfpRegister dst, - Register dst1, - Register dst2, - Register heap_number_map, - Register scratch1, - Register scratch2, - Label* not_number) { - __ AssertRootValue(heap_number_map, - Heap::kHeapNumberMapRootIndex, - "HeapNumberMap register clobbered."); - - Label is_smi, done; - - // Smi-check - __ UntagAndJumpIfSmi(scratch1, object, &is_smi); - // Heap number check - __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number); - - // Handle loading a double from a heap number. - if (destination == kVFPRegisters) { - // Load the double from tagged HeapNumber to double register. - __ sub(scratch1, object, Operand(kHeapObjectTag)); - __ vldr(dst, scratch1, HeapNumber::kValueOffset); - } else { - ASSERT(destination == kCoreRegisters); - // Load the double from heap number to dst1 and dst2 in double format. - __ Ldrd(dst1, dst2, FieldMemOperand(object, HeapNumber::kValueOffset)); - } - __ jmp(&done); - - // Handle loading a double from a smi. - __ bind(&is_smi); - // Convert smi to double using VFP instructions. - __ vmov(dst.high(), scratch1); - __ vcvt_f64_s32(dst, dst.high()); - if (destination == kCoreRegisters) { - // Load the converted smi to dst1 and dst2 in double format. 
- __ vmov(dst1, dst2, dst); - } - - __ bind(&done); -} - - -void FloatingPointHelper::ConvertNumberToInt32(MacroAssembler* masm, - Register object, - Register dst, - Register heap_number_map, - Register scratch1, - Register scratch2, - Register scratch3, - DwVfpRegister double_scratch1, - DwVfpRegister double_scratch2, - Label* not_number) { - Label done; - __ AssertRootValue(heap_number_map, - Heap::kHeapNumberMapRootIndex, - "HeapNumberMap register clobbered."); - - __ UntagAndJumpIfSmi(dst, object, &done); - __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kMapOffset)); - __ cmp(scratch1, heap_number_map); - __ b(ne, not_number); - __ ECMAConvertNumberToInt32(object, dst, - scratch1, scratch2, scratch3, - double_scratch1, double_scratch2); - __ bind(&done); -} - - -void FloatingPointHelper::ConvertIntToDouble(MacroAssembler* masm, - Register int_scratch, - Destination destination, - DwVfpRegister double_dst, - Register dst_mantissa, - Register dst_exponent, - Register scratch2, - SwVfpRegister single_scratch) { - ASSERT(!int_scratch.is(scratch2)); - ASSERT(!int_scratch.is(dst_mantissa)); - ASSERT(!int_scratch.is(dst_exponent)); - - Label done; - - __ vmov(single_scratch, int_scratch); - __ vcvt_f64_s32(double_dst, single_scratch); - if (destination == kCoreRegisters) { - __ vmov(dst_mantissa, dst_exponent, double_dst); - } - __ bind(&done); -} - - -void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm, - Register object, - Destination destination, - DwVfpRegister double_dst, - DwVfpRegister double_scratch, - Register dst_mantissa, - Register dst_exponent, - Register heap_number_map, - Register scratch1, - Register scratch2, - SwVfpRegister single_scratch, - Label* not_int32) { - ASSERT(!scratch1.is(object) && !scratch2.is(object)); - ASSERT(!scratch1.is(scratch2)); - ASSERT(!heap_number_map.is(object) && - !heap_number_map.is(scratch1) && - !heap_number_map.is(scratch2)); - - Label done, obj_is_not_smi; - - __ JumpIfNotSmi(object, &obj_is_not_smi); - __ SmiUntag(scratch1, object); - ConvertIntToDouble(masm, scratch1, destination, double_dst, dst_mantissa, - dst_exponent, scratch2, single_scratch); - __ b(&done); - - __ bind(&obj_is_not_smi); - __ AssertRootValue(heap_number_map, - Heap::kHeapNumberMapRootIndex, - "HeapNumberMap register clobbered."); - __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32); - - // Load the number. - // Load the double value. - __ sub(scratch1, object, Operand(kHeapObjectTag)); - __ vldr(double_dst, scratch1, HeapNumber::kValueOffset); - - __ TestDoubleIsInt32(double_dst, double_scratch); - // Jump to not_int32 if the operation did not succeed. 
- __ b(ne, not_int32); - - if (destination == kCoreRegisters) { - __ vmov(dst_mantissa, dst_exponent, double_dst); - } - __ bind(&done); -} - - -void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm, - Register object, - Register dst, - Register heap_number_map, - Register scratch1, - Register scratch2, - Register scratch3, - DwVfpRegister double_scratch0, - DwVfpRegister double_scratch1, - Label* not_int32) { - ASSERT(!dst.is(object)); - ASSERT(!scratch1.is(object) && !scratch2.is(object) && !scratch3.is(object)); - ASSERT(!scratch1.is(scratch2) && - !scratch1.is(scratch3) && - !scratch2.is(scratch3)); - - Label done, maybe_undefined; - - __ UntagAndJumpIfSmi(dst, object, &done); - - __ AssertRootValue(heap_number_map, - Heap::kHeapNumberMapRootIndex, - "HeapNumberMap register clobbered."); - - __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, &maybe_undefined); - - // Object is a heap number. - // Convert the floating point value to a 32-bit integer. - // Load the double value. - __ sub(scratch1, object, Operand(kHeapObjectTag)); - __ vldr(double_scratch0, scratch1, HeapNumber::kValueOffset); - - __ TryDoubleToInt32Exact(dst, double_scratch0, double_scratch1); - // Jump to not_int32 if the operation did not succeed. - __ b(ne, not_int32); - __ b(&done); - - __ bind(&maybe_undefined); - __ CompareRoot(object, Heap::kUndefinedValueRootIndex); - __ b(ne, not_int32); - // |undefined| is truncated to 0. - __ mov(dst, Operand(Smi::FromInt(0))); - // Fall through. - - __ bind(&done); -} - - -void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm, - Register src_exponent, - Register src_mantissa, - Register dst, - Register scratch, - Label* not_int32) { - // Get exponent alone in scratch. - __ Ubfx(scratch, - src_exponent, - HeapNumber::kExponentShift, - HeapNumber::kExponentBits); - - // Substract the bias from the exponent. - __ sub(scratch, scratch, Operand(HeapNumber::kExponentBias), SetCC); - - // src1: higher (exponent) part of the double value. - // src2: lower (mantissa) part of the double value. - // scratch: unbiased exponent. - - // Fast cases. Check for obvious non 32-bit integer values. - // Negative exponent cannot yield 32-bit integers. - __ b(mi, not_int32); - // Exponent greater than 31 cannot yield 32-bit integers. - // Also, a positive value with an exponent equal to 31 is outside of the - // signed 32-bit integer range. - // Another way to put it is that if (exponent - signbit) > 30 then the - // number cannot be represented as an int32. - Register tmp = dst; - __ sub(tmp, scratch, Operand(src_exponent, LSR, 31)); - __ cmp(tmp, Operand(30)); - __ b(gt, not_int32); - // - Bits [21:0] in the mantissa are not null. - __ tst(src_mantissa, Operand(0x3fffff)); - __ b(ne, not_int32); - - // Otherwise the exponent needs to be big enough to shift left all the - // non zero bits left. So we need the (30 - exponent) last bits of the - // 31 higher bits of the mantissa to be null. - // Because bits [21:0] are null, we can check instead that the - // (32 - exponent) last bits of the 32 higher bits of the mantissa are null. - - // Get the 32 higher bits of the mantissa in dst. - __ Ubfx(dst, - src_mantissa, - HeapNumber::kMantissaBitsInTopWord, - 32 - HeapNumber::kMantissaBitsInTopWord); - __ orr(dst, - dst, - Operand(src_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord)); - - // Create the mask and test the lower bits (of the higher bits). 
- __ rsb(scratch, scratch, Operand(32)); - __ mov(src_mantissa, Operand(1)); - __ mov(src_exponent, Operand(src_mantissa, LSL, scratch)); - __ sub(src_exponent, src_exponent, Operand(1)); - __ tst(dst, src_exponent); - __ b(ne, not_int32); -} - - -void FloatingPointHelper::CallCCodeForDoubleOperation( - MacroAssembler* masm, - Token::Value op, - Register heap_number_result, - Register scratch) { - // Using core registers: - // r0: Left value (least significant part of mantissa). - // r1: Left value (sign, exponent, top of mantissa). - // r2: Right value (least significant part of mantissa). - // r3: Right value (sign, exponent, top of mantissa). - - // Assert that heap_number_result is callee-saved. - // We currently always use r5 to pass it. - ASSERT(heap_number_result.is(r5)); - - // Push the current return address before the C call. Return will be - // through pop(pc) below. - __ push(lr); - __ PrepareCallCFunction(0, 2, scratch); - if (masm->use_eabi_hardfloat()) { - __ vmov(d0, r0, r1); - __ vmov(d1, r2, r3); - } - { - AllowExternalCallThatCantCauseGC scope(masm); - __ CallCFunction( - ExternalReference::double_fp_operation(op, masm->isolate()), 0, 2); - } - // Store answer in the overwritable heap number. Double returned in - // registers r0 and r1 or in d0. - if (masm->use_eabi_hardfloat()) { - __ vstr(d0, - FieldMemOperand(heap_number_result, HeapNumber::kValueOffset)); - } else { - __ Strd(r0, r1, FieldMemOperand(heap_number_result, - HeapNumber::kValueOffset)); - } - // Place heap_number_result in r0 and return to the pushed return address. - __ mov(r0, Operand(heap_number_result)); - __ pop(pc); -} - - bool WriteInt32ToHeapNumberStub::IsPregenerated() { // These variants are compiled ahead of time. See next method. if (the_int_.is(r1) && the_heap_number_.is(r0) && scratch_.is(r2)) { @@ -1055,57 +784,6 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm, } -void EmitNanCheck(MacroAssembler* masm, Label* lhs_not_nan, Condition cond) { - bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset); - Register rhs_exponent = exp_first ? r0 : r1; - Register lhs_exponent = exp_first ? r2 : r3; - Register rhs_mantissa = exp_first ? r1 : r0; - Register lhs_mantissa = exp_first ? r3 : r2; - Label one_is_nan, neither_is_nan; - - __ Sbfx(r4, - lhs_exponent, - HeapNumber::kExponentShift, - HeapNumber::kExponentBits); - // NaNs have all-one exponents so they sign extend to -1. - __ cmp(r4, Operand(-1)); - __ b(ne, lhs_not_nan); - __ mov(r4, - Operand(lhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord), - SetCC); - __ b(ne, &one_is_nan); - __ cmp(lhs_mantissa, Operand::Zero()); - __ b(ne, &one_is_nan); - - __ bind(lhs_not_nan); - __ Sbfx(r4, - rhs_exponent, - HeapNumber::kExponentShift, - HeapNumber::kExponentBits); - // NaNs have all-one exponents so they sign extend to -1. - __ cmp(r4, Operand(-1)); - __ b(ne, &neither_is_nan); - __ mov(r4, - Operand(rhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord), - SetCC); - __ b(ne, &one_is_nan); - __ cmp(rhs_mantissa, Operand::Zero()); - __ b(eq, &neither_is_nan); - - __ bind(&one_is_nan); - // NaN comparisons always fail. - // Load whatever we need in r0 to make the comparison fail. - if (cond == lt || cond == le) { - __ mov(r0, Operand(GREATER)); - } else { - __ mov(r0, Operand(LESS)); - } - __ Ret(); - - __ bind(&neither_is_nan); -} - - // See comment at call site. 
static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, Register lhs, @@ -1627,33 +1305,19 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) { const Register scratch = r1; if (save_doubles_ == kSaveFPRegs) { - // Check CPU flags for number of registers, setting the Z condition flag. - __ CheckFor32DRegs(scratch); - - __ sub(sp, sp, Operand(kDoubleSize * DwVfpRegister::kMaxNumRegisters)); - for (int i = 0; i < DwVfpRegister::kMaxNumRegisters; i++) { - DwVfpRegister reg = DwVfpRegister::from_code(i); - __ vstr(reg, MemOperand(sp, i * kDoubleSize), i < 16 ? al : ne); - } + __ SaveFPRegs(sp, scratch); } const int argument_count = 1; const int fp_argument_count = 0; AllowExternalCallThatCantCauseGC scope(masm); __ PrepareCallCFunction(argument_count, fp_argument_count, scratch); - __ mov(r0, Operand(ExternalReference::isolate_address())); + __ mov(r0, Operand(ExternalReference::isolate_address(masm->isolate()))); __ CallCFunction( ExternalReference::store_buffer_overflow_function(masm->isolate()), argument_count); if (save_doubles_ == kSaveFPRegs) { - // Check CPU flags for number of registers, setting the Z condition flag. - __ CheckFor32DRegs(scratch); - - for (int i = 0; i < DwVfpRegister::kMaxNumRegisters; i++) { - DwVfpRegister reg = DwVfpRegister::from_code(i); - __ vldr(reg, MemOperand(sp, i * kDoubleSize), i < 16 ? al : ne); - } - __ add(sp, sp, Operand(kDoubleSize * DwVfpRegister::kMaxNumRegisters)); + __ RestoreFPRegs(sp, scratch); } __ ldm(ia_w, sp, kCallerSaved | pc.bit()); // Also pop pc to get Ret(0). } @@ -1835,8 +1499,10 @@ void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm, void UnaryOpStub::GenerateHeapNumberCodeBitNot(MacroAssembler* masm, Label* slow) { EmitCheckForHeapNumber(masm, r0, r1, r6, slow); + // Convert the heap number in r0 to an untagged integer in r1. - __ ECMAConvertNumberToInt32(r0, r1, r2, r3, r4, d0, d1); + __ vldr(d0, FieldMemOperand(r0, HeapNumber::kValueOffset)); + __ ECMAToInt32(r1, d0, r2, r3, r4, d1); // Do the bitwise operation and check if the result fits in a smi. Label try_float; @@ -1928,6 +1594,50 @@ void UnaryOpStub::GenerateGenericCodeFallback(MacroAssembler* masm) { } +// Generates code to call a C function to do a double operation. +// This code never falls through, but returns with a heap number containing +// the result in r0. +// Register heapnumber_result must be a heap number in which the +// result of the operation will be stored. +// Requires the following layout on entry: +// d0: Left value. +// d1: Right value. +// If soft float ABI, use also r0, r1, r2, r3. +static void CallCCodeForDoubleOperation(MacroAssembler* masm, + Token::Value op, + Register heap_number_result, + Register scratch) { + // Assert that heap_number_result is callee-saved. + // We currently always use r5 to pass it. + ASSERT(heap_number_result.is(r5)); + + // Push the current return address before the C call. Return will be + // through pop(pc) below. + __ push(lr); + __ PrepareCallCFunction(0, 2, scratch); + if (!masm->use_eabi_hardfloat()) { + __ vmov(r0, r1, d0); + __ vmov(r2, r3, d1); + } + { + AllowExternalCallThatCantCauseGC scope(masm); + __ CallCFunction( + ExternalReference::double_fp_operation(op, masm->isolate()), 0, 2); + } + // Store answer in the overwritable heap number. Double returned in + // registers r0 and r1 or in d0. 
+ if (masm->use_eabi_hardfloat()) { + __ vstr(d0, FieldMemOperand(heap_number_result, HeapNumber::kValueOffset)); + } else { + __ Strd(r0, r1, + FieldMemOperand(heap_number_result, HeapNumber::kValueOffset)); + } + // Place heap_number_result in r0 and return to the pushed return address. + __ mov(r0, Operand(heap_number_result)); + __ pop(pc); +} + + void BinaryOpStub::Initialize() { platform_specific_bit_ = true; // VFP2 is a base requirement for V8 } @@ -2205,64 +1915,56 @@ void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm, case Token::MUL: case Token::DIV: case Token::MOD: { - // Load left and right operands into d6 and d7 or r0/r1 and r2/r3 - // depending on whether VFP3 is available or not. - FloatingPointHelper::Destination destination = - op != Token::MOD ? - FloatingPointHelper::kVFPRegisters : - FloatingPointHelper::kCoreRegisters; - // Allocate new heap number for result. Register result = r5; BinaryOpStub_GenerateHeapResultAllocation( masm, result, heap_number_map, scratch1, scratch2, gc_required, mode); - // Load the operands. + // Load left and right operands into d0 and d1. if (smi_operands) { - FloatingPointHelper::LoadSmis(masm, destination, scratch1, scratch2); + __ SmiUntag(scratch1, right); + __ vmov(d1.high(), scratch1); + __ vcvt_f64_s32(d1, d1.high()); + __ SmiUntag(scratch1, left); + __ vmov(d0.high(), scratch1); + __ vcvt_f64_s32(d0, d0.high()); } else { - // Load right operand to d7 or r2/r3. + // Load right operand into d1. if (right_type == BinaryOpIC::INT32) { - FloatingPointHelper::LoadNumberAsInt32Double( - masm, right, destination, d7, d8, r2, r3, heap_number_map, - scratch1, scratch2, s0, miss); + __ LoadNumberAsInt32Double( + right, d1, heap_number_map, scratch1, d8, miss); } else { Label* fail = (right_type == BinaryOpIC::NUMBER) ? miss : not_numbers; - FloatingPointHelper::LoadNumber( - masm, destination, right, d7, r2, r3, heap_number_map, - scratch1, scratch2, fail); + __ LoadNumber(right, d1, heap_number_map, scratch1, fail); } - // Load left operand to d6 or r0/r1. This keeps r0/r1 intact if it - // jumps to |miss|. + // Load left operand into d0. if (left_type == BinaryOpIC::INT32) { - FloatingPointHelper::LoadNumberAsInt32Double( - masm, left, destination, d6, d8, r0, r1, heap_number_map, - scratch1, scratch2, s0, miss); + __ LoadNumberAsInt32Double( + left, d0, heap_number_map, scratch1, d8, miss); } else { Label* fail = (left_type == BinaryOpIC::NUMBER) ? miss : not_numbers; - FloatingPointHelper::LoadNumber( - masm, destination, left, d6, r0, r1, heap_number_map, - scratch1, scratch2, fail); + __ LoadNumber( + left, d0, heap_number_map, scratch1, fail); } } // Calculate the result. - if (destination == FloatingPointHelper::kVFPRegisters) { + if (op != Token::MOD) { // Using VFP registers: - // d6: Left value - // d7: Right value + // d0: Left value + // d1: Right value switch (op) { case Token::ADD: - __ vadd(d5, d6, d7); + __ vadd(d5, d0, d1); break; case Token::SUB: - __ vsub(d5, d6, d7); + __ vsub(d5, d0, d1); break; case Token::MUL: - __ vmul(d5, d6, d7); + __ vmul(d5, d0, d1); break; case Token::DIV: - __ vdiv(d5, d6, d7); + __ vdiv(d5, d0, d1); break; default: UNREACHABLE(); @@ -2274,10 +1976,7 @@ void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm, __ Ret(); } else { // Call the C function to handle the double operation. 
- FloatingPointHelper::CallCCodeForDoubleOperation(masm, - op, - result, - scratch1); + CallCCodeForDoubleOperation(masm, op, result, scratch1); if (FLAG_debug_code) { __ stop("Unreachable code."); } @@ -2295,26 +1994,12 @@ void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm, __ SmiUntag(r2, right); } else { // Convert operands to 32-bit integers. Right in r2 and left in r3. - FloatingPointHelper::ConvertNumberToInt32(masm, - left, - r3, - heap_number_map, - scratch1, - scratch2, - scratch3, - d0, - d1, - not_numbers); - FloatingPointHelper::ConvertNumberToInt32(masm, - right, - r2, - heap_number_map, - scratch1, - scratch2, - scratch3, - d0, - d1, - not_numbers); + __ ConvertNumberToInt32( + left, r3, heap_number_map, + scratch1, scratch2, scratch3, d0, d1, not_numbers); + __ ConvertNumberToInt32( + right, r2, heap_number_map, + scratch1, scratch2, scratch3, d0, d1, not_numbers); } Label result_not_a_smi; @@ -2533,49 +2218,25 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { // Load both operands and check that they are 32-bit integer. // Jump to type transition if they are not. The registers r0 and r1 (right // and left) are preserved for the runtime call. - FloatingPointHelper::Destination destination = (op_ != Token::MOD) - ? FloatingPointHelper::kVFPRegisters - : FloatingPointHelper::kCoreRegisters; - - FloatingPointHelper::LoadNumberAsInt32Double(masm, - right, - destination, - d7, - d8, - r2, - r3, - heap_number_map, - scratch1, - scratch2, - s0, - &transition); - FloatingPointHelper::LoadNumberAsInt32Double(masm, - left, - destination, - d6, - d8, - r4, - r5, - heap_number_map, - scratch1, - scratch2, - s0, - &transition); - - if (destination == FloatingPointHelper::kVFPRegisters) { + __ LoadNumberAsInt32Double( + right, d1, heap_number_map, scratch1, d8, &transition); + __ LoadNumberAsInt32Double( + left, d0, heap_number_map, scratch1, d8, &transition); + + if (op_ != Token::MOD) { Label return_heap_number; switch (op_) { case Token::ADD: - __ vadd(d5, d6, d7); + __ vadd(d5, d0, d1); break; case Token::SUB: - __ vsub(d5, d6, d7); + __ vsub(d5, d0, d1); break; case Token::MUL: - __ vmul(d5, d6, d7); + __ vmul(d5, d0, d1); break; case Token::DIV: - __ vdiv(d5, d6, d7); + __ vdiv(d5, d0, d1); break; default: UNREACHABLE(); @@ -2601,13 +2262,15 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { __ add(scratch2, scratch1, Operand(0x40000000), SetCC); // If not try to return a heap number. __ b(mi, &return_heap_number); - // Check for minus zero. Return heap number for minus zero. + // Check for minus zero. Return heap number for minus zero if + // double results are allowed; otherwise transition. Label not_zero; __ cmp(scratch1, Operand::Zero()); __ b(ne, ¬_zero); __ vmov(scratch2, d5.high()); __ tst(scratch2, Operand(HeapNumber::kSignMask)); - __ b(ne, &return_heap_number); + __ b(ne, result_type_ <= BinaryOpIC::INT32 ? &transition + : &return_heap_number); __ bind(¬_zero); // Tag the result and return. @@ -2620,22 +2283,19 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { __ bind(&return_heap_number); // Return a heap number, or fall through to type transition or runtime // call if we can't. - if (result_type_ >= ((op_ == Token::DIV) ? BinaryOpIC::NUMBER - : BinaryOpIC::INT32)) { - // We are using vfp registers so r5 is available. 
- heap_number_result = r5; - BinaryOpStub_GenerateHeapResultAllocation(masm, - heap_number_result, - heap_number_map, - scratch1, - scratch2, - &call_runtime, - mode_); - __ sub(r0, heap_number_result, Operand(kHeapObjectTag)); - __ vstr(d5, r0, HeapNumber::kValueOffset); - __ mov(r0, heap_number_result); - __ Ret(); - } + // We are using vfp registers so r5 is available. + heap_number_result = r5; + BinaryOpStub_GenerateHeapResultAllocation(masm, + heap_number_result, + heap_number_map, + scratch1, + scratch2, + &call_runtime, + mode_); + __ sub(r0, heap_number_result, Operand(kHeapObjectTag)); + __ vstr(d5, r0, HeapNumber::kValueOffset); + __ mov(r0, heap_number_result); + __ Ret(); // A DIV operation expecting an integer result falls through // to type transition. @@ -2661,8 +2321,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { __ Pop(r1, r0); // Call the C function to handle the double operation. - FloatingPointHelper::CallCCodeForDoubleOperation( - masm, op_, heap_number_result, scratch1); + CallCCodeForDoubleOperation(masm, op_, heap_number_result, scratch1); if (FLAG_debug_code) { __ stop("Unreachable code."); } @@ -2682,30 +2341,13 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { case Token::SHR: case Token::SHL: { Label return_heap_number; - Register scratch3 = r5; // Convert operands to 32-bit integers. Right in r2 and left in r3. The // registers r0 and r1 (right and left) are preserved for the runtime // call. - FloatingPointHelper::LoadNumberAsInt32(masm, - left, - r3, - heap_number_map, - scratch1, - scratch2, - scratch3, - d0, - d1, - &transition); - FloatingPointHelper::LoadNumberAsInt32(masm, - right, - r2, - heap_number_map, - scratch1, - scratch2, - scratch3, - d0, - d1, - &transition); + __ LoadNumberAsInt32(left, r3, heap_number_map, + scratch1, d0, d1, &transition); + __ LoadNumberAsInt32(right, r2, heap_number_map, + scratch1, d0, d1, &transition); // The ECMA-262 standard specifies that, for shift operations, only the // 5 least significant bits of the shift value should be used. @@ -3385,6 +3027,9 @@ void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) { StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate); StubFailureTrampolineStub::GenerateAheadOfTime(isolate); RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate); + if (FLAG_optimize_constructed_arrays) { + ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate); + } } @@ -3477,7 +3122,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm, } #endif - __ mov(r2, Operand(ExternalReference::isolate_address())); + __ mov(r2, Operand(ExternalReference::isolate_address(isolate))); // To let the GC traverse the return address of the exit frames, we need to // know where the return address is. The CEntryStub is unmovable, so @@ -4694,7 +4339,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { // Arguments are before that on the stack or in registers. // Argument 9 (sp[20]): Pass current isolate address. - __ mov(r0, Operand(ExternalReference::isolate_address())); + __ mov(r0, Operand(ExternalReference::isolate_address(isolate))); __ str(r0, MemOperand(sp, 5 * kPointerSize)); // Argument 8 (sp[16]): Indicate that this is a direct call from JavaScript. 
@@ -5089,7 +4734,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) { TypeFeedbackCells::MonomorphicArraySentinel(masm->isolate(), LAST_FAST_ELEMENTS_KIND); __ cmp(r3, Operand(terminal_kind_sentinel)); - __ b(ne, &miss); + __ b(gt, &miss); // Make sure the function is the Array() function __ LoadArrayFunction(r3); __ cmp(r1, r3); @@ -6305,16 +5950,16 @@ void StringAddStub::Generate(MacroAssembler* masm) { __ bind(&non_ascii); // At least one of the strings is two-byte. Check whether it happens - // to contain only ASCII characters. + // to contain only one byte characters. // r4: first instance type. // r5: second instance type. - __ tst(r4, Operand(kAsciiDataHintMask)); - __ tst(r5, Operand(kAsciiDataHintMask), ne); + __ tst(r4, Operand(kOneByteDataHintMask)); + __ tst(r5, Operand(kOneByteDataHintMask), ne); __ b(ne, &ascii_data); __ eor(r4, r4, Operand(r5)); - STATIC_ASSERT(kOneByteStringTag != 0 && kAsciiDataHintTag != 0); - __ and_(r4, r4, Operand(kOneByteStringTag | kAsciiDataHintTag)); - __ cmp(r4, Operand(kOneByteStringTag | kAsciiDataHintTag)); + STATIC_ASSERT(kOneByteStringTag != 0 && kOneByteDataHintTag != 0); + __ and_(r4, r4, Operand(kOneByteStringTag | kOneByteDataHintTag)); + __ cmp(r4, Operand(kOneByteStringTag | kOneByteDataHintTag)); __ b(eq, &ascii_data); // Allocate a two byte cons string. @@ -7170,6 +6815,9 @@ void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime( Isolate* isolate) { StoreBufferOverflowStub stub1(kDontSaveFPRegs); stub1.GetCode(isolate)->set_is_pregenerated(true); + // Hydrogen code stubs need stub2 at snapshot time. + StoreBufferOverflowStub stub2(kSaveFPRegs); + stub2.GetCode(isolate)->set_is_pregenerated(true); } @@ -7288,7 +6936,7 @@ void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) { __ Move(address, regs_.address()); __ Move(r0, regs_.object()); __ Move(r1, address); - __ mov(r2, Operand(ExternalReference::isolate_address())); + __ mov(r2, Operand(ExternalReference::isolate_address(masm->isolate()))); AllowExternalCallThatCantCauseGC scope(masm); if (mode == INCREMENTAL_COMPACTION) { @@ -7445,10 +7093,7 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) { // Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS. __ bind(&double_elements); __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset)); - __ StoreNumberToDoubleElements(r0, r3, - // Overwrites all regs after this. - r5, r9, r6, r7, r2, - &slow_elements); + __ StoreNumberToDoubleElements(r0, r3, r5, r6, &slow_elements); __ Ret(); } @@ -7528,6 +7173,196 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) { __ Ret(); } + +template<class T> +static void CreateArrayDispatch(MacroAssembler* masm) { + int last_index = GetSequenceIndexFromFastElementsKind( + TERMINAL_FAST_ELEMENTS_KIND); + for (int i = 0; i <= last_index; ++i) { + Label next; + ElementsKind kind = GetFastElementsKindFromSequenceIndex(i); + __ cmp(r3, Operand(kind)); + __ b(ne, &next); + T stub(kind); + __ TailCallStub(&stub); + __ bind(&next); + } + + // If we reached this point there is a problem. + __ Abort("Unexpected ElementsKind in array constructor"); +} + + +static void CreateArrayDispatchOneArgument(MacroAssembler* masm) { + // r2 - type info cell + // r3 - kind + // r0 - number of arguments + // r1 - constructor? 
+ // sp[0] - last argument + ASSERT(FAST_SMI_ELEMENTS == 0); + ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1); + ASSERT(FAST_ELEMENTS == 2); + ASSERT(FAST_HOLEY_ELEMENTS == 3); + ASSERT(FAST_DOUBLE_ELEMENTS == 4); + ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5); + + Handle<Object> undefined_sentinel( + masm->isolate()->heap()->undefined_value(), + masm->isolate()); + + // is the low bit set? If so, we are holey and that is good. + __ tst(r3, Operand(1)); + Label normal_sequence; + __ b(ne, &normal_sequence); + + // look at the first argument + __ ldr(r5, MemOperand(sp, 0)); + __ cmp(r5, Operand::Zero()); + __ b(eq, &normal_sequence); + + // We are going to create a holey array, but our kind is non-holey. + // Fix kind and retry + __ add(r3, r3, Operand(1)); + __ cmp(r2, Operand(undefined_sentinel)); + __ b(eq, &normal_sequence); + + // Save the resulting elements kind in type info + __ SmiTag(r3); + __ str(r3, FieldMemOperand(r2, kPointerSize)); + __ SmiUntag(r3); + + __ bind(&normal_sequence); + int last_index = GetSequenceIndexFromFastElementsKind( + TERMINAL_FAST_ELEMENTS_KIND); + for (int i = 0; i <= last_index; ++i) { + Label next; + ElementsKind kind = GetFastElementsKindFromSequenceIndex(i); + __ cmp(r3, Operand(kind)); + __ b(ne, &next); + ArraySingleArgumentConstructorStub stub(kind); + __ TailCallStub(&stub); + __ bind(&next); + } + + // If we reached this point there is a problem. + __ Abort("Unexpected ElementsKind in array constructor"); +} + + +template<class T> +static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) { + int to_index = GetSequenceIndexFromFastElementsKind( + TERMINAL_FAST_ELEMENTS_KIND); + for (int i = 0; i <= to_index; ++i) { + ElementsKind kind = GetFastElementsKindFromSequenceIndex(i); + T stub(kind); + stub.GetCode(isolate)->set_is_pregenerated(true); + } +} + + +void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) { + ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>( + isolate); + ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>( + isolate); + ArrayConstructorStubAheadOfTimeHelper<ArrayNArgumentsConstructorStub>( + isolate); +} + + +void ArrayConstructorStub::Generate(MacroAssembler* masm) { + // ----------- S t a t e ------------- + // -- r0 : argc (only if argument_count_ == ANY) + // -- r1 : constructor + // -- r2 : type info cell + // -- sp[0] : return address + // -- sp[4] : last argument + // ----------------------------------- + Handle<Object> undefined_sentinel( + masm->isolate()->heap()->undefined_value(), + masm->isolate()); + + if (FLAG_debug_code) { + // The array construct code is only set for the global and natives + // builtin Array functions which always have maps. + + // Initial map for the builtin Array function should be a map. + __ ldr(r3, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset)); + // Will both indicate a NULL and a Smi. 
+ __ tst(r3, Operand(kSmiTagMask)); + __ Assert(ne, "Unexpected initial map for Array function"); + __ CompareObjectType(r3, r3, r4, MAP_TYPE); + __ Assert(eq, "Unexpected initial map for Array function"); + + // We should either have undefined in ebx or a valid jsglobalpropertycell + Label okay_here; + Handle<Map> global_property_cell_map( + masm->isolate()->heap()->global_property_cell_map()); + __ cmp(r2, Operand(undefined_sentinel)); + __ b(eq, &okay_here); + __ ldr(r3, FieldMemOperand(r2, 0)); + __ cmp(r3, Operand(global_property_cell_map)); + __ Assert(eq, "Expected property cell in register ebx"); + __ bind(&okay_here); + } + + if (FLAG_optimize_constructed_arrays) { + Label no_info, switch_ready; + // Get the elements kind and case on that. + __ cmp(r2, Operand(undefined_sentinel)); + __ b(eq, &no_info); + __ ldr(r3, FieldMemOperand(r2, kPointerSize)); + + // There is no info if the call site went megamorphic either + // TODO(mvstanton): Really? I thought if it was the array function that + // the cell wouldn't get stamped as megamorphic. + __ cmp(r3, + Operand(TypeFeedbackCells::MegamorphicSentinel(masm->isolate()))); + __ b(eq, &no_info); + __ SmiUntag(r3); + __ jmp(&switch_ready); + __ bind(&no_info); + __ mov(r3, Operand(GetInitialFastElementsKind())); + __ bind(&switch_ready); + + if (argument_count_ == ANY) { + Label not_zero_case, not_one_case; + __ tst(r0, r0); + __ b(ne, ¬_zero_case); + CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm); + + __ bind(¬_zero_case); + __ cmp(r0, Operand(1)); + __ b(gt, ¬_one_case); + CreateArrayDispatchOneArgument(masm); + + __ bind(¬_one_case); + CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm); + } else if (argument_count_ == NONE) { + CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm); + } else if (argument_count_ == ONE) { + CreateArrayDispatchOneArgument(masm); + } else if (argument_count_ == MORE_THAN_ONE) { + CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm); + } else { + UNREACHABLE(); + } + } else { + Label generic_constructor; + // Run the native code for the Array function called as a constructor. + ArrayNativeCode(masm, &generic_constructor); + + // Jump to the generic construct code in case the specialized code cannot + // handle the construction. + __ bind(&generic_constructor); + Handle<Code> generic_construct_stub = + masm->isolate()->builtins()->JSConstructStubGeneric(); + __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET); + } +} + + #undef __ } } // namespace v8::internal diff --git a/deps/v8/src/arm/code-stubs-arm.h b/deps/v8/src/arm/code-stubs-arm.h index 75cbf6582c..0b1a8b8472 100644 --- a/deps/v8/src/arm/code-stubs-arm.h +++ b/deps/v8/src/arm/code-stubs-arm.h @@ -34,6 +34,9 @@ namespace v8 { namespace internal { +void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code); + + // Compute a transcendental math function natively, or call the // TranscendentalCache runtime function. class TranscendentalCacheStub: public PlatformCodeStub { @@ -469,34 +472,14 @@ class RecordWriteStub: public PlatformCodeStub { void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) { masm->stm(db_w, sp, (kCallerSaved | lr.bit()) & ~scratch1_.bit()); if (mode == kSaveFPRegs) { - // Number of d-regs not known at snapshot time. - ASSERT(!Serializer::enabled()); - masm->sub(sp, - sp, - Operand(kDoubleSize * (DwVfpRegister::NumRegisters() - 1))); - // Save all VFP registers except d0. - // TODO(hans): We should probably save d0 too. And maybe use vstm. 
- for (int i = DwVfpRegister::NumRegisters() - 1; i > 0; i--) { - DwVfpRegister reg = DwVfpRegister::from_code(i); - masm->vstr(reg, MemOperand(sp, (i - 1) * kDoubleSize)); - } + masm->SaveFPRegs(sp, scratch0_); } } inline void RestoreCallerSaveRegisters(MacroAssembler*masm, SaveFPRegsMode mode) { if (mode == kSaveFPRegs) { - // Number of d-regs not known at snapshot time. - ASSERT(!Serializer::enabled()); - // Restore all VFP registers except d0. - // TODO(hans): We should probably restore d0 too. And maybe use vldm. - for (int i = DwVfpRegister::NumRegisters() - 1; i > 0; i--) { - DwVfpRegister reg = DwVfpRegister::from_code(i); - masm->vldr(reg, MemOperand(sp, (i - 1) * kDoubleSize)); - } - masm->add(sp, - sp, - Operand(kDoubleSize * (DwVfpRegister::NumRegisters() - 1))); + masm->RestoreFPRegs(sp, scratch0_); } masm->ldm(ia_w, sp, (kCallerSaved | lr.bit()) & ~scratch1_.bit()); } @@ -608,142 +591,6 @@ class DirectCEntryStub: public PlatformCodeStub { }; -class FloatingPointHelper : public AllStatic { - public: - enum Destination { - kVFPRegisters, - kCoreRegisters - }; - - - // Loads smis from r0 and r1 (right and left in binary operations) into - // floating point registers. Depending on the destination the values ends up - // either d7 and d6 or in r2/r3 and r0/r1 respectively. If the destination is - // floating point registers VFP3 must be supported. If core registers are - // requested when VFP3 is supported d6 and d7 will be scratched. - static void LoadSmis(MacroAssembler* masm, - Destination destination, - Register scratch1, - Register scratch2); - - // Convert the smi or heap number in object to an int32 using the rules - // for ToInt32 as described in ECMAScript 9.5.: the value is truncated - // and brought into the range -2^31 .. +2^31 - 1. - static void ConvertNumberToInt32(MacroAssembler* masm, - Register object, - Register dst, - Register heap_number_map, - Register scratch1, - Register scratch2, - Register scratch3, - DwVfpRegister double_scratch1, - DwVfpRegister double_scratch2, - Label* not_int32); - - // Converts the integer (untagged smi) in |int_scratch| to a double, storing - // the result either in |double_dst| or |dst2:dst1|, depending on - // |destination|. - // Warning: The value in |int_scratch| will be changed in the process! - static void ConvertIntToDouble(MacroAssembler* masm, - Register int_scratch, - Destination destination, - DwVfpRegister double_dst, - Register dst1, - Register dst2, - Register scratch2, - SwVfpRegister single_scratch); - - // Load the number from object into double_dst in the double format. - // Control will jump to not_int32 if the value cannot be exactly represented - // by a 32-bit integer. - // Floating point value in the 32-bit integer range that are not exact integer - // won't be loaded. - static void LoadNumberAsInt32Double(MacroAssembler* masm, - Register object, - Destination destination, - DwVfpRegister double_dst, - DwVfpRegister double_scratch, - Register dst1, - Register dst2, - Register heap_number_map, - Register scratch1, - Register scratch2, - SwVfpRegister single_scratch, - Label* not_int32); - - // Loads the number from object into dst as a 32-bit integer. - // Control will jump to not_int32 if the object cannot be exactly represented - // by a 32-bit integer. - // Floating point value in the 32-bit integer range that are not exact integer - // won't be converted. - // scratch3 is not used when VFP3 is supported. 
- static void LoadNumberAsInt32(MacroAssembler* masm, - Register object, - Register dst, - Register heap_number_map, - Register scratch1, - Register scratch2, - Register scratch3, - DwVfpRegister double_scratch0, - DwVfpRegister double_scratch1, - Label* not_int32); - - // Generate non VFP3 code to check if a double can be exactly represented by a - // 32-bit integer. This does not check for 0 or -0, which need - // to be checked for separately. - // Control jumps to not_int32 if the value is not a 32-bit integer, and falls - // through otherwise. - // src1 and src2 will be cloberred. - // - // Expected input: - // - src1: higher (exponent) part of the double value. - // - src2: lower (mantissa) part of the double value. - // Output status: - // - dst: 32 higher bits of the mantissa. (mantissa[51:20]) - // - src2: contains 1. - // - other registers are clobbered. - static void DoubleIs32BitInteger(MacroAssembler* masm, - Register src1, - Register src2, - Register dst, - Register scratch, - Label* not_int32); - - // Generates code to call a C function to do a double operation using core - // registers. (Used when VFP3 is not supported.) - // This code never falls through, but returns with a heap number containing - // the result in r0. - // Register heapnumber_result must be a heap number in which the - // result of the operation will be stored. - // Requires the following layout on entry: - // r0: Left value (least significant part of mantissa). - // r1: Left value (sign, exponent, top of mantissa). - // r2: Right value (least significant part of mantissa). - // r3: Right value (sign, exponent, top of mantissa). - static void CallCCodeForDoubleOperation(MacroAssembler* masm, - Token::Value op, - Register heap_number_result, - Register scratch); - - // Loads the objects from |object| into floating point registers. - // Depending on |destination| the value ends up either in |dst| or - // in |dst1|/|dst2|. If |destination| is kVFPRegisters, then VFP3 - // must be supported. If kCoreRegisters are requested and VFP3 is - // supported, |dst| will be scratched. If |object| is neither smi nor - // heap number, |not_number| is jumped to with |object| still intact. - static void LoadNumber(MacroAssembler* masm, - FloatingPointHelper::Destination destination, - Register object, - DwVfpRegister dst, - Register dst1, - Register dst2, - Register heap_number_map, - Register scratch1, - Register scratch2, - Label* not_number); -}; - - class NameDictionaryLookupStub: public PlatformCodeStub { public: enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP }; diff --git a/deps/v8/src/arm/deoptimizer-arm.cc b/deps/v8/src/arm/deoptimizer-arm.cc index 25ad85c4bc..001d3c830d 100644 --- a/deps/v8/src/arm/deoptimizer-arm.cc +++ b/deps/v8/src/arm/deoptimizer-arm.cc @@ -604,8 +604,6 @@ void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) { void Deoptimizer::EntryGenerator::Generate() { GeneratePrologue(); - Isolate* isolate = masm()->isolate(); - // Save all general purpose registers before messing with them. const int kNumberOfRegisters = Register::kNumRegisters; @@ -665,12 +663,12 @@ void Deoptimizer::EntryGenerator::Generate() { // r2: bailout id already loaded. // r3: code address or 0 already loaded. __ str(r4, MemOperand(sp, 0 * kPointerSize)); // Fp-to-sp delta. - __ mov(r5, Operand(ExternalReference::isolate_address())); + __ mov(r5, Operand(ExternalReference::isolate_address(isolate()))); __ str(r5, MemOperand(sp, 1 * kPointerSize)); // Isolate. // Call Deoptimizer::New(). 
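The six argument words assembled above (fp-to-sp delta and isolate on the stack, the rest in registers) feed the C entry point that builds the deoptimizer. For orientation, its assumed shape; only the last four arguments are visible in this hunk, the first two are inferred, and the real declaration lives in deoptimizer.h:

// Assumed signature (sketch):
// Deoptimizer* Deoptimizer::New(JSFunction* function,   // r0 (inferred)
//                               BailoutType type,       // r1 (inferred)
//                               unsigned bailout_id,    // r2
//                               Address from,           // r3
//                               int fp_to_sp_delta,     // sp[0]
//                               Isolate* isolate);      // sp[1]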
{ AllowExternalCallThatCantCauseGC scope(masm()); - __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6); + __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate()), 6); } // Preserve "deoptimizer" object in register r0 and get the input @@ -731,7 +729,7 @@ void Deoptimizer::EntryGenerator::Generate() { { AllowExternalCallThatCantCauseGC scope(masm()); __ CallCFunction( - ExternalReference::compute_output_frames_function(isolate), 1); + ExternalReference::compute_output_frames_function(isolate()), 1); } __ pop(r0); // Restore deoptimizer object (class Deoptimizer). diff --git a/deps/v8/src/arm/full-codegen-arm.cc b/deps/v8/src/arm/full-codegen-arm.cc index 6a33234031..0ef4be064d 100644 --- a/deps/v8/src/arm/full-codegen-arm.cc +++ b/deps/v8/src/arm/full-codegen-arm.cc @@ -1922,6 +1922,158 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) { } +void FullCodeGenerator::VisitYield(Yield* expr) { + Comment cmnt(masm_, "[ Yield"); + // Evaluate yielded value first; the initial iterator definition depends on + // this. It stays on the stack while we update the iterator. + VisitForStackValue(expr->expression()); + + switch (expr->yield_kind()) { + case Yield::INITIAL: + case Yield::SUSPEND: { + VisitForStackValue(expr->generator_object()); + __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1); + __ ldr(context_register(), + MemOperand(fp, StandardFrameConstants::kContextOffset)); + + Label resume; + __ CompareRoot(result_register(), Heap::kTheHoleValueRootIndex); + __ b(ne, &resume); + __ pop(result_register()); + if (expr->yield_kind() == Yield::SUSPEND) { + // TODO(wingo): Box into { value: VALUE, done: false }. + } + EmitReturnSequence(); + + __ bind(&resume); + context()->Plug(result_register()); + break; + } + + case Yield::FINAL: { + VisitForAccumulatorValue(expr->generator_object()); + __ mov(r1, Operand(Smi::FromInt(JSGeneratorObject::kGeneratorClosed))); + __ str(r1, FieldMemOperand(result_register(), + JSGeneratorObject::kContinuationOffset)); + __ pop(result_register()); + // TODO(wingo): Box into { value: VALUE, done: true }. + + // Exit all nested statements. + NestedStatement* current = nesting_stack_; + int stack_depth = 0; + int context_length = 0; + while (current != NULL) { + current = current->Exit(&stack_depth, &context_length); + } + __ Drop(stack_depth); + EmitReturnSequence(); + break; + } + + case Yield::DELEGATING: + UNIMPLEMENTED(); + } +} + + +void FullCodeGenerator::EmitGeneratorResume(Expression *generator, + Expression *value, + JSGeneratorObject::ResumeMode resume_mode) { + // The value stays in r0, and is ultimately read by the resumed generator, as + // if the CallRuntime(Runtime::kSuspendJSGeneratorObject) returned it. r1 + // will hold the generator object until the activation has been resumed. + VisitForStackValue(generator); + VisitForAccumulatorValue(value); + __ pop(r1); + + // Check generator state. + Label wrong_state, done; + __ ldr(r3, FieldMemOperand(r1, JSGeneratorObject::kContinuationOffset)); + STATIC_ASSERT(JSGeneratorObject::kGeneratorExecuting <= 0); + STATIC_ASSERT(JSGeneratorObject::kGeneratorClosed <= 0); + __ cmp(r3, Operand(Smi::FromInt(0))); + __ b(le, &wrong_state); + + // Load suspended function and context. + __ ldr(cp, FieldMemOperand(r1, JSGeneratorObject::kContextOffset)); + __ ldr(r4, FieldMemOperand(r1, JSGeneratorObject::kFunctionOffset)); + + // Load receiver and store as the first argument. 
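// Note on the state check above (sketch; the exact constant values live in
// objects.h, the STATIC_ASSERTs only pin them to <= 0):
//   continuation > 0   suspended at that code offset, resumable
//   continuation <= 0  executing or closed, so resuming throws
// i.e. the guard is simply:  bool resumable = continuation > 0;
// which is what "cmp r3, Smi(0); b(le, &wrong_state)" implements.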
+ __ ldr(r2, FieldMemOperand(r1, JSGeneratorObject::kReceiverOffset)); + __ push(r2); + + // Push holes for the rest of the arguments to the generator function. + __ ldr(r3, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset)); + __ ldr(r3, + FieldMemOperand(r3, SharedFunctionInfo::kFormalParameterCountOffset)); + __ LoadRoot(r2, Heap::kTheHoleValueRootIndex); + Label push_argument_holes, push_frame; + __ bind(&push_argument_holes); + __ sub(r3, r3, Operand(1), SetCC); + __ b(mi, &push_frame); + __ push(r2); + __ jmp(&push_argument_holes); + + // Enter a new JavaScript frame, and initialize its slots as they were when + // the generator was suspended. + Label resume_frame; + __ bind(&push_frame); + __ bl(&resume_frame); + __ jmp(&done); + __ bind(&resume_frame); + __ push(lr); // Return address. + __ push(fp); // Caller's frame pointer. + __ mov(fp, sp); + __ push(cp); // Callee's context. + __ push(r4); // Callee's JS Function. + + // Load the operand stack size. + __ ldr(r3, FieldMemOperand(r1, JSGeneratorObject::kOperandStackOffset)); + __ ldr(r3, FieldMemOperand(r3, FixedArray::kLengthOffset)); + __ SmiUntag(r3); + + // If we are sending a value and there is no operand stack, we can jump back + // in directly. + if (resume_mode == JSGeneratorObject::SEND) { + Label slow_resume; + __ cmp(r3, Operand(0)); + __ b(ne, &slow_resume); + __ ldr(r3, FieldMemOperand(r4, JSFunction::kCodeEntryOffset)); + __ ldr(r2, FieldMemOperand(r1, JSGeneratorObject::kContinuationOffset)); + __ SmiUntag(r2); + __ add(r3, r3, r2); + __ mov(r2, Operand(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting))); + __ str(r2, FieldMemOperand(r1, JSGeneratorObject::kContinuationOffset)); + __ Jump(r3); + __ bind(&slow_resume); + } + + // Otherwise, we push holes for the operand stack and call the runtime to fix + // up the stack and the handlers. + Label push_operand_holes, call_resume; + __ bind(&push_operand_holes); + __ sub(r3, r3, Operand(1), SetCC); + __ b(mi, &call_resume); + __ push(r2); + __ b(&push_operand_holes); + __ bind(&call_resume); + __ push(r1); + __ push(result_register()); + __ Push(Smi::FromInt(resume_mode)); + __ CallRuntime(Runtime::kResumeJSGeneratorObject, 3); + // Not reached: the runtime call returns elsewhere. + __ stop("not-reached"); + + // Throw error if we attempt to operate on a running generator. + __ bind(&wrong_state); + __ push(r1); + __ CallRuntime(Runtime::kThrowGeneratorStateError, 1); + + __ bind(&done); + context()->Plug(result_register()); +} + + void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) { SetSourcePosition(prop->position()); Literal* key = prop->key()->AsLiteral(); @@ -4383,28 +4535,22 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr, VisitForAccumulatorValue(sub_expr); PrepareForBailoutBeforeSplit(expr, true, if_true, if_false); - Heap::RootListIndex nil_value = nil == kNullValue ? - Heap::kNullValueRootIndex : - Heap::kUndefinedValueRootIndex; - __ LoadRoot(r1, nil_value); - __ cmp(r0, r1); - if (expr->op() == Token::EQ_STRICT) { - Split(eq, if_true, if_false, fall_through); - } else { - Heap::RootListIndex other_nil_value = nil == kNullValue ? - Heap::kUndefinedValueRootIndex : - Heap::kNullValueRootIndex; - __ b(eq, if_true); - __ LoadRoot(r1, other_nil_value); + EqualityKind kind = expr->op() == Token::EQ_STRICT + ? kStrictEquality : kNonStrictEquality; + if (kind == kStrictEquality) { + Heap::RootListIndex nil_value = nil == kNullValue ? 
+ Heap::kNullValueRootIndex : + Heap::kUndefinedValueRootIndex; + __ LoadRoot(r1, nil_value); __ cmp(r0, r1); - __ b(eq, if_true); - __ JumpIfSmi(r0, if_false); - // It can be an undetectable object. - __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset)); - __ ldrb(r1, FieldMemOperand(r1, Map::kBitFieldOffset)); - __ and_(r1, r1, Operand(1 << Map::kIsUndetectable)); - __ cmp(r1, Operand(1 << Map::kIsUndetectable)); Split(eq, if_true, if_false, fall_through); + } else { + Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(), + kNonStrictEquality, + nil); + CallIC(ic, RelocInfo::CODE_TARGET, expr->CompareOperationFeedbackId()); + __ cmp(r0, Operand(0)); + Split(ne, if_true, if_false, fall_through); } context()->Plug(if_true, if_false); } diff --git a/deps/v8/src/arm/ic-arm.cc b/deps/v8/src/arm/ic-arm.cc index 84a11b6144..893ac4e116 100644 --- a/deps/v8/src/arm/ic-arm.cc +++ b/deps/v8/src/arm/ic-arm.cc @@ -1340,13 +1340,7 @@ static void KeyedStoreGenerateGenericHelper( __ b(ne, slow); } __ bind(&fast_double_without_map_check); - __ StoreNumberToDoubleElements(value, - key, - elements, // Overwritten. - r3, // Scratch regs... - r4, - r5, - r6, + __ StoreNumberToDoubleElements(value, key, elements, r3, &transition_double_elements); if (increment_length == kIncrementLength) { // Add 1 to receiver->length. diff --git a/deps/v8/src/arm/lithium-arm.cc b/deps/v8/src/arm/lithium-arm.cc index 64083e8597..66c108d4fe 100644 --- a/deps/v8/src/arm/lithium-arm.cc +++ b/deps/v8/src/arm/lithium-arm.cc @@ -192,6 +192,11 @@ const char* LArithmeticT::Mnemonic() const { } +bool LGoto::HasInterestingComment(LCodeGen* gen) const { + return !gen->IsNextEmittedBlock(block_id()); +} + + void LGoto::PrintDataTo(StringStream* stream) { stream->Add("B%d", block_id()); } @@ -989,12 +994,14 @@ LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) { LInstruction* LChunkBuilder::DoArgumentsLength(HArgumentsLength* instr) { + info()->MarkAsRequiresFrame(); LOperand* value = UseRegister(instr->value()); return DefineAsRegister(new(zone()) LArgumentsLength(value)); } LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) { + info()->MarkAsRequiresFrame(); return DefineAsRegister(new(zone()) LArgumentsElements); } @@ -2424,7 +2431,8 @@ LInstruction* LChunkBuilder::DoParameter(HParameter* instr) { ASSERT(info()->IsStub()); CodeStubInterfaceDescriptor* descriptor = info()->code_stub()->GetInterfaceDescriptor(info()->isolate()); - Register reg = descriptor->register_params_[instr->index()]; + int index = static_cast<int>(instr->index()); + Register reg = DESCRIPTOR_GET_PARAMETER_REGISTER(descriptor, index); return DefineFixed(result, reg); } } @@ -2456,9 +2464,17 @@ LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) { LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) { + info()->MarkAsRequiresFrame(); LOperand* args = UseRegister(instr->arguments()); - LOperand* length = UseTempRegister(instr->length()); - LOperand* index = UseRegister(instr->index()); + LOperand* length; + LOperand* index; + if (instr->length()->IsConstant() && instr->index()->IsConstant()) { + length = UseRegisterOrConstant(instr->length()); + index = UseOrConstant(instr->index()); + } else { + length = UseTempRegister(instr->length()); + index = Use(instr->index()); + } return DefineAsRegister(new(zone()) LAccessArgumentsAt(args, length, index)); } diff --git a/deps/v8/src/arm/lithium-arm.h b/deps/v8/src/arm/lithium-arm.h index 6486cad2bb..d81881e6fb 100644 --- 
a/deps/v8/src/arm/lithium-arm.h +++ b/deps/v8/src/arm/lithium-arm.h @@ -282,6 +282,8 @@ class LInstruction: public ZoneObject { LOperand* FirstInput() { return InputAt(0); } LOperand* Output() { return HasResult() ? result() : NULL; } + virtual bool HasInterestingComment(LCodeGen* gen) const { return true; } + #ifdef DEBUG void VerifyCall(); #endif @@ -381,6 +383,10 @@ class LInstructionGap: public LGap { public: explicit LInstructionGap(HBasicBlock* block) : LGap(block) { } + virtual bool HasInterestingComment(LCodeGen* gen) const { + return !IsRedundant(); + } + DECLARE_CONCRETE_INSTRUCTION(InstructionGap, "gap") }; @@ -389,6 +395,7 @@ class LGoto: public LTemplateInstruction<0, 0, 0> { public: explicit LGoto(int block_id) : block_id_(block_id) { } + virtual bool HasInterestingComment(LCodeGen* gen) const; DECLARE_CONCRETE_INSTRUCTION(Goto, "goto") virtual void PrintDataTo(StringStream* stream); virtual bool IsControl() const { return true; } @@ -436,12 +443,14 @@ class LLabel: public LGap { explicit LLabel(HBasicBlock* block) : LGap(block), replacement_(NULL) { } + virtual bool HasInterestingComment(LCodeGen* gen) const { return false; } DECLARE_CONCRETE_INSTRUCTION(Label, "label") virtual void PrintDataTo(StringStream* stream); int block_id() const { return block()->block_id(); } bool is_loop_header() const { return block()->IsLoopHeader(); } + bool is_osr_entry() const { return block()->is_osr_entry(); } Label* label() { return &label_; } LLabel* replacement() const { return replacement_; } void set_replacement(LLabel* label) { replacement_ = label; } @@ -455,6 +464,7 @@ class LLabel: public LGap { class LParameter: public LTemplateInstruction<1, 0, 0> { public: + virtual bool HasInterestingComment(LCodeGen* gen) const { return false; } DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter") }; @@ -472,6 +482,7 @@ class LCallStub: public LTemplateInstruction<1, 0, 0> { class LUnknownOSRValue: public LTemplateInstruction<1, 0, 0> { public: + virtual bool HasInterestingComment(LCodeGen* gen) const { return false; } DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value") }; @@ -1843,7 +1854,6 @@ class LInvokeFunction: public LTemplateInstruction<1, 1, 0> { virtual void PrintDataTo(StringStream* stream); int arity() const { return hydrogen()->argument_count() - 1; } - Handle<JSFunction> known_function() { return hydrogen()->known_function(); } }; @@ -1911,7 +1921,6 @@ class LCallKnownGlobal: public LTemplateInstruction<1, 0, 0> { virtual void PrintDataTo(StringStream* stream); - Handle<JSFunction> target() const { return hydrogen()->target(); } int arity() const { return hydrogen()->argument_count() - 1; } }; @@ -2488,8 +2497,6 @@ class LFunctionLiteral: public LTemplateInstruction<1, 0, 0> { public: DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral, "function-literal") DECLARE_HYDROGEN_ACCESSOR(FunctionLiteral) - - Handle<SharedFunctionInfo> shared_info() { return hydrogen()->shared_info(); } }; @@ -2566,6 +2573,7 @@ class LOsrEntry: public LTemplateInstruction<0, 0, 0> { public: LOsrEntry(); + virtual bool HasInterestingComment(LCodeGen* gen) const { return false; } DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry") LOperand** SpilledRegisterArray() { return register_spills_; } diff --git a/deps/v8/src/arm/lithium-codegen-arm.cc b/deps/v8/src/arm/lithium-codegen-arm.cc index d2f44b05c0..29e01b9182 100644 --- a/deps/v8/src/arm/lithium-codegen-arm.cc +++ b/deps/v8/src/arm/lithium-codegen-arm.cc @@ -238,7 +238,12 @@ bool LCodeGen::GeneratePrologue() { __ str(r0, target); // Update the 
write barrier. This clobbers r3 and r0. __ RecordWriteContextSlot( - cp, target.offset(), r0, r3, GetLinkRegisterState(), kSaveFPRegs); + cp, + target.offset(), + r0, + r3, + GetLinkRegisterState(), + kSaveFPRegs); } } Comment(";;; End allocate local context"); @@ -259,38 +264,21 @@ bool LCodeGen::GenerateBody() { !is_aborted() && current_instruction_ < instructions_->length(); current_instruction_++) { LInstruction* instr = instructions_->at(current_instruction_); + + // Don't emit code for basic blocks with a replacement. if (instr->IsLabel()) { - LLabel* label = LLabel::cast(instr); - emit_instructions = !label->HasReplacement(); + emit_instructions = !LLabel::cast(instr)->HasReplacement(); } + if (!emit_instructions) continue; - if (emit_instructions) { - if (FLAG_code_comments) { - HValue* hydrogen = instr->hydrogen_value(); - if (hydrogen != NULL) { - if (hydrogen->IsChange()) { - HValue* changed_value = HChange::cast(hydrogen)->value(); - int use_id = 0; - const char* use_mnemo = "dead"; - if (hydrogen->UseCount() >= 1) { - HValue* use_value = hydrogen->uses().value(); - use_id = use_value->id(); - use_mnemo = use_value->Mnemonic(); - } - Comment(";;; @%d: %s. <of #%d %s for #%d %s>", - current_instruction_, instr->Mnemonic(), - changed_value->id(), changed_value->Mnemonic(), - use_id, use_mnemo); - } else { - Comment(";;; @%d: %s. <#%d>", current_instruction_, - instr->Mnemonic(), hydrogen->id()); - } - } else { - Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic()); - } - } - instr->CompileToNative(this); + if (FLAG_code_comments && instr->HasInterestingComment(this)) { + Comment(";;; <@%d,#%d> %s", + current_instruction_, + instr->hydrogen_value()->id(), + instr->Mnemonic()); } + + instr->CompileToNative(this); } EnsureSpaceForLazyDeopt(); return !is_aborted(); @@ -302,11 +290,14 @@ bool LCodeGen::GenerateDeferredCode() { if (deferred_.length() > 0) { for (int i = 0; !is_aborted() && i < deferred_.length(); i++) { LDeferredCode* code = deferred_[i]; + Comment(";;; <@%d,#%d> " + "-------------------- Deferred %s --------------------", + code->instruction_index(), + code->instr()->hydrogen_value()->id(), + code->instr()->Mnemonic()); __ bind(code->entry()); if (NeedsDeferredFrame()) { - Comment(";;; Deferred build frame @%d: %s.", - code->instruction_index(), - code->instr()->Mnemonic()); + Comment(";;; Build frame"); ASSERT(!frame_is_built_); ASSERT(info()->IsStub()); frame_is_built_ = true; @@ -314,15 +305,11 @@ bool LCodeGen::GenerateDeferredCode() { __ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB))); __ push(scratch0()); __ add(fp, sp, Operand(2 * kPointerSize)); + Comment(";;; Deferred code"); } - Comment(";;; Deferred code @%d: %s.", - code->instruction_index(), - code->instr()->Mnemonic()); code->Generate(); if (NeedsDeferredFrame()) { - Comment(";;; Deferred destroy frame @%d: %s.", - code->instruction_index(), - code->instr()->Mnemonic()); + Comment(";;; Destroy frame"); ASSERT(frame_is_built_); __ pop(ip); __ ldm(ia_w, sp, cp.bit() | fp.bit() | lr.bit()); @@ -353,7 +340,9 @@ bool LCodeGen::GenerateDeoptJumpTable() { Abort("Generated code is too large"); } - __ RecordComment("[ Deoptimisation jump table"); + if (deopt_jump_table_.length() > 0) { + Comment(";;; -------------------- Jump table --------------------"); + } Label table_start; __ bind(&table_start); Label needs_frame_not_call; @@ -414,7 +403,6 @@ bool LCodeGen::GenerateDeoptJumpTable() { } masm()->CheckConstPool(false, false); } - __ RecordComment("]"); // Force constant pool emission at the 
end of the deopt jump table to make // sure that no constant pools are emitted after. @@ -607,7 +595,7 @@ void LCodeGen::WriteTranslation(LEnvironment* environment, pushed_arguments_index, pushed_arguments_count); bool has_closure_id = !info()->closure().is_null() && - *info()->closure() != *environment->closure(); + !info()->closure().is_identical_to(environment->closure()); int closure_id = has_closure_id ? DefineDeoptimizationLiteral(environment->closure()) : Translation::kSelfLiteralId; @@ -923,10 +911,13 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) { Handle<FixedArray> literals = factory()->NewFixedArray(deoptimization_literals_.length(), TENURED); - for (int i = 0; i < deoptimization_literals_.length(); i++) { - literals->set(i, *deoptimization_literals_[i]); + { ALLOW_HANDLE_DEREF(isolate(), + "copying a ZoneList of handles into a FixedArray"); + for (int i = 0; i < deoptimization_literals_.length(); i++) { + literals->set(i, *deoptimization_literals_[i]); + } + data->SetLiteralArray(*literals); } - data->SetLiteralArray(*literals); data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt())); data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_)); @@ -1042,10 +1033,19 @@ void LCodeGen::RecordPosition(int position) { } +static const char* LabelType(LLabel* label) { + if (label->is_loop_header()) return " (loop header)"; + if (label->is_osr_entry()) return " (OSR entry)"; + return ""; +} + + void LCodeGen::DoLabel(LLabel* label) { - Comment(";;; -------------------- B%d%s --------------------", + Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------", + current_instruction_, + label->hydrogen_value()->id(), label->block_id(), - label->is_loop_header() ? " (loop header)" : ""); + LabelType(label)); __ bind(label->label()); current_block_ = label->block_id(); DoGap(label); @@ -1904,6 +1904,7 @@ void LCodeGen::DoConstantD(LConstantD* instr) { void LCodeGen::DoConstantT(LConstantT* instr) { Handle<Object> value = instr->value(); + ALLOW_HANDLE_DEREF(isolate(), "smi check"); if (value->IsSmi()) { __ mov(ToRegister(instr->result()), Operand(value)); } else { @@ -2170,17 +2171,16 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) { } -int LCodeGen::GetNextEmittedBlock(int block) { - for (int i = block + 1; i < graph()->blocks()->length(); ++i) { - LLabel* label = chunk_->GetLabel(i); - if (!label->HasReplacement()) return i; +int LCodeGen::GetNextEmittedBlock() const { + for (int i = current_block_ + 1; i < graph()->blocks()->length(); ++i) { + if (!chunk_->GetLabel(i)->HasReplacement()) return i; } return -1; } void LCodeGen::EmitBranch(int left_block, int right_block, Condition cc) { - int next_block = GetNextEmittedBlock(current_block_); + int next_block = GetNextEmittedBlock(); right_block = chunk_->LookupDestination(right_block); left_block = chunk_->LookupDestination(left_block); @@ -2317,10 +2317,8 @@ void LCodeGen::DoBranch(LBranch* instr) { void LCodeGen::EmitGoto(int block) { - block = chunk_->LookupDestination(block); - int next_block = GetNextEmittedBlock(current_block_); - if (block != next_block) { - __ jmp(chunk_->GetAssemblyLabel(block)); + if (!IsNextEmittedBlock(block)) { + __ jmp(chunk_->GetAssemblyLabel(chunk_->LookupDestination(block))); } } @@ -2944,19 +2942,20 @@ void LCodeGen::DoReturn(LReturn* instr) { if (NeedsEagerFrame()) { __ mov(sp, fp); __ ldm(ia_w, sp, fp.bit() | lr.bit()); - - if (instr->has_constant_parameter_count()) { - int parameter_count = ToInteger32(instr->constant_parameter_count()); - int32_t sp_delta = 
(parameter_count + 1) * kPointerSize; - if (sp_delta != 0) { - __ add(sp, sp, Operand(sp_delta)); - } - } else { - Register reg = ToRegister(instr->parameter_count()); - __ add(reg, reg, Operand(1)); - __ add(sp, sp, Operand(reg, LSL, kPointerSizeLog2)); + } + if (instr->has_constant_parameter_count()) { + int parameter_count = ToInteger32(instr->constant_parameter_count()); + int32_t sp_delta = (parameter_count + 1) * kPointerSize; + if (sp_delta != 0) { + __ add(sp, sp, Operand(sp_delta)); } + } else { + Register reg = ToRegister(instr->parameter_count()); + // The argument count parameter is a smi + __ SmiUntag(reg); + __ add(sp, sp, Operand(reg, LSL, kPointerSizeLog2)); } + __ Jump(lr); } @@ -3274,14 +3273,22 @@ void LCodeGen::DoLoadExternalArrayPointer( void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) { Register arguments = ToRegister(instr->arguments()); - Register length = ToRegister(instr->length()); - Register index = ToRegister(instr->index()); Register result = ToRegister(instr->result()); - // There are two words between the frame pointer and the last argument. - // Subtracting from length accounts for one of them add one more. - __ sub(length, length, index); - __ add(length, length, Operand(1)); - __ ldr(result, MemOperand(arguments, length, LSL, kPointerSizeLog2)); + if (instr->length()->IsConstantOperand() && + instr->index()->IsConstantOperand()) { + int const_index = ToInteger32(LConstantOperand::cast(instr->index())); + int const_length = ToInteger32(LConstantOperand::cast(instr->length())); + int index = (const_length - const_index) + 1; + __ ldr(result, MemOperand(arguments, index * kPointerSize)); + } else { + Register length = ToRegister(instr->length()); + Register index = ToRegister(instr->index()); + // There are two words between the frame pointer and the last argument. + // Subtracting from length accounts for one of them add one more. + __ sub(length, length, index); + __ add(length, length, Operand(1)); + __ ldr(result, MemOperand(arguments, length, LSL, kPointerSizeLog2)); + } } @@ -3703,12 +3710,15 @@ void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) { void LCodeGen::CallKnownFunction(Handle<JSFunction> function, + int formal_parameter_count, int arity, LInstruction* instr, CallKind call_kind, R1State r1_state) { - bool can_invoke_directly = !function->NeedsArgumentsAdaption() || - function->shared()->formal_parameter_count() == arity; + bool dont_adapt_arguments = + formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel; + bool can_invoke_directly = + dont_adapt_arguments || formal_parameter_count == arity; LPointerMap* pointers = instr->pointer_map(); RecordPosition(pointers->position()); @@ -3723,7 +3733,7 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function, // Set r0 to arguments count if adaption is not needed. Assumes that r0 // is available to write to at this point. - if (!function->NeedsArgumentsAdaption()) { + if (dont_adapt_arguments) { __ mov(r0, Operand(arity)); } @@ -3737,7 +3747,9 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function, } else { SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt); ParameterCount count(arity); - __ InvokeFunction(function, count, CALL_FUNCTION, generator, call_kind); + ParameterCount expected(formal_parameter_count); + __ InvokeFunction( + function, expected, count, CALL_FUNCTION, generator, call_kind); } // Restore context. 
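When both length and index are constants, the DoAccessArgumentsAt hunk above folds the whole address computation into a fixed offset. A worked sketch of that arithmetic (kPointerSize is 4 on ARM):

// Two words (the saved fp and lr) sit between the frame pointer and the
// last argument; (length - index) accounts for one, the +1 for the other.
inline int ArgumentSlotOffsetSketch(int const_length, int const_index) {
  const int kPointerSize = 4;  // ARM
  return ((const_length - const_index) + 1) * kPointerSize;
}
// e.g. length 5, index 2  =>  ldr result, [arguments, #16]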
@@ -3747,7 +3759,8 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function, void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) { ASSERT(ToRegister(instr->result()).is(r0)); - CallKnownFunction(instr->function(), + CallKnownFunction(instr->hydrogen()->function(), + instr->hydrogen()->formal_parameter_count(), instr->arity(), instr, CALL_AS_METHOD, @@ -4119,7 +4132,8 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) { ASSERT(ToRegister(instr->function()).is(r1)); ASSERT(instr->HasPointerMap()); - if (instr->known_function().is_null()) { + Handle<JSFunction> known_function = instr->hydrogen()->known_function(); + if (known_function.is_null()) { LPointerMap* pointers = instr->pointer_map(); RecordPosition(pointers->position()); SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt); @@ -4127,7 +4141,8 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) { __ InvokeFunction(r1, count, CALL_FUNCTION, generator, CALL_AS_METHOD); __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); } else { - CallKnownFunction(instr->known_function(), + CallKnownFunction(known_function, + instr->hydrogen()->formal_parameter_count(), instr->arity(), instr, CALL_AS_METHOD, @@ -4187,7 +4202,8 @@ void LCodeGen::DoCallGlobal(LCallGlobal* instr) { void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) { ASSERT(ToRegister(instr->result()).is(r0)); - CallKnownFunction(instr->target(), + CallKnownFunction(instr->hydrogen()->target(), + instr->hydrogen()->formal_parameter_count(), instr->arity(), instr, CALL_AS_FUNCTION, @@ -4218,10 +4234,18 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) { __ mov(r0, Operand(instr->arity())); __ mov(r2, Operand(instr->hydrogen()->property_cell())); - Handle<Code> array_construct_code = - isolate()->builtins()->ArrayConstructCode(); - - CallCode(array_construct_code, RelocInfo::CONSTRUCT_CALL, instr); + Object* cell_value = instr->hydrogen()->property_cell()->value(); + ElementsKind kind = static_cast<ElementsKind>(Smi::cast(cell_value)->value()); + if (instr->arity() == 0) { + ArrayNoArgumentConstructorStub stub(kind); + CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr); + } else if (instr->arity() == 1) { + ArraySingleArgumentConstructorStub stub(kind); + CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr); + } else { + ArrayNArgumentsConstructorStub stub(kind); + CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr); + } } @@ -5038,8 +5062,8 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) { __ sub(scratch1, input_reg, Operand(kHeapObjectTag)); __ vldr(double_scratch2, scratch1, HeapNumber::kValueOffset); - __ ECMAToInt32(input_reg, double_scratch2, double_scratch, - scratch1, scratch2, scratch3); + __ ECMAToInt32(input_reg, double_scratch2, + scratch1, scratch2, scratch3, double_scratch); } else { // Deoptimize if we don't have a heap number. @@ -5136,8 +5160,8 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) { if (instr->truncating()) { Register scratch3 = ToRegister(instr->temp2()); - __ ECMAToInt32(result_reg, double_input, double_scratch, - scratch1, scratch2, scratch3); + __ ECMAToInt32(result_reg, double_input, + scratch1, scratch2, scratch3, double_scratch); } else { __ TryDoubleToInt32Exact(result_reg, double_input, double_scratch); // Deoptimize if the input wasn't a int32 (inside a double). 
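TryDoubleToInt32Exact, used just above, and TestDoubleIsInt32, used in the macro-assembler changes later in this patch, accept only doubles that survive an exact round-trip through int32. A portable sketch of that predicate (the VFP versions also report -0 via the condition flags, which this sketch ignores):

#include <cstdint>

inline bool DoubleIsInt32Sketch(double value, int32_t* out) {
  if (value < -2147483648.0 || value > 2147483647.0) return false;  // out of range
  int32_t candidate = static_cast<int32_t>(value);
  if (static_cast<double>(candidate) != value) return false;  // lost precision
  *out = candidate;
  return true;  // note: accepts -0.0 as plain 0
}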
@@ -5207,6 +5231,7 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) { void LCodeGen::DoCheckFunction(LCheckFunction* instr) { Register reg = ToRegister(instr->value()); Handle<JSFunction> target = instr->hydrogen()->target(); + ALLOW_HANDLE_DEREF(isolate(), "smi check"); if (isolate()->heap()->InNewSpace(*target)) { Register reg = ToRegister(instr->value()); Handle<JSGlobalPropertyCell> cell = @@ -5348,16 +5373,12 @@ void LCodeGen::DoAllocateObject(LAllocateObject* instr) { Register scratch = ToRegister(instr->temp()); Register scratch2 = ToRegister(instr->temp2()); Handle<JSFunction> constructor = instr->hydrogen()->constructor(); - Handle<Map> initial_map(constructor->initial_map()); + Handle<Map> initial_map = instr->hydrogen()->constructor_initial_map(); int instance_size = initial_map->instance_size(); ASSERT(initial_map->pre_allocated_property_fields() + initial_map->unused_property_fields() - initial_map->inobject_properties() == 0); - // Allocate memory for the object. The initial map might change when - // the constructor's prototype changes, but instance size and property - // counts remain unchanged (if slack tracking finished). - ASSERT(!constructor->shared()->IsInobjectSlackTrackingInProgress()); __ Allocate(instance_size, result, scratch, scratch2, deferred->entry(), TAG_OBJECT); @@ -5392,8 +5413,7 @@ void LCodeGen::DoAllocateObject(LAllocateObject* instr) { void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) { Register result = ToRegister(instr->result()); - Handle<JSFunction> constructor = instr->hydrogen()->constructor(); - Handle<Map> initial_map(constructor->initial_map()); + Handle<Map> initial_map = instr->hydrogen()->constructor_initial_map(); int instance_size = initial_map->instance_size(); // TODO(3095996): Get rid of this. For now, we need to make the @@ -5476,7 +5496,7 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) { void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) { - Handle<FixedArray> literals(instr->environment()->closure()->literals()); + Handle<FixedArray> literals = instr->hydrogen()->literals(); ElementsKind boilerplate_elements_kind = instr->hydrogen()->boilerplate_elements_kind(); AllocationSiteMode allocation_site_mode = @@ -5531,7 +5551,7 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) { void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) { - Handle<FixedArray> literals(instr->environment()->closure()->literals()); + Handle<FixedArray> literals = instr->hydrogen()->literals(); Handle<FixedArray> constant_properties = instr->hydrogen()->constant_properties(); @@ -5545,7 +5565,7 @@ void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) { __ mov(r0, Operand(Smi::FromInt(flags))); // Pick the right runtime function or stub to call. - int properties_count = constant_properties->length() / 2; + int properties_count = instr->hydrogen()->constant_properties_length() / 2; if (instr->hydrogen()->depth() > 1) { __ Push(r3, r2, r1, r0); CallRuntime(Runtime::kCreateObjectLiteral, 4, instr); @@ -5614,19 +5634,17 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) { void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) { // Use the fast case closure allocation code that allocates in new // space for nested functions that don't need literals cloning. 
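The rewritten body that follows picks between the stub and the runtime using only hydrogen-level facts, with no handle dereference needed; condensed (a sketch of the choice, not the real code):

// if (!pretenure && hydrogen->has_no_literals())
//   FastNewClosureStub(language_mode, is_generator)   // new-space fast path
// else
//   Runtime::kNewClosure(cp, shared_info, pretenure)  // handles literals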
- Handle<SharedFunctionInfo> shared_info = instr->shared_info(); bool pretenure = instr->hydrogen()->pretenure(); - if (!pretenure && shared_info->num_literals() == 0) { - FastNewClosureStub stub(shared_info->language_mode(), - shared_info->is_generator()); - __ mov(r1, Operand(shared_info)); + if (!pretenure && instr->hydrogen()->has_no_literals()) { + FastNewClosureStub stub(instr->hydrogen()->language_mode(), + instr->hydrogen()->is_generator()); + __ mov(r1, Operand(instr->hydrogen()->shared_info())); __ push(r1); CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); } else { - __ mov(r2, Operand(shared_info)); - __ mov(r1, Operand(pretenure - ? factory()->true_value() - : factory()->false_value())); + __ mov(r2, Operand(instr->hydrogen()->shared_info())); + __ mov(r1, Operand(pretenure ? factory()->true_value() + : factory()->false_value())); __ Push(cp, r2, r1); CallRuntime(Runtime::kNewClosure, 3, instr); } diff --git a/deps/v8/src/arm/lithium-codegen-arm.h b/deps/v8/src/arm/lithium-codegen-arm.h index c55558cff5..ae175e52d3 100644 --- a/deps/v8/src/arm/lithium-codegen-arm.h +++ b/deps/v8/src/arm/lithium-codegen-arm.h @@ -80,10 +80,20 @@ class LCodeGen BASE_EMBEDDED { Heap* heap() const { return isolate()->heap(); } Zone* zone() const { return zone_; } + // TODO(svenpanne) Use this consistently. + int LookupDestination(int block_id) const { + return chunk()->LookupDestination(block_id); + } + + bool IsNextEmittedBlock(int block_id) const { + return LookupDestination(block_id) == GetNextEmittedBlock(); + } + bool NeedsEagerFrame() const { return GetStackSlotCount() > 0 || info()->is_non_deferred_calling() || - !info()->IsStub(); + !info()->IsStub() || + info()->requires_frame(); } bool NeedsDeferredFrame() const { return !NeedsEagerFrame() && info()->is_deferred_calling(); @@ -195,12 +205,12 @@ class LCodeGen BASE_EMBEDDED { LPlatformChunk* chunk() const { return chunk_; } Scope* scope() const { return scope_; } - HGraph* graph() const { return chunk_->graph(); } + HGraph* graph() const { return chunk()->graph(); } Register scratch0() { return r9; } DwVfpRegister double_scratch0() { return kScratchDoubleReg; } - int GetNextEmittedBlock(int block); + int GetNextEmittedBlock() const; LInstruction* GetNextInstruction(); void EmitClassOfTest(Label* if_true, @@ -266,6 +276,7 @@ class LCodeGen BASE_EMBEDDED { // Generate a direct call to a known function. Expects the function // to be in r1. 
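The declaration being introduced here gains a formal_parameter_count argument so that, as in the lithium-codegen changes above, the fast path no longer dereferences the JSFunction handle. The call-site logic it feeds, condensed (sketch):

// bool dont_adapt = formal_parameter_count ==
//                   SharedFunctionInfo::kDontAdaptArgumentsSentinel;
// bool can_invoke_directly = dont_adapt || formal_parameter_count == arity;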
void CallKnownFunction(Handle<JSFunction> function, + int formal_parameter_count, int arity, LInstruction* instr, CallKind call_kind, diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc index 616d02d867..b7cd3db046 100644 --- a/deps/v8/src/arm/macro-assembler-arm.cc +++ b/deps/v8/src/arm/macro-assembler-arm.cc @@ -74,6 +74,7 @@ void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond) { ASSERT(RelocInfo::IsCodeTarget(rmode)); // 'code' is always generated ARM code, never THUMB code + ALLOW_HANDLE_DEREF(isolate(), "embedding raw address"); Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond); } @@ -162,6 +163,7 @@ int MacroAssembler::CallSize(Handle<Code> code, RelocInfo::Mode rmode, TypeFeedbackId ast_id, Condition cond) { + ALLOW_HANDLE_DEREF(isolate(), "using raw address"); return CallSize(reinterpret_cast<Address>(code.location()), rmode, cond); } @@ -179,6 +181,7 @@ void MacroAssembler::Call(Handle<Code> code, rmode = RelocInfo::CODE_TARGET_WITH_ID; } // 'code' is always generated ARM code, never THUMB code + ALLOW_HANDLE_DEREF(isolate(), "embedding raw address"); Call(reinterpret_cast<Address>(code.location()), rmode, cond, mode); } @@ -395,6 +398,7 @@ void MacroAssembler::StoreRoot(Register source, void MacroAssembler::LoadHeapObject(Register result, Handle<HeapObject> object) { + ALLOW_HANDLE_DEREF(isolate(), "using raw address"); if (isolate()->heap()->InNewSpace(*object)) { Handle<JSGlobalPropertyCell> cell = isolate()->factory()->NewJSGlobalPropertyCell(object); @@ -790,6 +794,116 @@ void MacroAssembler::Vmov(const DwVfpRegister dst, } +void MacroAssembler::ConvertNumberToInt32(Register object, + Register dst, + Register heap_number_map, + Register scratch1, + Register scratch2, + Register scratch3, + DwVfpRegister double_scratch1, + DwVfpRegister double_scratch2, + Label* not_number) { + Label done; + UntagAndJumpIfSmi(dst, object, &done); + JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number); + vldr(double_scratch1, FieldMemOperand(object, HeapNumber::kValueOffset)); + ECMAToInt32(dst, double_scratch1, + scratch1, scratch2, scratch3, double_scratch2); + + bind(&done); +} + + +void MacroAssembler::LoadNumber(Register object, + DwVfpRegister dst, + Register heap_number_map, + Register scratch, + Label* not_number) { + Label is_smi, done; + + UntagAndJumpIfSmi(scratch, object, &is_smi); + JumpIfNotHeapNumber(object, heap_number_map, scratch, not_number); + + vldr(dst, FieldMemOperand(object, HeapNumber::kValueOffset)); + b(&done); + + // Handle loading a double from a smi. + bind(&is_smi); + vmov(dst.high(), scratch); + vcvt_f64_s32(dst, dst.high()); + + bind(&done); +} + + +void MacroAssembler::LoadNumberAsInt32Double(Register object, + DwVfpRegister double_dst, + Register heap_number_map, + Register scratch, + DwVfpRegister double_scratch, + Label* not_int32) { + ASSERT(!scratch.is(object)); + ASSERT(!heap_number_map.is(object) && !heap_number_map.is(scratch)); + + Label done, obj_is_not_smi; + + UntagAndJumpIfNotSmi(scratch, object, &obj_is_not_smi); + vmov(double_scratch.low(), scratch); + vcvt_f64_s32(double_dst, double_scratch.low()); + b(&done); + + bind(&obj_is_not_smi); + JumpIfNotHeapNumber(object, heap_number_map, scratch, not_int32); + + // Load the number. + // Load the double value. + vldr(double_dst, FieldMemOperand(object, HeapNumber::kValueOffset)); + + TestDoubleIsInt32(double_dst, double_scratch); + // Jump to not_int32 if the operation did not succeed. 
+ b(ne, not_int32); + + bind(&done); +} + + +void MacroAssembler::LoadNumberAsInt32(Register object, + Register dst, + Register heap_number_map, + Register scratch, + DwVfpRegister double_scratch0, + DwVfpRegister double_scratch1, + Label* not_int32) { + ASSERT(!dst.is(object)); + ASSERT(!scratch.is(object)); + + Label done, maybe_undefined; + + UntagAndJumpIfSmi(dst, object, &done); + + JumpIfNotHeapNumber(object, heap_number_map, scratch, &maybe_undefined); + + // Object is a heap number. + // Convert the floating point value to a 32-bit integer. + // Load the double value. + vldr(double_scratch0, FieldMemOperand(object, HeapNumber::kValueOffset)); + + TryDoubleToInt32Exact(dst, double_scratch0, double_scratch1); + // Jump to not_int32 if the operation did not succeed. + b(ne, not_int32); + b(&done); + + bind(&maybe_undefined); + CompareRoot(object, Heap::kUndefinedValueRootIndex); + b(ne, not_int32); + // |undefined| is truncated to 0. + mov(dst, Operand(Smi::FromInt(0))); + // Fall through. + + bind(&done); +} + + void MacroAssembler::EnterFrame(StackFrame::Type type) { // r0-r3: preserved stm(db_w, sp, cp.bit() | fp.bit() | lr.bit()); @@ -837,14 +951,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) { // Optionally save all double registers. if (save_doubles) { - // Check CPU flags for number of registers, setting the Z condition flag. - CheckFor32DRegs(ip); - - // Push registers d0-d15, and possibly d16-d31, on the stack. - // If d16-d31 are not pushed, decrease the stack pointer instead. - vstm(db_w, sp, d16, d31, ne); - sub(sp, sp, Operand(16 * kDoubleSize), LeaveCC, eq); - vstm(db_w, sp, d0, d15); + SaveFPRegs(sp, ip); // Note that d0 will be accessible at // fp - 2 * kPointerSize - DwVfpRegister::kMaxNumRegisters * kDoubleSize, // since the sp slot and code slot were pushed after the fp. @@ -905,15 +1012,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, const int offset = 2 * kPointerSize; sub(r3, fp, Operand(offset + DwVfpRegister::kMaxNumRegisters * kDoubleSize)); - - // Check CPU flags for number of registers, setting the Z condition flag. - CheckFor32DRegs(ip); - - // Pop registers d0-d15, and possibly d16-d31, from r3. - // If d16-d31 are not popped, increase r3 instead. - vldm(ia_w, r3, d0, d15); - vldm(ia_w, r3, d16, d31, ne); - add(r3, r3, Operand(16 * kDoubleSize), LeaveCC, eq); + RestoreFPRegs(r3, ip); } // Clear top frame. @@ -1132,6 +1231,7 @@ void MacroAssembler::InvokeFunction(Register fun, void MacroAssembler::InvokeFunction(Handle<JSFunction> function, + const ParameterCount& expected, const ParameterCount& actual, InvokeFlag flag, const CallWrapper& call_wrapper, @@ -1143,7 +1243,6 @@ void MacroAssembler::InvokeFunction(Handle<JSFunction> function, LoadHeapObject(r1, function); ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset)); - ParameterCount expected(function->shared()->formal_parameter_count()); // We call indirectly through the code field in the function to // allow recompilation to take effect without changing any of the // call sites. @@ -1945,14 +2044,9 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg, Register key_reg, Register elements_reg, Register scratch1, - Register scratch2, - Register scratch3, - Register scratch4, Label* fail, int elements_offset) { Label smi_value, store; - Register mantissa_reg = scratch2; - Register exponent_reg = scratch3; // Handle smi values specially. 
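The smi path below now converts in two VFP instructions where it previously went through the deleted FloatingPointHelper::ConvertIntToDouble with four extra scratch registers. At C level the whole conversion is just (sketch):

// double stored = static_cast<double>(untagged_smi);
// i.e.  vmov s2, <untagged>  followed by  vcvt.f64.s32 d0, s2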
JumpIfSmi(value_reg, &smi_value); @@ -1977,9 +2071,8 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg, bind(&smi_value); Register untagged_value = scratch1; SmiUntag(untagged_value, value_reg); - FloatingPointHelper::ConvertIntToDouble( - this, untagged_value, FloatingPointHelper::kVFPRegisters, d0, - mantissa_reg, exponent_reg, scratch4, s2); + vmov(s2, untagged_value); + vcvt_f64_s32(d0, s2); bind(&store); add(scratch1, elements_reg, @@ -2171,8 +2264,9 @@ void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function, if (FLAG_log_timer_events) { FrameScope frame(this, StackFrame::MANUAL); PushSafepointRegisters(); - PrepareCallCFunction(0, r0); - CallCFunction(ExternalReference::log_enter_external_function(isolate()), 0); + PrepareCallCFunction(1, r0); + mov(r0, Operand(ExternalReference::isolate_address(isolate()))); + CallCFunction(ExternalReference::log_enter_external_function(isolate()), 1); PopSafepointRegisters(); } @@ -2185,8 +2279,9 @@ void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function, if (FLAG_log_timer_events) { FrameScope frame(this, StackFrame::MANUAL); PushSafepointRegisters(); - PrepareCallCFunction(0, r0); - CallCFunction(ExternalReference::log_leave_external_function(isolate()), 0); + PrepareCallCFunction(1, r0); + mov(r0, Operand(ExternalReference::isolate_address(isolate()))); + CallCFunction(ExternalReference::log_leave_external_function(isolate()), 1); PopSafepointRegisters(); } @@ -2238,7 +2333,7 @@ void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function, str(r5, MemOperand(r7, kLimitOffset)); mov(r4, r0); PrepareCallCFunction(1, r5); - mov(r0, Operand(ExternalReference::isolate_address())); + mov(r0, Operand(ExternalReference::isolate_address(isolate()))); CallCFunction( ExternalReference::delete_handle_scope_extensions(isolate()), 1); mov(r0, r4); @@ -2401,34 +2496,21 @@ void MacroAssembler::TryInt32Floor(Register result, } -void MacroAssembler::ECMAConvertNumberToInt32(Register source, - Register result, - Register input_low, - Register input_high, - Register scratch, - DwVfpRegister double_scratch1, - DwVfpRegister double_scratch2) { - vldr(double_scratch1, FieldMemOperand(source, HeapNumber::kValueOffset)); - ECMAToInt32(result, double_scratch1, double_scratch2, - scratch, input_high, input_low); -} - - void MacroAssembler::ECMAToInt32(Register result, DwVfpRegister double_input, - DwVfpRegister double_scratch, Register scratch, - Register input_high, - Register input_low) { - ASSERT(!input_high.is(result)); - ASSERT(!input_low.is(result)); - ASSERT(!input_low.is(input_high)); + Register scratch_high, + Register scratch_low, + DwVfpRegister double_scratch) { + ASSERT(!scratch_high.is(result)); + ASSERT(!scratch_low.is(result)); + ASSERT(!scratch_low.is(scratch_high)); ASSERT(!scratch.is(result) && - !scratch.is(input_high) && - !scratch.is(input_low)); + !scratch.is(scratch_high) && + !scratch.is(scratch_low)); ASSERT(!double_input.is(double_scratch)); - Label out_of_range, negate, done; + Label out_of_range, only_low, negate, done; vcvt_s32_f64(double_scratch.low(), double_input); vmov(result, double_scratch.low()); @@ -2438,8 +2520,8 @@ void MacroAssembler::ECMAToInt32(Register result, cmp(scratch, Operand(0x7ffffffe)); b(lt, &done); - vmov(input_low, input_high, double_input); - Ubfx(scratch, input_high, + vmov(scratch_low, scratch_high, double_input); + Ubfx(scratch, scratch_high, HeapNumber::kExponentShift, HeapNumber::kExponentBits); // Load scratch with exponent - 1. 
This is faster than loading // with exponent because Bias + 1 = 1024 which is an *ARM* immediate value. @@ -2454,59 +2536,45 @@ void MacroAssembler::ECMAToInt32(Register result, // If we reach this code, 31 <= exponent <= 83. // So, we don't have to handle cases where 0 <= exponent <= 20 for // which we would need to shift right the high part of the mantissa. - ECMAToInt32Tail(result, scratch, input_high, input_low, - &out_of_range, &negate, &done); -} - - -void MacroAssembler::ECMAToInt32Tail(Register result, - Register scratch, - Register input_high, - Register input_low, - Label* out_of_range, - Label* negate, - Label* done) { - Label only_low; - - // On entry, scratch contains exponent - 1. + // Scratch contains exponent - 1. // Load scratch with 52 - exponent (load with 51 - (exponent - 1)). rsb(scratch, scratch, Operand(51), SetCC); b(ls, &only_low); - // 21 <= exponent <= 51, shift input_low and input_high + // 21 <= exponent <= 51, shift scratch_low and scratch_high // to generate the result. - mov(input_low, Operand(input_low, LSR, scratch)); + mov(scratch_low, Operand(scratch_low, LSR, scratch)); // Scratch contains: 52 - exponent. // We needs: exponent - 20. // So we use: 32 - scratch = 32 - 52 + exponent = exponent - 20. rsb(scratch, scratch, Operand(32)); - Ubfx(result, input_high, + Ubfx(result, scratch_high, 0, HeapNumber::kMantissaBitsInTopWord); - // Set the implicit 1 before the mantissa part in input_high. + // Set the implicit 1 before the mantissa part in scratch_high. orr(result, result, Operand(1 << HeapNumber::kMantissaBitsInTopWord)); - orr(result, input_low, Operand(result, LSL, scratch)); - b(negate); + orr(result, scratch_low, Operand(result, LSL, scratch)); + b(&negate); - bind(out_of_range); + bind(&out_of_range); mov(result, Operand::Zero()); - b(done); + b(&done); bind(&only_low); - // 52 <= exponent <= 83, shift only input_low. + // 52 <= exponent <= 83, shift only scratch_low. // On entry, scratch contains: 52 - exponent. rsb(scratch, scratch, Operand::Zero()); - mov(result, Operand(input_low, LSL, scratch)); + mov(result, Operand(scratch_low, LSL, scratch)); - bind(negate); - // If input was positive, input_high ASR 31 equals 0 and - // input_high LSR 31 equals zero. + bind(&negate); + // If input was positive, scratch_high ASR 31 equals 0 and + // scratch_high LSR 31 equals zero. // New result = (result eor 0) + 0 = result. // If the input was negative, we have to negate the result. - // Input_high ASR 31 equals 0xffffffff and input_high LSR 31 equals 1. + // Input_high ASR 31 equals 0xffffffff and scratch_high LSR 31 equals 1. // New result = (result eor 0xffffffff) + 1 = 0 - result. 
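Taken together with the negate step just below, the routine implements ECMA-262 9.5 ToInt32 for doubles whose magnitude defeats the direct vcvt. A portable C++ sketch of the same bit manipulation (assumes IEEE-754 doubles; illustrative only, not the V8 source):

#include <cstdint>
#include <cstring>

int32_t EcmaToInt32Sketch(double input) {
  uint64_t bits;
  std::memcpy(&bits, &input, sizeof bits);
  uint32_t hi = static_cast<uint32_t>(bits >> 32);  // sign, exponent, mantissa[51:32]
  uint32_t lo = static_cast<uint32_t>(bits);        // mantissa[31:0]
  int exponent = static_cast<int>((hi >> 20) & 0x7ff) - 1023;
  if (exponent < 31) return static_cast<int32_t>(input);  // vcvt is exact here
  if (exponent > 83) return 0;  // all integer bits shifted out (also NaN/inf)
  uint32_t mantissa_hi = (hi & 0xfffff) | 0x100000;  // restore the implicit 1
  uint32_t result;
  if (exponent <= 51) {
    int shift = 52 - exponent;  // fractional bits to drop
    result = (lo >> shift) | (mantissa_hi << (32 - shift));
  } else {
    result = lo << (exponent - 52);  // 52 <= exponent <= 83: only lo survives
  }
  uint32_t sign_mask = (hi & 0x80000000u) ? 0xffffffffu : 0u;
  return static_cast<int32_t>((result ^ sign_mask) - sign_mask);  // negate if input < 0
}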
- eor(result, result, Operand(input_high, ASR, 31)); - add(result, result, Operand(input_high, LSR, 31)); + eor(result, result, Operand(scratch_high, ASR, 31)); + add(result, result, Operand(scratch_high, LSR, 31)); - bind(done); + bind(&done); } @@ -2688,16 +2756,6 @@ void MacroAssembler::Assert(Condition cond, const char* msg) { } -void MacroAssembler::AssertRegisterIsRoot(Register reg, - Heap::RootListIndex index) { - if (emit_debug_code()) { - LoadRoot(ip, index); - cmp(reg, ip); - Check(eq, "Register did not match expected root"); - } -} - - void MacroAssembler::AssertFastElements(Register elements) { if (emit_debug_code()) { ASSERT(!elements.is(ip)); @@ -2991,12 +3049,10 @@ void MacroAssembler::AssertName(Register object) { -void MacroAssembler::AssertRootValue(Register src, - Heap::RootListIndex root_value_index, - const char* message) { +void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) { if (emit_debug_code()) { - CompareRoot(src, root_value_index); - Check(eq, message); + CompareRoot(reg, index); + Check(eq, "HeapNumberMap register clobbered."); } } @@ -3006,7 +3062,7 @@ void MacroAssembler::JumpIfNotHeapNumber(Register object, Register scratch, Label* on_not_heap_number) { ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset)); - AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); + AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); cmp(scratch, heap_number_map); b(ne, on_not_heap_number); } @@ -3063,7 +3119,7 @@ void MacroAssembler::AllocateHeapNumber(Register result, tagging_mode == TAG_RESULT ? TAG_OBJECT : NO_ALLOCATION_FLAGS); // Store heap number map in the allocated object. - AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); + AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); if (tagging_mode == TAG_RESULT) { str(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset)); } else { @@ -3183,6 +3239,22 @@ void MacroAssembler::CheckFor32DRegs(Register scratch) { } +void MacroAssembler::SaveFPRegs(Register location, Register scratch) { + CheckFor32DRegs(scratch); + vstm(db_w, location, d16, d31, ne); + sub(location, location, Operand(16 * kDoubleSize), LeaveCC, eq); + vstm(db_w, location, d0, d15); +} + + +void MacroAssembler::RestoreFPRegs(Register location, Register scratch) { + CheckFor32DRegs(scratch); + vldm(ia_w, location, d0, d15); + vldm(ia_w, location, d16, d31, ne); + add(location, location, Operand(16 * kDoubleSize), LeaveCC, eq); +} + + void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii( Register first, Register second, diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h index f9f672bac6..b736c8f3af 100644 --- a/deps/v8/src/arm/macro-assembler-arm.h +++ b/deps/v8/src/arm/macro-assembler-arm.h @@ -178,6 +178,7 @@ class MacroAssembler: public Assembler { void LoadHeapObject(Register dst, Handle<HeapObject> object); void LoadObject(Register result, Handle<Object> object) { + ALLOW_HANDLE_DEREF(isolate(), "heap object check"); if (object->IsHeapObject()) { LoadHeapObject(result, Handle<HeapObject>::cast(object)); } else { @@ -495,6 +496,54 @@ class MacroAssembler: public Assembler { const double imm, const Register scratch = no_reg); + // Converts the smi or heap number in object to an int32 using the rules + // for ToInt32 as described in ECMAScript 9.5.: the value is truncated + // and brought into the range -2^31 .. +2^31 - 1. 
+ void ConvertNumberToInt32(Register object, + Register dst, + Register heap_number_map, + Register scratch1, + Register scratch2, + Register scratch3, + DwVfpRegister double_scratch1, + DwVfpRegister double_scratch2, + Label* not_int32); + + // Loads the number from object into dst register. + // If |object| is neither a smi nor a heap number, |not_number| is jumped to + // with |object| still intact. + void LoadNumber(Register object, + DwVfpRegister dst, + Register heap_number_map, + Register scratch, + Label* not_number); + + // Loads the number from object into double_dst in the double format. + // Control will jump to not_int32 if the value cannot be exactly represented + // by a 32-bit integer. + // Floating point values in the 32-bit integer range that are not exact + // integers won't be loaded. + void LoadNumberAsInt32Double(Register object, + DwVfpRegister double_dst, + Register heap_number_map, + Register scratch, + DwVfpRegister double_scratch, + Label* not_int32); + + // Loads the number from object into dst as a 32-bit integer. + // Control will jump to not_int32 if the object cannot be exactly represented + // by a 32-bit integer. + // Floating point values in the 32-bit integer range that are not exact + // integers won't be converted. + void LoadNumberAsInt32(Register object, + Register dst, + Register heap_number_map, + Register scratch, + DwVfpRegister double_scratch0, + DwVfpRegister double_scratch1, + Label* not_int32); + + + // Enter exit frame. // stack_space - extra stack space, used for alignment before call to C. void EnterExitFrame(bool save_doubles, int stack_space = 0); @@ -573,6 +622,7 @@ class MacroAssembler: public Assembler { CallKind call_kind); void InvokeFunction(Handle<JSFunction> function, + const ParameterCount& expected, const ParameterCount& actual, InvokeFlag flag, const CallWrapper& call_wrapper, @@ -831,16 +881,11 @@ class MacroAssembler: public Assembler { // Check to see if maybe_number can be stored as a double in // FastDoubleElements. If it can, store it at the index specified by key in - // the FastDoubleElements array elements. Otherwise jump to fail, in which - // case scratch2, scratch3 and scratch4 are unmodified. + // the FastDoubleElements array elements. Otherwise jump to fail. void StoreNumberToDoubleElements(Register value_reg, Register key_reg, - // All regs below here overwritten. Register elements_reg, Register scratch1, - Register scratch2, - Register scratch3, - Register scratch4, Label* fail, int elements_offset = 0); @@ -972,31 +1017,28 @@ class MacroAssembler: public Assembler { Label* done, Label* exact); - // Performs a truncating conversion of a heap floating point number as used by - // the JS bitwise operations. See ECMA-262 9.5: ToInt32. - // Exits with 'result' holding the answer. - void ECMAConvertNumberToInt32(Register source, - Register result, - Register input_low, - Register input_high, - Register scratch, - DwVfpRegister double_scratch1, - DwVfpRegister double_scratch2); - // Performs a truncating conversion of a floating point number as used by // the JS bitwise operations. See ECMA-262 9.5: ToInt32. + // Double_scratch must be between d0 and d15. // Exits with 'result' holding the answer and all other registers clobbered. void ECMAToInt32(Register result, DwVfpRegister double_input, - DwVfpRegister double_scratch, Register scratch, - Register input_high, - Register input_low); + Register scratch_high, + Register scratch_low, + DwVfpRegister double_scratch); // Check whether d16-d31 are available on the CPU.
The result is given by the // Z condition flag: Z==0 if d16-d31 available, Z==1 otherwise. void CheckFor32DRegs(Register scratch); + // Does a runtime check for 16/32 FP registers. Either way, pushes 32 double + // values to location, saving [d0..(d15|d31)]. + void SaveFPRegs(Register location, Register scratch); + + // Does a runtime check for 16/32 FP registers. Either way, pops 32 double + // values from location, restoring [d0..(d15|d31)]. + void RestoreFPRegs(Register location, Register scratch); // --------------------------------------------------------------------------- // Runtime calls @@ -1120,7 +1162,6 @@ class MacroAssembler: public Assembler { // Calls Abort(msg) if the condition cond is not satisfied. // Use --debug_code to enable. void Assert(Condition cond, const char* msg); - void AssertRegisterIsRoot(Register reg, Heap::RootListIndex index); void AssertFastElements(Register elements); // Like Assert(), but always enabled. @@ -1230,11 +1271,9 @@ class MacroAssembler: public Assembler { // Abort execution if argument is not a name, enabled via --debug-code. void AssertName(Register object); - // Abort execution if argument is not the root value with the given index, + // Abort execution if reg is not the root value with the given index, // enabled via --debug-code. - void AssertRootValue(Register src, - Heap::RootListIndex root_value_index, - const char* message); + void AssertIsRoot(Register reg, Heap::RootListIndex index); // --------------------------------------------------------------------------- // HeapNumber utilities @@ -1365,16 +1404,6 @@ class MacroAssembler: public Assembler { // it. See the implementation for register usage. void JumpToHandlerEntry(); - // Helper for ECMAToInt32VFP and ECMAToInt32NoVFP. - // It is expected that 31 <= exponent <= 83, and scratch is exponent - 1. - void ECMAToInt32Tail(Register result, - Register scratch, - Register input_high, - Register input_low, - Label* out_of_range, - Label* negate, - Label* done); - // Compute memory operands for safepoint stack slots. static int SafepointRegisterStackIndex(int reg_code); MemOperand SafepointRegisterSlot(Register reg); diff --git a/deps/v8/src/arm/regexp-macro-assembler-arm.cc b/deps/v8/src/arm/regexp-macro-assembler-arm.cc index 0cb80c0ac2..da7afee3fb 100644 --- a/deps/v8/src/arm/regexp-macro-assembler-arm.cc +++ b/deps/v8/src/arm/regexp-macro-assembler-arm.cc @@ -380,12 +380,12 @@ void RegExpMacroAssemblerARM::CheckNotBackReferenceIgnoreCase( // Address of current input position. __ add(r1, current_input_offset(), Operand(end_of_input_address())); // Isolate.
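// (The isolate is threaded through as an explicit argument so the external
// reference can be built without the Isolate::Current() lookup; see the
// matching ExternalReference::isolate_address(Isolate*) change in
// assembler.h/.cc further down.)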
- __ mov(r3, Operand(ExternalReference::isolate_address())); + __ mov(r3, Operand(ExternalReference::isolate_address(isolate()))); { AllowExternalCallThatCantCauseGC scope(masm_); ExternalReference function = - ExternalReference::re_case_insensitive_compare_uc16(masm_->isolate()); + ExternalReference::re_case_insensitive_compare_uc16(isolate()); __ CallCFunction(function, argument_count); } @@ -682,7 +682,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) { Label stack_ok; ExternalReference stack_limit = - ExternalReference::address_of_stack_limit(masm_->isolate()); + ExternalReference::address_of_stack_limit(isolate()); __ mov(r0, Operand(stack_limit)); __ ldr(r0, MemOperand(r0)); __ sub(r0, sp, r0, SetCC); @@ -893,9 +893,9 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) { __ PrepareCallCFunction(num_arguments, r0); __ mov(r0, backtrack_stackpointer()); __ add(r1, frame_pointer(), Operand(kStackHighEnd)); - __ mov(r2, Operand(ExternalReference::isolate_address())); + __ mov(r2, Operand(ExternalReference::isolate_address(isolate()))); ExternalReference grow_stack = - ExternalReference::re_grow_stack(masm_->isolate()); + ExternalReference::re_grow_stack(isolate()); __ CallCFunction(grow_stack, num_arguments); // If return NULL, we have failed to grow the stack, and // must exit with a stack-overflow exception. @@ -1111,7 +1111,7 @@ void RegExpMacroAssemblerARM::CallCheckStackGuardState(Register scratch) { __ mov(r1, Operand(masm_->CodeObject())); // r0 becomes return address pointer. ExternalReference stack_guard_check = - ExternalReference::re_check_stack_guard_state(masm_->isolate()); + ExternalReference::re_check_stack_guard_state(isolate()); CallCFunctionUsingStub(stack_guard_check, num_arguments); } @@ -1292,7 +1292,7 @@ void RegExpMacroAssemblerARM::Pop(Register target) { void RegExpMacroAssemblerARM::CheckPreemption() { // Check for preemption. ExternalReference stack_limit = - ExternalReference::address_of_stack_limit(masm_->isolate()); + ExternalReference::address_of_stack_limit(isolate()); __ mov(r0, Operand(stack_limit)); __ ldr(r0, MemOperand(r0)); __ cmp(sp, r0); @@ -1302,7 +1302,7 @@ void RegExpMacroAssemblerARM::CheckPreemption() { void RegExpMacroAssemblerARM::CheckStackLimit() { ExternalReference stack_limit = - ExternalReference::address_of_regexp_stack_limit(masm_->isolate()); + ExternalReference::address_of_regexp_stack_limit(isolate()); __ mov(r0, Operand(stack_limit)); __ ldr(r0, MemOperand(r0)); __ cmp(backtrack_stackpointer(), Operand(r0)); diff --git a/deps/v8/src/arm/regexp-macro-assembler-arm.h b/deps/v8/src/arm/regexp-macro-assembler-arm.h index c45669ae89..921d8f5474 100644 --- a/deps/v8/src/arm/regexp-macro-assembler-arm.h +++ b/deps/v8/src/arm/regexp-macro-assembler-arm.h @@ -30,6 +30,7 @@ #include "arm/assembler-arm.h" #include "arm/assembler-arm-inl.h" +#include "macro-assembler.h" namespace v8 { namespace internal { @@ -223,6 +224,7 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler { inline void CallCFunctionUsingStub(ExternalReference function, int num_arguments); + Isolate* isolate() const { return masm_->isolate(); } MacroAssembler* masm_; diff --git a/deps/v8/src/arm/simulator-arm.cc b/deps/v8/src/arm/simulator-arm.cc index ea79310447..036fd7f877 100644 --- a/deps/v8/src/arm/simulator-arm.cc +++ b/deps/v8/src/arm/simulator-arm.cc @@ -26,7 +26,7 @@ // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
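// (A note on the <math.h> to <cmath> swaps in this and the other files in
// the patch: the floating-point classification helpers are now taken from
// namespace std, so the call sites below spell them std::isnan, std::isinf
// and std::isfinite instead of relying on the C99 macros.)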
#include <stdlib.h> -#include <math.h> +#include <cmath> #include <cstdarg> #include "v8.h" @@ -331,7 +331,7 @@ void ArmDebugger::Debug() { PrintF("\n"); } } - for (int i = 0; i < kNumVFPDoubleRegisters; i++) { + for (int i = 0; i < DwVfpRegister::NumRegisters(); i++) { dvalue = GetVFPDoubleRegisterValue(i); uint64_t as_words = BitCast<uint64_t>(dvalue); PrintF("%3s: %f 0x%08x %08x\n", @@ -1297,7 +1297,7 @@ bool Simulator::OverflowFrom(int32_t alu_out, // Support for VFP comparisons. void Simulator::Compute_FPSCR_Flags(double val1, double val2) { - if (isnan(val1) || isnan(val2)) { + if (std::isnan(val1) || std::isnan(val2)) { n_flag_FPSCR_ = false; z_flag_FPSCR_ = false; c_flag_FPSCR_ = true; @@ -1866,7 +1866,7 @@ void Simulator::SoftwareInterrupt(Instruction* instr) { double Simulator::canonicalizeNaN(double value) { - return (FPSCR_default_NaN_mode_ && isnan(value)) ? + return (FPSCR_default_NaN_mode_ && std::isnan(value)) ? FixedDoubleArray::canonical_not_the_hole_nan_as_double() : value; } @@ -2947,7 +2947,7 @@ void Simulator::DecodeVCMP(Instruction* instr) { // Raise exceptions for quiet NaNs if necessary. if (instr->Bit(7) == 1) { - if (isnan(dd_value)) { + if (std::isnan(dd_value)) { inv_op_vfp_flag_ = true; } } diff --git a/deps/v8/src/arm/stub-cache-arm.cc b/deps/v8/src/arm/stub-cache-arm.cc index f22acb4709..ddcbd623ba 100644 --- a/deps/v8/src/arm/stub-cache-arm.cc +++ b/deps/v8/src/arm/stub-cache-arm.cc @@ -726,7 +726,7 @@ static void PushInterceptorArguments(MacroAssembler* masm, __ push(holder); __ ldr(scratch, FieldMemOperand(scratch, InterceptorInfo::kDataOffset)); __ push(scratch); - __ mov(scratch, Operand(ExternalReference::isolate_address())); + __ mov(scratch, Operand(ExternalReference::isolate_address(masm->isolate()))); __ push(scratch); } @@ -798,7 +798,7 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm, } else { __ Move(r6, call_data); } - __ mov(r7, Operand(ExternalReference::isolate_address())); + __ mov(r7, Operand(ExternalReference::isolate_address(masm->isolate()))); // Store JS function, call data and isolate. __ stm(ib, sp, r5.bit() | r6.bit() | r7.bit()); @@ -954,7 +954,9 @@ class CallInterceptorCompiler BASE_EMBEDDED { CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_) ? CALL_AS_FUNCTION : CALL_AS_METHOD; - __ InvokeFunction(optimization.constant_function(), arguments_, + Handle<JSFunction> function = optimization.constant_function(); + ParameterCount expected(function); + __ InvokeFunction(function, expected, arguments_, JUMP_FUNCTION, NullCallWrapper(), call_kind); } @@ -1165,7 +1167,7 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object, } // Log the check depth. - LOG(masm()->isolate(), IntEvent("check-maps-depth", depth + 1)); + LOG(isolate(), IntEvent("check-maps-depth", depth + 1)); if (!holder.is_identical_to(first) || check == CHECK_ALL_MAPS) { // Check the holder map. 
@@ -1293,11 +1295,11 @@ void BaseLoadStubCompiler::GenerateLoadCallback( __ ldr(scratch3(), FieldMemOperand(scratch3(), ExecutableAccessorInfo::kDataOffset)); } else { - __ Move(scratch3(), Handle<Object>(callback->data(), - callback->GetIsolate())); + __ Move(scratch3(), Handle<Object>(callback->data(), isolate())); } __ Push(reg, scratch3()); - __ mov(scratch3(), Operand(ExternalReference::isolate_address())); + __ mov(scratch3(), + Operand(ExternalReference::isolate_address(isolate()))); __ Push(scratch3(), name()); __ mov(r0, sp); // r0 = Handle<Name> @@ -1313,10 +1315,8 @@ void BaseLoadStubCompiler::GenerateLoadCallback( const int kStackUnwindSpace = 5; Address getter_address = v8::ToCData<Address>(callback->getter()); ApiFunction fun(getter_address); - ExternalReference ref = - ExternalReference(&fun, - ExternalReference::DIRECT_GETTER_CALL, - masm()->isolate()); + ExternalReference ref = ExternalReference( + &fun, ExternalReference::DIRECT_GETTER_CALL, isolate()); __ CallApiFunctionAndReturn(ref, kStackUnwindSpace); } @@ -1404,7 +1404,7 @@ void BaseLoadStubCompiler::GenerateLoadInterceptor( ExternalReference ref = ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForLoad), - masm()->isolate()); + isolate()); __ TailCallExternalReference(ref, 6, 1); } } @@ -1620,9 +1620,8 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall( __ b(gt, &call_builtin); __ ldr(r4, MemOperand(sp, (argc - 1) * kPointerSize)); - __ StoreNumberToDoubleElements( - r4, r0, elements, r5, r2, r3, r9, - &call_builtin, argc * kDoubleSize); + __ StoreNumberToDoubleElements(r4, r0, elements, r5, + &call_builtin, argc * kDoubleSize); // Save new length. __ str(r0, FieldMemOperand(receiver, JSArray::kLengthOffset)); @@ -1715,11 +1714,10 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall( __ CheckFastObjectElements(r7, r7, &call_builtin); __ bind(&no_fast_elements_check); - Isolate* isolate = masm()->isolate(); ExternalReference new_space_allocation_top = - ExternalReference::new_space_allocation_top_address(isolate); + ExternalReference::new_space_allocation_top_address(isolate()); ExternalReference new_space_allocation_limit = - ExternalReference::new_space_allocation_limit_address(isolate); + ExternalReference::new_space_allocation_limit_address(isolate()); const int kAllocationDelta = 4; // Load top and check if it is the end of elements. @@ -1758,10 +1756,8 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall( __ Ret(); } __ bind(&call_builtin); - __ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPush, - masm()->isolate()), - argc + 1, - 1); + __ TailCallExternalReference( + ExternalReference(Builtins::c_ArrayPush, isolate()), argc + 1, 1); } // Handle call cache miss. @@ -1845,10 +1841,8 @@ Handle<Code> CallStubCompiler::CompileArrayPopCall( __ Ret(); __ bind(&call_builtin); - __ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPop, - masm()->isolate()), - argc + 1, - 1); + __ TailCallExternalReference( + ExternalReference(Builtins::c_ArrayPop, isolate()), argc + 1, 1); // Handle call cache miss. __ bind(&miss); @@ -2085,8 +2079,9 @@ Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall( // Tail call the full function. We do not have to patch the receiver // because the function makes no use of it. 
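// (A note on the InvokeFunction signature change seen throughout this
// file: 'expected' is the callee's declared formal parameter count, built
// from the Handle<JSFunction> as ParameterCount expected(function), while
// 'actual' is what the call site supplies via arguments(); the invoke
// sequence reconciles the two when they differ.)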
__ bind(&slow); - __ InvokeFunction( - function, arguments(), JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD); + ParameterCount expected(function); + __ InvokeFunction(function, expected, arguments(), + JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD); __ bind(&miss); // r2: function name. @@ -2196,8 +2191,9 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall( __ bind(&slow); // Tail call the full function. We do not have to patch the receiver // because the function makes no use of it. - __ InvokeFunction( - function, arguments(), JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD); + ParameterCount expected(function); + __ InvokeFunction(function, expected, arguments(), + JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD); __ bind(&miss); // r2: function name. @@ -2295,8 +2291,9 @@ Handle<Code> CallStubCompiler::CompileMathAbsCall( // Tail call the full function. We do not have to patch the receiver // because the function makes no use of it. __ bind(&slow); - __ InvokeFunction( - function, arguments(), JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD); + ParameterCount expected(function); + __ InvokeFunction(function, expected, arguments(), + JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD); __ bind(&miss); // r2: function name. @@ -2384,8 +2381,7 @@ void CallStubCompiler::CompileHandlerFrontend(Handle<Object> object, ASSERT(!object->IsGlobalObject() || check == RECEIVER_MAP_CHECK); switch (check) { case RECEIVER_MAP_CHECK: - __ IncrementCounter(masm()->isolate()->counters()->call_const(), - 1, r0, r3); + __ IncrementCounter(isolate()->counters()->call_const(), 1, r0, r3); // Check that the maps haven't changed. CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4, @@ -2470,8 +2466,9 @@ void CallStubCompiler::CompileHandlerBackend(Handle<JSFunction> function) { CallKind call_kind = CallICBase::Contextual::decode(extra_state_) ? CALL_AS_FUNCTION : CALL_AS_METHOD; - __ InvokeFunction( - function, arguments(), JUMP_FUNCTION, NullCallWrapper(), call_kind); + ParameterCount expected(function); + __ InvokeFunction(function, expected, arguments(), + JUMP_FUNCTION, NullCallWrapper(), call_kind); } @@ -2574,7 +2571,7 @@ Handle<Code> CallStubCompiler::CompileCallGlobal( __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset)); // Jump to the cached code (tail call). - Counters* counters = masm()->isolate()->counters(); + Counters* counters = isolate()->counters(); __ IncrementCounter(counters->call_global_inline(), 1, r3, r4); ParameterCount expected(function->shared()->formal_parameter_count()); CallKind call_kind = CallICBase::Contextual::decode(extra_state_) @@ -2617,8 +2614,7 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback( // Do tail-call to the runtime system. ExternalReference store_callback_property = - ExternalReference(IC_Utility(IC::kStoreCallbackProperty), - masm()->isolate()); + ExternalReference(IC_Utility(IC::kStoreCallbackProperty), isolate()); __ TailCallExternalReference(store_callback_property, 4, 1); // Handle store cache miss. @@ -2653,8 +2649,9 @@ void StoreStubCompiler::GenerateStoreViaSetter( // Call the JavaScript setter with receiver and value on the stack. 
__ Push(r1, r0); ParameterCount actual(1); - __ InvokeFunction(setter, actual, CALL_FUNCTION, NullCallWrapper(), - CALL_AS_METHOD); + ParameterCount expected(setter); + __ InvokeFunction(setter, expected, actual, + CALL_FUNCTION, NullCallWrapper(), CALL_AS_METHOD); } else { // If we generate a global code snippet for deoptimization only, remember // the place to continue after deoptimization. @@ -2700,8 +2697,7 @@ Handle<Code> StoreStubCompiler::CompileStoreInterceptor( // Do tail-call to the runtime system. ExternalReference store_ic_property = - ExternalReference(IC_Utility(IC::kStoreInterceptorProperty), - masm()->isolate()); + ExternalReference(IC_Utility(IC::kStoreInterceptorProperty), isolate()); __ TailCallExternalReference(store_ic_property, 4, 1); // Handle store cache miss. @@ -2740,7 +2736,7 @@ Handle<Code> StoreStubCompiler::CompileStoreGlobal( FieldMemOperand(scratch1(), JSGlobalPropertyCell::kValueOffset)); // Cells are always rescanned, so no write barrier here. - Counters* counters = masm()->isolate()->counters(); + Counters* counters = isolate()->counters(); __ IncrementCounter( counters->named_store_global_inline(), 1, scratch1(), scratch2()); __ Ret(); @@ -2838,8 +2834,9 @@ void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm, // Call the JavaScript getter with the receiver on the stack. __ push(r0); ParameterCount actual(0); - __ InvokeFunction(getter, actual, CALL_FUNCTION, NullCallWrapper(), - CALL_AS_METHOD); + ParameterCount expected(getter); + __ InvokeFunction(getter, expected, actual, + CALL_FUNCTION, NullCallWrapper(), CALL_AS_METHOD); } else { // If we generate a global code snippet for deoptimization only, remember // the place to continue after deoptimization. @@ -2884,7 +2881,7 @@ Handle<Code> LoadStubCompiler::CompileLoadGlobal( HandlerFrontendFooter(&success, &miss); __ bind(&success); - Counters* counters = masm()->isolate()->counters(); + Counters* counters = isolate()->counters(); __ IncrementCounter(counters->named_load_global_stub(), 1, r1, r3); __ mov(r0, r4); __ Ret(); @@ -3088,7 +3085,7 @@ Handle<Code> ConstructStubCompiler::CompileConstructStub( // Remove caller arguments and receiver from the stack and return. __ add(sp, sp, Operand(r1, LSL, kPointerSizeLog2)); __ add(sp, sp, Operand(kPointerSize)); - Counters* counters = masm()->isolate()->counters(); + Counters* counters = isolate()->counters(); __ IncrementCounter(counters->constructed_objects(), 1, r1, r2); __ IncrementCounter(counters->constructed_objects_stub(), 1, r1, r2); __ Jump(lr); @@ -3096,7 +3093,7 @@ Handle<Code> ConstructStubCompiler::CompileConstructStub( // Jump to the generic stub in case the specialized code cannot handle the // construction. __ bind(&generic_stub_call); - Handle<Code> code = masm()->isolate()->builtins()->JSConstructStubGeneric(); + Handle<Code> code = isolate()->builtins()->JSConstructStubGeneric(); __ Jump(code, RelocInfo::CODE_TARGET); // Return the generated code. @@ -3246,14 +3243,10 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( StoreIntAsFloat(masm, r3, r4, r5, r7); break; case EXTERNAL_DOUBLE_ELEMENTS: + __ vmov(s2, r5); + __ vcvt_f64_s32(d0, s2); __ add(r3, r3, Operand(key, LSL, 2)); // r3: effective address of the double element - FloatingPointHelper::Destination destination; - destination = FloatingPointHelper::kVFPRegisters; - FloatingPointHelper::ConvertIntToDouble( - masm, r5, destination, - d0, r6, r7, // These are: double_dst, dst_mantissa, dst_exponent. - r4, s2); // These are: scratch2, single_scratch. 
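+ // (The vmov / vcvt_f64_s32 pair above replaces the FloatingPointHelper
+ // call: it moves the untagged int32 into s2 and widens it to a double in
+ // d0, which the vstr below then stores into the element.)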
__ vstr(d0, r3, 0); break; case FAST_ELEMENTS: @@ -3303,7 +3296,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( // not include -kHeapObjectTag into it. __ sub(r5, value, Operand(kHeapObjectTag)); __ vldr(d0, r5, HeapNumber::kValueOffset); - __ ECMAToInt32(r5, d0, d1, r6, r7, r9); + __ ECMAToInt32(r5, d0, r6, r7, r9, d1); switch (elements_kind) { case EXTERNAL_BYTE_ELEMENTS: @@ -3537,9 +3530,6 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement( // -- r3 : scratch (elements backing store) // -- r4 : scratch // -- r5 : scratch - // -- r6 : scratch - // -- r7 : scratch - // -- r9 : scratch // ----------------------------------- Label miss_force_generic, transition_elements_kind, grow, slow; Label finish_store, check_capacity; @@ -3550,9 +3540,6 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement( Register elements_reg = r3; Register scratch1 = r4; Register scratch2 = r5; - Register scratch3 = r6; - Register scratch4 = r7; - Register scratch5 = r9; Register length_reg = r7; // This stub is meant to be tail-jumped to, the receiver must already @@ -3581,15 +3568,8 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement( } __ bind(&finish_store); - __ StoreNumberToDoubleElements(value_reg, - key_reg, - // All registers after this are overwritten. - elements_reg, - scratch1, - scratch3, - scratch4, - scratch2, - &transition_elements_kind); + __ StoreNumberToDoubleElements(value_reg, key_reg, elements_reg, + scratch1, &transition_elements_kind); __ Ret(); // Handle store cache miss, replacing the ic with the generic stub. @@ -3636,15 +3616,8 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement( FieldMemOperand(elements_reg, FixedDoubleArray::kLengthOffset)); __ mov(scratch1, elements_reg); - __ StoreNumberToDoubleElements(value_reg, - key_reg, - // All registers after this are overwritten. - scratch1, - scratch2, - scratch3, - scratch4, - scratch5, - &transition_elements_kind); + __ StoreNumberToDoubleElements(value_reg, key_reg, scratch1, + scratch2, &transition_elements_kind); __ mov(scratch1, Operand(kHoleNanLower32)); __ mov(scratch2, Operand(kHoleNanUpper32)); diff --git a/deps/v8/src/assembler.cc b/deps/v8/src/assembler.cc index 5bde8c5383..fff588af35 100644 --- a/deps/v8/src/assembler.cc +++ b/deps/v8/src/assembler.cc @@ -34,7 +34,7 @@ #include "assembler.h" -#include <math.h> // For cos, log, pow, sin, tan, etc. +#include <cmath> #include "api.h" #include "builtins.h" #include "counters.h" @@ -969,8 +969,8 @@ ExternalReference::ExternalReference(const Runtime::Function* f, : address_(Redirect(isolate, f->entry)) {} -ExternalReference ExternalReference::isolate_address() { - return ExternalReference(Isolate::Current()); +ExternalReference ExternalReference::isolate_address(Isolate* isolate) { + return ExternalReference(isolate); } @@ -1459,10 +1459,11 @@ double power_helper(double x, double y) { return power_double_int(x, y_int); // Returns 1 if exponent is 0. } if (y == 0.5) { - return (isinf(x)) ? V8_INFINITY : fast_sqrt(x + 0.0); // Convert -0 to +0. + return (std::isinf(x)) ? V8_INFINITY + : fast_sqrt(x + 0.0); // Convert -0 to +0. } if (y == -0.5) { - return (isinf(x)) ? 0 : 1.0 / fast_sqrt(x + 0.0); // Convert -0 to +0. + return (std::isinf(x)) ? 0 : 1.0 / fast_sqrt(x + 0.0); // Convert -0 to +0. } return power_double_double(x, y); } @@ -1492,7 +1493,7 @@ double power_double_double(double x, double y) { (!defined(__MINGW64_VERSION_RC) || __MINGW64_VERSION_RC < 1) // MinGW64 has a custom implementation for pow. 
This handles certain // special cases that are different. - if ((x == 0.0 || isinf(x)) && isfinite(y)) { + if ((x == 0.0 || std::isinf(x)) && std::isfinite(y)) { double f; if (modf(y, &f) != 0.0) return ((x == 0.0) ^ (y > 0)) ? V8_INFINITY : 0; } @@ -1505,7 +1506,9 @@ double power_double_double(double x, double y) { // The checks for special cases can be dropped in ia32 because it has already // been done in generated code before bailing out here. - if (isnan(y) || ((x == 1 || x == -1) && isinf(y))) return OS::nan_value(); + if (std::isnan(y) || ((x == 1 || x == -1) && std::isinf(y))) { + return OS::nan_value(); + } return pow(x, y); } diff --git a/deps/v8/src/assembler.h b/deps/v8/src/assembler.h index 381ae0a801..32424cfb6b 100644 --- a/deps/v8/src/assembler.h +++ b/deps/v8/src/assembler.h @@ -50,7 +50,7 @@ class ApiFunction; namespace internal { -struct StatsCounter; +class StatsCounter; // ----------------------------------------------------------------------------- // Platform independent assembler base class. @@ -681,7 +681,7 @@ class ExternalReference BASE_EMBEDDED { explicit ExternalReference(const SCTableReference& table_ref); // Isolate::Current() as an external reference. - static ExternalReference isolate_address(); + static ExternalReference isolate_address(Isolate* isolate); // One-of-a-kind references. These references are not part of a general // pattern. This means that they have to be added to the diff --git a/deps/v8/src/ast.cc b/deps/v8/src/ast.cc index e8b065c4ea..d241355fc1 100644 --- a/deps/v8/src/ast.cc +++ b/deps/v8/src/ast.cc @@ -27,7 +27,7 @@ #include "ast.h" -#include <math.h> // For isfinite. +#include <cmath> // For isfinite. #include "builtins.h" #include "code-stubs.h" #include "conversions.h" @@ -70,6 +70,11 @@ bool Expression::IsNullLiteral() { } +bool Expression::IsUndefinedLiteral() { + return AsLiteral() != NULL && AsLiteral()->handle()->IsUndefined(); +} + + VariableProxy::VariableProxy(Isolate* isolate, Variable* var) : Expression(isolate), name_(var->name()), @@ -241,8 +246,8 @@ bool IsEqualNumber(void* first, void* second) { if (h2->IsSmi()) return false; Handle<HeapNumber> n1 = Handle<HeapNumber>::cast(h1); Handle<HeapNumber> n2 = Handle<HeapNumber>::cast(h2); - ASSERT(isfinite(n1->value())); - ASSERT(isfinite(n2->value())); + ASSERT(std::isfinite(n1->value())); + ASSERT(std::isfinite(n2->value())); return n1->value() == n2->value(); } @@ -352,7 +357,8 @@ static bool IsVoidOfLiteral(Expression* expr) { } -// Check for the pattern: void <literal> equals <expression> +// Check for the pattern: void <literal> equals <expression> or +// undefined equals <expression> static bool MatchLiteralCompareUndefined(Expression* left, Token::Value op, Expression* right, @@ -361,6 +367,10 @@ static bool MatchLiteralCompareUndefined(Expression* left, *expr = right; return true; } + if (left->IsUndefinedLiteral() && Token::IsEqualityOp(op)) { + *expr = right; + return true; + } return false; } diff --git a/deps/v8/src/ast.h b/deps/v8/src/ast.h index b7331388fd..10ae7de458 100644 --- a/deps/v8/src/ast.h +++ b/deps/v8/src/ast.h @@ -339,6 +339,9 @@ class Expression: public AstNode { // True iff the expression is the null literal. bool IsNullLiteral(); + // True iff the expression is the undefined literal. + bool IsUndefinedLiteral(); + // Type feedback information for assignments and properties. 
virtual bool IsMonomorphic() { UNREACHABLE(); @@ -939,15 +942,18 @@ class WithStatement: public Statement { public: DECLARE_NODE_TYPE(WithStatement) + Scope* scope() { return scope_; } Expression* expression() const { return expression_; } Statement* statement() const { return statement_; } protected: - WithStatement(Expression* expression, Statement* statement) - : expression_(expression), + WithStatement(Scope* scope, Expression* expression, Statement* statement) + : scope_(scope), + expression_(expression), statement_(statement) { } private: + Scope* scope_; Expression* expression_; Statement* statement_; }; @@ -1964,27 +1970,34 @@ class Yield: public Expression { public: DECLARE_NODE_TYPE(Yield) + enum Kind { + INITIAL, // The initial yield that returns the unboxed generator object. + SUSPEND, // A normal yield: { value: EXPRESSION, done: false } + DELEGATING, // A yield*. + FINAL // A return: { value: EXPRESSION, done: true } + }; + Expression* generator_object() const { return generator_object_; } Expression* expression() const { return expression_; } - bool is_delegating_yield() const { return is_delegating_yield_; } + Kind yield_kind() const { return yield_kind_; } virtual int position() const { return pos_; } protected: Yield(Isolate* isolate, Expression* generator_object, Expression* expression, - bool is_delegating_yield, + Kind yield_kind, int pos) : Expression(isolate), generator_object_(generator_object), expression_(expression), - is_delegating_yield_(is_delegating_yield), + yield_kind_(yield_kind), pos_(pos) { } private: Expression* generator_object_; Expression* expression_; - bool is_delegating_yield_; + Kind yield_kind_; int pos_; }; @@ -2777,9 +2790,11 @@ class AstNodeFactory BASE_EMBEDDED { VISIT_AND_RETURN(ReturnStatement, stmt) } - WithStatement* NewWithStatement(Expression* expression, + WithStatement* NewWithStatement(Scope* scope, + Expression* expression, Statement* statement) { - WithStatement* stmt = new(zone_) WithStatement(expression, statement); + WithStatement* stmt = new(zone_) WithStatement( + scope, expression, statement); VISIT_AND_RETURN(WithStatement, stmt) } @@ -2966,10 +2981,10 @@ class AstNodeFactory BASE_EMBEDDED { Yield* NewYield(Expression *generator_object, Expression* expression, - bool is_delegating_yield, + Yield::Kind yield_kind, int pos) { Yield* yield = new(zone_) Yield( - isolate_, generator_object, expression, is_delegating_yield, pos); + isolate_, generator_object, expression, yield_kind, pos); VISIT_AND_RETURN(Yield, yield) } diff --git a/deps/v8/src/bignum-dtoa.cc b/deps/v8/src/bignum-dtoa.cc index a9616909d0..c5ad4420c8 100644 --- a/deps/v8/src/bignum-dtoa.cc +++ b/deps/v8/src/bignum-dtoa.cc @@ -25,7 +25,7 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-#include <math.h> +#include <cmath> #include "../include/v8stdint.h" #include "checks.h" diff --git a/deps/v8/src/bootstrapper.cc b/deps/v8/src/bootstrapper.cc index 12f0cdac6b..85bf96e4d4 100644 --- a/deps/v8/src/bootstrapper.cc +++ b/deps/v8/src/bootstrapper.cc @@ -43,6 +43,7 @@ #include "extensions/externalize-string-extension.h" #include "extensions/gc-extension.h" #include "extensions/statistics-extension.h" +#include "code-stubs.h" namespace v8 { namespace internal { @@ -862,8 +863,6 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global, InstallFunction(global, "Array", JS_ARRAY_TYPE, JSArray::kSize, isolate->initial_object_prototype(), Builtins::kArrayCode, true); - array_function->shared()->set_construct_stub( - isolate->builtins()->builtin(Builtins::kArrayConstructCode)); array_function->shared()->DontAdaptArguments(); // This seems a bit hackish, but we need to make sure Array.length @@ -890,6 +889,17 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global, // as the constructor. 'Array' property on a global object can be // overwritten by JS code. native_context()->set_array_function(*array_function); + + if (FLAG_optimize_constructed_arrays) { + // Cache the array maps, needed by ArrayConstructorStub + CacheInitialJSArrayMaps(native_context(), initial_map); + ArrayConstructorStub array_constructor_stub(isolate); + Handle<Code> code = array_constructor_stub.GetCode(isolate); + array_function->shared()->set_construct_stub(*code); + } else { + array_function->shared()->set_construct_stub( + isolate->builtins()->builtin(Builtins::kCommonArrayConstructCode)); + } } { // --- N u m b e r --- @@ -1303,10 +1313,12 @@ void Genesis::InitializeExperimentalGlobal() { if (FLAG_harmony_typed_arrays) { { // -- A r r a y B u f f e r - InstallFunction(global, "__ArrayBuffer", JS_ARRAY_BUFFER_TYPE, - JSArrayBuffer::kSize, - isolate()->initial_object_prototype(), - Builtins::kIllegal, true); + Handle<JSFunction> array_buffer_fun = + InstallFunction(global, "__ArrayBuffer", JS_ARRAY_BUFFER_TYPE, + JSArrayBuffer::kSize, + isolate()->initial_object_prototype(), + Builtins::kIllegal, true); + native_context()->set_array_buffer_fun(*array_buffer_fun); } { // -- T y p e d A r r a y s @@ -1533,13 +1545,8 @@ Handle<JSFunction> Genesis::InstallInternalArray( factory()->NewJSObject(isolate()->object_function(), TENURED); SetPrototype(array_function, prototype); - // TODO(mvstanton): For performance reasons, this code would have to - // be changed to successfully run with FLAG_optimize_constructed_arrays. - // The next checkin to enable FLAG_optimize_constructed_arrays by - // default will address this. 
- CHECK(!FLAG_optimize_constructed_arrays); array_function->shared()->set_construct_stub( - isolate()->builtins()->builtin(Builtins::kArrayConstructCode)); + isolate()->builtins()->builtin(Builtins::kCommonArrayConstructCode)); array_function->shared()->DontAdaptArguments(); diff --git a/deps/v8/src/builtins.cc b/deps/v8/src/builtins.cc index 30edf579e6..571818030b 100644 --- a/deps/v8/src/builtins.cc +++ b/deps/v8/src/builtins.cc @@ -192,9 +192,8 @@ BUILTIN(EmptyFunction) { RUNTIME_FUNCTION(MaybeObject*, ArrayConstructor_StubFailure) { CONVERT_ARG_STUB_CALLER_ARGS(caller_args); - // ASSERT(args.length() == 3); - Handle<JSFunction> function = args.at<JSFunction>(1); - Handle<Object> type_info = args.at<Object>(2); + ASSERT(args.length() == 2); + Handle<Object> type_info = args.at<Object>(1); JSArray* array = NULL; bool holey = false; @@ -226,8 +225,7 @@ RUNTIME_FUNCTION(MaybeObject*, ArrayConstructor_StubFailure) { } } - ASSERT(function->has_initial_map()); - ElementsKind kind = function->initial_map()->elements_kind(); + ElementsKind kind = GetInitialFastElementsKind(); if (holey) { kind = GetHoleyElementsKind(kind); } @@ -934,7 +932,7 @@ BUILTIN(ArraySplice) { if (start < kMinInt || start > kMaxInt) { return CallJsBuiltin(isolate, "ArraySplice", args); } - relative_start = static_cast<int>(start); + relative_start = std::isnan(start) ? 0 : static_cast<int>(start); } else if (!arg1->IsUndefined()) { return CallJsBuiltin(isolate, "ArraySplice", args); } @@ -1321,7 +1319,7 @@ MUST_USE_RESULT static MaybeObject* HandleApiCallHelper( v8::Handle<v8::Value> value; { // Leaving JavaScript. - VMState state(isolate, EXTERNAL); + VMState<EXTERNAL> state(isolate); ExternalCallbackScope call_scope(isolate, v8::ToCData<Address>(callback_obj)); value = callback(new_args); @@ -1398,7 +1396,7 @@ MUST_USE_RESULT static MaybeObject* HandleApiCallAsFunctionOrConstructor( v8::Handle<v8::Value> value; { // Leaving JavaScript. - VMState state(isolate, EXTERNAL); + VMState<EXTERNAL> state(isolate); ExternalCallbackScope call_scope(isolate, v8::ToCData<Address>(callback_obj)); value = callback(new_args); diff --git a/deps/v8/src/builtins.h b/deps/v8/src/builtins.h index 12ed56af79..ab7722832f 100644 --- a/deps/v8/src/builtins.h +++ b/deps/v8/src/builtins.h @@ -199,7 +199,7 @@ enum BuiltinExtraArguments { Code::kNoExtraICState) \ V(ArrayCode, BUILTIN, UNINITIALIZED, \ Code::kNoExtraICState) \ - V(ArrayConstructCode, BUILTIN, UNINITIALIZED, \ + V(CommonArrayConstructCode, BUILTIN, UNINITIALIZED, \ Code::kNoExtraICState) \ \ V(StringConstructCode, BUILTIN, UNINITIALIZED, \ @@ -388,7 +388,7 @@ class Builtins { static void Generate_InternalArrayCode(MacroAssembler* masm); static void Generate_ArrayCode(MacroAssembler* masm); - static void Generate_ArrayConstructCode(MacroAssembler* masm); + static void Generate_CommonArrayConstructCode(MacroAssembler* masm); static void Generate_StringConstructCode(MacroAssembler* masm); static void Generate_OnStackReplacement(MacroAssembler* masm); diff --git a/deps/v8/src/cached-powers.cc b/deps/v8/src/cached-powers.cc index 9241d26582..fbfaf26159 100644 --- a/deps/v8/src/cached-powers.cc +++ b/deps/v8/src/cached-powers.cc @@ -26,8 +26,8 @@ // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#include <stdarg.h> -#include <math.h> #include <limits.h> +#include <cmath> #include "../include/v8stdint.h" #include "globals.h" diff --git a/deps/v8/src/code-stubs-hydrogen.cc b/deps/v8/src/code-stubs-hydrogen.cc index 60ddf9b462..b6720795ba 100644 --- a/deps/v8/src/code-stubs-hydrogen.cc +++ b/deps/v8/src/code-stubs-hydrogen.cc @@ -61,11 +61,7 @@ class CodeStubGraphBuilderBase : public HGraphBuilder { arguments_length_(NULL), info_(stub, isolate), context_(NULL) { - int major_key = stub->MajorKey(); - descriptor_ = isolate->code_stub_interface_descriptor(major_key); - if (descriptor_->register_param_count_ < 0) { - stub->InitializeInterfaceDescriptor(isolate, descriptor_); - } + descriptor_ = stub->GetInterfaceDescriptor(isolate); parameters_.Reset(new HParameter*[descriptor_->register_param_count_]); } virtual bool BuildGraph(); @@ -96,6 +92,9 @@ class CodeStubGraphBuilderBase : public HGraphBuilder { bool CodeStubGraphBuilderBase::BuildGraph() { + // Update the static counter each time a new code stub is generated. + isolate()->counters()->code_stubs()->Increment(); + if (FLAG_trace_hydrogen) { const char* name = CodeStub::MajorName(stub()->MajorKey(), false); PrintF("-----------------------------------------------------------\n"); @@ -130,9 +129,10 @@ bool CodeStubGraphBuilderBase::BuildGraph() { stack_parameter_count = new(zone) HParameter(param_count, HParameter::REGISTER_PARAMETER, Representation::Integer32()); + stack_parameter_count->set_type(HType::Smi()); // it's essential to bind this value to the environment in case of deopt - start_environment->Bind(param_count, stack_parameter_count); AddInstruction(stack_parameter_count); + start_environment->Bind(param_count, stack_parameter_count); arguments_length_ = stack_parameter_count; } else { ASSERT(descriptor_->environment_length() == param_count); @@ -154,17 +154,26 @@ bool CodeStubGraphBuilderBase::BuildGraph() { // arguments above HInstruction* stack_pop_count = stack_parameter_count; if (descriptor_->function_mode_ == JS_FUNCTION_STUB_MODE) { - HInstruction* amount = graph()->GetConstant1(); - stack_pop_count = AddInstruction( - HAdd::New(zone, context_, stack_parameter_count, amount)); - stack_pop_count->ChangeRepresentation(Representation::Integer32()); - stack_pop_count->ClearFlag(HValue::kCanOverflow); + if (!stack_parameter_count->IsConstant() && + descriptor_->hint_stack_parameter_count_ < 0) { + HInstruction* amount = graph()->GetConstant1(); + stack_pop_count = AddInstruction( + HAdd::New(zone, context_, stack_parameter_count, amount)); + stack_pop_count->ChangeRepresentation(Representation::Integer32()); + stack_pop_count->ClearFlag(HValue::kCanOverflow); + } else { + int count = descriptor_->hint_stack_parameter_count_; + stack_pop_count = AddInstruction(new(zone) + HConstant(count, Representation::Integer32())); + } } - HReturn* hreturn_instruction = new(zone) HReturn(return_value, - context_, - stack_pop_count); - current_block()->Finish(hreturn_instruction); + if (!current_block()->IsFinished()) { + HReturn* hreturn_instruction = new(zone) HReturn(return_value, + context_, + stack_pop_count); + current_block()->Finish(hreturn_instruction); + } return true; } @@ -176,16 +185,88 @@ class CodeStubGraphBuilder: public CodeStubGraphBuilderBase { : CodeStubGraphBuilderBase(Isolate::Current(), stub) {} protected: - virtual HValue* BuildCodeStub(); + virtual HValue* BuildCodeStub() { + if (casted_stub()->IsMiss()) { + return BuildCodeInitializedStub(); + } else { + return BuildCodeUninitializedStub(); + } + } + + 
virtual HValue* BuildCodeInitializedStub() { + UNIMPLEMENTED(); + return NULL; + } + + virtual HValue* BuildCodeUninitializedStub() { + // Force a deopt that falls back to the runtime. + HValue* undefined = graph()->GetConstantUndefined(); + IfBuilder builder(this); + builder.IfNot<HCompareObjectEqAndBranch, HValue*>(undefined, undefined); + builder.Then(); + builder.ElseDeopt(); + return undefined; + } + Stub* casted_stub() { return static_cast<Stub*>(stub()); } }; +Handle<Code> HydrogenCodeStub::GenerateLightweightMissCode(Isolate* isolate) { + Factory* factory = isolate->factory(); + + // Generate the new code. + MacroAssembler masm(isolate, NULL, 256); + + { + // Update the static counter each time a new code stub is generated. + isolate->counters()->code_stubs()->Increment(); + + // Nested stubs are not allowed for leaves. + AllowStubCallsScope allow_scope(&masm, false); + + // Generate the code for the stub. + masm.set_generating_stub(true); + NoCurrentFrameScope scope(&masm); + GenerateLightweightMiss(&masm); + } + + // Create the code object. + CodeDesc desc; + masm.GetCode(&desc); + + // Copy the generated code into a heap object. + Code::Flags flags = Code::ComputeFlags( + GetCodeKind(), + GetICState(), + GetExtraICState(), + GetStubType(), -1); + Handle<Code> new_object = factory->NewCode( + desc, flags, masm.CodeObject(), NeedsImmovableCode()); + return new_object; +} + + template <class Stub> static Handle<Code> DoGenerateCode(Stub* stub) { - CodeStubGraphBuilder<Stub> builder(stub); - LChunk* chunk = OptimizeGraph(builder.CreateGraph()); - return chunk->Codegen(); + Isolate* isolate = Isolate::Current(); + CodeStub::Major major_key = + static_cast<HydrogenCodeStub*>(stub)->MajorKey(); + CodeStubInterfaceDescriptor* descriptor = + isolate->code_stub_interface_descriptor(major_key); + if (descriptor->register_param_count_ < 0) { + stub->InitializeInterfaceDescriptor(isolate, descriptor); + } + // The miss case without stack parameters can use a light-weight stub to enter + // the runtime that is significantly faster than using the standard + // stub-failure deopt mechanism. 
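+ // (So the dispatch below is: a miss stub with no stack parameter count
+ // gets the hand-assembled trampoline from GenerateLightweightMissCode()
+ // above; everything else builds a Hydrogen graph via CodeStubGraphBuilder
+ // and compiles it through the optimizing back end.)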
+ if (stub->IsMiss() && descriptor->stack_parameter_count_ == NULL) { + return stub->GenerateLightweightMissCode(isolate); + } else { + CodeStubGraphBuilder<Stub> builder(stub); + LChunk* chunk = OptimizeGraph(builder.CreateGraph()); + return chunk->Codegen(); + } } @@ -193,6 +274,7 @@ template <> HValue* CodeStubGraphBuilder<FastCloneShallowArrayStub>::BuildCodeStub() { Zone* zone = this->zone(); Factory* factory = isolate()->factory(); + HValue* undefined = graph()->GetConstantUndefined(); AllocationSiteMode alloc_site_mode = casted_stub()->allocation_site_mode(); FastCloneShallowArrayStub::Mode mode = casted_stub()->mode(); int length = casted_stub()->length(); @@ -203,8 +285,9 @@ HValue* CodeStubGraphBuilder<FastCloneShallowArrayStub>::BuildCodeStub() { NULL, FAST_ELEMENTS)); - CheckBuilder builder(this); - builder.CheckNotUndefined(boilerplate); + IfBuilder checker(this); + checker.IfNot<HCompareObjectEqAndBranch, HValue*>(boilerplate, undefined); + checker.Then(); if (mode == FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS) { HValue* elements = @@ -243,14 +326,14 @@ HValue* CodeStubGraphBuilder<FastCloneShallowArrayStub>::BuildCodeStub() { length)); } - return environment()->Pop(); + HValue* result = environment()->Pop(); + checker.ElseDeopt(); + return result; } Handle<Code> FastCloneShallowArrayStub::GenerateCode() { - CodeStubGraphBuilder<FastCloneShallowArrayStub> builder(this); - LChunk* chunk = OptimizeGraph(builder.CreateGraph()); - return chunk->Codegen(); + return DoGenerateCode(this); } @@ -258,6 +341,7 @@ template <> HValue* CodeStubGraphBuilder<FastCloneShallowObjectStub>::BuildCodeStub() { Zone* zone = this->zone(); Factory* factory = isolate()->factory(); + HValue* undefined = graph()->GetConstantUndefined(); HInstruction* boilerplate = AddInstruction(new(zone) HLoadKeyed(GetParameter(0), @@ -265,8 +349,9 @@ HValue* CodeStubGraphBuilder<FastCloneShallowObjectStub>::BuildCodeStub() { NULL, FAST_ELEMENTS)); - CheckBuilder builder(this); - builder.CheckNotUndefined(boilerplate); + IfBuilder checker(this); + checker.IfNot<HCompareObjectEqAndBranch, HValue*>(boilerplate, undefined); + checker.And(); int size = JSObject::kHeaderSize + casted_stub()->length() * kPointerSize; HValue* boilerplate_size = @@ -274,7 +359,8 @@ HValue* CodeStubGraphBuilder<FastCloneShallowObjectStub>::BuildCodeStub() { HValue* size_in_words = AddInstruction(new(zone) HConstant(size >> kPointerSizeLog2, Representation::Integer32())); - builder.CheckIntegerEq(boilerplate_size, size_in_words); + checker.IfCompare(boilerplate_size, size_in_words, Token::EQ); + checker.Then(); HValue* size_in_bytes = AddInstruction(new(zone) HConstant(size, Representation::Integer32())); @@ -298,7 +384,7 @@ HValue* CodeStubGraphBuilder<FastCloneShallowObjectStub>::BuildCodeStub() { true, i)); } - builder.End(); + checker.ElseDeopt(); return object; } @@ -401,10 +487,18 @@ Handle<Code> TransitionElementsKindStub::GenerateCode() { template <> HValue* CodeStubGraphBuilder<ArrayNoArgumentConstructorStub>::BuildCodeStub() { - HInstruction* deopt = new(zone()) HSoftDeoptimize(); - AddInstruction(deopt); - current_block()->MarkAsDeoptimizing(); - return GetParameter(0); + // ----------- S t a t e ------------- + // -- Parameter 1 : type info cell + // -- Parameter 0 : constructor + // ----------------------------------- + // Get the right map + // Should be a constant + JSArrayBuilder array_builder( + this, + casted_stub()->elements_kind(), + GetParameter(ArrayConstructorStubBase::kPropertyCell), + casted_stub()->mode()); + return 
array_builder.AllocateEmptyArray(); } @@ -416,10 +510,49 @@ Handle<Code> ArrayNoArgumentConstructorStub::GenerateCode() { template <> HValue* CodeStubGraphBuilder<ArraySingleArgumentConstructorStub>:: BuildCodeStub() { - HInstruction* deopt = new(zone()) HSoftDeoptimize(); - AddInstruction(deopt); - current_block()->MarkAsDeoptimizing(); - return GetParameter(0); + // Smi check and range check on the input arg. + HValue* constant_one = graph()->GetConstant1(); + HValue* constant_zero = graph()->GetConstant0(); + + HInstruction* elements = AddInstruction( + new(zone()) HArgumentsElements(false)); + HInstruction* argument = AddInstruction( + new(zone()) HAccessArgumentsAt(elements, constant_one, constant_zero)); + + HConstant* max_alloc_length = + new(zone()) HConstant(JSObject::kInitialMaxFastElementArray, + Representation::Tagged()); + AddInstruction(max_alloc_length); + const int initial_capacity = JSArray::kPreallocatedArrayElements; + HConstant* initial_capacity_node = + new(zone()) HConstant(initial_capacity, Representation::Tagged()); + AddInstruction(initial_capacity_node); + + // Since we're forcing Integer32 representation for this HBoundsCheck, + // there's no need to Smi-check the index. + HBoundsCheck* checked_arg = AddBoundsCheck(argument, max_alloc_length, + ALLOW_SMI_KEY, + Representation::Tagged()); + IfBuilder if_builder(this); + if_builder.IfCompare(checked_arg, constant_zero, Token::EQ); + if_builder.Then(); + Push(initial_capacity_node); // capacity + Push(constant_zero); // length + if_builder.Else(); + Push(checked_arg); // capacity + Push(checked_arg); // length + if_builder.End(); + + // Figure out total size + HValue* length = Pop(); + HValue* capacity = Pop(); + + JSArrayBuilder array_builder( + this, + casted_stub()->elements_kind(), + GetParameter(ArrayConstructorStubBase::kPropertyCell), + casted_stub()->mode()); + return array_builder.AllocateArray(capacity, length, true); } @@ -430,10 +563,46 @@ Handle<Code> ArraySingleArgumentConstructorStub::GenerateCode() { template <> HValue* CodeStubGraphBuilder<ArrayNArgumentsConstructorStub>::BuildCodeStub() { - HInstruction* deopt = new(zone()) HSoftDeoptimize(); - AddInstruction(deopt); - current_block()->MarkAsDeoptimizing(); - return GetParameter(0); + ElementsKind kind = casted_stub()->elements_kind(); + HValue* length = GetArgumentsLength(); + + JSArrayBuilder array_builder( + this, + kind, + GetParameter(ArrayConstructorStubBase::kPropertyCell), + casted_stub()->mode()); + + // We need to fill with the hole if it's a smi array in the multi-argument + // case because we might have to bail out while copying arguments into + // the array because they aren't compatible with a smi array. + // If it's a double array, no problem, and if it's fast then no + // problem either because doubles are boxed. + bool fill_with_hole = IsFastSmiElementsKind(kind); + HValue* new_object = array_builder.AllocateArray(length, + length, + fill_with_hole); + HValue* elements = array_builder.GetElementsLocation(); + ASSERT(elements != NULL); + + // Now populate the elements correctly. 
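+ // (In effect the graph below implements this loop; a sketch, not part of
+ // the patch:
+ //   for (int i = 0; i < length; i++) {
+ //     if (IsFastSmiElementsKind(kind)) CheckSmi(arguments[i]);  // deopts
+ //     elements[i] = arguments[i];
+ //   })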
+ LoopBuilder builder(this, + context(), + LoopBuilder::kPostIncrement); + HValue* start = graph()->GetConstant0(); + HValue* key = builder.BeginBody(start, length, Token::LT); + HInstruction* argument_elements = AddInstruction( + new(zone()) HArgumentsElements(false)); + HInstruction* argument = AddInstruction(new(zone()) HAccessArgumentsAt( + argument_elements, length, key)); + + // Checks to prevent incompatible stores + if (IsFastSmiElementsKind(kind)) { + AddInstruction(new(zone()) HCheckSmi(argument)); + } + + AddInstruction(new(zone()) HStoreKeyed(elements, key, argument, kind)); + builder.EndBody(); + return new_object; } @@ -441,4 +610,30 @@ Handle<Code> ArrayNArgumentsConstructorStub::GenerateCode() { return DoGenerateCode(this); } + +template <> +HValue* CodeStubGraphBuilder<CompareNilICStub>::BuildCodeUninitializedStub() { + CompareNilICStub* stub = casted_stub(); + HIfContinuation continuation; + Handle<Map> sentinel_map(graph()->isolate()->heap()->meta_map()); + BuildCompareNil(GetParameter(0), stub->GetKind(), + stub->GetTypes(), sentinel_map, + RelocInfo::kNoPosition, &continuation); + IfBuilder if_nil(this, &continuation); + if_nil.Then(); + if (continuation.IsFalseReachable()) { + if_nil.Else(); + if_nil.Return(graph()->GetConstantSmi0()); + } + if_nil.End(); + return continuation.IsTrueReachable() + ? graph()->GetConstantSmi1() + : graph()->GetConstantUndefined(); +} + + +Handle<Code> CompareNilICStub::GenerateCode() { + return DoGenerateCode(this); +} + } } // namespace v8::internal diff --git a/deps/v8/src/code-stubs.cc b/deps/v8/src/code-stubs.cc index 497dde54e7..df9855d09c 100644 --- a/deps/v8/src/code-stubs.cc +++ b/deps/v8/src/code-stubs.cc @@ -37,6 +37,17 @@ namespace v8 { namespace internal { + +CodeStubInterfaceDescriptor::CodeStubInterfaceDescriptor() + : register_param_count_(-1), + stack_parameter_count_(NULL), + hint_stack_parameter_count_(-1), + function_mode_(NOT_JS_FUNCTION_STUB_MODE), + register_params_(NULL), + deoptimization_handler_(NULL), + miss_handler_(IC_Utility(IC::kUnreachable), Isolate::Current()) { } + + bool CodeStub::FindCodeInCache(Code** code_out, Isolate* isolate) { UnseededNumberDictionary* stubs = isolate->heap()->code_stubs(); int index = stubs->FindEntry(GetKey()); @@ -397,6 +408,42 @@ void ICCompareStub::Generate(MacroAssembler* masm) { } +CompareNilICStub::Types CompareNilICStub::GetPatchedICFlags( + Code::ExtraICState extra_ic_state, + Handle<Object> object, + bool* already_monomorphic) { + Types types = TypesField::decode(extra_ic_state); + NilValue nil = NilValueField::decode(extra_ic_state); + EqualityKind kind = EqualityKindField::decode(extra_ic_state); + ASSERT(types != CompareNilICStub::kFullCompare); + *already_monomorphic = + (types & CompareNilICStub::kCompareAgainstMonomorphicMap) != 0; + if (kind == kStrictEquality) { + if (nil == kNullValue) { + return CompareNilICStub::kCompareAgainstNull; + } else { + return CompareNilICStub::kCompareAgainstUndefined; + } + } else { + if (object->IsNull()) { + types = static_cast<CompareNilICStub::Types>( + types | CompareNilICStub::kCompareAgainstNull); + } else if (object->IsUndefined()) { + types = static_cast<CompareNilICStub::Types>( + types | CompareNilICStub::kCompareAgainstUndefined); + } else if (object->IsUndetectableObject() || !object->IsHeapObject()) { + types = CompareNilICStub::kFullCompare; + } else if ((types & CompareNilICStub::kCompareAgainstMonomorphicMap) != 0) { + types = CompareNilICStub::kFullCompare; + } else { + types = static_cast<CompareNilICStub::Types>( 
+ types | CompareNilICStub::kCompareAgainstMonomorphicMap); + } + } + return types; +} + + void InstanceofStub::PrintName(StringStream* stream) { const char* args = ""; if (HasArgsInRegisters()) { @@ -557,7 +604,7 @@ bool ToBooleanStub::Types::Record(Handle<Object> object) { ASSERT(!object->IsUndetectableObject()); Add(HEAP_NUMBER); double value = HeapNumber::cast(*object)->value(); - return value != 0 && !isnan(value); + return value != 0 && !std::isnan(value); } else { // We should never see an internal object at runtime here! UNREACHABLE(); @@ -647,4 +694,45 @@ bool ProfileEntryHookStub::SetFunctionEntryHook(FunctionEntryHook entry_hook) { } +static void InstallDescriptor(Isolate* isolate, HydrogenCodeStub* stub) { + int major_key = stub->MajorKey(); + CodeStubInterfaceDescriptor* descriptor = + isolate->code_stub_interface_descriptor(major_key); + if (!descriptor->initialized()) { + stub->InitializeInterfaceDescriptor(isolate, descriptor); + } +} + + +void ArrayConstructorStubBase::InstallDescriptors(Isolate* isolate) { + ArrayNoArgumentConstructorStub stub1(GetInitialFastElementsKind()); + InstallDescriptor(isolate, &stub1); + ArraySingleArgumentConstructorStub stub2(GetInitialFastElementsKind()); + InstallDescriptor(isolate, &stub2); + ArrayNArgumentsConstructorStub stub3(GetInitialFastElementsKind()); + InstallDescriptor(isolate, &stub3); +} + + +ArrayConstructorStub::ArrayConstructorStub(Isolate* isolate) + : argument_count_(ANY) { + ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate); +} + + +ArrayConstructorStub::ArrayConstructorStub(Isolate* isolate, + int argument_count) { + if (argument_count == 0) { + argument_count_ = NONE; + } else if (argument_count == 1) { + argument_count_ = ONE; + } else if (argument_count >= 2) { + argument_count_ = MORE_THAN_ONE; + } else { + UNREACHABLE(); + } + ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate); +} + + } } // namespace v8::internal diff --git a/deps/v8/src/code-stubs.h b/deps/v8/src/code-stubs.h index 56b595583d..ea895d669f 100644 --- a/deps/v8/src/code-stubs.h +++ b/deps/v8/src/code-stubs.h @@ -29,6 +29,7 @@ #define V8_CODE_STUBS_H_ #include "allocation.h" +#include "assembler.h" #include "globals.h" #include "codegen.h" @@ -46,6 +47,7 @@ namespace internal { V(StringCompare) \ V(Compare) \ V(CompareIC) \ + V(CompareNilIC) \ V(MathPow) \ V(StringLength) \ V(FunctionPrototype) \ @@ -82,6 +84,7 @@ namespace internal { V(TransitionElementsKind) \ V(StoreArrayLiteralElement) \ V(StubFailureTrampoline) \ + V(ArrayConstructor) \ V(ProfileEntryHook) \ /* IC Handler stubs */ \ V(LoadField) @@ -260,17 +263,18 @@ class PlatformCodeStub : public CodeStub { enum StubFunctionMode { NOT_JS_FUNCTION_STUB_MODE, JS_FUNCTION_STUB_MODE }; + struct CodeStubInterfaceDescriptor { - CodeStubInterfaceDescriptor() - : register_param_count_(-1), - stack_parameter_count_(NULL), - function_mode_(NOT_JS_FUNCTION_STUB_MODE), - register_params_(NULL) { } + CodeStubInterfaceDescriptor(); int register_param_count_; const Register* stack_parameter_count_; + // if hint_stack_parameter_count_ > 0, the code stub can optimize the + // return sequence. Default value is -1, which means it is ignored. 
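+ // (See CodeStubGraphBuilderBase::BuildGraph(): with a usable hint the
+ // stub pops a compile-time-constant slot count on return instead of
+ // computing 'stack parameter count + 1' at run time.)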
+ int hint_stack_parameter_count_; StubFunctionMode function_mode_; Register* register_params_; Address deoptimization_handler_; + ExternalReference miss_handler_; int environment_length() const { if (stack_parameter_count_ != NULL) { @@ -278,13 +282,28 @@ struct CodeStubInterfaceDescriptor { } return register_param_count_; } + + bool initialized() const { return register_param_count_ >= 0; } }; +// A helper to make up for the fact that type Register is not fully +// defined outside of the platform directories +#define DESCRIPTOR_GET_PARAMETER_REGISTER(descriptor, index) \ + ((index) == (descriptor)->register_param_count_) \ + ? *((descriptor)->stack_parameter_count_) \ + : (descriptor)->register_params_[(index)] + class HydrogenCodeStub : public CodeStub { public: - // Retrieve the code for the stub. Generate the code if needed. - virtual Handle<Code> GenerateCode() = 0; + enum InitializationState { + CODE_STUB_IS_NOT_MISS, + CODE_STUB_IS_MISS + }; + + explicit HydrogenCodeStub(InitializationState state) { + is_miss_ = (state == CODE_STUB_IS_MISS); + } virtual Code::Kind GetCodeKind() const { return Code::STUB; } @@ -292,9 +311,36 @@ class HydrogenCodeStub : public CodeStub { return isolate->code_stub_interface_descriptor(MajorKey()); } + bool IsMiss() { return is_miss_; } + + template<class SubClass> + static Handle<Code> GetUninitialized(Isolate* isolate) { + SubClass::GenerateAheadOfTime(isolate); + return SubClass().GetCode(isolate); + } + virtual void InitializeInterfaceDescriptor( Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) = 0; + + // Retrieve the code for the stub. Generate the code if needed. + virtual Handle<Code> GenerateCode() = 0; + + virtual int NotMissMinorKey() = 0; + + Handle<Code> GenerateLightweightMissCode(Isolate* isolate); + + private: + class MinorKeyBits: public BitField<int, 0, kStubMinorKeyBits - 1> {}; + class IsMissBits: public BitField<bool, kStubMinorKeyBits - 1, 1> {}; + + void GenerateLightweightMiss(MacroAssembler* masm); + virtual int MinorKey() { + return IsMissBits::encode(is_miss_) | + MinorKeyBits::encode(NotMissMinorKey()); + } + + bool is_miss_; }; @@ -467,7 +513,8 @@ class FastCloneShallowArrayStub : public HydrogenCodeStub { FastCloneShallowArrayStub(Mode mode, AllocationSiteMode allocation_site_mode, int length) - : mode_(mode), + : HydrogenCodeStub(CODE_STUB_IS_NOT_MISS), + mode_(mode), allocation_site_mode_(allocation_site_mode), length_((mode == COPY_ON_WRITE_ELEMENTS) ? 0 : length) { ASSERT_GE(length_, 0); @@ -513,7 +560,7 @@ class FastCloneShallowArrayStub : public HydrogenCodeStub { STATIC_ASSERT(kFastCloneModeCount < 16); STATIC_ASSERT(kMaximumClonedLength < 16); Major MajorKey() { return FastCloneShallowArray; } - int MinorKey() { + int NotMissMinorKey() { return AllocationSiteModeBits::encode(allocation_site_mode_) | ModeBits::encode(mode_) | LengthBits::encode(length_); @@ -526,7 +573,9 @@ class FastCloneShallowObjectStub : public HydrogenCodeStub { // Maximum number of properties in copied object. 
static const int kMaximumClonedProperties = 6; - explicit FastCloneShallowObjectStub(int length) : length_(length) { + explicit FastCloneShallowObjectStub(int length) + : HydrogenCodeStub(CODE_STUB_IS_NOT_MISS), + length_(length) { ASSERT_GE(length_, 0); ASSERT_LE(length_, kMaximumClonedProperties); } @@ -543,7 +592,7 @@ class FastCloneShallowObjectStub : public HydrogenCodeStub { int length_; Major MajorKey() { return FastCloneShallowObject; } - int MinorKey() { return length_; } + int NotMissMinorKey() { return length_; } DISALLOW_COPY_AND_ASSIGN(FastCloneShallowObjectStub); }; @@ -587,6 +636,22 @@ class InstanceofStub: public PlatformCodeStub { }; +class ArrayConstructorStub: public PlatformCodeStub { + public: + enum ArgumentCountKey { ANY, NONE, ONE, MORE_THAN_ONE }; + ArrayConstructorStub(Isolate* isolate, int argument_count); + explicit ArrayConstructorStub(Isolate* isolate); + + void Generate(MacroAssembler* masm); + + private: + virtual CodeStub::Major MajorKey() { return ArrayConstructor; } + virtual int MinorKey() { return argument_count_; } + + ArgumentCountKey argument_count_; +}; + + class MathPowStub: public PlatformCodeStub { public: enum ExponentType { INTEGER, DOUBLE, TAGGED, ON_STACK}; @@ -911,6 +976,102 @@ class ICCompareStub: public PlatformCodeStub { }; +class CompareNilICStub : public HydrogenCodeStub { + public: + enum Types { + kCompareAgainstNull = 1 << 0, + kCompareAgainstUndefined = 1 << 1, + kCompareAgainstMonomorphicMap = 1 << 2, + kCompareAgainstUndetectable = 1 << 3, + kFullCompare = kCompareAgainstNull | kCompareAgainstUndefined | + kCompareAgainstUndetectable + }; + + CompareNilICStub(EqualityKind kind, NilValue nil, Types types) + : HydrogenCodeStub(CODE_STUB_IS_NOT_MISS), bit_field_(0) { + bit_field_ = EqualityKindField::encode(kind) | + NilValueField::encode(nil) | + TypesField::encode(types); + } + + virtual InlineCacheState GetICState() { + Types types = GetTypes(); + if (types == kFullCompare) { + return MEGAMORPHIC; + } else if ((types & kCompareAgainstMonomorphicMap) != 0) { + return MONOMORPHIC; + } else { + return PREMONOMORPHIC; + } + } + + virtual Code::Kind GetCodeKind() const { return Code::COMPARE_NIL_IC; } + + Handle<Code> GenerateCode(); + + static Handle<Code> GetUninitialized(Isolate* isolate, + EqualityKind kind, + NilValue nil) { + return CompareNilICStub(kind, nil).GetCode(isolate); + } + + virtual void InitializeInterfaceDescriptor( + Isolate* isolate, + CodeStubInterfaceDescriptor* descriptor); + + static void InitializeForIsolate(Isolate* isolate) { + CompareNilICStub compare_stub(kStrictEquality, kNullValue); + compare_stub.InitializeInterfaceDescriptor( + isolate, + isolate->code_stub_interface_descriptor(CodeStub::CompareNilIC)); + } + + virtual Code::ExtraICState GetExtraICState() { + return bit_field_; + } + + EqualityKind GetKind() { return EqualityKindField::decode(bit_field_); } + NilValue GetNilValue() { return NilValueField::decode(bit_field_); } + Types GetTypes() { return TypesField::decode(bit_field_); } + + static Types TypesFromExtraICState( + Code::ExtraICState state) { + return TypesField::decode(state); + } + static EqualityKind EqualityKindFromExtraICState( + Code::ExtraICState state) { + return EqualityKindField::decode(state); + } + static NilValue NilValueFromExtraICState(Code::ExtraICState state) { + return NilValueField::decode(state); + } + + static Types GetPatchedICFlags(Code::ExtraICState extra_ic_state, + Handle<Object> object, + bool* already_monomorphic); + + private: + friend class CompareNilIC; + + 
class EqualityKindField : public BitField<EqualityKind, 0, 1> {}; + class NilValueField : public BitField<NilValue, 1, 1> {}; + class TypesField : public BitField<Types, 3, 4> {}; + + CompareNilICStub(EqualityKind kind, NilValue nil) + : HydrogenCodeStub(CODE_STUB_IS_MISS), bit_field_(0) { + bit_field_ = EqualityKindField::encode(kind) | + NilValueField::encode(nil); + } + + virtual CodeStub::Major MajorKey() { return CompareNilIC; } + virtual int NotMissMinorKey() { return bit_field_; } + + int bit_field_; + + DISALLOW_COPY_AND_ASSIGN(CompareNilICStub); +}; + + class CEntryStub : public PlatformCodeStub { public: explicit CEntryStub(int result_size, @@ -1291,19 +1452,20 @@ class KeyedLoadDictionaryElementStub : public PlatformCodeStub { public: KeyedLoadDictionaryElementStub() {} - Major MajorKey() { return KeyedLoadElement; } - int MinorKey() { return DICTIONARY_ELEMENTS; } - void Generate(MacroAssembler* masm); private: + Major MajorKey() { return KeyedLoadElement; } + int MinorKey() { return DICTIONARY_ELEMENTS; } + DISALLOW_COPY_AND_ASSIGN(KeyedLoadDictionaryElementStub); }; class KeyedLoadFastElementStub : public HydrogenCodeStub { public: - KeyedLoadFastElementStub(bool is_js_array, ElementsKind elements_kind) { + KeyedLoadFastElementStub(bool is_js_array, ElementsKind elements_kind) + : HydrogenCodeStub(CODE_STUB_IS_NOT_MISS) { bit_field_ = ElementsKindBits::encode(elements_kind) | IsJSArrayBits::encode(is_js_array); } @@ -1323,12 +1485,12 @@ class KeyedLoadFastElementStub : public HydrogenCodeStub { CodeStubInterfaceDescriptor* descriptor); private: - class IsJSArrayBits: public BitField<bool, 8, 1> {}; class ElementsKindBits: public BitField<ElementsKind, 0, 8> {}; + class IsJSArrayBits: public BitField<bool, 8, 1> {}; uint32_t bit_field_; Major MajorKey() { return KeyedLoadElement; } - int MinorKey() { return bit_field_; } + int NotMissMinorKey() { return bit_field_; } DISALLOW_COPY_AND_ASSIGN(KeyedLoadFastElementStub); }; @@ -1338,15 +1500,13 @@ class KeyedStoreFastElementStub : public HydrogenCodeStub { public: KeyedStoreFastElementStub(bool is_js_array, ElementsKind elements_kind, - KeyedAccessStoreMode mode) { + KeyedAccessStoreMode mode) + : HydrogenCodeStub(CODE_STUB_IS_NOT_MISS) { bit_field_ = ElementsKindBits::encode(elements_kind) | IsJSArrayBits::encode(is_js_array) | StoreModeBits::encode(mode); } - Major MajorKey() { return KeyedStoreElement; } - int MinorKey() { return bit_field_; } - bool is_js_array() const { return IsJSArrayBits::decode(bit_field_); } @@ -1371,6 +1531,9 @@ class KeyedStoreFastElementStub : public HydrogenCodeStub { class IsJSArrayBits: public BitField<bool, 12, 1> {}; uint32_t bit_field_; + Major MajorKey() { return KeyedStoreElement; } + int NotMissMinorKey() { return bit_field_; } + DISALLOW_COPY_AND_ASSIGN(KeyedStoreFastElementStub); }; @@ -1378,7 +1541,8 @@ class KeyedStoreFastElementStub : public HydrogenCodeStub { class TransitionElementsKindStub : public HydrogenCodeStub { public: TransitionElementsKindStub(ElementsKind from_kind, - ElementsKind to_kind) { + ElementsKind to_kind) + : HydrogenCodeStub(CODE_STUB_IS_NOT_MISS) { bit_field_ = FromKindBits::encode(from_kind) | ToKindBits::encode(to_kind); } @@ -1403,19 +1567,55 @@ class TransitionElementsKindStub : public HydrogenCodeStub { uint32_t bit_field_; Major MajorKey() { return TransitionElementsKind; } - int MinorKey() { return bit_field_; } + int NotMissMinorKey() { return bit_field_; } DISALLOW_COPY_AND_ASSIGN(TransitionElementsKindStub); }; -class ArrayNoArgumentConstructorStub : 
public HydrogenCodeStub { +class ArrayConstructorStubBase : public HydrogenCodeStub { public: - ArrayNoArgumentConstructorStub() { + ArrayConstructorStubBase(ElementsKind kind, AllocationSiteMode mode) + : HydrogenCodeStub(CODE_STUB_IS_NOT_MISS) { + bit_field_ = ElementsKindBits::encode(kind) | + AllocationSiteModeBits::encode(mode == TRACK_ALLOCATION_SITE); } - Major MajorKey() { return ArrayNoArgumentConstructor; } - int MinorKey() { return 0; } + ElementsKind elements_kind() const { + return ElementsKindBits::decode(bit_field_); + } + + AllocationSiteMode mode() const { + return AllocationSiteModeBits::decode(bit_field_) + ? TRACK_ALLOCATION_SITE + : DONT_TRACK_ALLOCATION_SITE; + } + + virtual bool IsPregenerated() { return true; } + static void GenerateStubsAheadOfTime(Isolate* isolate); + static void InstallDescriptors(Isolate* isolate); + + // Parameters accessed via CodeStubGraphBuilder::GetParameter() + static const int kPropertyCell = 0; + + private: + int NotMissMinorKey() { return bit_field_; } + + class ElementsKindBits: public BitField<ElementsKind, 0, 8> {}; + class AllocationSiteModeBits: public BitField<bool, 8, 1> {}; + uint32_t bit_field_; + + DISALLOW_COPY_AND_ASSIGN(ArrayConstructorStubBase); +}; + + +class ArrayNoArgumentConstructorStub : public ArrayConstructorStubBase { + public: + ArrayNoArgumentConstructorStub( + ElementsKind kind, + AllocationSiteMode mode = TRACK_ALLOCATION_SITE) + : ArrayConstructorStubBase(kind, mode) { + } virtual Handle<Code> GenerateCode(); @@ -1424,18 +1624,20 @@ class ArrayNoArgumentConstructorStub : public HydrogenCodeStub { CodeStubInterfaceDescriptor* descriptor); private: + Major MajorKey() { return ArrayNoArgumentConstructor; } + DISALLOW_COPY_AND_ASSIGN(ArrayNoArgumentConstructorStub); }; -class ArraySingleArgumentConstructorStub : public HydrogenCodeStub { +class ArraySingleArgumentConstructorStub : public ArrayConstructorStubBase { public: - ArraySingleArgumentConstructorStub() { + ArraySingleArgumentConstructorStub( + ElementsKind kind, + AllocationSiteMode mode = TRACK_ALLOCATION_SITE) + : ArrayConstructorStubBase(kind, mode) { } - Major MajorKey() { return ArraySingleArgumentConstructor; } - int MinorKey() { return 0; } - virtual Handle<Code> GenerateCode(); virtual void InitializeInterfaceDescriptor( @@ -1443,18 +1645,20 @@ class ArraySingleArgumentConstructorStub : public HydrogenCodeStub { CodeStubInterfaceDescriptor* descriptor); private: + Major MajorKey() { return ArraySingleArgumentConstructor; } + DISALLOW_COPY_AND_ASSIGN(ArraySingleArgumentConstructorStub); }; -class ArrayNArgumentsConstructorStub : public HydrogenCodeStub { +class ArrayNArgumentsConstructorStub : public ArrayConstructorStubBase { public: - ArrayNArgumentsConstructorStub() { + ArrayNArgumentsConstructorStub( + ElementsKind kind, + AllocationSiteMode mode = TRACK_ALLOCATION_SITE) : + ArrayConstructorStubBase(kind, mode) { } - Major MajorKey() { return ArrayNArgumentsConstructor; } - int MinorKey() { return 0; } - virtual Handle<Code> GenerateCode(); virtual void InitializeInterfaceDescriptor( @@ -1462,6 +1666,8 @@ class ArrayNArgumentsConstructorStub : public HydrogenCodeStub { CodeStubInterfaceDescriptor* descriptor); private: + Major MajorKey() { return ArrayNArgumentsConstructor; } + DISALLOW_COPY_AND_ASSIGN(ArrayNArgumentsConstructorStub); }; diff --git a/deps/v8/src/code.h b/deps/v8/src/code.h index 766c932e0f..791420cf39 100644 --- a/deps/v8/src/code.h +++ b/deps/v8/src/code.h @@ -29,6 +29,8 @@ #define V8_CODE_H_ #include "allocation.h" +#include 
"handles.h" +#include "objects.h" namespace v8 { namespace internal { @@ -44,6 +46,8 @@ class ParameterCount BASE_EMBEDDED { : reg_(reg), immediate_(0) { } explicit ParameterCount(int immediate) : reg_(no_reg), immediate_(immediate) { } + explicit ParameterCount(Handle<JSFunction> f) + : reg_(no_reg), immediate_(f->shared()->formal_parameter_count()) { } bool is_reg() const { return !reg_.is(no_reg); } bool is_immediate() const { return !is_reg(); } diff --git a/deps/v8/src/compiler.cc b/deps/v8/src/compiler.cc index 70ce6bc825..dce817129c 100644 --- a/deps/v8/src/compiler.cc +++ b/deps/v8/src/compiler.cc @@ -125,11 +125,8 @@ CompilationInfo::~CompilationInfo() { int CompilationInfo::num_parameters() const { - if (IsStub()) { - return 0; - } else { - return scope()->num_parameters(); - } + ASSERT(!IsStub()); + return scope()->num_parameters(); } @@ -147,8 +144,7 @@ Code::Flags CompilationInfo::flags() const { return Code::ComputeFlags(code_stub()->GetCodeKind(), code_stub()->GetICState(), code_stub()->GetExtraICState(), - Code::NORMAL, - 0); + Code::NORMAL, -1); } else { return Code::ComputeFlags(Code::OPTIMIZED_FUNCTION); } @@ -425,6 +421,12 @@ OptimizingCompiler::Status OptimizingCompiler::GenerateAndInstallCode() { Timer timer(this, &time_taken_to_codegen_); ASSERT(chunk_ != NULL); ASSERT(graph_ != NULL); + // Deferred handles reference objects that were accessible during + // graph creation. To make sure that we don't encounter inconsistencies + // between graph creation and code generation, we disallow accessing + // objects through deferred handles during the latter, with exceptions. + HandleDereferenceGuard no_deref_deferred( + isolate(), HandleDereferenceGuard::DISALLOW_DEFERRED); Handle<Code> optimized_code = chunk_->Codegen(); if (optimized_code.is_null()) { info()->set_bailout_reason("code generation failed"); @@ -622,7 +624,7 @@ Handle<SharedFunctionInfo> Compiler::Compile(Handle<String> source, isolate->counters()->total_compile_size()->Increment(source_length); // The VM is in the COMPILER state until exiting this function. - VMState state(isolate, COMPILER); + VMState<COMPILER> state(isolate); CompilationCache* compilation_cache = isolate->compilation_cache(); @@ -696,7 +698,7 @@ Handle<SharedFunctionInfo> Compiler::CompileEval(Handle<String> source, isolate->counters()->total_compile_size()->Increment(source_length); // The VM is in the COMPILER state until exiting this function. - VMState state(isolate, COMPILER); + VMState<COMPILER> state(isolate); // Do a lookup in the compilation cache; if the entry is not there, invoke // the compiler and add the result to the cache. @@ -859,7 +861,7 @@ bool Compiler::CompileLazy(CompilationInfo* info) { ZoneScope zone_scope(info->zone(), DELETE_ON_EXIT); // The VM is in the COMPILER state until exiting this function. 
- VMState state(isolate, COMPILER); + VMState<COMPILER> state(isolate); PostponeInterruptsScope postpone(isolate); @@ -923,7 +925,7 @@ void Compiler::RecompileParallel(Handle<JSFunction> closure) { } SmartPointer<CompilationInfo> info(new CompilationInfoWithZone(closure)); - VMState state(isolate, PARALLEL_COMPILER); + VMState<COMPILER> state(isolate); PostponeInterruptsScope postpone(isolate); Handle<SharedFunctionInfo> shared = info->shared_info(); @@ -998,7 +1000,7 @@ void Compiler::InstallOptimizedCode(OptimizingCompiler* optimizing_compiler) { } Isolate* isolate = info->isolate(); - VMState state(isolate, PARALLEL_COMPILER); + VMState<COMPILER> state(isolate); Logger::TimerEventScope timer( isolate, Logger::TimerEventScope::v8_recompile_synchronous); // If crankshaft succeeded, install the optimized code else install diff --git a/deps/v8/src/compiler.h b/deps/v8/src/compiler.h index dae39db351..00074c899b 100644 --- a/deps/v8/src/compiler.h +++ b/deps/v8/src/compiler.h @@ -143,6 +143,14 @@ class CompilationInfo { return SavesCallerDoubles::decode(flags_); } + void MarkAsRequiresFrame() { + flags_ |= RequiresFrame::encode(true); + } + + bool requires_frame() const { + return RequiresFrame::decode(flags_); + } + void SetParseRestriction(ParseRestriction restriction) { flags_ = ParseRestricitonField::update(flags_, restriction); } @@ -300,6 +308,8 @@ class CompilationInfo { class SavesCallerDoubles: public BitField<bool, 12, 1> {}; // If the set of valid statements is restricted. class ParseRestricitonField: public BitField<ParseRestriction, 13, 1> {}; + // If the function requires a frame (for unspecified reasons) + class RequiresFrame: public BitField<bool, 14, 1> {}; unsigned flags_; diff --git a/deps/v8/src/contexts.h b/deps/v8/src/contexts.h index abeb8121cb..0024e13d6b 100644 --- a/deps/v8/src/contexts.h +++ b/deps/v8/src/contexts.h @@ -123,6 +123,7 @@ enum BindingFlags { V(GLOBAL_EVAL_FUN_INDEX, JSFunction, global_eval_fun) \ V(INSTANTIATE_FUN_INDEX, JSFunction, instantiate_fun) \ V(CONFIGURE_INSTANCE_FUN_INDEX, JSFunction, configure_instance_fun) \ + V(ARRAY_BUFFER_FUN_INDEX, JSFunction, array_buffer_fun) \ V(FUNCTION_MAP_INDEX, Map, function_map) \ V(STRICT_MODE_FUNCTION_MAP_INDEX, Map, strict_mode_function_map) \ V(FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX, Map, function_without_prototype_map) \ @@ -276,6 +277,7 @@ class Context: public FixedArray { GLOBAL_EVAL_FUN_INDEX, INSTANTIATE_FUN_INDEX, CONFIGURE_INSTANCE_FUN_INDEX, + ARRAY_BUFFER_FUN_INDEX, MESSAGE_LISTENERS_INDEX, MAKE_MESSAGE_FUN_INDEX, GET_STACK_TRACE_LINE_INDEX, diff --git a/deps/v8/src/conversions-inl.h b/deps/v8/src/conversions-inl.h index eb718d684d..595ae9ed5b 100644 --- a/deps/v8/src/conversions-inl.h +++ b/deps/v8/src/conversions-inl.h @@ -29,9 +29,9 @@ #define V8_CONVERSIONS_INL_H_ #include <limits.h> // Required for INT_MAX etc. -#include <math.h> #include <float.h> // Required for DBL_MAX and on Win32 for finite() #include <stdarg.h> +#include <cmath> #include "globals.h" // Required for V8_INFINITY // ---------------------------------------------------------------------------- @@ -86,8 +86,8 @@ inline unsigned int FastD2UI(double x) { inline double DoubleToInteger(double x) { - if (isnan(x)) return 0; - if (!isfinite(x) || x == 0) return x; + if (std::isnan(x)) return 0; + if (!std::isfinite(x) || x == 0) return x; return (x >= 0) ? 
floor(x) : ceil(x); } diff --git a/deps/v8/src/conversions.cc b/deps/v8/src/conversions.cc index 5bfddd04c0..cdc42e34d9 100644 --- a/deps/v8/src/conversions.cc +++ b/deps/v8/src/conversions.cc @@ -26,14 +26,19 @@ // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include <stdarg.h> -#include <math.h> #include <limits.h> +#include <cmath> #include "conversions-inl.h" #include "dtoa.h" #include "strtod.h" #include "utils.h" +#ifndef _STLP_VENDOR_CSTD +// STLPort doesn't import fpclassify into the std namespace. +using std::fpclassify; +#endif + namespace v8 { namespace internal { diff --git a/deps/v8/src/counters.cc b/deps/v8/src/counters.cc index 7c8265e981..fa192baed2 100644 --- a/deps/v8/src/counters.cc +++ b/deps/v8/src/counters.cc @@ -45,57 +45,38 @@ int* StatsCounter::FindLocationInStatsTable() const { } -// Start the timer. -void StatsCounterTimer::Start() { - if (!counter_.Enabled()) - return; - stop_time_ = 0; - start_time_ = OS::Ticks(); -} - -// Stop the timer and record the results. -void StatsCounterTimer::Stop() { - if (!counter_.Enabled()) - return; - stop_time_ = OS::Ticks(); - - // Compute the delta between start and stop, in milliseconds. - int milliseconds = static_cast<int>(stop_time_ - start_time_) / 1000; - counter_.Increment(milliseconds); -} - void Histogram::AddSample(int sample) { if (Enabled()) { - Isolate::Current()->stats_table()->AddHistogramSample(histogram_, sample); + isolate()->stats_table()->AddHistogramSample(histogram_, sample); } } void* Histogram::CreateHistogram() const { - return Isolate::Current()->stats_table()-> + return isolate()->stats_table()-> CreateHistogram(name_, min_, max_, num_buckets_); } // Start the timer. void HistogramTimer::Start() { - if (histogram_.Enabled()) { + if (Enabled()) { stop_time_ = 0; start_time_ = OS::Ticks(); } if (FLAG_log_internal_timer_events) { - LOG(Isolate::Current(), TimerEvent(Logger::START, histogram_.name_)); + LOG(isolate(), TimerEvent(Logger::START, name())); } } // Stop the timer and record the results. void HistogramTimer::Stop() { - if (histogram_.Enabled()) { + if (Enabled()) { stop_time_ = OS::Ticks(); // Compute the delta between start and stop, in milliseconds. int milliseconds = static_cast<int>(stop_time_ - start_time_) / 1000; - histogram_.AddSample(milliseconds); + AddSample(milliseconds); } if (FLAG_log_internal_timer_events) { - LOG(Isolate::Current(), TimerEvent(Logger::END, histogram_.name_)); + LOG(isolate(), TimerEvent(Logger::END, name())); } } diff --git a/deps/v8/src/counters.h b/deps/v8/src/counters.h index 577280f444..a633fea779 100644 --- a/deps/v8/src/counters.h +++ b/deps/v8/src/counters.h @@ -113,14 +113,11 @@ class StatsTable { // The row has a 32bit value for each process/thread in the table and also // a name (stored in the table metadata). Since the storage location can be // thread-specific, this class cannot be shared across threads. -// -// This class is designed to be POD initialized. It will be registered with -// the counter system on first use. For example: -// StatsCounter c = { "c:myctr", NULL, false }; -struct StatsCounter { - const char* name_; - int* ptr_; - bool lookup_done_; +class StatsCounter { + public: + StatsCounter() { } + explicit StatsCounter(const char* name) + : name_(name), ptr_(NULL), lookup_done_(false) { } // Sets the counter to a specific value. 
void Set(int value) { @@ -177,39 +174,29 @@ struct StatsCounter { private: int* FindLocationInStatsTable() const; -}; - -// StatsCounterTimer t = { { L"t:foo", NULL, false }, 0, 0 }; -struct StatsCounterTimer { - StatsCounter counter_; - - int64_t start_time_; - int64_t stop_time_; - - // Start the timer. - void Start(); - - // Stop the timer and record the results. - void Stop(); - // Returns true if the timer is running. - bool Running() { - return counter_.Enabled() && start_time_ != 0 && stop_time_ == 0; - } + const char* name_; + int* ptr_; + bool lookup_done_; }; // A Histogram represents a dynamically created histogram in the StatsTable. -// -// This class is designed to be POD initialized. It will be registered with -// the histogram system on first use. For example: -// Histogram h = { "myhist", 0, 10000, 50, NULL, false }; -struct Histogram { - const char* name_; - int min_; - int max_; - int num_buckets_; - void* histogram_; - bool lookup_done_; +// It will be registered with the histogram system on first use. +class Histogram { + public: + Histogram() { } + Histogram(const char* name, + int min, + int max, + int num_buckets, + Isolate* isolate) + : name_(name), + min_(min), + max_(max), + num_buckets_(num_buckets), + histogram_(NULL), + lookup_done_(false), + isolate_(isolate) { } // Add a single sample to this histogram. void AddSample(int sample); @@ -234,17 +221,33 @@ struct Histogram { return histogram_; } + const char* name() { return name_; } + Isolate* isolate() const { return isolate_; } + private: void* CreateHistogram() const; -}; -// A HistogramTimer allows distributions of results to be created -// HistogramTimer t = { {L"foo", 0, 10000, 50, NULL, false}, 0, 0 }; -struct HistogramTimer { - Histogram histogram_; + const char* name_; + int min_; + int max_; + int num_buckets_; + void* histogram_; + bool lookup_done_; + Isolate* isolate_; +}; - int64_t start_time_; - int64_t stop_time_; +// A HistogramTimer allows distributions of results to be created. +class HistogramTimer : public Histogram { + public: + HistogramTimer() { } + HistogramTimer(const char* name, + int min, + int max, + int num_buckets, + Isolate* isolate) + : Histogram(name, min, max, num_buckets, isolate), + start_time_(0), + stop_time_(0) { } // Start the timer. void Start(); @@ -254,12 +257,12 @@ struct HistogramTimer { // Returns true if the timer is running. bool Running() { - return histogram_.Enabled() && (start_time_ != 0) && (stop_time_ == 0); + return Enabled() && (start_time_ != 0) && (stop_time_ == 0); } - void Reset() { - histogram_.Reset(); - } + private: + int64_t start_time_; + int64_t stop_time_; }; // Helper class for scoping a HistogramTimer. 
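
The counters.h hunk above turns StatsCounter, Histogram and HistogramTimer from POD structs (brace-initialized and registered lazily on first use) into classes with real constructors, with Histogram additionally carrying its Isolate* so samples no longer go through Isolate::Current(). A minimal usage sketch under the constructor signatures shown in the hunk — the counter name, histogram parameters, and the in-scope isolate are illustrative assumptions, not code from the commit:

    // Old, POD style (removed above):
    //   StatsCounter c = { "c:myctr", NULL, false };
    // New, constructor style:
    StatsCounter my_counter("c:myctr");  // name is illustrative
    my_counter.Increment();

    // Histogram/HistogramTimer now take their Isolate* up front.
    HistogramTimer my_timer("V8.MyTimer", 0, 10000, 50, isolate);
    my_timer.Start();
    // ... timed work ...
    my_timer.Stop();  // computes the delta in ms and records it as a sample
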
diff --git a/deps/v8/src/cpu-profiler.cc b/deps/v8/src/cpu-profiler.cc index 47c2a94232..51d29423c4 100644 --- a/deps/v8/src/cpu-profiler.cc +++ b/deps/v8/src/cpu-profiler.cc @@ -44,9 +44,11 @@ static const int kTickSamplesBufferChunksCount = 16; static const int kProfilerStackSize = 64 * KB; -ProfilerEventsProcessor::ProfilerEventsProcessor(ProfileGenerator* generator) +ProfilerEventsProcessor::ProfilerEventsProcessor( + ProfileGenerator* generator, CpuProfilesCollection* profiles) : Thread(Thread::Options("v8:ProfEvntProc", kProfilerStackSize)), generator_(generator), + profiles_(profiles), running_(true), ticks_buffer_(sizeof(TickSampleEventRecord), kTickSamplesBufferChunkSize, @@ -65,7 +67,7 @@ void ProfilerEventsProcessor::CallbackCreateEvent(Logger::LogEventsAndTags tag, rec->type = CodeEventRecord::CODE_CREATION; rec->order = ++enqueue_order_; rec->start = start; - rec->entry = generator_->NewCodeEntry(tag, prefix, name); + rec->entry = profiles_->NewCodeEntry(tag, prefix, name); rec->size = 1; rec->shared = NULL; events_buffer_.Enqueue(evt_rec); @@ -85,7 +87,7 @@ void ProfilerEventsProcessor::CodeCreateEvent(Logger::LogEventsAndTags tag, rec->type = CodeEventRecord::CODE_CREATION; rec->order = ++enqueue_order_; rec->start = start; - rec->entry = generator_->NewCodeEntry(tag, name, resource_name, line_number); + rec->entry = profiles_->NewCodeEntry(tag, name, resource_name, line_number); rec->size = size; rec->shared = shared; events_buffer_.Enqueue(evt_rec); @@ -102,7 +104,7 @@ void ProfilerEventsProcessor::CodeCreateEvent(Logger::LogEventsAndTags tag, rec->type = CodeEventRecord::CODE_CREATION; rec->order = ++enqueue_order_; rec->start = start; - rec->entry = generator_->NewCodeEntry(tag, name); + rec->entry = profiles_->NewCodeEntry(tag, name); rec->size = size; rec->shared = NULL; events_buffer_.Enqueue(evt_rec); @@ -119,7 +121,7 @@ void ProfilerEventsProcessor::CodeCreateEvent(Logger::LogEventsAndTags tag, rec->type = CodeEventRecord::CODE_CREATION; rec->order = ++enqueue_order_; rec->start = start; - rec->entry = generator_->NewCodeEntry(tag, args_count); + rec->entry = profiles_->NewCodeEntry(tag, args_count); rec->size = size; rec->shared = NULL; events_buffer_.Enqueue(evt_rec); @@ -162,7 +164,7 @@ void ProfilerEventsProcessor::RegExpCodeCreateEvent( rec->type = CodeEventRecord::CODE_CREATION; rec->order = ++enqueue_order_; rec->start = start; - rec->entry = generator_->NewCodeEntry(tag, prefix, name); + rec->entry = profiles_->NewCodeEntry(tag, prefix, name); rec->size = size; events_buffer_.Enqueue(evt_rec); } @@ -443,7 +445,7 @@ void CpuProfiler::StartProcessorIfNotStarted() { saved_logging_nesting_ = isolate_->logger()->logging_nesting_; isolate_->logger()->logging_nesting_ = 0; generator_ = new ProfileGenerator(profiles_); - processor_ = new ProfilerEventsProcessor(generator_); + processor_ = new ProfilerEventsProcessor(generator_, profiles_); is_profiling_ = true; processor_->StartSynchronously(); // Enumerate stuff we already have in the heap. @@ -458,7 +460,7 @@ void CpuProfiler::StartProcessorIfNotStarted() { isolate_->logger()->LogAccessorCallbacks(); } // Enable stack sampling. 
- Sampler* sampler = reinterpret_cast<Sampler*>(isolate_->logger()->ticker_); + Sampler* sampler = isolate_->logger()->sampler(); sampler->IncreaseProfilingDepth(); if (!sampler->IsActive()) { sampler->Start(); diff --git a/deps/v8/src/cpu-profiler.h b/deps/v8/src/cpu-profiler.h index 6e2b0e09cd..da7ea6de24 100644 --- a/deps/v8/src/cpu-profiler.h +++ b/deps/v8/src/cpu-profiler.h @@ -125,7 +125,8 @@ class TickSampleEventRecord { // methods called by event producers: VM and stack sampler threads. class ProfilerEventsProcessor : public Thread { public: - explicit ProfilerEventsProcessor(ProfileGenerator* generator); + ProfilerEventsProcessor(ProfileGenerator* generator, + CpuProfilesCollection* profiles); virtual ~ProfilerEventsProcessor() {} // Thread control. @@ -178,6 +179,7 @@ class ProfilerEventsProcessor : public Thread { INLINE(static bool FilterOutCodeCreateEvent(Logger::LogEventsAndTags tag)); ProfileGenerator* generator_; + CpuProfilesCollection* profiles_; bool running_; UnboundQueue<CodeEventsContainer> events_buffer_; SamplingCircularQueue ticks_buffer_; diff --git a/deps/v8/src/d8.cc b/deps/v8/src/d8.cc index 8f6e384c1a..22ace174d2 100644 --- a/deps/v8/src/d8.cc +++ b/deps/v8/src/d8.cc @@ -42,6 +42,13 @@ #ifdef V8_SHARED #include <assert.h> +#endif // V8_SHARED + +#ifndef V8_SHARED +#include <algorithm> +#endif // !V8_SHARED + +#ifdef V8_SHARED #include "../include/v8-testing.h" #endif // V8_SHARED @@ -83,7 +90,7 @@ const char kArrayBufferMarkerPropName[] = "d8::_is_array_buffer_"; const char kArrayMarkerPropName[] = "d8::_is_typed_array_"; -#define FOR_EACH_SYMBOL(V) \ +#define FOR_EACH_STRING(V) \ V(ArrayBuffer, "ArrayBuffer") \ V(ArrayBufferMarkerPropName, kArrayBufferMarkerPropName) \ V(ArrayMarkerPropName, kArrayMarkerPropName) \ @@ -94,36 +101,58 @@ const char kArrayMarkerPropName[] = "d8::_is_typed_array_"; V(length, "length") -class Symbols { +class PerIsolateData { public: - explicit Symbols(Isolate* isolate) : isolate_(isolate) { + explicit PerIsolateData(Isolate* isolate) : isolate_(isolate), realms_(NULL) { HandleScope scope(isolate); -#define INIT_SYMBOL(name, value) \ - name##_ = Persistent<String>::New(isolate, String::NewSymbol(value)); - FOR_EACH_SYMBOL(INIT_SYMBOL) -#undef INIT_SYMBOL +#define INIT_STRING(name, value) \ + name##_string_ = Persistent<String>::New(isolate, String::NewSymbol(value)); + FOR_EACH_STRING(INIT_STRING) +#undef INIT_STRING isolate->SetData(this); } - ~Symbols() { -#define DISPOSE_SYMBOL(name, value) name##_.Dispose(isolate_); - FOR_EACH_SYMBOL(DISPOSE_SYMBOL) -#undef DISPOSE_SYMBOL + ~PerIsolateData() { +#define DISPOSE_STRING(name, value) name##_string_.Dispose(isolate_); + FOR_EACH_STRING(DISPOSE_STRING) +#undef DISPOSE_STRING isolate_->SetData(NULL); // Not really needed, just to be sure... 
} -#define DEFINE_SYMBOL_GETTER(name, value) \ - static Persistent<String> name(Isolate* isolate) { \ - return reinterpret_cast<Symbols*>(isolate->GetData())->name##_; \ + inline static PerIsolateData* Get(Isolate* isolate) { + return reinterpret_cast<PerIsolateData*>(isolate->GetData()); + } + +#define DEFINE_STRING_GETTER(name, value) \ + static Persistent<String> name##_string(Isolate* isolate) { \ + return Get(isolate)->name##_string_; \ } - FOR_EACH_SYMBOL(DEFINE_SYMBOL_GETTER) -#undef DEFINE_SYMBOL_GETTER + FOR_EACH_STRING(DEFINE_STRING_GETTER) +#undef DEFINE_STRING_GETTER + + class RealmScope { + public: + explicit RealmScope(PerIsolateData* data); + ~RealmScope(); + private: + PerIsolateData* data_; + }; private: + friend class Shell; + friend class RealmScope; Isolate* isolate_; -#define DEFINE_MEMBER(name, value) Persistent<String> name##_; - FOR_EACH_SYMBOL(DEFINE_MEMBER) + int realm_count_; + int realm_current_; + int realm_switch_; + Persistent<Context>* realms_; + Persistent<Value> realm_shared_; + +#define DEFINE_MEMBER(name, value) Persistent<String> name##_string_; + FOR_EACH_STRING(DEFINE_MEMBER) #undef DEFINE_MEMBER + + int RealmFind(Handle<Context> context); }; @@ -207,14 +236,20 @@ bool Shell::ExecuteString(Isolate* isolate, // When debugging make exceptions appear to be uncaught. try_catch.SetVerbose(true); } - Handle<Script> script = Script::Compile(source, name); + Handle<Script> script = Script::New(source, name); if (script.IsEmpty()) { // Print errors that happened during compilation. if (report_exceptions && !FLAG_debugger) ReportException(isolate, &try_catch); return false; } else { + PerIsolateData* data = PerIsolateData::Get(isolate); + Local<Context> realm = + Local<Context>::New(data->realms_[data->realm_current_]); + realm->Enter(); Handle<Value> result = script->Run(); + realm->Exit(); + data->realm_current_ = data->realm_switch_; if (result.IsEmpty()) { ASSERT(try_catch.HasCaught()); // Print errors that happened during execution. @@ -255,6 +290,164 @@ bool Shell::ExecuteString(Isolate* isolate, } +PerIsolateData::RealmScope::RealmScope(PerIsolateData* data) : data_(data) { + data_->realm_count_ = 1; + data_->realm_current_ = 0; + data_->realm_switch_ = 0; + data_->realms_ = new Persistent<Context>[1]; + data_->realms_[0] = + Persistent<Context>::New(data_->isolate_, Context::GetEntered()); + data_->realm_shared_.Clear(); +} + + +PerIsolateData::RealmScope::~RealmScope() { + // Drop realms to avoid keeping them alive. + for (int i = 0; i < data_->realm_count_; ++i) + data_->realms_[i].Dispose(data_->isolate_); + delete[] data_->realms_; + if (!data_->realm_shared_.IsEmpty()) + data_->realm_shared_.Dispose(data_->isolate_); +} + + +int PerIsolateData::RealmFind(Handle<Context> context) { + for (int i = 0; i < realm_count_; ++i) { + if (realms_[i] == context) return i; + } + return -1; +} + + +// Realm.current() returns the index of the currently active realm. +Handle<Value> Shell::RealmCurrent(const Arguments& args) { + Isolate* isolate = args.GetIsolate(); + PerIsolateData* data = PerIsolateData::Get(isolate); + int index = data->RealmFind(Context::GetEntered()); + if (index == -1) return Undefined(isolate); + return Number::New(index); +} + + +// Realm.owner(o) returns the index of the realm that created o. 
+Handle<Value> Shell::RealmOwner(const Arguments& args) { + Isolate* isolate = args.GetIsolate(); + PerIsolateData* data = PerIsolateData::Get(isolate); + if (args.Length() < 1 || !args[0]->IsObject()) { + return Throw("Invalid argument"); + } + int index = data->RealmFind(args[0]->ToObject()->CreationContext()); + if (index == -1) return Undefined(isolate); + return Number::New(index); +} + + +// Realm.global(i) returns the global object of realm i. +// (Note that properties of global objects cannot be read/written cross-realm.) +Handle<Value> Shell::RealmGlobal(const Arguments& args) { + PerIsolateData* data = PerIsolateData::Get(args.GetIsolate()); + if (args.Length() < 1 || !args[0]->IsNumber()) { + return Throw("Invalid argument"); + } + int index = args[0]->Uint32Value(); + if (index >= data->realm_count_ || data->realms_[index].IsEmpty()) { + return Throw("Invalid realm index"); + } + return data->realms_[index]->Global(); +} + + +// Realm.create() creates a new realm and returns its index. +Handle<Value> Shell::RealmCreate(const Arguments& args) { + Isolate* isolate = args.GetIsolate(); + PerIsolateData* data = PerIsolateData::Get(isolate); + Persistent<Context>* old_realms = data->realms_; + int index = data->realm_count_; + data->realms_ = new Persistent<Context>[++data->realm_count_]; + for (int i = 0; i < index; ++i) data->realms_[i] = old_realms[i]; + delete[] old_realms; + Handle<ObjectTemplate> global_template = CreateGlobalTemplate(isolate); + data->realms_[index] = Persistent<Context>::New( + isolate, Context::New(isolate, NULL, global_template)); + return Number::New(index); +} + + +// Realm.dispose(i) disposes the reference to the realm i. +Handle<Value> Shell::RealmDispose(const Arguments& args) { + Isolate* isolate = args.GetIsolate(); + PerIsolateData* data = PerIsolateData::Get(isolate); + if (args.Length() < 1 || !args[0]->IsNumber()) { + return Throw("Invalid argument"); + } + int index = args[0]->Uint32Value(); + if (index >= data->realm_count_ || data->realms_[index].IsEmpty() || + index == 0 || + index == data->realm_current_ || index == data->realm_switch_) { + return Throw("Invalid realm index"); + } + data->realms_[index].Dispose(isolate); + data->realms_[index].Clear(); + return Undefined(isolate); +} + + +// Realm.switch(i) switches to the realm i for consecutive interactive inputs. +Handle<Value> Shell::RealmSwitch(const Arguments& args) { + Isolate* isolate = args.GetIsolate(); + PerIsolateData* data = PerIsolateData::Get(isolate); + if (args.Length() < 1 || !args[0]->IsNumber()) { + return Throw("Invalid argument"); + } + int index = args[0]->Uint32Value(); + if (index >= data->realm_count_ || data->realms_[index].IsEmpty()) { + return Throw("Invalid realm index"); + } + data->realm_switch_ = index; + return Undefined(isolate); +} + + +// Realm.eval(i, s) evaluates s in realm i and returns the result. 
+Handle<Value> Shell::RealmEval(const Arguments& args) { + Isolate* isolate = args.GetIsolate(); + PerIsolateData* data = PerIsolateData::Get(isolate); + if (args.Length() < 2 || !args[0]->IsNumber() || !args[1]->IsString()) { + return Throw("Invalid argument"); + } + int index = args[0]->Uint32Value(); + if (index >= data->realm_count_ || data->realms_[index].IsEmpty()) { + return Throw("Invalid realm index"); + } + Handle<Script> script = Script::New(args[1]->ToString()); + if (script.IsEmpty()) return Undefined(isolate); + Local<Context> realm = Local<Context>::New(data->realms_[index]); + realm->Enter(); + Handle<Value> result = script->Run(); + realm->Exit(); + return result; +} + + +// Realm.shared is an accessor for a single shared value across realms. +Handle<Value> Shell::RealmSharedGet(Local<String> property, + const AccessorInfo& info) { + Isolate* isolate = info.GetIsolate(); + PerIsolateData* data = PerIsolateData::Get(isolate); + if (data->realm_shared_.IsEmpty()) return Undefined(isolate); + return data->realm_shared_; +} + +void Shell::RealmSharedSet(Local<String> property, + Local<Value> value, + const AccessorInfo& info) { + Isolate* isolate = info.GetIsolate(); + PerIsolateData* data = PerIsolateData::Get(isolate); + if (!data->realm_shared_.IsEmpty()) data->realm_shared_.Dispose(isolate); + data->realm_shared_ = Persistent<Value>::New(isolate, value); +} + + Handle<Value> Shell::Print(const Arguments& args) { Handle<Value> val = Write(args); printf("\n"); @@ -416,7 +609,8 @@ Handle<Value> Shell::CreateExternalArrayBuffer(Isolate* isolate, } memset(data, 0, length); - buffer->SetHiddenValue(Symbols::ArrayBufferMarkerPropName(isolate), True()); + buffer->SetHiddenValue( + PerIsolateData::ArrayBufferMarkerPropName_string(isolate), True()); Persistent<Object> persistent_array = Persistent<Object>::New(isolate, buffer); persistent_array.MakeWeak(isolate, data, ExternalArrayWeakCallback); @@ -425,7 +619,7 @@ Handle<Value> Shell::CreateExternalArrayBuffer(Isolate* isolate, buffer->SetIndexedPropertiesToExternalArrayData( data, v8::kExternalByteArray, length); - buffer->Set(Symbols::byteLength(isolate), + buffer->Set(PerIsolateData::byteLength_string(isolate), Int32::New(length, isolate), ReadOnly); @@ -470,20 +664,20 @@ Handle<Object> Shell::CreateExternalArray(Isolate* isolate, array->SetIndexedPropertiesToExternalArrayData( static_cast<uint8_t*>(data) + byteOffset, type, length); - array->SetHiddenValue(Symbols::ArrayMarkerPropName(isolate), + array->SetHiddenValue(PerIsolateData::ArrayMarkerPropName_string(isolate), Int32::New(type, isolate)); - array->Set(Symbols::byteLength(isolate), + array->Set(PerIsolateData::byteLength_string(isolate), Int32::New(byteLength, isolate), ReadOnly); - array->Set(Symbols::byteOffset(isolate), + array->Set(PerIsolateData::byteOffset_string(isolate), Int32::New(byteOffset, isolate), ReadOnly); - array->Set(Symbols::length(isolate), + array->Set(PerIsolateData::length_string(isolate), Int32::New(length, isolate), ReadOnly); - array->Set(Symbols::BYTES_PER_ELEMENT(isolate), + array->Set(PerIsolateData::BYTES_PER_ELEMENT_string(isolate), Int32::New(element_size, isolate)); - array->Set(Symbols::buffer(isolate), + array->Set(PerIsolateData::buffer_string(isolate), buffer, ReadOnly); @@ -524,11 +718,11 @@ Handle<Value> Shell::CreateExternalArray(const Arguments& args, } if (args[0]->IsObject() && !args[0]->ToObject()->GetHiddenValue( - Symbols::ArrayBufferMarkerPropName(isolate)).IsEmpty()) { + 
PerIsolateData::ArrayBufferMarkerPropName_string(isolate)).IsEmpty()) { // Construct from ArrayBuffer. buffer = args[0]->ToObject(); - int32_t bufferLength = - convertToUint(buffer->Get(Symbols::byteLength(isolate)), &try_catch); + int32_t bufferLength = convertToUint( + buffer->Get(PerIsolateData::byteLength_string(isolate)), &try_catch); if (try_catch.HasCaught()) return try_catch.ReThrow(); if (args.Length() < 2 || args[1]->IsUndefined()) { @@ -560,9 +754,10 @@ Handle<Value> Shell::CreateExternalArray(const Arguments& args, } } else { if (args[0]->IsObject() && - args[0]->ToObject()->Has(Symbols::length(isolate))) { + args[0]->ToObject()->Has(PerIsolateData::length_string(isolate))) { // Construct from array. - Local<Value> value = args[0]->ToObject()->Get(Symbols::length(isolate)); + Local<Value> value = + args[0]->ToObject()->Get(PerIsolateData::length_string(isolate)); if (try_catch.HasCaught()) return try_catch.ReThrow(); length = convertToUint(value, &try_catch); if (try_catch.HasCaught()) return try_catch.ReThrow(); @@ -576,7 +771,8 @@ Handle<Value> Shell::CreateExternalArray(const Arguments& args, byteOffset = 0; Handle<Object> global = Context::GetCurrent()->Global(); - Handle<Value> array_buffer = global->Get(Symbols::ArrayBuffer(isolate)); + Handle<Value> array_buffer = + global->Get(PerIsolateData::ArrayBuffer_string(isolate)); ASSERT(!try_catch.HasCaught() && array_buffer->IsFunction()); Handle<Value> buffer_args[] = { Uint32::New(byteLength, isolate) }; Handle<Value> result = Handle<Function>::Cast(array_buffer)->NewInstance( @@ -611,14 +807,14 @@ Handle<Value> Shell::ArrayBufferSlice(const Arguments& args) { Isolate* isolate = args.GetIsolate(); Local<Object> self = args.This(); - Local<Value> marker = - self->GetHiddenValue(Symbols::ArrayBufferMarkerPropName(isolate)); + Local<Value> marker = self->GetHiddenValue( + PerIsolateData::ArrayBufferMarkerPropName_string(isolate)); if (marker.IsEmpty()) { return Throw("'slice' invoked on wrong receiver type"); } - int32_t length = - convertToUint(self->Get(Symbols::byteLength(isolate)), &try_catch); + int32_t length = convertToUint( + self->Get(PerIsolateData::byteLength_string(isolate)), &try_catch); if (try_catch.HasCaught()) return try_catch.ReThrow(); if (args.Length() == 0) { @@ -667,21 +863,22 @@ Handle<Value> Shell::ArraySubArray(const Arguments& args) { Isolate* isolate = args.GetIsolate(); Local<Object> self = args.This(); Local<Value> marker = - self->GetHiddenValue(Symbols::ArrayMarkerPropName(isolate)); + self->GetHiddenValue(PerIsolateData::ArrayMarkerPropName_string(isolate)); if (marker.IsEmpty()) { return Throw("'subarray' invoked on wrong receiver type"); } - Handle<Object> buffer = self->Get(Symbols::buffer(isolate))->ToObject(); + Handle<Object> buffer = + self->Get(PerIsolateData::buffer_string(isolate))->ToObject(); if (try_catch.HasCaught()) return try_catch.ReThrow(); - int32_t length = - convertToUint(self->Get(Symbols::length(isolate)), &try_catch); + int32_t length = convertToUint( + self->Get(PerIsolateData::length_string(isolate)), &try_catch); if (try_catch.HasCaught()) return try_catch.ReThrow(); - int32_t byteOffset = - convertToUint(self->Get(Symbols::byteOffset(isolate)), &try_catch); + int32_t byteOffset = convertToUint( + self->Get(PerIsolateData::byteOffset_string(isolate)), &try_catch); if (try_catch.HasCaught()) return try_catch.ReThrow(); - int32_t element_size = - convertToUint(self->Get(Symbols::BYTES_PER_ELEMENT(isolate)), &try_catch); + int32_t element_size = convertToUint( + 
self->Get(PerIsolateData::BYTES_PER_ELEMENT_string(isolate)), &try_catch); if (try_catch.HasCaught()) return try_catch.ReThrow(); if (args.Length() == 0) { @@ -726,27 +923,27 @@ Handle<Value> Shell::ArraySet(const Arguments& args) { Isolate* isolate = args.GetIsolate(); Local<Object> self = args.This(); Local<Value> marker = - self->GetHiddenValue(Symbols::ArrayMarkerPropName(isolate)); + self->GetHiddenValue(PerIsolateData::ArrayMarkerPropName_string(isolate)); if (marker.IsEmpty()) { return Throw("'set' invoked on wrong receiver type"); } - int32_t length = - convertToUint(self->Get(Symbols::length(isolate)), &try_catch); + int32_t length = convertToUint( + self->Get(PerIsolateData::length_string(isolate)), &try_catch); if (try_catch.HasCaught()) return try_catch.ReThrow(); - int32_t element_size = - convertToUint(self->Get(Symbols::BYTES_PER_ELEMENT(isolate)), &try_catch); + int32_t element_size = convertToUint( + self->Get(PerIsolateData::BYTES_PER_ELEMENT_string(isolate)), &try_catch); if (try_catch.HasCaught()) return try_catch.ReThrow(); if (args.Length() == 0) { return Throw("'set' must have at least one argument"); } if (!args[0]->IsObject() || - !args[0]->ToObject()->Has(Symbols::length(isolate))) { + !args[0]->ToObject()->Has(PerIsolateData::length_string(isolate))) { return Throw("'set' invoked with non-array argument"); } Handle<Object> source = args[0]->ToObject(); - int32_t source_length = - convertToUint(source->Get(Symbols::length(isolate)), &try_catch); + int32_t source_length = convertToUint( + source->Get(PerIsolateData::length_string(isolate)), &try_catch); if (try_catch.HasCaught()) return try_catch.ReThrow(); int32_t offset; @@ -761,28 +958,30 @@ Handle<Value> Shell::ArraySet(const Arguments& args) { } int32_t source_element_size; - if (source->GetHiddenValue(Symbols::ArrayMarkerPropName(isolate)).IsEmpty()) { + if (source->GetHiddenValue( + PerIsolateData::ArrayMarkerPropName_string(isolate)).IsEmpty()) { source_element_size = 0; } else { - source_element_size = - convertToUint(source->Get(Symbols::BYTES_PER_ELEMENT(isolate)), - &try_catch); + source_element_size = convertToUint( + source->Get(PerIsolateData::BYTES_PER_ELEMENT_string(isolate)), + &try_catch); if (try_catch.HasCaught()) return try_catch.ReThrow(); } if (element_size == source_element_size && self->GetConstructor()->StrictEquals(source->GetConstructor())) { // Use memmove on the array buffers. 
- Handle<Object> buffer = self->Get(Symbols::buffer(isolate))->ToObject(); + Handle<Object> buffer = + self->Get(PerIsolateData::buffer_string(isolate))->ToObject(); if (try_catch.HasCaught()) return try_catch.ReThrow(); Handle<Object> source_buffer = - source->Get(Symbols::buffer(isolate))->ToObject(); + source->Get(PerIsolateData::buffer_string(isolate))->ToObject(); if (try_catch.HasCaught()) return try_catch.ReThrow(); - int32_t byteOffset = - convertToUint(self->Get(Symbols::byteOffset(isolate)), &try_catch); + int32_t byteOffset = convertToUint( + self->Get(PerIsolateData::byteOffset_string(isolate)), &try_catch); if (try_catch.HasCaught()) return try_catch.ReThrow(); - int32_t source_byteOffset = - convertToUint(source->Get(Symbols::byteOffset(isolate)), &try_catch); + int32_t source_byteOffset = convertToUint( + source->Get(PerIsolateData::byteOffset_string(isolate)), &try_catch); if (try_catch.HasCaught()) return try_catch.ReThrow(); uint8_t* dest = byteOffset + offset * element_size + static_cast<uint8_t*>( @@ -798,21 +997,22 @@ Handle<Value> Shell::ArraySet(const Arguments& args) { } } else { // Need to copy element-wise to make the right conversions. - Handle<Object> buffer = self->Get(Symbols::buffer(isolate))->ToObject(); + Handle<Object> buffer = + self->Get(PerIsolateData::buffer_string(isolate))->ToObject(); if (try_catch.HasCaught()) return try_catch.ReThrow(); Handle<Object> source_buffer = - source->Get(Symbols::buffer(isolate))->ToObject(); + source->Get(PerIsolateData::buffer_string(isolate))->ToObject(); if (try_catch.HasCaught()) return try_catch.ReThrow(); if (buffer->StrictEquals(source_buffer)) { // Same backing store, need to handle overlap correctly. // This gets a bit tricky in the case of different element sizes // (which, of course, is extremely unlikely to ever occur in practice). - int32_t byteOffset = - convertToUint(self->Get(Symbols::byteOffset(isolate)), &try_catch); + int32_t byteOffset = convertToUint( + self->Get(PerIsolateData::byteOffset_string(isolate)), &try_catch); if (try_catch.HasCaught()) return try_catch.ReThrow(); - int32_t source_byteOffset = - convertToUint(source->Get(Symbols::byteOffset(isolate)), &try_catch); + int32_t source_byteOffset = convertToUint( + source->Get(PerIsolateData::byteOffset_string(isolate)), &try_catch); if (try_catch.HasCaught()) return try_catch.ReThrow(); // Copy as much as we can from left to right. @@ -860,8 +1060,8 @@ void Shell::ExternalArrayWeakCallback(v8::Isolate* isolate, Persistent<Value> object, void* data) { HandleScope scope(isolate); - int32_t length = - object->ToObject()->Get(Symbols::byteLength(isolate))->Uint32Value(); + int32_t length = object->ToObject()->Get( + PerIsolateData::byteLength_string(isolate))->Uint32Value(); isolate->AdjustAmountOfExternalAllocatedMemory(-length); delete[] static_cast<uint8_t*>(data); object.Dispose(isolate); @@ -1238,10 +1438,30 @@ Handle<ObjectTemplate> Shell::CreateGlobalTemplate(Isolate* isolate) { global_template->Set(String::New("disableProfiler"), FunctionTemplate::New(DisableProfiler)); + // Bind the Realm object. 
+ Handle<ObjectTemplate> realm_template = ObjectTemplate::New(); + realm_template->Set(String::New("current"), + FunctionTemplate::New(RealmCurrent)); + realm_template->Set(String::New("owner"), + FunctionTemplate::New(RealmOwner)); + realm_template->Set(String::New("global"), + FunctionTemplate::New(RealmGlobal)); + realm_template->Set(String::New("create"), + FunctionTemplate::New(RealmCreate)); + realm_template->Set(String::New("dispose"), + FunctionTemplate::New(RealmDispose)); + realm_template->Set(String::New("switch"), + FunctionTemplate::New(RealmSwitch)); + realm_template->Set(String::New("eval"), + FunctionTemplate::New(RealmEval)); + realm_template->SetAccessor(String::New("shared"), + RealmSharedGet, RealmSharedSet); + global_template->Set(String::New("Realm"), realm_template); + // Bind the handlers for external arrays. PropertyAttribute attr = static_cast<PropertyAttribute>(ReadOnly | DontDelete); - global_template->Set(Symbols::ArrayBuffer(isolate), + global_template->Set(PerIsolateData::ArrayBuffer_string(isolate), CreateArrayBufferTemplate(ArrayBuffer), attr); global_template->Set(String::New("Int8Array"), CreateArrayTemplate(Int8Array), attr); @@ -1360,9 +1580,8 @@ struct CounterAndKey { }; -int CompareKeys(const void* a, const void* b) { - return strcmp(static_cast<const CounterAndKey*>(a)->key, - static_cast<const CounterAndKey*>(b)->key); +inline bool operator<(const CounterAndKey& lhs, const CounterAndKey& rhs) { + return strcmp(lhs.key, rhs.key) < 0; } #endif // V8_SHARED @@ -1382,7 +1601,7 @@ void Shell::OnExit() { counters[j].counter = i.CurrentValue(); counters[j].key = i.CurrentKey(); } - qsort(counters, number_of_counters, sizeof(counters[0]), CompareKeys); + std::sort(counters, counters + number_of_counters); printf("+----------------------------------------------------------------+" "-------------+\n"); printf("| Name |" @@ -1469,7 +1688,8 @@ Handle<Value> Shell::ReadBuffer(const Arguments& args) { } Isolate* isolate = args.GetIsolate(); Handle<Object> buffer = Object::New(); - buffer->SetHiddenValue(Symbols::ArrayBufferMarkerPropName(isolate), True()); + buffer->SetHiddenValue( + PerIsolateData::ArrayBufferMarkerPropName_string(isolate), True()); Persistent<Object> persistent_buffer = Persistent<Object>::New(isolate, buffer); persistent_buffer.MakeWeak(isolate, data, ExternalArrayWeakCallback); @@ -1478,7 +1698,7 @@ Handle<Value> Shell::ReadBuffer(const Arguments& args) { buffer->SetIndexedPropertiesToExternalArrayData( data, kExternalUnsignedByteArray, length); - buffer->Set(Symbols::byteLength(isolate), + buffer->Set(PerIsolateData::byteLength_string(isolate), Int32::New(static_cast<int32_t>(length), isolate), ReadOnly); return buffer; } @@ -1521,6 +1741,7 @@ Handle<String> Shell::ReadFile(Isolate* isolate, const char* name) { void Shell::RunShell(Isolate* isolate) { Locker locker(isolate); Context::Scope context_scope(evaluation_context_); + PerIsolateData::RealmScope realm_scope(PerIsolateData::Get(isolate)); HandleScope outer_scope(isolate); Handle<String> name = String::New("(d8)"); LineEditor* console = LineEditor::Get(); @@ -1573,6 +1794,7 @@ void ShellThread::Run() { Persistent<Context> thread_context = Shell::CreateEvaluationContext(isolate_); Context::Scope context_scope(thread_context); + PerIsolateData::RealmScope realm_scope(PerIsolateData::Get(isolate_)); while ((ptr != NULL) && (*ptr != '\0')) { HandleScope inner_scope(isolate_); @@ -1671,10 +1893,11 @@ void SourceGroup::ExecuteInThread() { Isolate::Scope iscope(isolate); Locker lock(isolate); 
HandleScope scope(isolate); - Symbols symbols(isolate); + PerIsolateData data(isolate); Persistent<Context> context = Shell::CreateEvaluationContext(isolate); { Context::Scope cscope(context); + PerIsolateData::RealmScope realm_scope(PerIsolateData::Get(isolate)); Execute(isolate); } context.Dispose(isolate); @@ -1883,6 +2106,7 @@ int Shell::RunMain(Isolate* isolate, int argc, char* argv[]) { } { Context::Scope cscope(context); + PerIsolateData::RealmScope realm_scope(PerIsolateData::Get(isolate)); options.isolate_sources[0].Execute(isolate); } if (!options.last_run) { @@ -1933,7 +2157,7 @@ int Shell::Main(int argc, char* argv[]) { #ifdef ENABLE_VTUNE_JIT_INTERFACE vTune::InitilizeVtuneForV8(); #endif - Symbols symbols(isolate); + PerIsolateData data(isolate); InitializeDebugger(isolate); if (options.stress_opt || options.stress_deopt) { diff --git a/deps/v8/src/d8.h b/deps/v8/src/d8.h index 2789c07638..4d9504f0d4 100644 --- a/deps/v8/src/d8.h +++ b/deps/v8/src/d8.h @@ -298,6 +298,19 @@ class Shell : public i::AllStatic { #endif // ENABLE_DEBUGGER_SUPPORT #endif // V8_SHARED + static Handle<Value> RealmCurrent(const Arguments& args); + static Handle<Value> RealmOwner(const Arguments& args); + static Handle<Value> RealmGlobal(const Arguments& args); + static Handle<Value> RealmCreate(const Arguments& args); + static Handle<Value> RealmDispose(const Arguments& args); + static Handle<Value> RealmSwitch(const Arguments& args); + static Handle<Value> RealmEval(const Arguments& args); + static Handle<Value> RealmSharedGet(Local<String> property, + const AccessorInfo& info); + static void RealmSharedSet(Local<String> property, + Local<Value> value, + const AccessorInfo& info); + static Handle<Value> Print(const Arguments& args); static Handle<Value> Write(const Arguments& args); static Handle<Value> Quit(const Arguments& args); diff --git a/deps/v8/src/debug.cc b/deps/v8/src/debug.cc index 4af2194ea5..efba8e5853 100644 --- a/deps/v8/src/debug.cc +++ b/deps/v8/src/debug.cc @@ -612,7 +612,6 @@ void ScriptCache::Add(Handle<Script> script) { ASSERT(*script == *reinterpret_cast<Script**>(entry->value)); return; } - // Globalize the script object, make it weak and use the location of the // global handle as the value in the hash map. Handle<Script> script_ = diff --git a/deps/v8/src/deoptimizer.h b/deps/v8/src/deoptimizer.h index 97b2206b6a..edf6c504f1 100644 --- a/deps/v8/src/deoptimizer.h +++ b/deps/v8/src/deoptimizer.h @@ -309,6 +309,7 @@ class Deoptimizer : public Malloced { protected: MacroAssembler* masm() const { return masm_; } BailoutType type() const { return type_; } + Isolate* isolate() const { return masm_->isolate(); } virtual void GeneratePrologue() { } diff --git a/deps/v8/src/dtoa.cc b/deps/v8/src/dtoa.cc index 00233a8829..bda67205c7 100644 --- a/deps/v8/src/dtoa.cc +++ b/deps/v8/src/dtoa.cc @@ -25,7 +25,7 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -#include <math.h> +#include <cmath> #include "../include/v8stdint.h" #include "checks.h" diff --git a/deps/v8/src/execution.cc b/deps/v8/src/execution.cc index 025a25619f..6d8c3c1022 100644 --- a/deps/v8/src/execution.cc +++ b/deps/v8/src/execution.cc @@ -76,7 +76,7 @@ static Handle<Object> Invoke(bool is_construct, Isolate* isolate = function->GetIsolate(); // Entering JavaScript. - VMState state(isolate, JS); + VMState<JS> state(isolate); // Placeholder for return value. 
MaybeObject* value = reinterpret_cast<Object*>(kZapValue); @@ -426,6 +426,13 @@ bool StackGuard::IsTerminateExecution() { } +void StackGuard::CancelTerminateExecution() { + ExecutionAccess access(isolate_); + Continue(TERMINATE); + isolate_->CancelTerminateExecution(); +} + + void StackGuard::TerminateExecution() { ExecutionAccess access(isolate_); thread_local_.interrupt_flags_ |= TERMINATE; diff --git a/deps/v8/src/execution.h b/deps/v8/src/execution.h index 9cf8ac649c..3cdbf63e93 100644 --- a/deps/v8/src/execution.h +++ b/deps/v8/src/execution.h @@ -190,6 +190,7 @@ class StackGuard { void Interrupt(); bool IsTerminateExecution(); void TerminateExecution(); + void CancelTerminateExecution(); #ifdef ENABLE_DEBUGGER_SUPPORT bool IsDebugBreak(); void DebugBreak(); diff --git a/deps/v8/src/extensions/gc-extension.cc b/deps/v8/src/extensions/gc-extension.cc index 813b9219bf..1a2fe8ff4b 100644 --- a/deps/v8/src/extensions/gc-extension.cc +++ b/deps/v8/src/extensions/gc-extension.cc @@ -26,12 +26,11 @@ // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include "gc-extension.h" +#include "platform.h" namespace v8 { namespace internal { -const char* const GCExtension::kSource = "native function gc();"; - v8::Handle<v8::FunctionTemplate> GCExtension::GetNativeFunction( v8::Handle<v8::String> str) { @@ -50,7 +49,15 @@ v8::Handle<v8::Value> GCExtension::GC(const v8::Arguments& args) { void GCExtension::Register() { - static GCExtension gc_extension; + static char buffer[50]; + Vector<char> temp_vector(buffer, sizeof(buffer)); + if (FLAG_expose_gc_as != NULL && strlen(FLAG_expose_gc_as) != 0) { + OS::SNPrintF(temp_vector, "native function %s();", FLAG_expose_gc_as); + } else { + OS::SNPrintF(temp_vector, "native function gc();"); + } + + static GCExtension gc_extension(buffer); static v8::DeclareExtension declaration(&gc_extension); } diff --git a/deps/v8/src/extensions/gc-extension.h b/deps/v8/src/extensions/gc-extension.h index 06ea4ed21a..54b865adf7 100644 --- a/deps/v8/src/extensions/gc-extension.h +++ b/deps/v8/src/extensions/gc-extension.h @@ -35,13 +35,11 @@ namespace internal { class GCExtension : public v8::Extension { public: - GCExtension() : v8::Extension("v8/gc", kSource) {} + explicit GCExtension(const char* source) : v8::Extension("v8/gc", source) {} virtual v8::Handle<v8::FunctionTemplate> GetNativeFunction( v8::Handle<v8::String> name); static v8::Handle<v8::Value> GC(const v8::Arguments& args); static void Register(); - private: - static const char* const kSource; }; } } // namespace v8::internal diff --git a/deps/v8/src/factory.cc b/deps/v8/src/factory.cc index 77d22006a6..f36006c114 100644 --- a/deps/v8/src/factory.cc +++ b/deps/v8/src/factory.cc @@ -476,6 +476,8 @@ Handle<ExternalArray> Factory::NewExternalArray(int length, Handle<JSGlobalPropertyCell> Factory::NewJSGlobalPropertyCell( Handle<Object> value) { + ALLOW_HANDLE_DEREF(isolate(), + "converting a handle into a global property cell"); CALL_HEAP_FUNCTION( isolate(), isolate()->heap()->AllocateJSGlobalPropertyCell(*value), @@ -1044,6 +1046,16 @@ void Factory::EnsureCanContainElements(Handle<JSArray> array, } +Handle<JSArrayBuffer> Factory::NewJSArrayBuffer() { + JSFunction* array_buffer_fun = + isolate()->context()->native_context()->array_buffer_fun(); + CALL_HEAP_FUNCTION( + isolate(), + isolate()->heap()->AllocateJSObject(array_buffer_fun), + JSArrayBuffer); +} + + Handle<JSProxy> Factory::NewJSProxy(Handle<Object> handler, Handle<Object> prototype) { CALL_HEAP_FUNCTION( diff --git 
a/deps/v8/src/factory.h b/deps/v8/src/factory.h index b6bfa8ae46..caac78df47 100644 --- a/deps/v8/src/factory.h +++ b/deps/v8/src/factory.h @@ -313,6 +313,8 @@ class Factory { uint32_t length, EnsureElementsMode mode); + Handle<JSArrayBuffer> NewJSArrayBuffer(); + Handle<JSProxy> NewJSProxy(Handle<Object> handler, Handle<Object> prototype); // Change the type of the argument into a JS object/function and reinitialize. diff --git a/deps/v8/src/fixed-dtoa.cc b/deps/v8/src/fixed-dtoa.cc index 1fd974c3e2..fd90eca901 100644 --- a/deps/v8/src/fixed-dtoa.cc +++ b/deps/v8/src/fixed-dtoa.cc @@ -25,7 +25,7 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -#include <math.h> +#include <cmath> #include "../include/v8stdint.h" #include "checks.h" diff --git a/deps/v8/src/flag-definitions.h b/deps/v8/src/flag-definitions.h index 9c9362b2f0..0a6bf6762b 100644 --- a/deps/v8/src/flag-definitions.h +++ b/deps/v8/src/flag-definitions.h @@ -230,6 +230,9 @@ DEFINE_bool(stress_environments, false, "environment for every instruction") DEFINE_int(deopt_every_n_times, 0, "deoptimize every n times a deopt point is passed") +DEFINE_int(deopt_every_n_garbage_collections, + 0, + "deoptimize every n garbage collections") DEFINE_bool(trap_on_deopt, false, "put a break point before deoptimizing") DEFINE_bool(deoptimize_uncommon_cases, true, "deoptimize uncommon cases") DEFINE_bool(polymorphic_inlining, true, "polymorphic inlining") @@ -344,6 +347,10 @@ DEFINE_bool(enable_vldr_imm, false, DEFINE_string(expose_natives_as, NULL, "expose natives in global object") DEFINE_string(expose_debug_as, NULL, "expose debug in global object") DEFINE_bool(expose_gc, false, "expose gc extension") +DEFINE_string(expose_gc_as, + NULL, + "expose gc extension under the specified name") +DEFINE_implication(expose_gc_as, expose_gc) DEFINE_bool(expose_externalize_string, false, "expose externalize string extension") DEFINE_int(stack_trace_limit, 10, "number of stack frames to capture") @@ -666,9 +673,6 @@ DEFINE_bool(collect_heap_spill_statistics, false, DEFINE_bool(trace_isolates, false, "trace isolate state changes") -// VM state -DEFINE_bool(log_state_changes, false, "Log state changes.") - // Regexp DEFINE_bool(regexp_possessive_quantifier, false, @@ -716,6 +720,7 @@ DEFINE_bool(log_internal_timer_events, false, "Time internal events.") DEFINE_bool(log_timer_events, false, "Time events including external callbacks.") DEFINE_implication(log_timer_events, log_internal_timer_events) +DEFINE_implication(log_internal_timer_events, prof) // // Disassembler only flags diff --git a/deps/v8/src/full-codegen.cc b/deps/v8/src/full-codegen.cc index 72d0835845..dc646b1a98 100644 --- a/deps/v8/src/full-codegen.cc +++ b/deps/v8/src/full-codegen.cc @@ -923,6 +923,20 @@ void FullCodeGenerator::EmitInlineRuntimeCall(CallRuntime* expr) { } +void FullCodeGenerator::EmitGeneratorSend(CallRuntime* expr) { + ZoneList<Expression*>* args = expr->arguments(); + ASSERT(args->length() == 2); + EmitGeneratorResume(args->at(0), args->at(1), JSGeneratorObject::SEND); +} + + +void FullCodeGenerator::EmitGeneratorThrow(CallRuntime* expr) { + ZoneList<Expression*>* args = expr->arguments(); + ASSERT(args->length() == 2); + EmitGeneratorResume(args->at(0), args->at(1), JSGeneratorObject::THROW); +} + + void FullCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) { switch (expr->op()) { case Token::COMMA: @@ -1241,9 +1255,12 @@ void 
FullCodeGenerator::VisitWithStatement(WithStatement* stmt) { __ CallRuntime(Runtime::kPushWithContext, 2); StoreToFrameField(StandardFrameConstants::kContextOffset, context_register()); + Scope* saved_scope = scope(); + scope_ = stmt->scope(); { WithOrCatch body(this); Visit(stmt->statement()); } + scope_ = saved_scope; // Pop context. LoadContextField(context_register(), Context::PREVIOUS_INDEX); @@ -1548,30 +1565,6 @@ void FullCodeGenerator::VisitSharedFunctionInfoLiteral( } -void FullCodeGenerator::VisitYield(Yield* expr) { - if (expr->is_delegating_yield()) - UNIMPLEMENTED(); - - Comment cmnt(masm_, "[ Yield"); - // TODO(wingo): Actually update the iterator state. - VisitForEffect(expr->generator_object()); - VisitForAccumulatorValue(expr->expression()); - // TODO(wingo): Assert that the operand stack depth is 0, at least while - // general yield expressions are unimplemented. - - // TODO(wingo): What follows is as in VisitReturnStatement. Replace it with a - // call to a builtin that will resume the generator. - NestedStatement* current = nesting_stack_; - int stack_depth = 0; - int context_length = 0; - while (current != NULL) { - current = current->Exit(&stack_depth, &context_length); - } - __ Drop(stack_depth); - EmitReturnSequence(); -} - - void FullCodeGenerator::VisitThrow(Throw* expr) { Comment cmnt(masm_, "[ Throw"); VisitForStackValue(expr->exception()); diff --git a/deps/v8/src/full-codegen.h b/deps/v8/src/full-codegen.h index b9647c2ea3..3734ae5267 100644 --- a/deps/v8/src/full-codegen.h +++ b/deps/v8/src/full-codegen.h @@ -486,6 +486,11 @@ class FullCodeGenerator: public AstVisitor { INLINE_RUNTIME_FUNCTION_LIST(EMIT_INLINE_RUNTIME_CALL) #undef EMIT_INLINE_RUNTIME_CALL + // Platform-specific code for resuming generators. + void EmitGeneratorResume(Expression *generator, + Expression *value, + JSGeneratorObject::ResumeMode resume_mode); + // Platform-specific code for loading variables. void EmitLoadGlobalCheckExtensions(Variable* var, TypeofState typeof_state, diff --git a/deps/v8/src/generator.js b/deps/v8/src/generator.js index 481d4d37f8..5e61091565 100644 --- a/deps/v8/src/generator.js +++ b/deps/v8/src/generator.js @@ -44,7 +44,7 @@ function GeneratorObjectNext() { ['[Generator].prototype.next', this]); } - // TODO(wingo): Implement. + return %_GeneratorSend(this, void 0); } function GeneratorObjectSend(value) { @@ -53,7 +53,7 @@ function GeneratorObjectSend(value) { ['[Generator].prototype.send', this]); } - // TODO(wingo): Implement. + return %_GeneratorSend(this, value); } function GeneratorObjectThrow(exn) { @@ -62,16 +62,7 @@ function GeneratorObjectThrow(exn) { ['[Generator].prototype.throw', this]); } - // TODO(wingo): Implement. -} - -function GeneratorObjectClose() { - if (!IS_GENERATOR(this)) { - throw MakeTypeError('incompatible_method_receiver', - ['[Generator].prototype.close', this]); - } - - // TODO(wingo): Implement. 
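The VisitWithStatement hunk above now saves the code generator's current scope_, installs the with statement's own scope while its body is visited, and restores the old scope before the context is popped. The save/switch/restore pattern in isolation (Visitor and Scope here are stand-ins, not V8 types):

#include <cassert>

struct Scope { int id; };

class Visitor {
 public:
  Visitor() : outer_(), scope_(&outer_) { outer_.id = 0; }

  void VisitWith(Scope* with_scope) {
    Scope* saved_scope = scope_;  // Remember the enclosing scope.
    scope_ = with_scope;          // The body sees the inner scope.
    VisitBody();
    scope_ = saved_scope;         // Restore before continuing.
  }

  void VisitBody() { assert(scope_->id == 1); }

 private:
  Scope outer_;
  Scope* scope_;
};

int main() {
  Visitor v;
  Scope with_scope = {1};
  v.VisitWith(&with_scope);
  return 0;
}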
+ return %_GeneratorThrow(this, exn); } function SetUpGenerators() { @@ -81,8 +72,7 @@ function SetUpGenerators() { DONT_ENUM | DONT_DELETE | READ_ONLY, ["next", GeneratorObjectNext, "send", GeneratorObjectSend, - "throw", GeneratorObjectThrow, - "close", GeneratorObjectClose]); + "throw", GeneratorObjectThrow]); %SetProperty(GeneratorObjectPrototype, "constructor", GeneratorFunctionPrototype, DONT_ENUM | DONT_DELETE | READ_ONLY); %SetPrototype(GeneratorFunctionPrototype, $Function.prototype); diff --git a/deps/v8/src/global-handles.cc b/deps/v8/src/global-handles.cc index cb3115abfc..7ee89d7b2d 100644 --- a/deps/v8/src/global-handles.cc +++ b/deps/v8/src/global-handles.cc @@ -37,7 +37,13 @@ namespace internal { ObjectGroup::~ObjectGroup() { - if (info_ != NULL) info_->Dispose(); + if (info != NULL) info->Dispose(); + delete[] objects; +} + + +ImplicitRefGroup::~ImplicitRefGroup() { + delete[] children; } @@ -267,7 +273,7 @@ class GlobalHandles::Node { ASSERT(!object_->IsExternalTwoByteString() || ExternalTwoByteString::cast(object_)->resource() != NULL); // Leaving V8. - VMState state(isolate, EXTERNAL); + VMState<EXTERNAL> state(isolate); if (near_death_callback_ != NULL) { if (IsWeakCallback::decode(flags_)) { WeakReferenceCallback callback = @@ -438,7 +444,8 @@ GlobalHandles::GlobalHandles(Isolate* isolate) first_block_(NULL), first_used_block_(NULL), first_free_(NULL), - post_gc_processing_count_(0) {} + post_gc_processing_count_(0), + object_group_connections_(kObjectGroupConnectionsCapacity) {} GlobalHandles::~GlobalHandles() { @@ -578,15 +585,16 @@ void GlobalHandles::IterateNewSpaceWeakIndependentRoots(ObjectVisitor* v) { bool GlobalHandles::IterateObjectGroups(ObjectVisitor* v, WeakSlotCallbackWithHeap can_skip) { + ComputeObjectGroupsAndImplicitReferences(); int last = 0; bool any_group_was_visited = false; for (int i = 0; i < object_groups_.length(); i++) { ObjectGroup* entry = object_groups_.at(i); ASSERT(entry != NULL); - Object*** objects = entry->objects_; + Object*** objects = entry->objects; bool group_should_be_visited = false; - for (size_t j = 0; j < entry->length_; j++) { + for (size_t j = 0; j < entry->length; j++) { Object* object = *objects[j]; if (object->IsHeapObject()) { if (!can_skip(isolate_->heap(), &object)) { @@ -603,7 +611,7 @@ bool GlobalHandles::IterateObjectGroups(ObjectVisitor* v, // An object in the group requires visiting, so iterate over all // objects in the group. - for (size_t j = 0; j < entry->length_; ++j) { + for (size_t j = 0; j < entry->length; ++j) { Object* object = *objects[j]; if (object->IsHeapObject()) { v->VisitPointer(&object); @@ -613,7 +621,7 @@ bool GlobalHandles::IterateObjectGroups(ObjectVisitor* v, // Once the entire group has been iterated over, set the object // group to NULL so it won't be processed again. 
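The global-handles hunks in this region replace the old malloc-with-trailing-array ObjectGroup and ImplicitRefGroup (built through a New() factory, released through Dispose()) with ordinary structs whose constructors allocate the pointer arrays with new[] and whose destructors release everything, so call sites change from entry->Dispose() to plain delete entry. A reduced sketch of the new ownership model, with V8's Object and RetainedObjectInfo mocked out:

#include <cassert>
#include <cstddef>

typedef int Object;  // Stand-in for v8::internal::Object.

struct RetainedObjectInfo {
  void Dispose() { delete this; }
};

struct ObjectGroup {
  explicit ObjectGroup(size_t length)
      : info(NULL), objects(new Object**[length]), length(length) {
    assert(length > 0);
  }
  ~ObjectGroup() {
    if (info != NULL) info->Dispose();  // The group owns its info object...
    delete[] objects;                   // ...and its handle array.
  }

  RetainedObjectInfo* info;
  Object*** objects;
  size_t length;
};

int main() {
  ObjectGroup* group = new ObjectGroup(2);
  group->info = new RetainedObjectInfo();
  delete group;  // Was group->Dispose(), which free()d a malloc'd block.
  return 0;
}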
- entry->Dispose(); + delete entry; object_groups_.at(i) = NULL; } object_groups_.Rewind(last); @@ -824,7 +832,23 @@ void GlobalHandles::AddObjectGroup(Object*** handles, if (info != NULL) info->Dispose(); return; } - object_groups_.Add(ObjectGroup::New(handles, length, info)); + ObjectGroup* group = new ObjectGroup(length); + for (size_t i = 0; i < length; ++i) + group->objects[i] = handles[i]; + group->info = info; + object_groups_.Add(group); +} + + +void GlobalHandles::SetObjectGroupId(Object** handle, + UniqueId id) { + object_group_connections_.Add(ObjectGroupConnection(id, handle)); +} + + +void GlobalHandles::SetRetainedObjectInfo(UniqueId id, + RetainedObjectInfo* info) { + retainer_infos_.Add(ObjectGroupRetainerInfo(id, info)); } @@ -838,23 +862,45 @@ void GlobalHandles::AddImplicitReferences(HeapObject** parent, } #endif if (length == 0) return; - implicit_ref_groups_.Add(ImplicitRefGroup::New(parent, children, length)); + ImplicitRefGroup* group = new ImplicitRefGroup(parent, length); + for (size_t i = 0; i < length; ++i) + group->children[i] = children[i]; + implicit_ref_groups_.Add(group); +} + + +void GlobalHandles::SetReferenceFromGroup(UniqueId id, Object** child) { + ASSERT(!Node::FromLocation(child)->is_independent()); + implicit_ref_connections_.Add(ObjectGroupConnection(id, child)); +} + + +void GlobalHandles::SetReference(HeapObject** parent, Object** child) { + ASSERT(!Node::FromLocation(child)->is_independent()); + ImplicitRefGroup* group = new ImplicitRefGroup(parent, 1); + group->children[0] = child; + implicit_ref_groups_.Add(group); } void GlobalHandles::RemoveObjectGroups() { - for (int i = 0; i < object_groups_.length(); i++) { - object_groups_.at(i)->Dispose(); - } + for (int i = 0; i < object_groups_.length(); i++) + delete object_groups_.at(i); object_groups_.Clear(); + for (int i = 0; i < retainer_infos_.length(); ++i) + retainer_infos_[i].info->Dispose(); + retainer_infos_.Clear(); + object_group_connections_.Clear(); + object_group_connections_.Initialize(kObjectGroupConnectionsCapacity); } void GlobalHandles::RemoveImplicitRefGroups() { for (int i = 0; i < implicit_ref_groups_.length(); i++) { - implicit_ref_groups_.at(i)->Dispose(); + delete implicit_ref_groups_.at(i); } implicit_ref_groups_.Clear(); + implicit_ref_connections_.Clear(); } @@ -863,4 +909,108 @@ void GlobalHandles::TearDown() { } +void GlobalHandles::ComputeObjectGroupsAndImplicitReferences() { + if (object_group_connections_.length() == 0) { + for (int i = 0; i < retainer_infos_.length(); ++i) + retainer_infos_[i].info->Dispose(); + retainer_infos_.Clear(); + implicit_ref_connections_.Clear(); + return; + } + + object_group_connections_.Sort(); + retainer_infos_.Sort(); + implicit_ref_connections_.Sort(); + + int info_index = 0; // For iterating retainer_infos_. + UniqueId current_group_id(0); + int current_group_start = 0; + + int current_implicit_refs_start = 0; + int current_implicit_refs_end = 0; + for (int i = 0; i <= object_group_connections_.length(); ++i) { + if (i == 0) + current_group_id = object_group_connections_[i].id; + if (i == object_group_connections_.length() || + current_group_id != object_group_connections_[i].id) { + // Group detected: objects in indices [current_group_start, i[. + + // Find out which implicit references are related to this group. (We want + // to ignore object groups which only have 1 object, but that object is + // needed as a representative object for the implicit refrerence group.) 
+ while (current_implicit_refs_start < implicit_ref_connections_.length() && + implicit_ref_connections_[current_implicit_refs_start].id < + current_group_id) + ++current_implicit_refs_start; + current_implicit_refs_end = current_implicit_refs_start; + while (current_implicit_refs_end < implicit_ref_connections_.length() && + implicit_ref_connections_[current_implicit_refs_end].id == + current_group_id) + ++current_implicit_refs_end; + + if (current_implicit_refs_end > current_implicit_refs_start) { + // Find a representative object for the implicit references. + HeapObject** representative = NULL; + for (int j = current_group_start; j < i; ++j) { + Object** object = object_group_connections_[j].object; + if ((*object)->IsHeapObject()) { + representative = reinterpret_cast<HeapObject**>(object); + break; + } + } + if (representative) { + ImplicitRefGroup* group = new ImplicitRefGroup( + representative, + current_implicit_refs_end - current_implicit_refs_start); + for (int j = current_implicit_refs_start; + j < current_implicit_refs_end; + ++j) { + group->children[j - current_implicit_refs_start] = + implicit_ref_connections_[j].object; + } + implicit_ref_groups_.Add(group); + } + current_implicit_refs_start = current_implicit_refs_end; + } + + // Find a RetainedObjectInfo for the group. + RetainedObjectInfo* info = NULL; + while (info_index < retainer_infos_.length() && + retainer_infos_[info_index].id < current_group_id) { + retainer_infos_[info_index].info->Dispose(); + ++info_index; + } + if (info_index < retainer_infos_.length() && + retainer_infos_[info_index].id == current_group_id) { + // This object group has an associated ObjectGroupRetainerInfo. + info = retainer_infos_[info_index].info; + ++info_index; + } + + // Ignore groups which only contain one object. + if (i > current_group_start + 1) { + ObjectGroup* group = new ObjectGroup(i - current_group_start); + for (int j = current_group_start; j < i; ++j) { + group->objects[j - current_group_start] = + object_group_connections_[j].object; + } + group->info = info; + object_groups_.Add(group); + } else if (info) { + info->Dispose(); + } + + if (i < object_group_connections_.length()) { + current_group_id = object_group_connections_[i].id; + current_group_start = i; + } + } + } + object_group_connections_.Clear(); + object_group_connections_.Initialize(kObjectGroupConnectionsCapacity); + retainer_infos_.Clear(); + implicit_ref_connections_.Clear(); +} + + } } // namespace v8::internal diff --git a/deps/v8/src/global-handles.h b/deps/v8/src/global-handles.h index 90707b0bc2..81e14765c1 100644 --- a/deps/v8/src/global-handles.h +++ b/deps/v8/src/global-handles.h @@ -28,6 +28,7 @@ #ifndef V8_GLOBAL_HANDLES_H_ #define V8_GLOBAL_HANDLES_H_ +#include "../include/v8.h" #include "../include/v8-profiler.h" #include "list.h" @@ -46,70 +47,76 @@ class ObjectVisitor; // At GC the destroyed global handles are removed from the free list // and deallocated. +// Data structures for tracking object groups and implicit references. + // An object group is treated like a single JS object: if one of object in // the group is alive, all objects in the same group are considered alive. // An object group is used to simulate object relationship in a DOM tree. -class ObjectGroup { - public: - static ObjectGroup* New(Object*** handles, - size_t length, - v8::RetainedObjectInfo* info) { + +// An implicit references group consists of two parts: a parent object and a +// list of children objects. If the parent is alive, all the children are alive +// too. 
+ +struct ObjectGroup { + explicit ObjectGroup(size_t length) + : info(NULL), length(length) { ASSERT(length > 0); - ObjectGroup* group = reinterpret_cast<ObjectGroup*>( - malloc(OFFSET_OF(ObjectGroup, objects_[length]))); - group->length_ = length; - group->info_ = info; - CopyWords(group->objects_, handles, static_cast<int>(length)); - return group; + objects = new Object**[length]; } + ~ObjectGroup(); - void Dispose() { - if (info_ != NULL) info_->Dispose(); - free(this); - } + v8::RetainedObjectInfo* info; + Object*** objects; + size_t length; +}; - size_t length_; - v8::RetainedObjectInfo* info_; - Object** objects_[1]; // Variable sized array. - private: - void* operator new(size_t size); - void operator delete(void* p); - ~ObjectGroup(); - DISALLOW_IMPLICIT_CONSTRUCTORS(ObjectGroup); +struct ImplicitRefGroup { + ImplicitRefGroup(HeapObject** parent, size_t length) + : parent(parent), length(length) { + ASSERT(length > 0); + children = new Object**[length]; + } + ~ImplicitRefGroup(); + + HeapObject** parent; + Object*** children; + size_t length; }; -// An implicit references group consists of two parts: a parent object and -// a list of children objects. If the parent is alive, all the children -// are alive too. -class ImplicitRefGroup { - public: - static ImplicitRefGroup* New(HeapObject** parent, - Object*** children, - size_t length) { - ASSERT(length > 0); - ImplicitRefGroup* group = reinterpret_cast<ImplicitRefGroup*>( - malloc(OFFSET_OF(ImplicitRefGroup, children_[length]))); - group->parent_ = parent; - group->length_ = length; - CopyWords(group->children_, children, length); - return group; +// For internal bookkeeping. +struct ObjectGroupConnection { + ObjectGroupConnection(UniqueId id, Object** object) + : id(id), object(object) {} + + bool operator==(const ObjectGroupConnection& other) const { + return id == other.id; } - void Dispose() { - free(this); + bool operator<(const ObjectGroupConnection& other) const { + return id < other.id; } - HeapObject** parent_; - size_t length_; - Object** children_[1]; // Variable sized array. + UniqueId id; + Object** object; +}; - private: - void* operator new(size_t size); - void operator delete(void* p); - ~ImplicitRefGroup(); - DISALLOW_IMPLICIT_CONSTRUCTORS(ImplicitRefGroup); + +struct ObjectGroupRetainerInfo { + ObjectGroupRetainerInfo(UniqueId id, RetainedObjectInfo* info) + : id(id), info(info) {} + + bool operator==(const ObjectGroupRetainerInfo& other) const { + return id == other.id; + } + + bool operator<(const ObjectGroupRetainerInfo& other) const { + return id < other.id; + } + + UniqueId id; + RetainedObjectInfo* info; }; @@ -218,6 +225,16 @@ class GlobalHandles { size_t length, v8::RetainedObjectInfo* info); + // Associates handle with the object group represented by id. + // Should be only used in GC callback function before a collection. + // All groups are destroyed after a garbage collection. + void SetObjectGroupId(Object** handle, UniqueId id); + + // Set RetainedObjectInfo for an object group. Should not be called more than + // once for a group. Should not be called for a group which contains no + // handles. + void SetRetainedObjectInfo(UniqueId id, RetainedObjectInfo* info); + // Add an implicit references' group. // Should be only used in GC callback function before a collection. // All groups are destroyed after a mark-compact collection. @@ -225,11 +242,23 @@ class GlobalHandles { Object*** children, size_t length); - // Returns the object groups. 
- List<ObjectGroup*>* object_groups() { return &object_groups_; } + // Adds an implicit reference from a group to an object. Should be only used + // in GC callback function before a collection. All implicit references are + // destroyed after a mark-compact collection. + void SetReferenceFromGroup(UniqueId id, Object** child); + + // Adds an implicit reference from a parent object to a child object. Should + // be only used in GC callback function before a collection. All implicit + // references are destroyed after a mark-compact collection. + void SetReference(HeapObject** parent, Object** child); + + List<ObjectGroup*>* object_groups() { + ComputeObjectGroupsAndImplicitReferences(); + return &object_groups_; + } - // Returns the implicit references' groups. List<ImplicitRefGroup*>* implicit_ref_groups() { + ComputeObjectGroupsAndImplicitReferences(); return &implicit_ref_groups_; } @@ -250,6 +279,15 @@ class GlobalHandles { private: explicit GlobalHandles(Isolate* isolate); + // Migrates data from the internal representation (object_group_connections_, + // retainer_infos_ and implicit_ref_connections_) to the public and more + // efficient representation (object_groups_ and implicit_ref_groups_). + void ComputeObjectGroupsAndImplicitReferences(); + + // v8::internal::List is inefficient even for small number of elements, if we + // don't assign any initial capacity. + static const int kObjectGroupConnectionsCapacity = 20; + // Internal node structures. class Node; class NodeBlock; @@ -275,9 +313,17 @@ class GlobalHandles { int post_gc_processing_count_; + // Object groups and implicit references, public and more efficient + // representation. List<ObjectGroup*> object_groups_; List<ImplicitRefGroup*> implicit_ref_groups_; + // Object groups and implicit references, temporary representation while + // constructing the groups. + List<ObjectGroupConnection> object_group_connections_; + List<ObjectGroupRetainerInfo> retainer_infos_; + List<ObjectGroupConnection> implicit_ref_connections_; + friend class Isolate; DISALLOW_COPY_AND_ASSIGN(GlobalHandles); diff --git a/deps/v8/src/handles-inl.h b/deps/v8/src/handles-inl.h index bb113110cb..5a3e9ed274 100644 --- a/deps/v8/src/handles-inl.h +++ b/deps/v8/src/handles-inl.h @@ -55,13 +55,8 @@ template <typename T> inline bool Handle<T>::is_identical_to(const Handle<T> other) const { ASSERT(location_ == NULL || reinterpret_cast<Address>(*location_) != kZapValue); -#ifdef DEBUG - if (FLAG_enable_slow_asserts) { - Isolate* isolate = Isolate::Current(); - CHECK(isolate->AllowHandleDereference() || - !isolate->optimizing_compiler_thread()->IsOptimizerThread()); - } -#endif // DEBUG + // Dereferencing deferred handles to check object equality is safe. 
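In the header hunk above, object_groups() and implicit_ref_groups() now run ComputeObjectGroupsAndImplicitReferences() before handing out the lists: the cheap connection records collected by SetObjectGroupId() and SetReferenceFromGroup() are folded into full groups only when a consumer actually asks. A compute-on-read sketch of that accessor shape (GroupRegistry and its int ids are invented for illustration):

#include <utility>
#include <vector>

class GroupRegistry {
 public:
  // Cheap to call from a GC prologue callback: just records a pair.
  void SetObjectGroupId(int handle, int id) {
    connections_.push_back(std::make_pair(id, handle));
  }

  // Consumers get the expensive representation, built lazily on access.
  const std::vector<std::pair<int, int> >& object_groups() {
    ComputeGroups();
    return groups_;
  }

 private:
  void ComputeGroups() {
    if (connections_.empty()) return;
    // The real code sorts the connections by id and merges runs of equal
    // ids into groups; moving them over is enough to show the shape.
    groups_.insert(groups_.end(), connections_.begin(), connections_.end());
    connections_.clear();
  }

  std::vector<std::pair<int, int> > connections_;  // Temporary representation.
  std::vector<std::pair<int, int> > groups_;       // Efficient representation.
};

int main() {
  GroupRegistry registry;
  registry.SetObjectGroupId(7, 1);
  return registry.object_groups().size() == 1 ? 0 : 1;
}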
+ SLOW_ASSERT(IsDereferenceAllowed(true) && other.IsDereferenceAllowed(true)); return *location_ == *other.location_; } @@ -70,7 +65,7 @@ template <typename T> inline T* Handle<T>::operator*() const { ASSERT(location_ != NULL); ASSERT(reinterpret_cast<Address>(*location_) != kHandleZapValue); - SLOW_ASSERT(Isolate::Current()->AllowHandleDereference()); + SLOW_ASSERT(IsDereferenceAllowed(false)); return *BitCast<T**>(location_); } @@ -78,10 +73,44 @@ template <typename T> inline T** Handle<T>::location() const { ASSERT(location_ == NULL || reinterpret_cast<Address>(*location_) != kZapValue); - SLOW_ASSERT(Isolate::Current()->AllowHandleDereference()); + SLOW_ASSERT(IsDereferenceAllowed(false)); return location_; } +#ifdef DEBUG +template <typename T> +bool Handle<T>::IsDereferenceAllowed(bool allow_deferred) const { + if (location_ == NULL) return true; + Object* object = *BitCast<T**>(location_); + if (object->IsSmi()) return true; + HeapObject* heap_object = HeapObject::cast(object); + Isolate* isolate = heap_object->GetIsolate(); + Object** handle = reinterpret_cast<Object**>(location_); + Object** roots_array_start = isolate->heap()->roots_array_start(); + if (roots_array_start <= handle && + handle < roots_array_start + Heap::kStrongRootListLength) { + return true; + } + if (isolate->optimizing_compiler_thread()->IsOptimizerThread() && + !Heap::RelocationLock::IsLockedByOptimizerThread(isolate->heap())) { + return false; + } + switch (isolate->HandleDereferenceGuardState()) { + case HandleDereferenceGuard::ALLOW: + return true; + case HandleDereferenceGuard::DISALLOW: + return false; + case HandleDereferenceGuard::DISALLOW_DEFERRED: + // Accessing maps and internalized strings is safe. + if (heap_object->IsMap()) return true; + if (heap_object->IsInternalizedString()) return true; + return allow_deferred || !isolate->IsDeferredHandle(handle); + } + return false; +} +#endif + + HandleScope::HandleScope(Isolate* isolate) { v8::ImplementationUtilities::HandleScopeData* current = @@ -181,13 +210,13 @@ inline NoHandleAllocation::~NoHandleAllocation() { HandleDereferenceGuard::HandleDereferenceGuard(Isolate* isolate, State state) : isolate_(isolate) { - old_state_ = isolate_->AllowHandleDereference(); - isolate_->SetAllowHandleDereference(state == ALLOW); + old_state_ = isolate_->HandleDereferenceGuardState(); + isolate_->SetHandleDereferenceGuardState(state); } HandleDereferenceGuard::~HandleDereferenceGuard() { - isolate_->SetAllowHandleDereference(old_state_); + isolate_->SetHandleDereferenceGuardState(old_state_); } #endif diff --git a/deps/v8/src/handles.cc b/deps/v8/src/handles.cc index 059ff2486d..5a5773ebd0 100644 --- a/deps/v8/src/handles.cc +++ b/deps/v8/src/handles.cc @@ -565,7 +565,7 @@ v8::Handle<v8::Array> GetKeysForNamedInterceptor(Handle<JSReceiver> receiver, LOG(isolate, ApiObjectAccess("interceptor-named-enum", *object)); { // Leaving JavaScript. - VMState state(isolate, EXTERNAL); + VMState<EXTERNAL> state(isolate); result = enum_fun(info); } } @@ -590,7 +590,7 @@ v8::Handle<v8::Array> GetKeysForIndexedInterceptor(Handle<JSReceiver> receiver, LOG(isolate, ApiObjectAccess("interceptor-indexed-enum", *object)); { // Leaving JavaScript. 
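HandleDereferenceGuard now records and restores the full three-valued guard state rather than a bool, which is why old_state_ changes type in the handles.h hunk below. The widened save/restore guard in isolation (the enum values mirror the patch; everything else is a mock):

#include <cassert>

enum State { ALLOW, DISALLOW, DISALLOW_DEFERRED };

class Guard {
 public:
  Guard(State* slot, State state) : slot_(slot), old_state_(*slot) {
    *slot_ = state;
  }
  ~Guard() { *slot_ = old_state_; }  // Restores whatever was active before.

 private:
  State* slot_;
  State old_state_;  // Was a bool when there were only two states.
};

int main() {
  State current = ALLOW;
  {
    Guard outer(&current, DISALLOW_DEFERRED);
    assert(current == DISALLOW_DEFERRED);
    {
      Guard inner(&current, ALLOW);  // Guards nest correctly.
      assert(current == ALLOW);
    }
    assert(current == DISALLOW_DEFERRED);
  }
  assert(current == ALLOW);
  return 0;
}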
- VMState state(isolate, EXTERNAL); + VMState<EXTERNAL> state(isolate); result = enum_fun(info); #if ENABLE_EXTRA_CHECKS CHECK(result.IsEmpty() || v8::Utils::OpenHandle(*result)->IsJSObject()); diff --git a/deps/v8/src/handles.h b/deps/v8/src/handles.h index 8e9404cb9d..938d43b8a4 100644 --- a/deps/v8/src/handles.h +++ b/deps/v8/src/handles.h @@ -73,8 +73,8 @@ class Handle { INLINE(T** location() const); template <class S> static Handle<T> cast(Handle<S> that) { - T::cast(*that); - return Handle<T>(reinterpret_cast<T**>(that.location())); + T::cast(*reinterpret_cast<T**>(that.location_)); + return Handle<T>(reinterpret_cast<T**>(that.location_)); } static Handle<T> null() { return Handle<T>(); } @@ -84,6 +84,10 @@ class Handle { // implementation in api.h. inline Handle<T> EscapeFrom(v8::HandleScope* scope); +#ifdef DEBUG + bool IsDereferenceAllowed(bool allow_deferred) const; +#endif // DEBUG + private: T** location_; @@ -341,7 +345,7 @@ class NoHandleAllocation BASE_EMBEDDED { class HandleDereferenceGuard BASE_EMBEDDED { public: - enum State { ALLOW, DISALLOW }; + enum State { ALLOW, DISALLOW, DISALLOW_DEFERRED }; #ifndef DEBUG HandleDereferenceGuard(Isolate* isolate, State state) { } ~HandleDereferenceGuard() { } @@ -350,10 +354,18 @@ class HandleDereferenceGuard BASE_EMBEDDED { inline ~HandleDereferenceGuard(); private: Isolate* isolate_; - bool old_state_; + State old_state_; #endif }; +#ifdef DEBUG +#define ALLOW_HANDLE_DEREF(isolate, why_this_is_safe) \ + HandleDereferenceGuard allow_deref(isolate, \ + HandleDereferenceGuard::ALLOW); +#else +#define ALLOW_HANDLE_DEREF(isolate, why_this_is_safe) +#endif // DEBUG + } } // namespace v8::internal #endif // V8_HANDLES_H_ diff --git a/deps/v8/src/heap-inl.h b/deps/v8/src/heap-inl.h index 43d4a999cd..f937426186 100644 --- a/deps/v8/src/heap-inl.h +++ b/deps/v8/src/heap-inl.h @@ -211,6 +211,7 @@ MaybeObject* Heap::CopyFixedDoubleArray(FixedDoubleArray* src) { MaybeObject* Heap::AllocateRaw(int size_in_bytes, AllocationSpace space, AllocationSpace retry_space) { + SLOW_ASSERT(!isolate_->optimizing_compiler_thread()->IsOptimizerThread()); ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC); ASSERT(space != NEW_SPACE || retry_space == OLD_POINTER_SPACE || @@ -577,56 +578,67 @@ Isolate* Heap::isolate() { // Warning: Do not use the identifiers __object__, __maybe_object__ or // __scope__ in a call to this macro. 
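The new ALLOW_HANDLE_DEREF(isolate, why_this_is_safe) macro above expands to a named HandleDereferenceGuard in debug builds and to nothing in release builds, so a call site can document why a dereference is safe at zero release cost. The same debug-only annotation pattern, standalone (ALLOW_DEREF and AllowDerefGuard are invented names, and the real macro also takes the isolate):

#include <cstdio>

#ifdef DEBUG
// Debug builds get a scoped guard, so the permission is limited to the
// enclosing block.
struct AllowDerefGuard {
  AllowDerefGuard() { std::printf("dereference allowed\n"); }
  ~AllowDerefGuard() { std::printf("permission revoked\n"); }
};
#define ALLOW_DEREF(why_this_is_safe) \
  AllowDerefGuard allow_deref;
#else
// Release builds compile the annotation, justification string and all,
// down to nothing.
#define ALLOW_DEREF(why_this_is_safe)
#endif

int main() {
  ALLOW_DEREF("converting a handle into a global property cell");
  return 0;
}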
-#define CALL_AND_RETRY(ISOLATE, FUNCTION_CALL, RETURN_VALUE, RETURN_EMPTY)\ - do { \ - GC_GREEDY_CHECK(); \ - MaybeObject* __maybe_object__ = FUNCTION_CALL; \ - Object* __object__ = NULL; \ - if (__maybe_object__->ToObject(&__object__)) RETURN_VALUE; \ - if (__maybe_object__->IsOutOfMemory()) { \ - v8::internal::V8::FatalProcessOutOfMemory("CALL_AND_RETRY_0", true);\ - } \ - if (!__maybe_object__->IsRetryAfterGC()) RETURN_EMPTY; \ - ISOLATE->heap()->CollectGarbage(Failure::cast(__maybe_object__)-> \ - allocation_space(), \ - "allocation failure"); \ - __maybe_object__ = FUNCTION_CALL; \ - if (__maybe_object__->ToObject(&__object__)) RETURN_VALUE; \ - if (__maybe_object__->IsOutOfMemory()) { \ - v8::internal::V8::FatalProcessOutOfMemory("CALL_AND_RETRY_1", true);\ - } \ - if (!__maybe_object__->IsRetryAfterGC()) RETURN_EMPTY; \ - ISOLATE->counters()->gc_last_resort_from_handles()->Increment(); \ - ISOLATE->heap()->CollectAllAvailableGarbage("last resort gc"); \ - { \ - AlwaysAllocateScope __scope__; \ - __maybe_object__ = FUNCTION_CALL; \ - } \ - if (__maybe_object__->ToObject(&__object__)) RETURN_VALUE; \ - if (__maybe_object__->IsOutOfMemory() || \ - __maybe_object__->IsRetryAfterGC()) { \ - /* TODO(1181417): Fix this. */ \ - v8::internal::V8::FatalProcessOutOfMemory("CALL_AND_RETRY_2", true);\ - } \ - RETURN_EMPTY; \ +#define CALL_AND_RETRY(ISOLATE, FUNCTION_CALL, RETURN_VALUE, RETURN_EMPTY, OOM)\ + do { \ + GC_GREEDY_CHECK(); \ + MaybeObject* __maybe_object__ = FUNCTION_CALL; \ + Object* __object__ = NULL; \ + if (__maybe_object__->ToObject(&__object__)) RETURN_VALUE; \ + if (__maybe_object__->IsOutOfMemory()) { \ + OOM; \ + } \ + if (!__maybe_object__->IsRetryAfterGC()) RETURN_EMPTY; \ + ISOLATE->heap()->CollectGarbage(Failure::cast(__maybe_object__)-> \ + allocation_space(), \ + "allocation failure"); \ + __maybe_object__ = FUNCTION_CALL; \ + if (__maybe_object__->ToObject(&__object__)) RETURN_VALUE; \ + if (__maybe_object__->IsOutOfMemory()) { \ + OOM; \ + } \ + if (!__maybe_object__->IsRetryAfterGC()) RETURN_EMPTY; \ + ISOLATE->counters()->gc_last_resort_from_handles()->Increment(); \ + ISOLATE->heap()->CollectAllAvailableGarbage("last resort gc"); \ + { \ + AlwaysAllocateScope __scope__; \ + __maybe_object__ = FUNCTION_CALL; \ + } \ + if (__maybe_object__->ToObject(&__object__)) RETURN_VALUE; \ + if (__maybe_object__->IsOutOfMemory()) { \ + OOM; \ + } \ + if (__maybe_object__->IsRetryAfterGC()) { \ + /* TODO(1181417): Fix this. 
*/ \ + v8::internal::V8::FatalProcessOutOfMemory("CALL_AND_RETRY_LAST", true); \ + } \ + RETURN_EMPTY; \ } while (false) +#define CALL_AND_RETRY_OR_DIE( \ + ISOLATE, FUNCTION_CALL, RETURN_VALUE, RETURN_EMPTY) \ + CALL_AND_RETRY( \ + ISOLATE, \ + FUNCTION_CALL, \ + RETURN_VALUE, \ + RETURN_EMPTY, \ + v8::internal::V8::FatalProcessOutOfMemory("CALL_AND_RETRY", true)) -#define CALL_HEAP_FUNCTION(ISOLATE, FUNCTION_CALL, TYPE) \ - CALL_AND_RETRY(ISOLATE, \ - FUNCTION_CALL, \ - return Handle<TYPE>(TYPE::cast(__object__), ISOLATE), \ - return Handle<TYPE>()) +#define CALL_HEAP_FUNCTION(ISOLATE, FUNCTION_CALL, TYPE) \ + CALL_AND_RETRY_OR_DIE(ISOLATE, \ + FUNCTION_CALL, \ + return Handle<TYPE>(TYPE::cast(__object__), ISOLATE), \ + return Handle<TYPE>()) \ -#define CALL_HEAP_FUNCTION_VOID(ISOLATE, FUNCTION_CALL) \ - CALL_AND_RETRY(ISOLATE, FUNCTION_CALL, return, return) +#define CALL_HEAP_FUNCTION_VOID(ISOLATE, FUNCTION_CALL) \ + CALL_AND_RETRY_OR_DIE(ISOLATE, FUNCTION_CALL, return, return) #define CALL_HEAP_FUNCTION_PASS_EXCEPTION(ISOLATE, FUNCTION_CALL) \ CALL_AND_RETRY(ISOLATE, \ FUNCTION_CALL, \ return __object__, \ + return __maybe_object__, \ return __maybe_object__) diff --git a/deps/v8/src/heap-profiler.cc b/deps/v8/src/heap-profiler.cc index 5c1badf9c9..4f6fdb1122 100644 --- a/deps/v8/src/heap-profiler.cc +++ b/deps/v8/src/heap-profiler.cc @@ -140,5 +140,10 @@ void HeapProfiler::ObjectMoveEvent(Address from, Address to) { snapshots_->ObjectMoveEvent(from, to); } +void HeapProfiler::SetRetainedObjectInfo(UniqueId id, + RetainedObjectInfo* info) { + // TODO(yurus, marja): Don't route this information through GlobalHandles. + heap()->isolate()->global_handles()->SetRetainedObjectInfo(id, info); +} } } // namespace v8::internal diff --git a/deps/v8/src/heap-profiler.h b/deps/v8/src/heap-profiler.h index 3f3138db52..1ed73b9ca6 100644 --- a/deps/v8/src/heap-profiler.h +++ b/deps/v8/src/heap-profiler.h @@ -80,6 +80,8 @@ class HeapProfiler { return snapshots_->is_tracking_objects(); } + void SetRetainedObjectInfo(UniqueId id, RetainedObjectInfo* info); + private: Heap* heap() const { return snapshots_->heap(); } diff --git a/deps/v8/src/heap-snapshot-generator.cc b/deps/v8/src/heap-snapshot-generator.cc index d22239f381..3d890f720e 100644 --- a/deps/v8/src/heap-snapshot-generator.cc +++ b/deps/v8/src/heap-snapshot-generator.cc @@ -190,7 +190,6 @@ template <> struct SnapshotSizeConstants<4> { static const int kExpectedHeapEntrySize = 24; static const int kExpectedHeapSnapshotsCollectionSize = 100; static const int kExpectedHeapSnapshotSize = 132; - static const size_t kMaxSerializableSnapshotRawSize = 256 * MB; }; template <> struct SnapshotSizeConstants<8> { @@ -198,8 +197,6 @@ template <> struct SnapshotSizeConstants<8> { static const int kExpectedHeapEntrySize = 32; static const int kExpectedHeapSnapshotsCollectionSize = 152; static const int kExpectedHeapSnapshotSize = 160; - static const uint64_t kMaxSerializableSnapshotRawSize = - static_cast<uint64_t>(6000) * MB; }; } // namespace @@ -1939,18 +1936,19 @@ void NativeObjectsExplorer::FillRetainedObjects() { Isolate* isolate = Isolate::Current(); const GCType major_gc_type = kGCTypeMarkSweepCompact; // Record objects that are joined into ObjectGroups. 
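The CALL_AND_RETRY rewrite that ends here threads the out-of-memory reaction through as a macro parameter: CALL_AND_RETRY_OR_DIE pins it to FatalProcessOutOfMemory, while CALL_HEAP_FUNCTION_PASS_EXCEPTION now returns the failure to the caller instead. A toy version of that allocate/GC/retry skeleton with a pluggable OOM action (TryAllocate and CollectGarbage are fakes, and the retry count is reduced):

#include <cstdio>
#include <cstdlib>

static int attempts = 0;
static bool TryAllocate() { return ++attempts >= 2; }  // Succeeds on retry.
static void CollectGarbage() { std::printf("gc\n"); }

// The do { ... } while (false) wrapper makes the macro behave like a
// single statement; the OOM argument decides what happens if even the
// post-GC retry fails.
#define CALL_AND_RETRY(FUNCTION_CALL, RETURN_VALUE, OOM) \
  do {                                                   \
    if (FUNCTION_CALL) RETURN_VALUE;                     \
    CollectGarbage();                                    \
    if (FUNCTION_CALL) RETURN_VALUE;                     \
    OOM;                                                 \
  } while (false)

// Specialization that keeps the old die-on-OOM behavior.
#define CALL_AND_RETRY_OR_DIE(FUNCTION_CALL, RETURN_VALUE) \
  CALL_AND_RETRY(FUNCTION_CALL, RETURN_VALUE, std::abort())

static int Allocate() {
  CALL_AND_RETRY_OR_DIE(TryAllocate(), return 1);
  return 0;
}

int main() { return Allocate() == 1 ? 0 : 1; }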
- isolate->heap()->CallGCPrologueCallbacks(major_gc_type); + isolate->heap()->CallGCPrologueCallbacks( + major_gc_type, kGCCallbackFlagConstructRetainedObjectInfos); List<ObjectGroup*>* groups = isolate->global_handles()->object_groups(); for (int i = 0; i < groups->length(); ++i) { ObjectGroup* group = groups->at(i); - if (group->info_ == NULL) continue; - List<HeapObject*>* list = GetListMaybeDisposeInfo(group->info_); - for (size_t j = 0; j < group->length_; ++j) { - HeapObject* obj = HeapObject::cast(*group->objects_[j]); + if (group->info == NULL) continue; + List<HeapObject*>* list = GetListMaybeDisposeInfo(group->info); + for (size_t j = 0; j < group->length; ++j) { + HeapObject* obj = HeapObject::cast(*group->objects[j]); list->Add(obj); in_groups_.Insert(obj); } - group->info_ = NULL; // Acquire info object ownership. + group->info = NULL; // Acquire info object ownership. } isolate->global_handles()->RemoveObjectGroups(); isolate->heap()->CallGCEpilogueCallbacks(major_gc_type); @@ -1966,12 +1964,12 @@ void NativeObjectsExplorer::FillImplicitReferences() { isolate->global_handles()->implicit_ref_groups(); for (int i = 0; i < groups->length(); ++i) { ImplicitRefGroup* group = groups->at(i); - HeapObject* parent = *group->parent_; + HeapObject* parent = *group->parent; int parent_entry = filler_->FindOrAddEntry(parent, native_entries_allocator_)->index(); ASSERT(parent_entry != HeapEntry::kNoEntry); - Object*** children = group->children_; - for (size_t j = 0; j < group->length_; ++j) { + Object*** children = group->children; + for (size_t j = 0; j < group->length; ++j) { Object* child = *children[j]; HeapEntry* child_entry = filler_->FindOrAddEntry(child, native_entries_allocator_); @@ -2384,42 +2382,9 @@ const int HeapSnapshotJSONSerializer::kNodeFieldsCount = 5; void HeapSnapshotJSONSerializer::Serialize(v8::OutputStream* stream) { ASSERT(writer_ == NULL); writer_ = new OutputStreamWriter(stream); - - HeapSnapshot* original_snapshot = NULL; - if (snapshot_->RawSnapshotSize() >= - SnapshotSizeConstants<kPointerSize>::kMaxSerializableSnapshotRawSize) { - // The snapshot is too big. Serialize a fake snapshot. - original_snapshot = snapshot_; - snapshot_ = CreateFakeSnapshot(); - } - SerializeImpl(); - delete writer_; writer_ = NULL; - - if (original_snapshot != NULL) { - delete snapshot_; - snapshot_ = original_snapshot; - } -} - - -HeapSnapshot* HeapSnapshotJSONSerializer::CreateFakeSnapshot() { - HeapSnapshot* result = new HeapSnapshot(snapshot_->collection(), - snapshot_->title(), - snapshot_->uid()); - result->AddRootEntry(); - const char* text = snapshot_->collection()->names()->GetFormatted( - "The snapshot is too big. " - "Maximum snapshot size is %" V8_PTR_PREFIX "u MB. 
" - "Actual snapshot size is %" V8_PTR_PREFIX "u MB.", - SnapshotSizeConstants<kPointerSize>::kMaxSerializableSnapshotRawSize / MB, - (snapshot_->RawSnapshotSize() + MB - 1) / MB); - HeapEntry* message = result->AddEntry(HeapEntry::kString, text, 0, 4); - result->root()->SetIndexedReference(HeapGraphEdge::kElement, 1, message); - result->FillChildren(); - return result; } diff --git a/deps/v8/src/heap-snapshot-generator.h b/deps/v8/src/heap-snapshot-generator.h index a4fcef72b9..223b240b8f 100644 --- a/deps/v8/src/heap-snapshot-generator.h +++ b/deps/v8/src/heap-snapshot-generator.h @@ -655,7 +655,6 @@ class HeapSnapshotJSONSerializer { v8::internal::kZeroHashSeed); } - HeapSnapshot* CreateFakeSnapshot(); int GetStringId(const char* s); int entry_index(HeapEntry* e) { return e->index() * kNodeFieldsCount; } void SerializeEdge(HeapGraphEdge* edge, bool first_edge); diff --git a/deps/v8/src/heap.cc b/deps/v8/src/heap.cc index 0c83c3aeb0..fb2f9d9e45 100644 --- a/deps/v8/src/heap.cc +++ b/deps/v8/src/heap.cc @@ -157,12 +157,14 @@ Heap::Heap() ms_count_at_last_idle_notification_(0), gc_count_at_last_idle_gc_(0), scavenges_since_last_idle_round_(kIdleScavengeThreshold), + gcs_since_last_deopt_(0), #ifdef VERIFY_HEAP no_weak_embedded_maps_verification_scope_depth_(0), #endif promotion_queue_(this), configured_(false), - chunks_queued_for_free_(NULL) { + chunks_queued_for_free_(NULL), + relocation_mutex_(NULL) { // Allow build-time customization of the max semispace size. Building // V8 with snapshots and a non-default max semispace size is much // easier if you can define it as part of the build environment. @@ -487,6 +489,12 @@ void Heap::GarbageCollectionEpilogue() { if (FLAG_gc_verbose) Print(); if (FLAG_code_stats) ReportCodeStatistics("After GC"); #endif + if (FLAG_deopt_every_n_garbage_collections > 0) { + if (++gcs_since_last_deopt_ == FLAG_deopt_every_n_garbage_collections) { + Deoptimizer::DeoptimizeAll(isolate()); + gcs_since_last_deopt_ = 0; + } + } isolate_->counters()->alive_after_last_gc()->Set( static_cast<int>(SizeOfObjects())); @@ -599,7 +607,7 @@ bool Heap::CollectGarbage(AllocationSpace space, const char* gc_reason, const char* collector_reason) { // The VM is in the GC state until exiting this function. 
- VMState state(isolate_, GC); + VMState<GC> state(isolate_); #ifdef DEBUG // Reset the allocation timeout to the GC interval, but make sure to @@ -885,8 +893,8 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector, { GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL); - VMState state(isolate_, EXTERNAL); - CallGCPrologueCallbacks(gc_type); + VMState<EXTERNAL> state(isolate_); + CallGCPrologueCallbacks(gc_type, kNoGCCallbackFlags); } EnsureFromSpaceIsCommitted(); @@ -1007,7 +1015,7 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector, { GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL); - VMState state(isolate_, EXTERNAL); + VMState<EXTERNAL> state(isolate_); CallGCEpilogueCallbacks(gc_type); } @@ -1021,13 +1029,13 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector, } -void Heap::CallGCPrologueCallbacks(GCType gc_type) { +void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags) { if (gc_type == kGCTypeMarkSweepCompact && global_gc_prologue_callback_) { global_gc_prologue_callback_(); } for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) { if (gc_type & gc_prologue_callbacks_[i].gc_type) { - gc_prologue_callbacks_[i].callback(gc_type, kNoGCCallbackFlags); + gc_prologue_callbacks_[i].callback(gc_type, flags); } } } @@ -1293,6 +1301,8 @@ class ScavengeWeakObjectRetainer : public WeakObjectRetainer { void Heap::Scavenge() { + RelocationLock relocation_lock(this); + #ifdef VERIFY_HEAP if (FLAG_verify_heap) VerifyNonPointerSpacePointers(); #endif @@ -2745,7 +2755,7 @@ bool Heap::CreateInitialObjects() { if (!maybe_obj->ToObject(&obj)) return false; } set_minus_zero_value(HeapNumber::cast(obj)); - ASSERT(signbit(minus_zero_value()->Number()) != 0); + ASSERT(std::signbit(minus_zero_value()->Number()) != 0); { MaybeObject* maybe_obj = AllocateHeapNumber(OS::nan_value(), TENURED); if (!maybe_obj->ToObject(&obj)) return false; @@ -3414,14 +3424,14 @@ MaybeObject* Heap::AllocateConsString(String* first, String* second) { return Failure::OutOfMemoryException(0x4); } - bool is_ascii_data_in_two_byte_string = false; + bool is_one_byte_data_in_two_byte_string = false; if (!is_one_byte) { // At least one of the strings uses two-byte representation so we // can't use the fast case code for short ASCII strings below, but // we can try to save memory if all chars actually fit in ASCII. - is_ascii_data_in_two_byte_string = - first->HasOnlyAsciiChars() && second->HasOnlyAsciiChars(); - if (is_ascii_data_in_two_byte_string) { + is_one_byte_data_in_two_byte_string = + first->HasOnlyOneByteChars() && second->HasOnlyOneByteChars(); + if (is_one_byte_data_in_two_byte_string) { isolate_->counters()->string_add_runtime_ext_to_ascii()->Increment(); } } @@ -3456,7 +3466,7 @@ MaybeObject* Heap::AllocateConsString(String* first, String* second) { for (int i = 0; i < second_length; i++) *dest++ = src[i]; return result; } else { - if (is_ascii_data_in_two_byte_string) { + if (is_one_byte_data_in_two_byte_string) { Object* result; { MaybeObject* maybe_result = AllocateRawOneByteString(length); if (!maybe_result->ToObject(&result)) return maybe_result; @@ -3481,7 +3491,7 @@ MaybeObject* Heap::AllocateConsString(String* first, String* second) { } } - Map* map = (is_one_byte || is_ascii_data_in_two_byte_string) ? + Map* map = (is_one_byte || is_one_byte_data_in_two_byte_string) ? 
cons_ascii_string_map() : cons_string_map(); Object* result; @@ -3627,11 +3637,11 @@ MaybeObject* Heap::AllocateExternalStringFromTwoByte( // For small strings we check whether the resource contains only // one byte characters. If yes, we use a different string map. - static const size_t kAsciiCheckLengthLimit = 32; - bool is_one_byte = length <= kAsciiCheckLengthLimit && + static const size_t kOneByteCheckLengthLimit = 32; + bool is_one_byte = length <= kOneByteCheckLengthLimit && String::IsOneByte(resource->data(), static_cast<int>(length)); Map* map = is_one_byte ? - external_string_with_ascii_data_map() : external_string_map(); + external_string_with_one_byte_data_map() : external_string_map(); Object* result; { MaybeObject* maybe_result = Allocate(map, NEW_SPACE); if (!maybe_result->ToObject(&result)) return maybe_result; @@ -4967,14 +4977,14 @@ Map* Heap::InternalizedStringMapForString(String* string) { case EXTERNAL_STRING_TYPE: return external_internalized_string_map(); case EXTERNAL_ASCII_STRING_TYPE: return external_ascii_internalized_string_map(); - case EXTERNAL_STRING_WITH_ASCII_DATA_TYPE: - return external_internalized_string_with_ascii_data_map(); + case EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE: + return external_internalized_string_with_one_byte_data_map(); case SHORT_EXTERNAL_STRING_TYPE: return short_external_internalized_string_map(); case SHORT_EXTERNAL_ASCII_STRING_TYPE: return short_external_ascii_internalized_string_map(); - case SHORT_EXTERNAL_STRING_WITH_ASCII_DATA_TYPE: - return short_external_internalized_string_with_ascii_data_map(); + case SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE: + return short_external_internalized_string_with_one_byte_data_map(); default: return NULL; // No match found. } } @@ -6628,6 +6638,11 @@ bool Heap::SetUp() { store_buffer()->SetUp(); + if (FLAG_parallel_recompilation) relocation_mutex_ = OS::CreateMutex(); +#ifdef DEBUG + relocation_mutex_locked_by_optimizer_thread_ = false; +#endif // DEBUG + return true; } @@ -6730,6 +6745,8 @@ void Heap::TearDown() { incremental_marking()->TearDown(); isolate_->memory_allocator()->TearDown(); + + delete relocation_mutex_; } @@ -7689,7 +7706,8 @@ void ErrorObjectList::DeferredFormatStackTrace(Isolate* isolate) { if (!getter_obj->IsJSFunction()) continue; getter_fun = JSFunction::cast(getter_obj); String* key = isolate->heap()->hidden_stack_trace_string(); - if (key != getter_fun->GetHiddenProperty(key)) continue; + Object* value = getter_fun->GetHiddenProperty(key); + if (key != value) continue; } budget--; @@ -7859,4 +7877,15 @@ void Heap::CheckpointObjectStats() { ClearObjectStats(); } + +Heap::RelocationLock::RelocationLock(Heap* heap) : heap_(heap) { + if (FLAG_parallel_recompilation) { + heap_->relocation_mutex_->Lock(); +#ifdef DEBUG + heap_->relocation_mutex_locked_by_optimizer_thread_ = + heap_->isolate()->optimizing_compiler_thread()->IsOptimizerThread(); +#endif // DEBUG + } +} + } } // namespace v8::internal diff --git a/deps/v8/src/heap.h b/deps/v8/src/heap.h index 66897ea862..7722079e55 100644 --- a/deps/v8/src/heap.h +++ b/deps/v8/src/heap.h @@ -28,7 +28,7 @@ #ifndef V8_HEAP_H_ #define V8_HEAP_H_ -#include <math.h> +#include <cmath> #include "allocation.h" #include "globals.h" @@ -95,12 +95,14 @@ namespace internal { V(Map, sliced_string_map, SlicedStringMap) \ V(Map, sliced_ascii_string_map, SlicedAsciiStringMap) \ V(Map, external_string_map, ExternalStringMap) \ - V(Map, external_string_with_ascii_data_map, ExternalStringWithAsciiDataMap) \ + V(Map, \ + 
external_string_with_one_byte_data_map, \ + ExternalStringWithOneByteDataMap) \ V(Map, external_ascii_string_map, ExternalAsciiStringMap) \ V(Map, short_external_string_map, ShortExternalStringMap) \ V(Map, \ - short_external_string_with_ascii_data_map, \ - ShortExternalStringWithAsciiDataMap) \ + short_external_string_with_one_byte_data_map, \ + ShortExternalStringWithOneByteDataMap) \ V(Map, internalized_string_map, InternalizedStringMap) \ V(Map, ascii_internalized_string_map, AsciiInternalizedStringMap) \ V(Map, cons_internalized_string_map, ConsInternalizedStringMap) \ @@ -109,8 +111,8 @@ namespace internal { external_internalized_string_map, \ ExternalInternalizedStringMap) \ V(Map, \ - external_internalized_string_with_ascii_data_map, \ - ExternalInternalizedStringWithAsciiDataMap) \ + external_internalized_string_with_one_byte_data_map, \ + ExternalInternalizedStringWithOneByteDataMap) \ V(Map, \ external_ascii_internalized_string_map, \ ExternalAsciiInternalizedStringMap) \ @@ -118,8 +120,8 @@ namespace internal { short_external_internalized_string_map, \ ShortExternalInternalizedStringMap) \ V(Map, \ - short_external_internalized_string_with_ascii_data_map, \ - ShortExternalInternalizedStringWithAsciiDataMap) \ + short_external_internalized_string_with_one_byte_data_map, \ + ShortExternalInternalizedStringWithOneByteDataMap) \ V(Map, \ short_external_ascii_internalized_string_map, \ ShortExternalAsciiInternalizedStringMap) \ @@ -240,6 +242,8 @@ namespace internal { V(elements_field_string, "%elements") \ V(length_field_string, "%length") \ V(function_class_string, "Function") \ + V(properties_field_symbol, "%properties") \ + V(payload_field_symbol, "%payload") \ V(illegal_argument_string, "illegal argument") \ V(MakeReferenceError_string, "MakeReferenceError") \ V(MakeSyntaxError_string, "MakeSyntaxError") \ @@ -693,6 +697,12 @@ class Heap { // Please note this does not perform a garbage collection. MUST_USE_RESULT MaybeObject* AllocateFunctionPrototype(JSFunction* function); + // Allocates a JS ArrayBuffer object. + // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation + // failed. + // Please note this does not perform a garbage collection. + MUST_USE_RESULT MaybeObject* AllocateJSArrayBuffer(); + // Allocates a Harmony proxy or function proxy. // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation // failed. @@ -1543,7 +1553,8 @@ class Heap { 8 * (Page::kPageSize > MB ? Page::kPageSize : MB); intptr_t OldGenPromotionLimit(intptr_t old_gen_size) { - const int divisor = FLAG_stress_compaction ? 10 : 3; + const int divisor = FLAG_stress_compaction ? 10 : + new_space_high_promotion_mode_active_ ? 1 : 3; intptr_t limit = Max(old_gen_size + old_gen_size / divisor, kMinimumPromotionLimit); limit += new_space_.Capacity(); @@ -1553,7 +1564,8 @@ class Heap { } intptr_t OldGenAllocationLimit(intptr_t old_gen_size) { - const int divisor = FLAG_stress_compaction ? 8 : 2; + const int divisor = FLAG_stress_compaction ? 8 : + new_space_high_promotion_mode_active_ ? 
1 : 2; intptr_t limit = Max(old_gen_size + old_gen_size / divisor, kMinimumAllocationLimit); limit += new_space_.Capacity(); @@ -1753,7 +1765,7 @@ class Heap { inline Isolate* isolate(); - void CallGCPrologueCallbacks(GCType gc_type); + void CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags); void CallGCEpilogueCallbacks(GCType gc_type); inline bool OldGenerationAllocationLimitReached(); @@ -1848,6 +1860,31 @@ class Heap { void CheckpointObjectStats(); + // We don't use a ScopedLock here since we want to lock the heap + // only when FLAG_parallel_recompilation is true. + class RelocationLock { + public: + explicit RelocationLock(Heap* heap); + + ~RelocationLock() { + if (FLAG_parallel_recompilation) { +#ifdef DEBUG + heap_->relocation_mutex_locked_by_optimizer_thread_ = false; +#endif // DEBUG + heap_->relocation_mutex_->Unlock(); + } + } + +#ifdef DEBUG + static bool IsLockedByOptimizerThread(Heap* heap) { + return heap->relocation_mutex_locked_by_optimizer_thread_; + } +#endif // DEBUG + + private: + Heap* heap_; + }; + private: Heap(); @@ -2295,6 +2332,11 @@ class Heap { unsigned int gc_count_at_last_idle_gc_; int scavenges_since_last_idle_round_; + // If the --deopt_every_n_garbage_collections flag is set to a positive value, + // this variable holds the number of garbage collections since the last + // deoptimization triggered by garbage collection. + int gcs_since_last_deopt_; + #ifdef VERIFY_HEAP int no_weak_embedded_maps_verification_scope_depth_; #endif @@ -2317,6 +2359,11 @@ class Heap { MemoryChunk* chunks_queued_for_free_; + Mutex* relocation_mutex_; +#ifdef DEBUG + bool relocation_mutex_locked_by_optimizer_thread_; +#endif // DEBUG; + friend class Factory; friend class GCTracer; friend class DisallowAllocationFailure; diff --git a/deps/v8/src/hydrogen-instructions.cc b/deps/v8/src/hydrogen-instructions.cc index e7060beee6..5f0cd9d317 100644 --- a/deps/v8/src/hydrogen-instructions.cc +++ b/deps/v8/src/hydrogen-instructions.cc @@ -1310,20 +1310,18 @@ const char* HUnaryMathOperation::OpName() const { switch (op()) { case kMathFloor: return "floor"; case kMathRound: return "round"; - case kMathCeil: return "ceil"; case kMathAbs: return "abs"; case kMathLog: return "log"; case kMathSin: return "sin"; case kMathCos: return "cos"; case kMathTan: return "tan"; - case kMathASin: return "asin"; - case kMathACos: return "acos"; - case kMathATan: return "atan"; case kMathExp: return "exp"; case kMathSqrt: return "sqrt"; - default: break; + case kMathPowHalf: return "pow-half"; + default: + UNREACHABLE(); + return NULL; } - return "(unknown operation)"; } @@ -1453,7 +1451,7 @@ HValue* HSub::Canonicalize() { HValue* HMul::Canonicalize() { if (IsIdentityOperation(left(), right(), 1)) return left(); if (IsIdentityOperation(right(), left(), 1)) return right(); - return HArithmeticBinaryOperation::Canonicalize(); + return this; } @@ -1683,9 +1681,15 @@ void HInstanceOf::PrintDataTo(StringStream* stream) { Range* HValue::InferRange(Zone* zone) { - // Untagged integer32 cannot be -0, all other representations can. - Range* result = new(zone) Range(); - result->set_can_be_minus_zero(!representation().IsInteger32()); + Range* result; + if (type().IsSmi()) { + result = new(zone) Range(Smi::kMinValue, Smi::kMaxValue); + result->set_can_be_minus_zero(false); + } else { + // Untagged integer32 cannot be -0, all other representations can. 
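Heap's new RelocationLock (declared above, acquired at the top of Scavenge) only touches the mutex when FLAG_parallel_recompilation is on, so single-threaded configurations pay nothing; the comment in the hunk explains that this is why an ordinary ScopedLock was not used. A standalone sketch of a flag-gated RAII lock (the flag and mutex here are local stand-ins):

#include <pthread.h>

static bool FLAG_parallel_recompilation = true;  // Stand-in for the V8 flag.

class RelocationLock {
 public:
  explicit RelocationLock(pthread_mutex_t* mutex)
      : mutex_(mutex), locked_(FLAG_parallel_recompilation) {
    // Cache the decision so the unlock always matches the lock, even if
    // the flag were to change while the lock is held.
    if (locked_) pthread_mutex_lock(mutex_);
  }
  ~RelocationLock() {
    if (locked_) pthread_mutex_unlock(mutex_);
  }

 private:
  pthread_mutex_t* mutex_;
  bool locked_;
};

int main() {
  pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
  {
    RelocationLock lock(&mutex);  // Held for the duration of the scope.
  }
  pthread_mutex_destroy(&mutex);
  return 0;
}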
+ result = new(zone) Range(); + result->set_can_be_minus_zero(!representation().IsInteger32()); + } return result; } @@ -2139,7 +2143,7 @@ HConstant::HConstant(double double_value, has_int32_value_(IsInteger32(double_value)), has_double_value_(true), is_internalized_string_(false), - boolean_value_(double_value != 0 && !isnan(double_value)), + boolean_value_(double_value != 0 && !std::isnan(double_value)), int32_value_(DoubleToInt32(double_value)), double_value_(double_value) { Initialize(r); @@ -2194,13 +2198,6 @@ void HConstant::PrintDataTo(StringStream* stream) { } -bool HArrayLiteral::IsCopyOnWrite() const { - if (!boilerplate_object_->IsJSObject()) return false; - return Handle<JSObject>::cast(boilerplate_object_)->elements()->map() == - HEAP->fixed_cow_array_map(); -} - - void HBinaryOperation::PrintDataTo(StringStream* stream) { left()->PrintNameTo(stream); stream->Add(" "); @@ -2222,13 +2219,24 @@ void HBinaryOperation::InferRepresentation(HInferRepresentation* h_infer) { } +bool HBinaryOperation::IgnoreObservedOutputRepresentation( + Representation current_rep) { + return observed_output_representation_.IsDouble() && + current_rep.IsInteger32() && + // Mul in Integer32 mode would be too precise. + !this->IsMul() && + // TODO(jkummerow): Remove blacklisting of Div when the Div + // instruction has learned not to deopt when the remainder is + // non-zero but all uses are truncating. + !this->IsDiv() && + CheckUsesForFlag(kTruncatingToInt32); +} + + Representation HBinaryOperation::RepresentationFromInputs() { // Determine the worst case of observed input representations and // the currently assumed output representation. Representation rep = representation(); - if (observed_output_representation_.is_more_general_than(rep)) { - rep = observed_output_representation_; - } for (int i = 1; i <= 2; ++i) { Representation input_rep = observed_input_representation(i); if (input_rep.is_more_general_than(rep)) rep = input_rep; @@ -2238,20 +2246,26 @@ Representation HBinaryOperation::RepresentationFromInputs() { Representation left_rep = left()->representation(); Representation right_rep = right()->representation(); - if (left_rep.is_more_general_than(rep) && - left()->CheckFlag(kFlexibleRepresentation)) { + if (left_rep.is_more_general_than(rep) && !left_rep.IsTagged()) { rep = left_rep; } - if (right_rep.is_more_general_than(rep) && - right()->CheckFlag(kFlexibleRepresentation)) { + if (right_rep.is_more_general_than(rep) && !right_rep.IsTagged()) { rep = right_rep; } + // Consider observed output representation, but ignore it if it's Double, + // this instruction is not a division, and all its uses are truncating + // to Integer32. 
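RepresentationFromInputs now consults the observed output representation last and, per IgnoreObservedOutputRepresentation above, discards a Double observation when the instruction has settled on Integer32 and every use truncates anyway. A much-simplified sketch of that pick-the-most-general-unless-vetoed logic (the enum ordering and the veto rule are reduced here; the real code also excludes Tagged inputs and special-cases Mul and Div):

#include <cstdio>

enum Rep { kNone, kInteger32, kDouble, kTagged };  // Ordered by generality.

static bool IsMoreGeneral(Rep a, Rep b) { return a > b; }

static Rep PickRepresentation(Rep current, Rep input1, Rep input2,
                              Rep observed_output, bool uses_truncate) {
  Rep rep = current;
  if (IsMoreGeneral(input1, rep)) rep = input1;
  if (IsMoreGeneral(input2, rep)) rep = input2;
  // The veto: a Double observation adds nothing if the result is only
  // ever consumed as a truncated int32.
  bool ignore_output =
      observed_output == kDouble && rep == kInteger32 && uses_truncate;
  if (IsMoreGeneral(observed_output, rep) && !ignore_output) {
    rep = observed_output;
  }
  return rep;
}

int main() {
  // All uses truncate, so the Double observation is discarded.
  Rep r = PickRepresentation(kNone, kInteger32, kInteger32, kDouble, true);
  std::printf("%s\n", r == kInteger32 ? "Integer32" : "other");
  return 0;
}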
+ if (observed_output_representation_.is_more_general_than(rep) && + !IgnoreObservedOutputRepresentation(rep)) { + rep = observed_output_representation_; + } return rep; } void HBinaryOperation::AssumeRepresentation(Representation r) { - set_observed_input_representation(r, r); + set_observed_input_representation(1, r); + set_observed_input_representation(2, r); HValue::AssumeRepresentation(r); } @@ -3176,7 +3190,7 @@ HInstruction* HStringCharFromCode::New( HConstant* c_code = HConstant::cast(char_code); Isolate* isolate = Isolate::Current(); if (c_code->HasNumberValue()) { - if (isfinite(c_code->DoubleValue())) { + if (std::isfinite(c_code->DoubleValue())) { uint32_t code = c_code->NumberValueAsInteger32() & 0xffff; return new(zone) HConstant(LookupSingleCharacterStringFromCode(isolate, code), @@ -3209,10 +3223,10 @@ HInstruction* HUnaryMathOperation::New( HConstant* constant = HConstant::cast(value); if (!constant->HasNumberValue()) break; double d = constant->DoubleValue(); - if (isnan(d)) { // NaN poisons everything. + if (std::isnan(d)) { // NaN poisons everything. return H_CONSTANT_DOUBLE(OS::nan_value()); } - if (isinf(d)) { // +Infinity and -Infinity. + if (std::isinf(d)) { // +Infinity and -Infinity. switch (op) { case kMathSin: case kMathCos: @@ -3276,7 +3290,7 @@ HInstruction* HPower::New(Zone* zone, HValue* left, HValue* right) { if (c_left->HasNumberValue() && c_right->HasNumberValue()) { double result = power_helper(c_left->DoubleValue(), c_right->DoubleValue()); - return H_CONSTANT_DOUBLE(isnan(result) ? OS::nan_value() : result); + return H_CONSTANT_DOUBLE(std::isnan(result) ? OS::nan_value() : result); } } return new(zone) HPower(left, right); @@ -3449,6 +3463,42 @@ void HBitwise::PrintDataTo(StringStream* stream) { } +void HPhi::SimplifyConstantInputs() { + // Convert constant inputs to integers when all uses are truncating. + // This must happen before representation inference takes place. + if (!CheckUsesForFlag(kTruncatingToInt32)) return; + for (int i = 0; i < OperandCount(); ++i) { + if (!OperandAt(i)->IsConstant()) return; + } + HGraph* graph = block()->graph(); + for (int i = 0; i < OperandCount(); ++i) { + HConstant* operand = HConstant::cast(OperandAt(i)); + if (operand->HasInteger32Value()) { + continue; + } else if (operand->HasDoubleValue()) { + HConstant* integer_input = + new(graph->zone()) HConstant(DoubleToInt32(operand->DoubleValue()), + Representation::Integer32()); + integer_input->InsertAfter(operand); + SetOperandAt(i, integer_input); + } else if (operand == graph->GetConstantTrue()) { + SetOperandAt(i, graph->GetConstant1()); + } else { + // This catches |false|, |undefined|, strings and objects. + SetOperandAt(i, graph->GetConstant0()); + } + } + // Overwrite observed input representations because they are likely Tagged. 
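// [Editor's note: sketch, not part of the upstream patch.] The
// SimplifyConstantInputs pass above rewrites an all-constant phi whose
// uses all truncate to int32, mirroring JavaScript's ToInt32 truncation:
//
//   double d                          -->  HConstant(DoubleToInt32(d))
//   true                              -->  GetConstant1()
//   false / undefined / non-numbers   -->  GetConstant0()
//
// So for "var x = (cond ? 1.5 : undefined) | 0;" the phi inputs become the
// integer constants 1 and 0 before representation inference runs.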
+ for (HUseIterator it(uses()); !it.Done(); it.Advance()) { + HValue* use = it.value(); + if (use->IsBinaryOperation()) { + HBinaryOperation::cast(use)->set_observed_input_representation( + it.index(), Representation::Integer32()); + } + } +} + + void HPhi::InferRepresentation(HInferRepresentation* h_infer) { ASSERT(CheckFlag(kFlexibleRepresentation)); // If there are non-Phi uses, and all of them have observed the same diff --git a/deps/v8/src/hydrogen-instructions.h b/deps/v8/src/hydrogen-instructions.h index cfbcc135df..aa89f7146f 100644 --- a/deps/v8/src/hydrogen-instructions.h +++ b/deps/v8/src/hydrogen-instructions.h @@ -1757,11 +1757,15 @@ class HChange: public HUnaryOperation { ASSERT(!value->representation().IsNone() && !to.IsNone()); ASSERT(!value->representation().Equals(to)); set_representation(to); - set_type(HType::TaggedNumber()); SetFlag(kUseGVN); if (deoptimize_on_undefined) SetFlag(kDeoptimizeOnUndefined); if (is_truncating) SetFlag(kTruncatingToInt32); - if (to.IsTagged()) SetGVNFlag(kChangesNewSpacePromotion); + if (value->type().IsSmi()) { + set_type(HType::Smi()); + } else { + set_type(HType::TaggedNumber()); + if (to.IsTagged()) SetGVNFlag(kChangesNewSpacePromotion); + } } virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited); @@ -2226,6 +2230,8 @@ class HInvokeFunction: public HBinaryCall { int argument_count) : HBinaryCall(context, function, argument_count), known_function_(known_function) { + formal_parameter_count_ = known_function.is_null() + ? 0 : known_function->shared()->formal_parameter_count(); } virtual Representation RequiredInputRepresentation(int index) { @@ -2235,20 +2241,25 @@ class HInvokeFunction: public HBinaryCall { HValue* context() { return first(); } HValue* function() { return second(); } Handle<JSFunction> known_function() { return known_function_; } + int formal_parameter_count() const { return formal_parameter_count_; } DECLARE_CONCRETE_INSTRUCTION(InvokeFunction) private: Handle<JSFunction> known_function_; + int formal_parameter_count_; }; class HCallConstantFunction: public HCall<0> { public: HCallConstantFunction(Handle<JSFunction> function, int argument_count) - : HCall<0>(argument_count), function_(function) { } + : HCall<0>(argument_count), + function_(function), + formal_parameter_count_(function->shared()->formal_parameter_count()) {} Handle<JSFunction> function() const { return function_; } + int formal_parameter_count() const { return formal_parameter_count_; } bool IsApplyFunction() const { return function_->code() == @@ -2265,6 +2276,7 @@ class HCallConstantFunction: public HCall<0> { private: Handle<JSFunction> function_; + int formal_parameter_count_; }; @@ -2349,11 +2361,14 @@ class HCallGlobal: public HUnaryCall { class HCallKnownGlobal: public HCall<0> { public: HCallKnownGlobal(Handle<JSFunction> target, int argument_count) - : HCall<0>(argument_count), target_(target) { } + : HCall<0>(argument_count), + target_(target), + formal_parameter_count_(target->shared()->formal_parameter_count()) { } virtual void PrintDataTo(StringStream* stream); Handle<JSFunction> target() const { return target_; } + int formal_parameter_count() const { return formal_parameter_count_; } virtual Representation RequiredInputRepresentation(int index) { return Representation::None(); @@ -2363,6 +2378,7 @@ class HCallKnownGlobal: public HCall<0> { private: Handle<JSFunction> target_; + int formal_parameter_count_; }; @@ -2587,24 +2603,26 @@ class HUnaryMathOperation: public HTemplateInstruction<2> { switch (op) { case kMathFloor: case 
kMathRound: - case kMathCeil: set_representation(Representation::Integer32()); break; case kMathAbs: // Not setting representation here: it is None intentionally. SetFlag(kFlexibleRepresentation); + // TODO(svenpanne) This flag is actually only needed if representation() + // is tagged, and not when it is an unboxed double or unboxed integer. SetGVNFlag(kChangesNewSpacePromotion); break; - case kMathSqrt: - case kMathPowHalf: case kMathLog: case kMathSin: case kMathCos: case kMathTan: set_representation(Representation::Double()); + // These operations use the TranscendentalCache, so they may allocate. SetGVNFlag(kChangesNewSpacePromotion); break; case kMathExp: + case kMathSqrt: + case kMathPowHalf: set_representation(Representation::Double()); break; default: @@ -2680,39 +2698,27 @@ class HLoadExternalArrayPointer: public HUnaryOperation { class HCheckMaps: public HTemplateInstruction<2> { public: - HCheckMaps(HValue* value, Handle<Map> map, Zone* zone, - HValue* typecheck = NULL) - : map_unique_ids_(0, zone) { - SetOperandAt(0, value); - // If callers don't depend on a typecheck, they can pass in NULL. In that - // case we use a copy of the |value| argument as a dummy value. - SetOperandAt(1, typecheck != NULL ? typecheck : value); - set_representation(Representation::Tagged()); - SetFlag(kUseGVN); - SetFlag(kTrackSideEffectDominators); - SetGVNFlag(kDependsOnMaps); - SetGVNFlag(kDependsOnElementsKind); - map_set()->Add(map, zone); + static HCheckMaps* New(HValue* value, Handle<Map> map, Zone* zone, + HValue *typecheck = NULL) { + HCheckMaps* check_map = new(zone) HCheckMaps(value, zone, typecheck); + check_map->map_set_.Add(map, zone); + return check_map; } - HCheckMaps(HValue* value, SmallMapList* maps, Zone* zone) - : map_unique_ids_(0, zone) { - SetOperandAt(0, value); - SetOperandAt(1, value); - set_representation(Representation::Tagged()); - SetFlag(kUseGVN); - SetFlag(kTrackSideEffectDominators); - SetGVNFlag(kDependsOnMaps); - SetGVNFlag(kDependsOnElementsKind); + + static HCheckMaps* New(HValue* value, SmallMapList* maps, Zone* zone, + HValue *typecheck = NULL) { + HCheckMaps* check_map = new(zone) HCheckMaps(value, zone, typecheck); for (int i = 0; i < maps->length(); i++) { - map_set()->Add(maps->at(i), zone); + check_map->map_set_.Add(maps->at(i), zone); } - map_set()->Sort(); + check_map->map_set_.Sort(); + return check_map; } - static HCheckMaps* NewWithTransitions(HValue* object, Handle<Map> map, + static HCheckMaps* NewWithTransitions(HValue* value, Handle<Map> map, Zone* zone) { - HCheckMaps* check_map = new(zone) HCheckMaps(object, map, zone); - SmallMapList* map_set = check_map->map_set(); + HCheckMaps* check_map = new(zone) HCheckMaps(value, zone, value); + check_map->map_set_.Add(map, zone); // Since transitioned elements maps of the initial map don't fail the map // check, the CheckMaps instruction doesn't need to depend on ElementsKinds. @@ -2725,10 +2731,10 @@ class HCheckMaps: public HTemplateInstruction<2> { Map* transitioned_map = map->LookupElementsTransitionMap(kind); if (transitioned_map) { - map_set->Add(Handle<Map>(transitioned_map), zone); + check_map->map_set_.Add(Handle<Map>(transitioned_map), zone); } }; - map_set->Sort(); + check_map->map_set_.Sort(); return check_map; } @@ -2763,6 +2769,20 @@ class HCheckMaps: public HTemplateInstruction<2> { } private: + // Clients should use one of the static New* methods above. 
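// [Editor's note: sketch, not part of the upstream patch.] Call sites in
// this patch move from direct construction to the factories, e.g.:
//
//   // before:  new(zone) HCheckMaps(obj, map, zone);
//   // after:
//   HCheckMaps* check = HCheckMaps::New(obj, map, zone);
//   AddInstruction(check);
//
// With the constructor private, map_set_ can only be populated through
// New()/NewWithTransitions(), which keep the multi-map set sorted.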
+ HCheckMaps(HValue* value, Zone *zone, HValue* typecheck) + : map_unique_ids_(0, zone) { + SetOperandAt(0, value); + // Use the object value for the dependency if NULL is passed. + // TODO(titzer): do GVN flags already express this dependency? + SetOperandAt(1, typecheck != NULL ? typecheck : value); + set_representation(Representation::Tagged()); + SetFlag(kUseGVN); + SetFlag(kTrackSideEffectDominators); + SetGVNFlag(kDependsOnMaps); + SetGVNFlag(kDependsOnElementsKind); + } + SmallMapList map_set_; ZoneList<UniqueValueId> map_unique_ids_; }; @@ -3123,6 +3143,8 @@ class HPhi: public HValue { return true; } + void SimplifyConstantInputs(); + protected: virtual void DeleteFromGraph(); virtual void InternalSetOperandAt(int index, HValue* value) { @@ -3231,6 +3253,7 @@ class HConstant: public HTemplateInstruction<0> { if (handle_.is_null()) { handle_ = FACTORY->NewNumber(double_value_, TENURED); } + ALLOW_HANDLE_DEREF(Isolate::Current(), "smi check"); ASSERT(has_int32_value_ || !handle_->IsSmi()); return handle_; } @@ -3239,7 +3262,7 @@ class HConstant: public HTemplateInstruction<0> { return has_double_value_ && (BitCast<int64_t>(double_value_) == BitCast<int64_t>(-0.0) || FixedDoubleArray::is_the_hole_nan(double_value_) || - isnan(double_value_)); + std::isnan(double_value_)); } bool ImmortalImmovable() const { @@ -3254,8 +3277,6 @@ class HConstant: public HTemplateInstruction<0> { } ASSERT(!handle_.is_null()); - HandleDereferenceGuard allow_dereference_for_immovable_check( - isolate(), HandleDereferenceGuard::ALLOW); Heap* heap = isolate()->heap(); ASSERT(unique_id_ != UniqueValueId(heap->minus_zero_value())); ASSERT(unique_id_ != UniqueValueId(heap->nan_value())); @@ -3275,9 +3296,7 @@ class HConstant: public HTemplateInstruction<0> { return has_int32_value_; } - virtual bool EmitAtUses() { - return !representation().IsDouble() || IsSpecialDouble(); - } + virtual bool EmitAtUses() { return !representation().IsDouble(); } virtual void PrintDataTo(StringStream* stream); virtual HType CalculateInferredType(); bool IsInteger() { return handle()->IsSmi(); } @@ -3427,10 +3446,9 @@ class HBinaryOperation: public HTemplateInstruction<3> { return right(); } - void set_observed_input_representation(Representation left, - Representation right) { - observed_input_representation_[0] = left; - observed_input_representation_[1] = right; + void set_observed_input_representation(int index, Representation rep) { + ASSERT(index >= 1 && index <= 2); + observed_input_representation_[index - 1] = rep; } virtual void initialize_output_representation(Representation observed) { @@ -3453,6 +3471,8 @@ class HBinaryOperation: public HTemplateInstruction<3> { DECLARE_ABSTRACT_INSTRUCTION(BinaryOperation) private: + bool IgnoreObservedOutputRepresentation(Representation current_rep); + Representation observed_input_representation_[2]; Representation observed_output_representation_; }; @@ -3932,6 +3952,10 @@ class HCompareObjectEqAndBranch: public HTemplateControlInstruction<2, 2> { return Representation::Tagged(); } + virtual Representation observed_input_representation(int index) { + return Representation::Tagged(); + } + DECLARE_CONCRETE_INSTRUCTION(CompareObjectEqAndBranch) }; @@ -4398,6 +4422,17 @@ class HMul: public HArithmeticBinaryOperation { HValue* left, HValue* right); + static HInstruction* NewImul(Zone* zone, + HValue* context, + HValue* left, + HValue* right) { + HMul* mul = new(zone) HMul(context, left, right); + // TODO(mstarzinger): Prevent bailout on minus zero for imul. 
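// [Editor's note: illustrative sketch, not part of the upstream patch.]
// NewImul models Math.imul: a 32-bit multiply that wraps instead of
// overflowing, hence the ClearFlag(kCanOverflow) below. Its scalar
// semantics, written out:
//
//   int32_t imul(int32_t a, int32_t b) {
//     // Unsigned multiplication wraps mod 2^32; casting back yields the
//     // signed 32-bit result JavaScript's Math.imul returns.
//     return static_cast<int32_t>(static_cast<uint32_t>(a) *
//                                 static_cast<uint32_t>(b));
//   }
//
// imul(0x7fffffff, 2) == -2, where a checked HMul would deoptimize.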
+ mul->AssumeRepresentation(Representation::Integer32()); + mul->ClearFlag(HValue::kCanOverflow); + return mul; + } + virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited); virtual HValue* Canonicalize(); @@ -4878,6 +4913,12 @@ class HAllocateObject: public HTemplateInstruction<1> { SetOperandAt(0, context); set_representation(Representation::Tagged()); SetGVNFlag(kChangesNewSpacePromotion); + constructor_initial_map_ = constructor->has_initial_map() + ? Handle<Map>(constructor->initial_map()) + : Handle<Map>::null(); + // If slack tracking finished, the instance size and property counts + // remain unchanged so that we can allocate memory for the object. + ASSERT(!constructor->shared()->IsInobjectSlackTrackingInProgress()); } // Maximum instance size for which allocations will be inlined. @@ -4885,13 +4926,14 @@ class HAllocateObject: public HTemplateInstruction<1> { HValue* context() { return OperandAt(0); } Handle<JSFunction> constructor() { return constructor_; } + Handle<Map> constructor_initial_map() { return constructor_initial_map_; } virtual Representation RequiredInputRepresentation(int index) { return Representation::Tagged(); } virtual Handle<Map> GetMonomorphicJSObjectMap() { - ASSERT(constructor()->has_initial_map()); - return Handle<Map>(constructor()->initial_map()); + ASSERT(!constructor_initial_map_.is_null()); + return constructor_initial_map_; } virtual HType CalculateInferredType(); @@ -4902,6 +4944,7 @@ class HAllocateObject: public HTemplateInstruction<1> { // virtual bool IsDeletable() const { return true; } Handle<JSFunction> constructor_; + Handle<Map> constructor_initial_map_; }; @@ -4923,6 +4966,19 @@ class HAllocate: public HTemplateInstruction<2> { SetGVNFlag(kChangesNewSpacePromotion); } + static Flags DefaultFlags() { + return CAN_ALLOCATE_IN_NEW_SPACE; + } + + static Flags DefaultFlags(ElementsKind kind) { + Flags flags = CAN_ALLOCATE_IN_NEW_SPACE; + if (IsFastDoubleElementsKind(kind)) { + flags = static_cast<HAllocate::Flags>( + flags | HAllocate::ALLOCATE_DOUBLE_ALIGNED); + } + return flags; + } + HValue* context() { return OperandAt(0); } HValue* size() { return OperandAt(1); } @@ -6049,27 +6105,35 @@ class HArrayLiteral: public HMaterializedLiteral<1> { public: HArrayLiteral(HValue* context, Handle<HeapObject> boilerplate_object, + Handle<FixedArray> literals, int length, int literal_index, int depth, AllocationSiteMode mode) : HMaterializedLiteral<1>(literal_index, depth, mode), length_(length), - boilerplate_object_(boilerplate_object) { + boilerplate_object_(boilerplate_object), + literals_(literals) { SetOperandAt(0, context); SetGVNFlag(kChangesNewSpacePromotion); + + boilerplate_elements_kind_ = boilerplate_object_->IsJSObject() + ? 
Handle<JSObject>::cast(boilerplate_object_)->GetElementsKind() + : TERMINAL_FAST_ELEMENTS_KIND; + + is_copy_on_write_ = boilerplate_object_->IsJSObject() && + (Handle<JSObject>::cast(boilerplate_object_)->elements()->map() == + HEAP->fixed_cow_array_map()); } HValue* context() { return OperandAt(0); } ElementsKind boilerplate_elements_kind() const { - if (!boilerplate_object_->IsJSObject()) { - return TERMINAL_FAST_ELEMENTS_KIND; - } - return Handle<JSObject>::cast(boilerplate_object_)->GetElementsKind(); + return boilerplate_elements_kind_; } Handle<HeapObject> boilerplate_object() const { return boilerplate_object_; } + Handle<FixedArray> literals() const { return literals_; } int length() const { return length_; } - bool IsCopyOnWrite() const; + bool IsCopyOnWrite() const { return is_copy_on_write_; } virtual Representation RequiredInputRepresentation(int index) { return Representation::Tagged(); @@ -6081,6 +6145,9 @@ class HArrayLiteral: public HMaterializedLiteral<1> { private: int length_; Handle<HeapObject> boilerplate_object_; + Handle<FixedArray> literals_; + ElementsKind boilerplate_elements_kind_; + bool is_copy_on_write_; }; @@ -6088,12 +6155,15 @@ class HObjectLiteral: public HMaterializedLiteral<1> { public: HObjectLiteral(HValue* context, Handle<FixedArray> constant_properties, + Handle<FixedArray> literals, bool fast_elements, int literal_index, int depth, bool has_function) : HMaterializedLiteral<1>(literal_index, depth), constant_properties_(constant_properties), + constant_properties_length_(constant_properties->length()), + literals_(literals), fast_elements_(fast_elements), has_function_(has_function) { SetOperandAt(0, context); @@ -6104,6 +6174,10 @@ class HObjectLiteral: public HMaterializedLiteral<1> { Handle<FixedArray> constant_properties() const { return constant_properties_; } + int constant_properties_length() const { + return constant_properties_length_; + } + Handle<FixedArray> literals() const { return literals_; } bool fast_elements() const { return fast_elements_; } bool has_function() const { return has_function_; } @@ -6116,8 +6190,10 @@ class HObjectLiteral: public HMaterializedLiteral<1> { private: Handle<FixedArray> constant_properties_; - bool fast_elements_; - bool has_function_; + int constant_properties_length_; + Handle<FixedArray> literals_; + bool fast_elements_ : 1; + bool has_function_ : 1; }; @@ -6160,7 +6236,11 @@ class HFunctionLiteral: public HTemplateInstruction<1> { HFunctionLiteral(HValue* context, Handle<SharedFunctionInfo> shared, bool pretenure) - : shared_info_(shared), pretenure_(pretenure) { + : shared_info_(shared), + pretenure_(pretenure), + has_no_literals_(shared->num_literals() == 0), + is_generator_(shared->is_generator()), + language_mode_(shared->language_mode()) { SetOperandAt(0, context); set_representation(Representation::Tagged()); SetGVNFlag(kChangesNewSpacePromotion); @@ -6177,12 +6257,18 @@ class HFunctionLiteral: public HTemplateInstruction<1> { Handle<SharedFunctionInfo> shared_info() const { return shared_info_; } bool pretenure() const { return pretenure_; } + bool has_no_literals() const { return has_no_literals_; } + bool is_generator() const { return is_generator_; } + LanguageMode language_mode() const { return language_mode_; } private: virtual bool IsDeletable() const { return true; } Handle<SharedFunctionInfo> shared_info_; - bool pretenure_; + bool pretenure_ : 1; + bool has_no_literals_ : 1; + bool is_generator_ : 1; + LanguageMode language_mode_; }; diff --git a/deps/v8/src/hydrogen.cc 
b/deps/v8/src/hydrogen.cc index e5270575e9..a978834c0f 100644 --- a/deps/v8/src/hydrogen.cc +++ b/deps/v8/src/hydrogen.cc @@ -25,9 +25,11 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -#include "v8.h" #include "hydrogen.h" +#include <algorithm> + +#include "v8.h" #include "codegen.h" #include "full-codegen.h" #include "hashmap.h" @@ -509,9 +511,8 @@ class ReachabilityAnalyzer BASE_EMBEDDED { void HGraph::Verify(bool do_full_verify) const { - // Allow dereferencing for debug mode verification. - HandleDereferenceGuard allow_handle_deref(isolate(), - HandleDereferenceGuard::ALLOW); + Heap::RelocationLock(isolate()->heap()); + ALLOW_HANDLE_DEREF(isolate(), "debug mode verification"); for (int i = 0; i < blocks_.length(); i++) { HBasicBlock* block = blocks_.at(i); @@ -603,6 +604,19 @@ HConstant* HGraph::GetConstantInt32(SetOncePointer<HConstant>* pointer, } +HConstant* HGraph::GetConstantSmi(SetOncePointer<HConstant>* pointer, + int32_t value) { + if (!pointer->is_set()) { + HConstant* constant = + new(zone()) HConstant(Handle<Object>(Smi::FromInt(value), isolate()), + Representation::Tagged()); + constant->InsertAfter(GetConstantUndefined()); + pointer->set(constant); + } + return pointer->get(); +} + + HConstant* HGraph::GetConstant0() { return GetConstantInt32(&constant_0_, 0); } @@ -638,69 +652,20 @@ HConstant* HGraph::GetConstant##Name() { \ DEFINE_GET_CONSTANT(True, true, HType::Boolean(), true) DEFINE_GET_CONSTANT(False, false, HType::Boolean(), false) DEFINE_GET_CONSTANT(Hole, the_hole, HType::Tagged(), false) - -#undef DEFINE_GET_CONSTANT - - -HGraphBuilder::CheckBuilder::CheckBuilder(HGraphBuilder* builder) - : builder_(builder), - finished_(false) { - HEnvironment* env = builder->environment(); - failure_block_ = builder->CreateBasicBlock(env->Copy()); - merge_block_ = builder->CreateBasicBlock(env->Copy()); -} - - -HValue* HGraphBuilder::CheckBuilder::CheckNotUndefined(HValue* value) { - HEnvironment* env = builder_->environment(); - HCompareObjectEqAndBranch* compare = - new(zone()) HCompareObjectEqAndBranch( - value, - builder_->graph()->GetConstantUndefined()); - HBasicBlock* success_block = builder_->CreateBasicBlock(env->Copy()); - HBasicBlock* failure_block = builder_->CreateBasicBlock(env->Copy()); - compare->SetSuccessorAt(0, failure_block); - compare->SetSuccessorAt(1, success_block); - failure_block->GotoNoSimulate(failure_block_); - builder_->current_block()->Finish(compare); - builder_->set_current_block(success_block); - return compare; -} +DEFINE_GET_CONSTANT(Null, null, HType::Tagged(), false) -HValue* HGraphBuilder::CheckBuilder::CheckIntegerCompare(HValue* left, - HValue* right, - Token::Value op) { - HEnvironment* env = builder_->environment(); - HCompareIDAndBranch* compare = - new(zone()) HCompareIDAndBranch(left, right, op); - compare->AssumeRepresentation(Representation::Integer32()); - HBasicBlock* success_block = builder_->CreateBasicBlock(env->Copy()); - HBasicBlock* failure_block = builder_->CreateBasicBlock(env->Copy()); - compare->SetSuccessorAt(0, success_block); - compare->SetSuccessorAt(1, failure_block); - failure_block->GotoNoSimulate(failure_block_); - builder_->current_block()->Finish(compare); - builder_->set_current_block(success_block); - return compare; +HConstant* HGraph::GetConstantSmi0() { + return GetConstantSmi(&constant_smi_0_, 0); } -HValue* HGraphBuilder::CheckBuilder::CheckIntegerEq(HValue* left, - HValue* right) { - return 
CheckIntegerCompare(left, right, Token::EQ); +HConstant* HGraph::GetConstantSmi1() { + return GetConstantSmi(&constant_smi_1_, 1); } -void HGraphBuilder::CheckBuilder::End() { - ASSERT(!finished_); - builder_->current_block()->GotoNoSimulate(merge_block_); - if (failure_block_->HasPredecessor()) { - failure_block_->FinishExitWithDeoptimization(HDeoptimize::kUseAll); - } - builder_->set_current_block(merge_block_); - finished_ = true; -} +#undef DEFINE_GET_CONSTANT HConstant* HGraph::GetInvalidContext() { @@ -714,8 +679,6 @@ HGraphBuilder::IfBuilder::IfBuilder(HGraphBuilder* builder, int position) finished_(false), did_then_(false), did_else_(false), - deopt_then_(false), - deopt_else_(false), did_and_(false), did_or_(false), captured_(false), @@ -736,8 +699,6 @@ HGraphBuilder::IfBuilder::IfBuilder( finished_(false), did_then_(false), did_else_(false), - deopt_then_(false), - deopt_else_(false), did_and_(false), did_or_(false), captured_(false), @@ -835,8 +796,9 @@ void HGraphBuilder::IfBuilder::CaptureContinuation( HBasicBlock* true_block = last_true_block_ == NULL ? first_true_block_ : last_true_block_; - HBasicBlock* false_block = - did_else_ ? builder_->current_block() : first_false_block_; + HBasicBlock* false_block = did_else_ && (first_false_block_ != NULL) + ? builder_->current_block() + : first_false_block_; continuation->Capture(true_block, false_block, position_); captured_ = true; End(); @@ -869,12 +831,23 @@ void HGraphBuilder::IfBuilder::Else() { void HGraphBuilder::IfBuilder::Deopt() { - ASSERT(!(did_then_ ^ did_else_)); HBasicBlock* block = builder_->current_block(); block->FinishExitWithDeoptimization(HDeoptimize::kUseAll); if (did_else_) { first_false_block_ = NULL; - did_else_ = false; + } else { + first_true_block_ = NULL; + } +} + + +void HGraphBuilder::IfBuilder::Return(HValue* value) { + HBasicBlock* block = builder_->current_block(); + block->Finish(new(zone()) HReturn(value, + builder_->environment()->LookupContext(), + builder_->graph()->GetConstantMinus1())); + if (did_else_) { + first_false_block_ = NULL; } else { first_true_block_ = NULL; } @@ -888,8 +861,9 @@ void HGraphBuilder::IfBuilder::End() { last_true_block_ = builder_->current_block(); } if (first_true_block_ == NULL) { - // Deopt on true. Nothing to do, just continue the else block. + // Deopt on true. Nothing to do, just continue the false block. } else if (first_false_block_ == NULL) { + // Deopt on false. Nothing to do except switching to the true block. 
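// [Editor's note: sketch, not part of the upstream patch.] With Deopt()
// callable from either arm and ElseDeopt() as a shorthand, the deleted
// CheckBuilder pattern is expressed directly on IfBuilder, as in the
// bounds check rewritten later in this patch:
//
//   IfBuilder negative_checker(this);
//   negative_checker.IfCompare(key, graph()->GetConstant0(), Token::GTE);
//   negative_checker.Then();
//   /* ... in-range fast path ... */
//   negative_checker.ElseDeopt();  // == Else(); Deopt(); End();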
builder_->set_current_block(last_true_block_); } else { HEnvironment* merge_env = last_true_block_->last_environment()->Copy(); @@ -1081,7 +1055,7 @@ HValue* HGraphBuilder::BuildCheckNonSmi(HValue* obj) { HValue* HGraphBuilder::BuildCheckMap(HValue* obj, Handle<Map> map) { - HCheckMaps* check = new(zone()) HCheckMaps(obj, map, zone()); + HCheckMaps* check = HCheckMaps::New(obj, map, zone()); AddInstruction(check); return check; } @@ -1297,7 +1271,7 @@ HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess( AddInstruction(new(zone) HLoadElements(object, mapcheck)); if (is_store && (fast_elements || fast_smi_only_elements) && store_mode != STORE_NO_TRANSITION_HANDLE_COW) { - HCheckMaps* check_cow_map = new(zone) HCheckMaps( + HCheckMaps* check_cow_map = HCheckMaps::New( elements, isolate()->factory()->fixed_array_map(), zone); check_cow_map->ClearGVNFlag(kDependsOnElementsKind); AddInstruction(check_cow_map); @@ -1319,14 +1293,15 @@ HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess( IfBuilder length_checker(this); length_checker.IfCompare(key, length, Token::LT); length_checker.Then(); - CheckBuilder negative_checker(this); - HValue* bounds_check = negative_checker.CheckIntegerCompare( + IfBuilder negative_checker(this); + HValue* bounds_check = negative_checker.IfCompare( key, graph()->GetConstant0(), Token::GTE); - negative_checker.End(); + negative_checker.Then(); HInstruction* result = BuildExternalArrayElementAccess( external_elements, key, val, bounds_check, elements_kind, is_store); AddInstruction(result); + negative_checker.ElseDeopt(); length_checker.End(); return result; } else { @@ -1371,7 +1346,7 @@ HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess( elements = BuildCopyElementsOnWrite(object, elements, elements_kind, length); } else { - HCheckMaps* check_cow_map = new(zone) HCheckMaps( + HCheckMaps* check_cow_map = HCheckMaps::New( elements, isolate()->factory()->fixed_array_map(), zone); check_cow_map->ClearGVNFlag(kDependsOnElementsKind); AddInstruction(check_cow_map); @@ -1407,8 +1382,10 @@ HValue* HGraphBuilder::BuildAllocateElements(HValue* context, total_size->ChangeRepresentation(Representation::Integer32()); total_size->ClearFlag(HValue::kCanOverflow); - HAllocate::Flags flags = HAllocate::CAN_ALLOCATE_IN_NEW_SPACE; + HAllocate::Flags flags = HAllocate::DefaultFlags(kind); if (FLAG_pretenure_literals) { + // TODO(hpayer): When pretenuring can be internalized, flags can become + // private to HAllocate. 
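// [Editor's note: not part of the upstream patch.] DefaultFlags(kind),
// added in hydrogen-instructions.h above, now owns the double-alignment
// decision, so call sites like this one no longer OR in
// ALLOCATE_DOUBLE_ALIGNED by hand:
//
//   HAllocate::Flags flags = HAllocate::DefaultFlags(kind);
//   // CAN_ALLOCATE_IN_NEW_SPACE, plus ALLOCATE_DOUBLE_ALIGNED when
//   // IsFastDoubleElementsKind(kind), since unboxed doubles need
//   // 8-byte-aligned storage.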
if (IsFastDoubleElementsKind(kind)) { flags = static_cast<HAllocate::Flags>( flags | HAllocate::CAN_ALLOCATE_IN_OLD_DATA_SPACE); @@ -1417,10 +1394,6 @@ HValue* HGraphBuilder::BuildAllocateElements(HValue* context, flags | HAllocate::CAN_ALLOCATE_IN_OLD_POINTER_SPACE); } } - if (IsFastDoubleElementsKind(kind)) { - flags = static_cast<HAllocate::Flags>( - flags | HAllocate::ALLOCATE_DOUBLE_ALIGNED); - } HValue* elements = AddInstruction(new(zone) HAllocate(context, total_size, @@ -1456,6 +1429,63 @@ HValue* HGraphBuilder::BuildAllocateAndInitializeElements(HValue* context, } +HInnerAllocatedObject* HGraphBuilder::BuildJSArrayHeader(HValue* array, + HValue* array_map, + AllocationSiteMode mode, + HValue* allocation_site_payload, + HValue* length_field) { + + BuildStoreMap(array, array_map); + + HConstant* empty_fixed_array = + new(zone()) HConstant( + Handle<FixedArray>(isolate()->heap()->empty_fixed_array()), + Representation::Tagged()); + AddInstruction(empty_fixed_array); + + AddInstruction(new(zone()) HStoreNamedField(array, + isolate()->factory()->properties_field_symbol(), + empty_fixed_array, + true, + JSArray::kPropertiesOffset)); + + HInstruction* length_store = AddInstruction( + new(zone()) HStoreNamedField(array, + isolate()->factory()->length_field_string(), + length_field, + true, + JSArray::kLengthOffset)); + length_store->SetGVNFlag(kChangesArrayLengths); + + if (mode == TRACK_ALLOCATION_SITE) { + BuildCreateAllocationSiteInfo(array, + JSArray::kSize, + allocation_site_payload); + } + + int elements_location = JSArray::kSize; + if (mode == TRACK_ALLOCATION_SITE) { + elements_location += AllocationSiteInfo::kSize; + } + + HInnerAllocatedObject* elements = new(zone()) HInnerAllocatedObject( + array, + elements_location); + AddInstruction(elements); + + HInstruction* elements_store = AddInstruction( + new(zone()) HStoreNamedField( + array, + isolate()->factory()->elements_field_string(), + elements, + true, + JSArray::kElementsOffset)); + elements_store->SetGVNFlag(kChangesElementsPointer); + + return elements; +} + + HInstruction* HGraphBuilder::BuildStoreMap(HValue* object, HValue* map) { Zone* zone = this->zone(); @@ -1569,13 +1599,38 @@ void HGraphBuilder::BuildFillElementsWithHole(HValue* context, : AddInstruction(new(zone) HConstant(nan_double, Representation::Double())); - LoopBuilder builder(this, context, LoopBuilder::kPostIncrement); + // Special loop unfolding case + static const int kLoopUnfoldLimit = 4; + bool unfold_loop = false; + int initial_capacity = JSArray::kPreallocatedArrayElements; + if (from->IsConstant() && to->IsConstant() && + initial_capacity <= kLoopUnfoldLimit) { + HConstant* constant_from = HConstant::cast(from); + HConstant* constant_to = HConstant::cast(to); - HValue* key = builder.BeginBody(from, to, Token::LT); + if (constant_from->HasInteger32Value() && + constant_from->Integer32Value() == 0 && + constant_to->HasInteger32Value() && + constant_to->Integer32Value() == initial_capacity) { + unfold_loop = true; + } + } - AddInstruction(new(zone) HStoreKeyed(elements, key, hole, elements_kind)); + if (unfold_loop) { + for (int i = 0; i < initial_capacity; i++) { + HInstruction* key = AddInstruction(new(zone) + HConstant(i, Representation::Integer32())); + AddInstruction(new(zone) HStoreKeyed(elements, key, hole, elements_kind)); + } + } else { + LoopBuilder builder(this, context, LoopBuilder::kPostIncrement); - builder.EndBody(); + HValue* key = builder.BeginBody(from, to, Token::LT); + + AddInstruction(new(zone) HStoreKeyed(elements, key, hole, 
elements_kind)); + + builder.EndBody(); + } } @@ -1642,12 +1697,7 @@ HValue* HGraphBuilder::BuildCloneShallowArray(HContext* context, : FixedArray::SizeFor(length); } - HAllocate::Flags allocate_flags = HAllocate::CAN_ALLOCATE_IN_NEW_SPACE; - if (IsFastDoubleElementsKind(kind)) { - allocate_flags = static_cast<HAllocate::Flags>( - allocate_flags | HAllocate::ALLOCATE_DOUBLE_ALIGNED); - } - + HAllocate::Flags allocate_flags = HAllocate::DefaultFlags(kind); // Allocate both the JS array and the elements array in one big // allocation. This avoids multiple limit checks. HValue* size_in_bytes = @@ -1676,15 +1726,7 @@ HValue* HGraphBuilder::BuildCloneShallowArray(HContext* context, // Create an allocation site info if requested. if (mode == TRACK_ALLOCATION_SITE) { - HValue* alloc_site = - AddInstruction(new(zone) HInnerAllocatedObject(object, JSArray::kSize)); - Handle<Map> alloc_site_map(isolate()->heap()->allocation_site_info_map()); - BuildStoreMap(alloc_site, alloc_site_map); - int alloc_payload_offset = AllocationSiteInfo::kPayloadOffset; - AddInstruction(new(zone) HStoreNamedField(alloc_site, - factory->empty_string(), - boilerplate, - true, alloc_payload_offset)); + BuildCreateAllocationSiteInfo(object, JSArray::kSize, boilerplate); } if (length > 0) { @@ -1733,6 +1775,205 @@ HValue* HGraphBuilder::BuildCloneShallowArray(HContext* context, } +void HGraphBuilder::BuildCompareNil( + HValue* value, + EqualityKind kind, + CompareNilICStub::Types types, + Handle<Map> map, + int position, + HIfContinuation* continuation) { + IfBuilder if_nil(this, position); + bool needs_or = false; + if ((types & CompareNilICStub::kCompareAgainstNull) != 0) { + if (needs_or) if_nil.Or(); + if_nil.If<HCompareObjectEqAndBranch>(value, graph()->GetConstantNull()); + needs_or = true; + } + if ((types & CompareNilICStub::kCompareAgainstUndefined) != 0) { + if (needs_or) if_nil.Or(); + if_nil.If<HCompareObjectEqAndBranch>(value, + graph()->GetConstantUndefined()); + needs_or = true; + } + // Handle either undetectable or monomorphic, not both. + ASSERT(((types & CompareNilICStub::kCompareAgainstUndetectable) == 0) || + ((types & CompareNilICStub::kCompareAgainstMonomorphicMap) == 0)); + if ((types & CompareNilICStub::kCompareAgainstUndetectable) != 0) { + if (needs_or) if_nil.Or(); + if_nil.If<HIsUndetectableAndBranch>(value); + } else { + if_nil.Then(); + if_nil.Else(); + if ((types & CompareNilICStub::kCompareAgainstMonomorphicMap) != 0) { + BuildCheckNonSmi(value); + // For ICs, the map checked below is a sentinel map that gets replaced by + // the monomorphic map when the code is used as a template to generate a + // new IC. For optimized functions, there is no sentinel map, the map + // emitted below is the actual monomorphic map. 
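// [Editor's note: sketch, not part of the upstream patch.] BuildCompareNil
// lowers an "x == null"-style comparison into a disjunction driven by the
// recorded CompareNilICStub::Types bits, roughly:
//
//   value == null
//     || value == undefined
//     || value is undetectable      // when the IC observed undetectables
//   else branch: monomorphic map check, or a deopt for non-strict
//   equality when no useful type feedback was recorded.
//
// Fully monomorphic sites therefore never reach the generic compare stub.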
+ BuildCheckMap(value, map); + } else { + if (kind == kNonStrictEquality) { + if_nil.Deopt(); + } + } + } + + if_nil.CaptureContinuation(continuation); +} + + +HValue* HGraphBuilder::BuildCreateAllocationSiteInfo(HValue* previous_object, + int previous_object_size, + HValue* payload) { + HInnerAllocatedObject* alloc_site = new(zone()) + HInnerAllocatedObject(previous_object, previous_object_size); + AddInstruction(alloc_site); + Handle<Map> alloc_site_map(isolate()->heap()->allocation_site_info_map()); + BuildStoreMap(alloc_site, alloc_site_map); + AddInstruction(new(zone()) HStoreNamedField(alloc_site, + isolate()->factory()->payload_string(), + payload, + true, + AllocationSiteInfo::kPayloadOffset)); + return alloc_site; +} + + +HGraphBuilder::JSArrayBuilder::JSArrayBuilder(HGraphBuilder* builder, + ElementsKind kind, + HValue* allocation_site_payload, + AllocationSiteMode mode) : + builder_(builder), + kind_(kind), + allocation_site_payload_(allocation_site_payload) { + if (mode == DONT_TRACK_ALLOCATION_SITE) { + mode_ = mode; + } else { + mode_ = AllocationSiteInfo::GetMode(kind); + } +} + + +HValue* HGraphBuilder::JSArrayBuilder::EmitMapCode(HValue* context) { + // Get the global context, the native context, the map array + HInstruction* global_object = AddInstruction(new(zone()) + HGlobalObject(context)); + HInstruction* native_context = AddInstruction(new(zone()) + HLoadNamedField(global_object, true, GlobalObject::kNativeContextOffset)); + int offset = Context::kHeaderSize + + kPointerSize * Context::JS_ARRAY_MAPS_INDEX; + HInstruction* map_array = AddInstruction(new(zone()) + HLoadNamedField(native_context, true, offset)); + offset = kind_ * kPointerSize + FixedArrayBase::kHeaderSize; + return AddInstruction(new(zone()) HLoadNamedField(map_array, true, offset)); +} + + +HValue* HGraphBuilder::JSArrayBuilder::EstablishAllocationSize( + HValue* length_node) { + HValue* context = builder()->environment()->LookupContext(); + ASSERT(length_node != NULL); + + int base_size = JSArray::kSize; + if (mode_ == TRACK_ALLOCATION_SITE) { + base_size += AllocationSiteInfo::kSize; + } + + if (IsFastDoubleElementsKind(kind_)) { + base_size += FixedDoubleArray::kHeaderSize; + } else { + base_size += FixedArray::kHeaderSize; + } + + HInstruction* elements_size_value = new(zone()) + HConstant(elements_size(), Representation::Integer32()); + AddInstruction(elements_size_value); + HInstruction* mul = HMul::New(zone(), context, length_node, + elements_size_value); + mul->ChangeRepresentation(Representation::Integer32()); + mul->ClearFlag(HValue::kCanOverflow); + AddInstruction(mul); + + HInstruction* base = new(zone()) HConstant(base_size, + Representation::Integer32()); + AddInstruction(base); + HInstruction* total_size = HAdd::New(zone(), context, base, mul); + total_size->ChangeRepresentation(Representation::Integer32()); + total_size->ClearFlag(HValue::kCanOverflow); + AddInstruction(total_size); + return total_size; +} + + +HValue* HGraphBuilder::JSArrayBuilder::EstablishEmptyArrayAllocationSize() { + int base_size = JSArray::kSize; + if (mode_ == TRACK_ALLOCATION_SITE) { + base_size += AllocationSiteInfo::kSize; + } + + base_size += IsFastDoubleElementsKind(kind_) + ? 
FixedDoubleArray::SizeFor(initial_capacity()) + : FixedArray::SizeFor(initial_capacity()); + + HConstant* array_size = + new(zone()) HConstant(base_size, Representation::Integer32()); + AddInstruction(array_size); + return array_size; +} + + +HValue* HGraphBuilder::JSArrayBuilder::AllocateEmptyArray() { + HValue* size_in_bytes = EstablishEmptyArrayAllocationSize(); + HConstant* capacity = + new(zone()) HConstant(initial_capacity(), Representation::Integer32()); + AddInstruction(capacity); + return AllocateArray(size_in_bytes, + capacity, + builder()->graph()->GetConstant0(), + true); +} + + +HValue* HGraphBuilder::JSArrayBuilder::AllocateArray(HValue* capacity, + HValue* length_field, + bool fill_with_hole) { + HValue* size_in_bytes = EstablishAllocationSize(capacity); + return AllocateArray(size_in_bytes, capacity, length_field, fill_with_hole); +} + + +HValue* HGraphBuilder::JSArrayBuilder::AllocateArray(HValue* size_in_bytes, + HValue* capacity, + HValue* length_field, + bool fill_with_hole) { + HValue* context = builder()->environment()->LookupContext(); + + // Allocate (dealing with failure appropriately) + HAllocate::Flags flags = HAllocate::DefaultFlags(kind_); + HAllocate* new_object = new(zone()) HAllocate(context, size_in_bytes, + HType::JSArray(), flags); + AddInstruction(new_object); + + // Fill in the fields: map, properties, length + HValue* map = EmitMapCode(context); + elements_location_ = builder()->BuildJSArrayHeader(new_object, + map, + mode_, + allocation_site_payload_, + length_field); + + // Initialize the elements + builder()->BuildInitializeElements(elements_location_, kind_, capacity); + + if (fill_with_hole) { + builder()->BuildFillElementsWithHole(context, elements_location_, kind_, + graph()->GetConstant0(), capacity); + } + + return new_object; +} + + HOptimizedGraphBuilder::HOptimizedGraphBuilder(CompilationInfo* info, TypeFeedbackOracle* oracle) : HGraphBuilder(info), @@ -3506,7 +3747,12 @@ void HInferRepresentation::Analyze() { } } - // (3a) Use the phi reachability information from step 2 to + // Simplify constant phi inputs where possible. + for (int i = 0; i < phi_count; ++i) { + phi_list->at(i)->SimplifyConstantInputs(); + } + + // Use the phi reachability information from step 2 to // push information about values which can't be converted to integer // without deoptimization through the phi use-def chains, avoiding // unnecessary deoptimizations later. @@ -3523,7 +3769,7 @@ void HInferRepresentation::Analyze() { } } - // (3b) Use the phi reachability information from step 2 to + // Use the phi reachability information from step 2 to // sum up the non-phi use counts of all connected phis. 
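// [Editor's note: sketch, not part of the upstream patch.] The
// JSArrayBuilder::Establish* helpers earlier in this hunk size one
// contiguous allocation covering the header, the optional site info, and
// the backing store:
//
//   size = JSArray::kSize
//        + (mode_ == TRACK_ALLOCATION_SITE ? AllocationSiteInfo::kSize : 0)
//        + Fixed(Double)Array header
//        + capacity * (double kind ? kDoubleSize : kPointerSize);
//
// Folding everything into a single HAllocate leaves one new-space limit
// check instead of one per sub-object.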
for (int i = 0; i < phi_count; ++i) { HPhi* phi = phi_list->at(i); @@ -6412,9 +6658,11 @@ void HOptimizedGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) { pointer_size, DONT_TRACK_ALLOCATION_SITE); } else { + Handle<FixedArray> closure_literals(closure->literals(), isolate()); literal = AddInstruction( new(zone()) HObjectLiteral(context, expr->constant_properties(), + closure_literals, expr->fast_elements(), expr->literal_index(), expr->depth(), @@ -6503,7 +6751,7 @@ void HOptimizedGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) { HValue* context = environment()->LookupContext(); HInstruction* literal; - Handle<FixedArray> literals(environment()->closure()->literals()); + Handle<FixedArray> literals(environment()->closure()->literals(), isolate()); Handle<Object> raw_boilerplate(literals->get(expr->literal_index()), isolate()); @@ -6555,6 +6803,7 @@ void HOptimizedGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) { literal = AddInstruction( new(zone()) HArrayLiteral(context, original_boilerplate_object, + literals, length, expr->literal_index(), expr->depth(), @@ -6652,7 +6901,7 @@ static int ComputeLoadStoreFieldIndex(Handle<Map> type, void HOptimizedGraphBuilder::AddCheckMap(HValue* object, Handle<Map> map) { AddInstruction(new(zone()) HCheckNonSmi(object)); - AddInstruction(new(zone()) HCheckMaps(object, map, zone())); + AddInstruction(HCheckMaps::New(object, map, zone())); } @@ -6781,7 +7030,7 @@ bool HOptimizedGraphBuilder::HandlePolymorphicArrayLengthLoad( AddInstruction(new(zone()) HCheckNonSmi(object)); HInstruction* typecheck = - AddInstruction(new(zone()) HCheckMaps(object, types, zone())); + AddInstruction(HCheckMaps::New(object, types, zone())); HInstruction* instr = HLoadNamedField::NewArrayLength(zone(), object, typecheck); instr->set_position(expr->position()); @@ -6833,7 +7082,7 @@ void HOptimizedGraphBuilder::HandlePolymorphicLoadNamedField(Property* expr, AddInstruction(new(zone()) HCheckNonSmi(object)); HInstruction* instr; if (count == types->length() && is_monomorphic_field) { - AddInstruction(new(zone()) HCheckMaps(object, types, zone())); + AddInstruction(HCheckMaps::New(object, types, zone())); instr = BuildLoadNamedField(object, map, &lookup); } else { HValue* context = environment()->LookupContext(); @@ -7510,8 +7759,7 @@ HInstruction* HOptimizedGraphBuilder::BuildMonomorphicElementAccess( Handle<Map> map, bool is_store, KeyedAccessStoreMode store_mode) { - HCheckMaps* mapcheck = new(zone()) HCheckMaps(object, map, - zone(), dependency); + HCheckMaps* mapcheck = HCheckMaps::New(object, map, zone(), dependency); AddInstruction(mapcheck); if (dependency) { mapcheck->ClearGVNFlag(kDependsOnElementsKind); @@ -7568,7 +7816,7 @@ HInstruction* HOptimizedGraphBuilder::TryBuildConsolidatedElementLoad( } if (!has_double_maps && !has_smi_or_object_maps) return NULL; - HCheckMaps* check_maps = new(zone()) HCheckMaps(object, maps, zone()); + HCheckMaps* check_maps = HCheckMaps::New(object, maps, zone()); AddInstruction(check_maps); HInstruction* instr = BuildUncheckedMonomorphicElementAccess( object, key, val, check_maps, @@ -7720,7 +7968,7 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess( HInstruction* access; if (IsFastElementsKind(elements_kind)) { if (is_store && !IsFastDoubleElementsKind(elements_kind)) { - AddInstruction(new(zone()) HCheckMaps( + AddInstruction(HCheckMaps::New( elements, isolate()->factory()->fixed_array_map(), zone(), elements_kind_branch)); } @@ -7754,10 +8002,12 @@ HValue* 
HOptimizedGraphBuilder::HandlePolymorphicElementAccess( } *has_side_effects |= access->HasObservableSideEffects(); + // The caller will use has_side_effects and add correct Simulate. + access->SetFlag(HValue::kHasNoObservableSideEffects); if (position != -1) { access->set_position(position); } - if_jsarray->Goto(join); + if_jsarray->GotoNoSimulate(join); set_current_block(if_fastobject); length = AddInstruction(new(zone()) HFixedArrayBaseLength(elements)); @@ -7777,18 +8027,19 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess( elements_kind_branch, elements_kind, is_store)); } *has_side_effects |= access->HasObservableSideEffects(); + // The caller will use has_side_effects and add correct Simulate. + access->SetFlag(HValue::kHasNoObservableSideEffects); if (position != RelocInfo::kNoPosition) access->set_position(position); if (!is_store) { Push(access); } - current_block()->Goto(join); + current_block()->GotoNoSimulate(join); set_current_block(if_false); } } // Deopt if none of the cases matched. current_block()->FinishExitWithDeoptimization(HDeoptimize::kNoUses); - join->SetJoinId(ast_id); set_current_block(join); return is_store ? NULL : Pop(); } @@ -8067,14 +8318,12 @@ class FunctionSorter { }; -static int CompareHotness(void const* a, void const* b) { - FunctionSorter const* function1 = reinterpret_cast<FunctionSorter const*>(a); - FunctionSorter const* function2 = reinterpret_cast<FunctionSorter const*>(b); - int diff = function1->ticks() - function2->ticks(); - if (diff != 0) return -diff; - diff = function1->ast_length() - function2->ast_length(); - if (diff != 0) return diff; - return function1->src_length() - function2->src_length(); +inline bool operator<(const FunctionSorter& lhs, const FunctionSorter& rhs) { + int diff = lhs.ticks() - rhs.ticks(); + if (diff != 0) return diff > 0; + diff = lhs.ast_length() - rhs.ast_length(); + if (diff != 0) return diff < 0; + return lhs.src_length() < rhs.src_length(); } @@ -8117,10 +8366,7 @@ void HOptimizedGraphBuilder::HandlePolymorphicCallNamed( } } - qsort(reinterpret_cast<void*>(&order[0]), - ordered_functions, - sizeof(order[0]), - &CompareHotness); + std::sort(order, order + ordered_functions); HBasicBlock* number_block = NULL; @@ -8697,6 +8943,18 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinFunctionCall(Call* expr, return true; } break; + case kMathImul: + if (expr->arguments()->length() == 2) { + HValue* right = Pop(); + HValue* left = Pop(); + Drop(1); // Receiver. + HValue* context = environment()->LookupContext(); + HInstruction* op = HMul::NewImul(zone(), context, left, right); + if (drop_extra) Drop(1); // Optionally drop the function. + ast_context()->ReturnInstruction(op, expr->id()); + return true; + } + break; default: // Not supported for inlining yet. break; @@ -8844,6 +9102,18 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall( return true; } break; + case kMathImul: + if (argument_count == 3 && check_type == RECEIVER_MAP_CHECK) { + AddCheckConstantFunction(expr->holder(), receiver, receiver_map); + HValue* right = Pop(); + HValue* left = Pop(); + Drop(1); // Receiver. + HValue* context = environment()->LookupContext(); + HInstruction* result = HMul::NewImul(zone(), context, left, right); + ast_context()->ReturnInstruction(result, expr->id()); + return true; + } + break; default: // Not yet supported for inlining. 
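// [Editor's note: not part of the upstream patch.] The FunctionSorter
// change above swaps qsort plus a three-way comparator for std::sort with
// a strict weak ordering: operator< returns true when lhs should come
// first (more ticks, then shorter AST, then shorter source), so
//
//   std::sort(order, order + ordered_functions);
//
// visits the hottest inlining candidates first. Unlike qsort, std::sort
// can inline the comparison and needs no void* round-trips.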
break; @@ -9276,19 +9546,31 @@ void HOptimizedGraphBuilder::VisitCallNew(CallNew* expr) { } else { // The constructor function is both an operand to the instruction and an // argument to the construct call. + bool use_call_new_array = FLAG_optimize_constructed_arrays && + !(expr->target().is_null()) && + *(expr->target()) == isolate()->global_context()->array_function(); + CHECK_ALIVE(VisitArgument(expr->expression())); HValue* constructor = HPushArgument::cast(Top())->argument(); CHECK_ALIVE(VisitArgumentList(expr->arguments())); HCallNew* call; - if (FLAG_optimize_constructed_arrays && - !(expr->target().is_null()) && - *(expr->target()) == isolate()->global_context()->array_function()) { + if (use_call_new_array) { + AddInstruction(new(zone()) HCheckFunction(constructor, + Handle<JSFunction>(isolate()->global_context()->array_function()))); Handle<Object> feedback = oracle()->GetInfo(expr->CallNewFeedbackId()); ASSERT(feedback->IsSmi()); + + // TODO(mvstanton): It would be better to use the already created global + // property cell that is shared by full code gen. That way, any transition + // information that happened after crankshaft won't be lost. The right + // way to do that is to begin passing the cell to the type feedback oracle + // instead of just the value in the cell. Do this in a follow-up checkin. Handle<JSGlobalPropertyCell> cell = isolate()->factory()->NewJSGlobalPropertyCell(feedback); - AddInstruction(new(zone()) HCheckFunction(constructor, - Handle<JSFunction>(isolate()->global_context()->array_function()))); + + // TODO(mvstanton): Here we should probably insert code to check if the + // type cell elements kind is different from when we compiled, and deopt + // in that case. Do this in a follow-up checin. call = new(zone()) HCallNewArray(context, constructor, argument_count, cell); } else { @@ -9433,7 +9715,8 @@ void HOptimizedGraphBuilder::VisitSub(UnaryOperation* expr) { info = TypeInfo::Unknown(); } if (instr->IsBinaryOperation()) { - HBinaryOperation::cast(instr)->set_observed_input_representation(rep, rep); + HBinaryOperation::cast(instr)->set_observed_input_representation(1, rep); + HBinaryOperation::cast(instr)->set_observed_input_representation(2, rep); } return ast_context()->ReturnInstruction(instr, expr->id()); } @@ -9874,7 +10157,8 @@ HInstruction* HOptimizedGraphBuilder::BuildBinaryOperation( if (instr->IsBinaryOperation()) { HBinaryOperation* binop = HBinaryOperation::cast(instr); - binop->set_observed_input_representation(left_rep, right_rep); + binop->set_observed_input_representation(1, left_rep); + binop->set_observed_input_representation(2, right_rep); binop->initialize_output_representation(result_rep); } return instr; @@ -10254,7 +10538,8 @@ void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) { if (combined_rep.IsTagged() || combined_rep.IsNone()) { HCompareGeneric* result = new(zone()) HCompareGeneric(context, left, right, op); - result->set_observed_input_representation(left_rep, right_rep); + result->set_observed_input_representation(1, left_rep); + result->set_observed_input_representation(2, right_rep); result->set_position(expr->position()); return ast_context()->ReturnInstruction(result, expr->id()); } else { @@ -10276,9 +10561,24 @@ void HOptimizedGraphBuilder::HandleLiteralCompareNil(CompareOperation* expr, ASSERT(current_block()->HasPredecessor()); EqualityKind kind = expr->op() == Token::EQ_STRICT ? 
kStrictEquality : kNonStrictEquality; - HIsNilAndBranch* instr = new(zone()) HIsNilAndBranch(value, kind, nil); - instr->set_position(expr->position()); - return ast_context()->ReturnControl(instr, expr->id()); + HIfContinuation continuation; + TypeFeedbackId id = expr->CompareOperationFeedbackId(); + CompareNilICStub::Types types; + if (kind == kStrictEquality) { + if (nil == kNullValue) { + types = CompareNilICStub::kCompareAgainstNull; + } else { + types = CompareNilICStub::kCompareAgainstUndefined; + } + } else { + types = static_cast<CompareNilICStub::Types>( + oracle()->CompareNilTypes(id)); + if (types == 0) types = CompareNilICStub::kFullCompare; + } + Handle<Map> map_handle(oracle()->CompareNilMonomorphicReceiverType(id)); + BuildCompareNil(value, kind, types, map_handle, + expr->position(), &continuation); + return ast_context()->ReturnContinuation(&continuation, expr->id()); } @@ -10401,15 +10701,7 @@ void HOptimizedGraphBuilder::BuildEmitDeepCopy( // Build Allocation Site Info if desired if (create_allocation_site_info) { - HValue* alloc_site = - AddInstruction(new(zone) HInnerAllocatedObject(target, JSArray::kSize)); - Handle<Map> alloc_site_map(isolate()->heap()->allocation_site_info_map()); - BuildStoreMap(alloc_site, alloc_site_map); - int alloc_payload_offset = AllocationSiteInfo::kPayloadOffset; - AddInstruction(new(zone) HStoreNamedField(alloc_site, - factory->payload_string(), - original_boilerplate, - true, alloc_payload_offset)); + BuildCreateAllocationSiteInfo(target, JSArray::kSize, original_boilerplate); } if (object_elements != NULL) { @@ -11201,6 +11493,17 @@ void HOptimizedGraphBuilder::GenerateFastAsciiArrayJoin(CallRuntime* call) { } +// Support for generators. +void HOptimizedGraphBuilder::GenerateGeneratorSend(CallRuntime* call) { + return Bailout("inlined runtime function: GeneratorSend"); +} + + +void HOptimizedGraphBuilder::GenerateGeneratorThrow(CallRuntime* call) { + return Bailout("inlined runtime function: GeneratorThrow"); +} + + #undef CHECK_BAILOUT #undef CHECK_ALIVE @@ -11521,16 +11824,14 @@ void HTracer::TraceCompilation(CompilationInfo* info) { void HTracer::TraceLithium(const char* name, LChunk* chunk) { ASSERT(!FLAG_parallel_recompilation); - HandleDereferenceGuard allow_handle_deref(chunk->isolate(), - HandleDereferenceGuard::ALLOW); + ALLOW_HANDLE_DEREF(chunk->isolate(), "debug output"); Trace(name, chunk->graph(), chunk); } void HTracer::TraceHydrogen(const char* name, HGraph* graph) { ASSERT(!FLAG_parallel_recompilation); - HandleDereferenceGuard allow_handle_deref(graph->isolate(), - HandleDereferenceGuard::ALLOW); + ALLOW_HANDLE_DEREF(graph->isolate(), "debug output"); Trace(name, graph, NULL); } diff --git a/deps/v8/src/hydrogen.h b/deps/v8/src/hydrogen.h index 809942b065..ab721bd3af 100644 --- a/deps/v8/src/hydrogen.h +++ b/deps/v8/src/hydrogen.h @@ -36,6 +36,7 @@ #include "hydrogen-instructions.h" #include "type-info.h" #include "zone.h" +#include "scopes.h" namespace v8 { namespace internal { @@ -304,10 +305,13 @@ class HGraph: public ZoneObject { HConstant* GetConstantUndefined() const { return undefined_constant_.get(); } HConstant* GetConstant0(); HConstant* GetConstant1(); + HConstant* GetConstantSmi0(); + HConstant* GetConstantSmi1(); HConstant* GetConstantMinus1(); HConstant* GetConstantTrue(); HConstant* GetConstantFalse(); HConstant* GetConstantHole(); + HConstant* GetConstantNull(); HConstant* GetInvalidContext(); HBasicBlock* CreateBasicBlock(); @@ -395,6 +399,8 @@ class HGraph: public ZoneObject { private: HConstant* 
GetConstantInt32(SetOncePointer<HConstant>* pointer, int32_t integer_value); + HConstant* GetConstantSmi(SetOncePointer<HConstant>* pointer, + int32_t integer_value); void MarkAsDeoptimizingRecursively(HBasicBlock* block); void NullifyUnreachableInstructions(); @@ -424,10 +430,13 @@ class HGraph: public ZoneObject { SetOncePointer<HConstant> undefined_constant_; SetOncePointer<HConstant> constant_0_; SetOncePointer<HConstant> constant_1_; + SetOncePointer<HConstant> constant_smi_0_; + SetOncePointer<HConstant> constant_smi_1_; SetOncePointer<HConstant> constant_minus1_; SetOncePointer<HConstant> constant_true_; SetOncePointer<HConstant> constant_false_; SetOncePointer<HConstant> constant_the_hole_; + SetOncePointer<HConstant> constant_null_; SetOncePointer<HConstant> constant_invalid_context_; SetOncePointer<HArgumentsObject> arguments_object_; @@ -890,7 +899,6 @@ class HIfContinuation { HBasicBlock* false_branch, int position) { ASSERT(!continuation_captured_); - ASSERT(true_branch != NULL || false_branch != NULL); true_branch_ = true_branch; false_branch_ = false_branch; position_ = position; @@ -940,6 +948,10 @@ class HGraphBuilder { HGraph* CreateGraph(); + // Bailout environment manipulation. + void Push(HValue* value) { environment()->Push(value); } + HValue* Pop() { return environment()->Pop(); } + // Adding instructions. HInstruction* AddInstruction(HInstruction* instr); void AddSimulate(BailoutId id, @@ -1013,27 +1025,6 @@ class HGraphBuilder { HInstruction* BuildStoreMap(HValue* object, HValue* map); HInstruction* BuildStoreMap(HValue* object, Handle<Map> map); - class CheckBuilder { - public: - explicit CheckBuilder(HGraphBuilder* builder); - ~CheckBuilder() { - if (!finished_) End(); - } - - HValue* CheckNotUndefined(HValue* value); - HValue* CheckIntegerCompare(HValue* left, HValue* right, Token::Value op); - HValue* CheckIntegerEq(HValue* left, HValue* right); - void End(); - - private: - Zone* zone() { return builder_->zone(); } - - HGraphBuilder* builder_; - bool finished_; - HBasicBlock* failure_block_; - HBasicBlock* merge_block_; - }; - class IfBuilder { public: explicit IfBuilder(HGraphBuilder* builder, @@ -1067,7 +1058,17 @@ class HGraphBuilder { return compare; } - template<class Condition> + template<class Condition, class P2> + HInstruction* IfNot(HValue* p1, P2 p2) { + HControlInstruction* compare = new(zone()) Condition(p1, p2); + AddCompare(compare); + HBasicBlock* block0 = compare->SuccessorAt(0); + HBasicBlock* block1 = compare->SuccessorAt(1); + compare->SetSuccessorAt(0, block1); + compare->SetSuccessorAt(1, block0); + return compare; + } + HInstruction* OrIfCompare( HValue* p1, HValue* p2, @@ -1094,7 +1095,6 @@ class HGraphBuilder { return If<Condition>(p1, p2); } - template<class Condition> HInstruction* AndIfCompare( HValue* p1, HValue* p2, @@ -1131,6 +1131,13 @@ class HGraphBuilder { void End(); void Deopt(); + void ElseDeopt() { + Else(); + Deopt(); + End(); + } + + void Return(HValue* value); private: void AddCompare(HControlInstruction* compare); @@ -1142,8 +1149,6 @@ class HGraphBuilder { bool finished_ : 1; bool did_then_ : 1; bool did_else_ : 1; - bool deopt_then_ : 1; - bool deopt_else_ : 1; bool did_and_ : 1; bool did_or_ : 1; bool captured_ : 1; @@ -1212,6 +1217,46 @@ class HGraphBuilder { void BuildNewSpaceArrayCheck(HValue* length, ElementsKind kind); + class JSArrayBuilder { + public: + JSArrayBuilder(HGraphBuilder* builder, + ElementsKind kind, + HValue* allocation_site_payload, + AllocationSiteMode mode); + + HValue* AllocateEmptyArray(); + 
HValue* AllocateArray(HValue* capacity, HValue* length_field,
+ bool fill_with_hole);
+ HValue* GetElementsLocation() { return elements_location_; }
+
+ private:
+ Zone* zone() const { return builder_->zone(); }
+ int elements_size() const {
+ return IsFastDoubleElementsKind(kind_) ? kDoubleSize : kPointerSize;
+ }
+ HInstruction* AddInstruction(HInstruction* instr) {
+ return builder_->AddInstruction(instr);
+ }
+ HGraphBuilder* builder() { return builder_; }
+ HGraph* graph() { return builder_->graph(); }
+ int initial_capacity() {
+ STATIC_ASSERT(JSArray::kPreallocatedArrayElements > 0);
+ return JSArray::kPreallocatedArrayElements;
+ }
+
+ HValue* EmitMapCode(HValue* context);
+ HValue* EstablishEmptyArrayAllocationSize();
+ HValue* EstablishAllocationSize(HValue* length_node);
+ HValue* AllocateArray(HValue* size_in_bytes, HValue* capacity,
+ HValue* length_field, bool fill_with_hole);
+
+ HGraphBuilder* builder_;
+ ElementsKind kind_;
+ AllocationSiteMode mode_;
+ HValue* allocation_site_payload_;
+ HInnerAllocatedObject* elements_location_;
+ };
+
 HValue* BuildAllocateElements(HValue* context,
 ElementsKind kind,
 HValue* capacity);
@@ -1224,6 +1269,16 @@ class HGraphBuilder {
 ElementsKind kind,
 HValue* capacity);
+ // The array must have been allocated with enough room for
+ // 1) the JSArray, 2) an AllocationSiteInfo if mode requires it,
+ // 3) a FixedArray or FixedDoubleArray.
+ // A pointer to the Fixed(Double)Array is returned.
+ HInnerAllocatedObject* BuildJSArrayHeader(HValue* array,
+ HValue* array_map,
+ AllocationSiteMode mode,
+ HValue* allocation_site_payload,
+ HValue* length_field);
+
 HValue* BuildGrowElementsCapacity(HValue* object,
 HValue* elements,
 ElementsKind kind,
@@ -1250,6 +1305,18 @@ class HGraphBuilder {
 ElementsKind kind,
 int length);
+ void BuildCompareNil(
+ HValue* value,
+ EqualityKind kind,
+ CompareNilICStub::Types types,
+ Handle<Map> map,
+ int position,
+ HIfContinuation* continuation);
+
+ HValue* BuildCreateAllocationSiteInfo(HValue* previous_object,
+ int previous_object_size,
+ HValue* payload);
+
 private:
 HGraphBuilder();
 CompilationInfo* info_;
@@ -1328,10 +1395,6 @@ class HOptimizedGraphBuilder: public HGraphBuilder, public AstVisitor {
 void AddSoftDeoptimize();
- // Bailout environment manipulation.
- void Push(HValue* value) { environment()->Push(value); }
- HValue* Pop() { return environment()->Pop(); }
-
 void Bailout(const char* reason);
 HBasicBlock* CreateJoin(HBasicBlock* first,
diff --git a/deps/v8/src/ia32/assembler-ia32-inl.h b/deps/v8/src/ia32/assembler-ia32-inl.h
index fbe2f242d6..cccacf7820 100644
--- a/deps/v8/src/ia32/assembler-ia32-inl.h
+++ b/deps/v8/src/ia32/assembler-ia32-inl.h
@@ -330,9 +330,14 @@ Immediate::Immediate(Label* internal_offset) {
 Immediate::Immediate(Handle<Object> handle) {
+#ifdef DEBUG
+ Isolate* isolate = Isolate::Current();
+#endif
+ ALLOW_HANDLE_DEREF(isolate,
+ "using and embedding raw address, heap object check");
 // Verify all Objects referred by code are NOT in new space.
 Object* obj = *handle;
- ASSERT(!HEAP->InNewSpace(obj));
+ ASSERT(!isolate->heap()->InNewSpace(obj));
 if (obj->IsHeapObject()) {
 x_ = reinterpret_cast<intptr_t>(handle.location());
 rmode_ = RelocInfo::EMBEDDED_OBJECT;
@@ -363,6 +368,7 @@ void Assembler::emit(uint32_t x) {
 void Assembler::emit(Handle<Object> handle) {
+ ALLOW_HANDLE_DEREF(isolate(), "heap object check");
 // Verify all Objects referred by code are NOT in new space.
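// (New-space objects move on every scavenge, and relocation entries inside
// generated code are not rewritten then, so embedded addresses must point
// into old space.)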
Object* obj = *handle; ASSERT(!isolate()->heap()->InNewSpace(obj)); @@ -386,6 +392,14 @@ void Assembler::emit(uint32_t x, RelocInfo::Mode rmode, TypeFeedbackId id) { } +void Assembler::emit(Handle<Code> code, + RelocInfo::Mode rmode, + TypeFeedbackId id) { + ALLOW_HANDLE_DEREF(isolate(), "embedding raw address"); + emit(reinterpret_cast<intptr_t>(code.location()), rmode, id); +} + + void Assembler::emit(const Immediate& x) { if (x.rmode_ == RelocInfo::INTERNAL_REFERENCE) { Label* label = reinterpret_cast<Label*>(x.x_); diff --git a/deps/v8/src/ia32/assembler-ia32.cc b/deps/v8/src/ia32/assembler-ia32.cc index 32fe6a9c10..7b32f1b1de 100644 --- a/deps/v8/src/ia32/assembler-ia32.cc +++ b/deps/v8/src/ia32/assembler-ia32.cc @@ -1459,7 +1459,7 @@ void Assembler::call(Handle<Code> code, EnsureSpace ensure_space(this); ASSERT(RelocInfo::IsCodeTarget(rmode)); EMIT(0xE8); - emit(reinterpret_cast<intptr_t>(code.location()), rmode, ast_id); + emit(code, rmode, ast_id); } @@ -1513,7 +1513,7 @@ void Assembler::jmp(Handle<Code> code, RelocInfo::Mode rmode) { EnsureSpace ensure_space(this); ASSERT(RelocInfo::IsCodeTarget(rmode)); EMIT(0xE9); - emit(reinterpret_cast<intptr_t>(code.location()), rmode); + emit(code, rmode); } @@ -1568,7 +1568,7 @@ void Assembler::j(Condition cc, Handle<Code> code) { // 0000 1111 1000 tttn #32-bit disp EMIT(0x0F); EMIT(0x80 | cc); - emit(reinterpret_cast<intptr_t>(code.location()), RelocInfo::CODE_TARGET); + emit(code, RelocInfo::CODE_TARGET); } diff --git a/deps/v8/src/ia32/assembler-ia32.h b/deps/v8/src/ia32/assembler-ia32.h index d1c1fbf003..8ded78558b 100644 --- a/deps/v8/src/ia32/assembler-ia32.h +++ b/deps/v8/src/ia32/assembler-ia32.h @@ -411,6 +411,7 @@ class Operand BASE_EMBEDDED { } static Operand Cell(Handle<JSGlobalPropertyCell> cell) { + ALLOW_HANDLE_DEREF(Isolate::Current(), "embedding raw address"); return Operand(reinterpret_cast<int32_t>(cell.location()), RelocInfo::GLOBAL_PROPERTY_CELL); } @@ -1149,6 +1150,9 @@ class Assembler : public AssemblerBase { inline void emit(uint32_t x, RelocInfo::Mode rmode, TypeFeedbackId id = TypeFeedbackId::None()); + inline void emit(Handle<Code> code, + RelocInfo::Mode rmode, + TypeFeedbackId id = TypeFeedbackId::None()); inline void emit(const Immediate& x); inline void emit_w(const Immediate& x); diff --git a/deps/v8/src/ia32/builtins-ia32.cc b/deps/v8/src/ia32/builtins-ia32.cc index 08bc22784e..c6e10f4b41 100644 --- a/deps/v8/src/ia32/builtins-ia32.cc +++ b/deps/v8/src/ia32/builtins-ia32.cc @@ -1207,9 +1207,9 @@ static void AllocateJSArray(MacroAssembler* masm, // that for a construct call the constructor function in edi needs to be // preserved for entering the generic code. In both cases argc in eax needs to // be preserved. 
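// (The 'static' qualifier is dropped below and a declaration is added to
// code-stubs-ia32.h, so the array constructor stubs can reuse this code as
// their fallback path.)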
-static void ArrayNativeCode(MacroAssembler* masm, - bool construct_call, - Label* call_generic_code) { +void ArrayNativeCode(MacroAssembler* masm, + bool construct_call, + Label* call_generic_code) { Label argc_one_or_more, argc_two_or_more, prepare_generic_code_call, empty_array, not_empty_array, finish, cant_transition_map, not_double; @@ -1494,7 +1494,7 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) { } -void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) { +void Builtins::Generate_CommonArrayConstructCode(MacroAssembler* masm) { // ----------- S t a t e ------------- // -- eax : argc // -- ebx : type info cell @@ -1513,50 +1513,18 @@ void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) { __ Assert(not_zero, "Unexpected initial map for Array function"); __ CmpObjectType(ecx, MAP_TYPE, ecx); __ Assert(equal, "Unexpected initial map for Array function"); - - if (FLAG_optimize_constructed_arrays) { - // We should either have undefined in ebx or a valid jsglobalpropertycell - Label okay_here; - Handle<Object> undefined_sentinel( - masm->isolate()->heap()->undefined_value(), masm->isolate()); - Handle<Map> global_property_cell_map( - masm->isolate()->heap()->global_property_cell_map()); - __ cmp(ebx, Immediate(undefined_sentinel)); - __ j(equal, &okay_here); - __ cmp(FieldOperand(ebx, 0), Immediate(global_property_cell_map)); - __ Assert(equal, "Expected property cell in register ebx"); - __ bind(&okay_here); - } } - if (FLAG_optimize_constructed_arrays) { - Label not_zero_case, not_one_case; - __ test(eax, eax); - __ j(not_zero, ¬_zero_case); - ArrayNoArgumentConstructorStub no_argument_stub; - __ TailCallStub(&no_argument_stub); - - __ bind(¬_zero_case); - __ cmp(eax, 1); - __ j(greater, ¬_one_case); - ArraySingleArgumentConstructorStub single_argument_stub; - __ TailCallStub(&single_argument_stub); - - __ bind(¬_one_case); - ArrayNArgumentsConstructorStub n_argument_stub; - __ TailCallStub(&n_argument_stub); - } else { - Label generic_constructor; - // Run the native code for the Array function called as constructor. - ArrayNativeCode(masm, true, &generic_constructor); - - // Jump to the generic construct code in case the specialized code cannot - // handle the construction. - __ bind(&generic_constructor); - Handle<Code> generic_construct_stub = - masm->isolate()->builtins()->JSConstructStubGeneric(); - __ jmp(generic_construct_stub, RelocInfo::CODE_TARGET); - } + Label generic_constructor; + // Run the native code for the Array function called as constructor. + ArrayNativeCode(masm, true, &generic_constructor); + + // Jump to the generic construct code in case the specialized code cannot + // handle the construction. 
+ __ bind(&generic_constructor); + Handle<Code> generic_construct_stub = + masm->isolate()->builtins()->JSConstructStubGeneric(); + __ jmp(generic_construct_stub, RelocInfo::CODE_TARGET); } diff --git a/deps/v8/src/ia32/code-stubs-ia32.cc b/deps/v8/src/ia32/code-stubs-ia32.cc index 84fe688f72..96d2411865 100644 --- a/deps/v8/src/ia32/code-stubs-ia32.cc +++ b/deps/v8/src/ia32/code-stubs-ia32.cc @@ -101,16 +101,21 @@ void TransitionElementsKindStub::InitializeInterfaceDescriptor( } -static void InitializeArrayConstructorDescriptor(Isolate* isolate, - CodeStubInterfaceDescriptor* descriptor) { +static void InitializeArrayConstructorDescriptor( + Isolate* isolate, + CodeStubInterfaceDescriptor* descriptor, + int constant_stack_parameter_count) { // register state - // edi -- constructor function + // eax -- number of arguments // ebx -- type info cell with elements kind - // eax -- number of arguments to the constructor function - static Register registers[] = { edi, ebx }; - descriptor->register_param_count_ = 2; - // stack param count needs (constructor pointer, and single argument) - descriptor->stack_parameter_count_ = &eax; + static Register registers[] = { ebx }; + descriptor->register_param_count_ = 1; + + if (constant_stack_parameter_count != 0) { + // stack param count needs (constructor pointer, and single argument) + descriptor->stack_parameter_count_ = &eax; + } + descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count; descriptor->register_params_ = registers; descriptor->function_mode_ = JS_FUNCTION_STUB_MODE; descriptor->deoptimization_handler_ = @@ -121,26 +126,64 @@ static void InitializeArrayConstructorDescriptor(Isolate* isolate, void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor( Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { - InitializeArrayConstructorDescriptor(isolate, descriptor); + InitializeArrayConstructorDescriptor(isolate, descriptor, 0); } void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor( Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { - InitializeArrayConstructorDescriptor(isolate, descriptor); + InitializeArrayConstructorDescriptor(isolate, descriptor, 1); } void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor( Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { - InitializeArrayConstructorDescriptor(isolate, descriptor); + InitializeArrayConstructorDescriptor(isolate, descriptor, -1); +} + + +void CompareNilICStub::InitializeInterfaceDescriptor( + Isolate* isolate, + CodeStubInterfaceDescriptor* descriptor) { + static Register registers[] = { eax }; + descriptor->register_param_count_ = 1; + descriptor->register_params_ = registers; + descriptor->deoptimization_handler_ = + FUNCTION_ADDR(CompareNilIC_Miss); + descriptor->miss_handler_ = + ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate); } #define __ ACCESS_MASM(masm) + +void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) { + // Update the static counter each time a new code stub is generated. + Isolate* isolate = masm->isolate(); + isolate->counters()->code_stubs()->Increment(); + + CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor(isolate); + int param_count = descriptor->register_param_count_; + { + // Call the runtime system in a fresh internal frame. 
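+ // (FrameScope emits the INTERNAL frame construction here and the matching
+ // teardown when the scope is destroyed.)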
+ FrameScope scope(masm, StackFrame::INTERNAL); + ASSERT(descriptor->register_param_count_ == 0 || + eax.is(descriptor->register_params_[param_count - 1])); + // Push arguments + for (int i = 0; i < param_count; ++i) { + __ push(descriptor->register_params_[i]); + } + ExternalReference miss = descriptor->miss_handler_; + __ CallExternalReference(miss, descriptor->register_param_count_); + } + + __ ret(0); +} + + void ToNumberStub::Generate(MacroAssembler* masm) { // The ToNumber stub takes one argument in eax. Label check_heap_number, call_builtin; @@ -531,7 +574,7 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) { AllowExternalCallThatCantCauseGC scope(masm); __ PrepareCallCFunction(argument_count, ecx); __ mov(Operand(esp, 0 * kPointerSize), - Immediate(ExternalReference::isolate_address())); + Immediate(ExternalReference::isolate_address(masm->isolate()))); __ CallCFunction( ExternalReference::store_buffer_overflow_function(masm->isolate()), argument_count); @@ -3851,7 +3894,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { // Argument 9: Pass current isolate address. __ mov(Operand(esp, 8 * kPointerSize), - Immediate(ExternalReference::isolate_address())); + Immediate(ExternalReference::isolate_address(masm->isolate()))); // Argument 8: Indicate that this is a direct call from JavaScript. __ mov(Operand(esp, 7 * kPointerSize), Immediate(1)); @@ -4927,6 +4970,9 @@ void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) { StubFailureTrampolineStub::GenerateAheadOfTime(isolate); // It is important that the store buffer overflow stubs are generated first. RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate); + if (FLAG_optimize_constructed_arrays) { + ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate); + } } @@ -5005,7 +5051,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm, __ mov(Operand(esp, 0 * kPointerSize), edi); // argc. __ mov(Operand(esp, 1 * kPointerSize), esi); // argv. __ mov(Operand(esp, 2 * kPointerSize), - Immediate(ExternalReference::isolate_address())); + Immediate(ExternalReference::isolate_address(masm->isolate()))); __ call(ebx); // Result is in eax or edx:eax - do not destroy these registers! @@ -5013,12 +5059,17 @@ void CEntryStub::GenerateCore(MacroAssembler* masm, __ dec(Operand::StaticVariable(scope_depth)); } - // Make sure we're not trying to return 'the hole' from the runtime - // call as this may lead to crashes in the IC code later. + // Runtime functions should not return 'the hole'. Allowing it to escape may + // lead to crashes in the IC code later. if (FLAG_debug_code) { Label okay; __ cmp(eax, masm->isolate()->factory()->the_hole_value()); __ j(not_equal, &okay, Label::kNear); + // TODO(wingo): Currently SuspendJSGeneratorObject returns the hole. Change + // to return another sentinel like a harmony symbol. + __ cmp(ebx, Immediate(ExternalReference( + Runtime::kSuspendJSGeneratorObject, masm->isolate()))); + __ j(equal, &okay, Label::kNear); __ int3(); __ bind(&okay); } @@ -5777,17 +5828,17 @@ void StringAddStub::Generate(MacroAssembler* masm) { __ ret(2 * kPointerSize); __ bind(&non_ascii); // At least one of the strings is two-byte. Check whether it happens - // to contain only ASCII characters. + // to contain only one byte characters. // ecx: first instance type AND second instance type. // edi: second instance type. 
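// (The data hint marks a two-byte string whose contents are known to be
// one-byte only, so such a pair can still produce an ASCII cons string.)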
- __ test(ecx, Immediate(kAsciiDataHintMask)); + __ test(ecx, Immediate(kOneByteDataHintMask)); __ j(not_zero, &ascii_data); __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset)); __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset)); __ xor_(edi, ecx); - STATIC_ASSERT(kOneByteStringTag != 0 && kAsciiDataHintTag != 0); - __ and_(edi, kOneByteStringTag | kAsciiDataHintTag); - __ cmp(edi, kOneByteStringTag | kAsciiDataHintTag); + STATIC_ASSERT(kOneByteStringTag != 0 && kOneByteDataHintTag != 0); + __ and_(edi, kOneByteStringTag | kOneByteDataHintTag); + __ cmp(edi, kOneByteStringTag | kOneByteDataHintTag); __ j(equal, &ascii_data); // Allocate a two byte cons string. __ AllocateTwoByteConsString(ecx, edi, no_reg, &call_runtime); @@ -7446,7 +7497,7 @@ void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) { __ mov(Operand(esp, 0 * kPointerSize), regs_.object()); __ mov(Operand(esp, 1 * kPointerSize), regs_.address()); // Slot. __ mov(Operand(esp, 2 * kPointerSize), - Immediate(ExternalReference::isolate_address())); + Immediate(ExternalReference::isolate_address(masm->isolate()))); AllowExternalCallThatCantCauseGC scope(masm); if (mode == INCREMENTAL_COMPACTION) { @@ -7686,6 +7737,197 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) { __ ret(0); } + +template<class T> +static void CreateArrayDispatch(MacroAssembler* masm) { + int last_index = GetSequenceIndexFromFastElementsKind( + TERMINAL_FAST_ELEMENTS_KIND); + for (int i = 0; i <= last_index; ++i) { + Label next; + ElementsKind kind = GetFastElementsKindFromSequenceIndex(i); + __ cmp(edx, kind); + __ j(not_equal, &next); + T stub(kind); + __ TailCallStub(&stub); + __ bind(&next); + } + + // If we reached this point there is a problem. + __ Abort("Unexpected ElementsKind in array constructor"); +} + + +static void CreateArrayDispatchOneArgument(MacroAssembler* masm) { + // ebx - type info cell + // edx - kind + // eax - number of arguments + // edi - constructor? + // esp[0] - return address + // esp[4] - last argument + ASSERT(FAST_SMI_ELEMENTS == 0); + ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1); + ASSERT(FAST_ELEMENTS == 2); + ASSERT(FAST_HOLEY_ELEMENTS == 3); + ASSERT(FAST_DOUBLE_ELEMENTS == 4); + ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5); + + Handle<Object> undefined_sentinel( + masm->isolate()->heap()->undefined_value(), + masm->isolate()); + + // is the low bit set? If so, we are holey and that is good. + __ test_b(edx, 1); + Label normal_sequence; + __ j(not_zero, &normal_sequence); + + // look at the first argument + __ mov(ecx, Operand(esp, kPointerSize)); + __ test(ecx, ecx); + __ j(zero, &normal_sequence); + + // We are going to create a holey array, but our kind is non-holey. + // Fix kind and retry + __ inc(edx); + __ cmp(ebx, Immediate(undefined_sentinel)); + __ j(equal, &normal_sequence); + + // Save the resulting elements kind in type info + __ SmiTag(edx); + __ mov(FieldOperand(ebx, kPointerSize), edx); + __ SmiUntag(edx); + + __ bind(&normal_sequence); + int last_index = GetSequenceIndexFromFastElementsKind( + TERMINAL_FAST_ELEMENTS_KIND); + for (int i = 0; i <= last_index; ++i) { + Label next; + ElementsKind kind = GetFastElementsKindFromSequenceIndex(i); + __ cmp(edx, kind); + __ j(not_equal, &next); + ArraySingleArgumentConstructorStub stub(kind); + __ TailCallStub(&stub); + __ bind(&next); + } + + // If we reached this point there is a problem. 
+ __ Abort("Unexpected ElementsKind in array constructor"); +} + + +template<class T> +static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) { + int to_index = GetSequenceIndexFromFastElementsKind( + TERMINAL_FAST_ELEMENTS_KIND); + for (int i = 0; i <= to_index; ++i) { + ElementsKind kind = GetFastElementsKindFromSequenceIndex(i); + T stub(kind); + stub.GetCode(isolate)->set_is_pregenerated(true); + } +} + + +void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) { + ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>( + isolate); + ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>( + isolate); + ArrayConstructorStubAheadOfTimeHelper<ArrayNArgumentsConstructorStub>( + isolate); +} + + +void ArrayConstructorStub::Generate(MacroAssembler* masm) { + // ----------- S t a t e ------------- + // -- eax : argc (only if argument_count_ == ANY) + // -- ebx : type info cell + // -- edi : constructor + // -- esp[0] : return address + // -- esp[4] : last argument + // ----------------------------------- + Handle<Object> undefined_sentinel( + masm->isolate()->heap()->undefined_value(), + masm->isolate()); + + if (FLAG_debug_code) { + // The array construct code is only set for the global and natives + // builtin Array functions which always have maps. + + // Initial map for the builtin Array function should be a map. + __ mov(ecx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset)); + // Will both indicate a NULL and a Smi. + __ test(ecx, Immediate(kSmiTagMask)); + __ Assert(not_zero, "Unexpected initial map for Array function"); + __ CmpObjectType(ecx, MAP_TYPE, ecx); + __ Assert(equal, "Unexpected initial map for Array function"); + + // We should either have undefined in ebx or a valid jsglobalpropertycell + Label okay_here; + Handle<Map> global_property_cell_map( + masm->isolate()->heap()->global_property_cell_map()); + __ cmp(ebx, Immediate(undefined_sentinel)); + __ j(equal, &okay_here); + __ cmp(FieldOperand(ebx, 0), Immediate(global_property_cell_map)); + __ Assert(equal, "Expected property cell in register ebx"); + __ bind(&okay_here); + } + + if (FLAG_optimize_constructed_arrays) { + Label no_info, switch_ready; + // Get the elements kind and case on that. + __ cmp(ebx, Immediate(undefined_sentinel)); + __ j(equal, &no_info); + __ mov(edx, FieldOperand(ebx, kPointerSize)); + + // There is no info if the call site went megamorphic either + + // TODO(mvstanton): Really? I thought if it was the array function that + // the cell wouldn't get stamped as megamorphic. 
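+ // (A megamorphic site stores the sentinel in the cell rather than a smi
+ // elements kind, so it falls back to the initial fast kind below.)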
+ __ cmp(edx, Immediate(TypeFeedbackCells::MegamorphicSentinel( + masm->isolate()))); + __ j(equal, &no_info); + __ SmiUntag(edx); + __ jmp(&switch_ready); + __ bind(&no_info); + __ mov(edx, Immediate(GetInitialFastElementsKind())); + __ bind(&switch_ready); + + if (argument_count_ == ANY) { + Label not_zero_case, not_one_case; + __ test(eax, eax); + __ j(not_zero, ¬_zero_case); + CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm); + + __ bind(¬_zero_case); + __ cmp(eax, 1); + __ j(greater, ¬_one_case); + CreateArrayDispatchOneArgument(masm); + + __ bind(¬_one_case); + CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm); + } else if (argument_count_ == NONE) { + CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm); + } else if (argument_count_ == ONE) { + CreateArrayDispatchOneArgument(masm); + } else if (argument_count_ == MORE_THAN_ONE) { + CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm); + } else { + UNREACHABLE(); + } + } else { + Label generic_constructor; + // Run the native code for the Array function called as constructor. + ArrayNativeCode(masm, true, &generic_constructor); + + // Jump to the generic construct code in case the specialized code cannot + // handle the construction. + __ bind(&generic_constructor); + Handle<Code> generic_construct_stub = + masm->isolate()->builtins()->JSConstructStubGeneric(); + __ jmp(generic_construct_stub, RelocInfo::CODE_TARGET); + } +} + + #undef __ } } // namespace v8::internal diff --git a/deps/v8/src/ia32/code-stubs-ia32.h b/deps/v8/src/ia32/code-stubs-ia32.h index 07563cd02a..fbf1a68e66 100644 --- a/deps/v8/src/ia32/code-stubs-ia32.h +++ b/deps/v8/src/ia32/code-stubs-ia32.h @@ -36,6 +36,10 @@ namespace v8 { namespace internal { +void ArrayNativeCode(MacroAssembler* masm, + bool construct_call, + Label* call_generic_code); + // Compute a transcendental math function natively, or call the // TranscendentalCache runtime function. class TranscendentalCacheStub: public PlatformCodeStub { diff --git a/deps/v8/src/ia32/codegen-ia32.cc b/deps/v8/src/ia32/codegen-ia32.cc index caf808b953..7663c6a7fd 100644 --- a/deps/v8/src/ia32/codegen-ia32.cc +++ b/deps/v8/src/ia32/codegen-ia32.cc @@ -635,6 +635,8 @@ OS::MemMoveFunction CreateMemMoveFunction() { ASSERT(!RelocInfo::RequiresRelocation(desc)); CPU::FlushICache(buffer, actual_size); OS::ProtectCode(buffer, actual_size); + // TODO(jkummerow): It would be nice to register this code creation event + // with the PROFILE / GDBJIT system. return FUNCTION_CAST<OS::MemMoveFunction>(buffer); } diff --git a/deps/v8/src/ia32/deoptimizer-ia32.cc b/deps/v8/src/ia32/deoptimizer-ia32.cc index f0436225c5..f8aff48d13 100644 --- a/deps/v8/src/ia32/deoptimizer-ia32.cc +++ b/deps/v8/src/ia32/deoptimizer-ia32.cc @@ -716,8 +716,6 @@ void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) { void Deoptimizer::EntryGenerator::Generate() { GeneratePrologue(); - Isolate* isolate = masm()->isolate(); - // Save all general purpose registers before messing with them. const int kNumberOfRegisters = Register::kNumRegisters; @@ -762,10 +760,10 @@ void Deoptimizer::EntryGenerator::Generate() { __ mov(Operand(esp, 3 * kPointerSize), ecx); // Code address or 0. __ mov(Operand(esp, 4 * kPointerSize), edx); // Fp-to-sp delta. 
__ mov(Operand(esp, 5 * kPointerSize), - Immediate(ExternalReference::isolate_address())); + Immediate(ExternalReference::isolate_address(isolate()))); { AllowExternalCallThatCantCauseGC scope(masm()); - __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6); + __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate()), 6); } // Preserve deoptimizer object in register eax and get the input @@ -828,7 +826,7 @@ void Deoptimizer::EntryGenerator::Generate() { { AllowExternalCallThatCantCauseGC scope(masm()); __ CallCFunction( - ExternalReference::compute_output_frames_function(isolate), 1); + ExternalReference::compute_output_frames_function(isolate()), 1); } __ pop(eax); diff --git a/deps/v8/src/ia32/full-codegen-ia32.cc b/deps/v8/src/ia32/full-codegen-ia32.cc index a35f12d8cc..f71a76dd09 100644 --- a/deps/v8/src/ia32/full-codegen-ia32.cc +++ b/deps/v8/src/ia32/full-codegen-ia32.cc @@ -1883,6 +1883,156 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) { } +void FullCodeGenerator::VisitYield(Yield* expr) { + Comment cmnt(masm_, "[ Yield"); + // Evaluate yielded value first; the initial iterator definition depends on + // this. It stays on the stack while we update the iterator. + VisitForStackValue(expr->expression()); + + switch (expr->yield_kind()) { + case Yield::INITIAL: + case Yield::SUSPEND: { + VisitForStackValue(expr->generator_object()); + __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1); + __ mov(context_register(), + Operand(ebp, StandardFrameConstants::kContextOffset)); + + Label resume; + __ CompareRoot(result_register(), Heap::kTheHoleValueRootIndex); + __ j(not_equal, &resume); + __ pop(result_register()); + if (expr->yield_kind() == Yield::SUSPEND) { + // TODO(wingo): Box into { value: VALUE, done: false }. + } + EmitReturnSequence(); + + __ bind(&resume); + context()->Plug(result_register()); + break; + } + + case Yield::FINAL: { + VisitForAccumulatorValue(expr->generator_object()); + __ mov(FieldOperand(result_register(), + JSGeneratorObject::kContinuationOffset), + Immediate(Smi::FromInt(JSGeneratorObject::kGeneratorClosed))); + __ pop(result_register()); + // TODO(wingo): Box into { value: VALUE, done: true }. + + // Exit all nested statements. + NestedStatement* current = nesting_stack_; + int stack_depth = 0; + int context_length = 0; + while (current != NULL) { + current = current->Exit(&stack_depth, &context_length); + } + __ Drop(stack_depth); + EmitReturnSequence(); + break; + } + + case Yield::DELEGATING: + UNIMPLEMENTED(); + } +} + + +void FullCodeGenerator::EmitGeneratorResume(Expression *generator, + Expression *value, + JSGeneratorObject::ResumeMode resume_mode) { + // The value stays in eax, and is ultimately read by the resumed generator, as + // if the CallRuntime(Runtime::kSuspendJSGeneratorObject) returned it. ebx + // will hold the generator object until the activation has been resumed. + VisitForStackValue(generator); + VisitForAccumulatorValue(value); + __ pop(ebx); + + // Check generator state. + Label wrong_state, done; + STATIC_ASSERT(JSGeneratorObject::kGeneratorExecuting <= 0); + STATIC_ASSERT(JSGeneratorObject::kGeneratorClosed <= 0); + __ cmp(FieldOperand(ebx, JSGeneratorObject::kContinuationOffset), + Immediate(Smi::FromInt(0))); + __ j(less_equal, &wrong_state); + + // Load suspended function and context. + __ mov(esi, FieldOperand(ebx, JSGeneratorObject::kContextOffset)); + __ mov(edi, FieldOperand(ebx, JSGeneratorObject::kFunctionOffset)); + + // Push receiver. 
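+ // (The receiver was saved on the generator object when it was created.)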
+ __ push(FieldOperand(ebx, JSGeneratorObject::kReceiverOffset)); + + // Push holes for arguments to generator function. + __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset)); + __ mov(edx, + FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset)); + __ mov(ecx, isolate()->factory()->the_hole_value()); + Label push_argument_holes, push_frame; + __ bind(&push_argument_holes); + __ sub(edx, Immediate(1)); + __ j(carry, &push_frame); + __ push(ecx); + __ jmp(&push_argument_holes); + + // Enter a new JavaScript frame, and initialize its slots as they were when + // the generator was suspended. + Label resume_frame; + __ bind(&push_frame); + __ call(&resume_frame); + __ jmp(&done); + __ bind(&resume_frame); + __ push(ebp); // Caller's frame pointer. + __ mov(ebp, esp); + __ push(esi); // Callee's context. + __ push(edi); // Callee's JS Function. + + // Load the operand stack size. + __ mov(edx, FieldOperand(ebx, JSGeneratorObject::kOperandStackOffset)); + __ mov(edx, FieldOperand(edx, FixedArray::kLengthOffset)); + __ SmiUntag(edx); + + // If we are sending a value and there is no operand stack, we can jump back + // in directly. + if (resume_mode == JSGeneratorObject::SEND) { + Label slow_resume; + __ cmp(edx, Immediate(0)); + __ j(not_zero, &slow_resume); + __ mov(edx, FieldOperand(edi, JSFunction::kCodeEntryOffset)); + __ mov(ecx, FieldOperand(ebx, JSGeneratorObject::kContinuationOffset)); + __ SmiUntag(ecx); + __ add(edx, ecx); + __ mov(FieldOperand(ebx, JSGeneratorObject::kContinuationOffset), + Immediate(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting))); + __ jmp(edx); + __ bind(&slow_resume); + } + + // Otherwise, we push holes for the operand stack and call the runtime to fix + // up the stack and the handlers. + Label push_operand_holes, call_resume; + __ bind(&push_operand_holes); + __ sub(edx, Immediate(1)); + __ j(carry, &call_resume); + __ push(ecx); + __ jmp(&push_operand_holes); + __ bind(&call_resume); + __ push(ebx); + __ push(result_register()); + __ Push(Smi::FromInt(resume_mode)); + __ CallRuntime(Runtime::kResumeJSGeneratorObject, 3); + // Not reached: the runtime call returns elsewhere. + __ Abort("Generator failed to resume."); + + // Throw error if we attempt to operate on a running generator. + __ bind(&wrong_state); + __ push(ebx); + __ CallRuntime(Runtime::kThrowGeneratorStateError, 1); + + __ bind(&done); + context()->Plug(result_register()); +} + + void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) { SetSourcePosition(prop->position()); Literal* key = prop->key()->AsLiteral(); @@ -4384,24 +4534,21 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr, VisitForAccumulatorValue(sub_expr); PrepareForBailoutBeforeSplit(expr, true, if_true, if_false); - Handle<Object> nil_value = nil == kNullValue ? - isolate()->factory()->null_value() : - isolate()->factory()->undefined_value(); - __ cmp(eax, nil_value); - if (expr->op() == Token::EQ_STRICT) { + + EqualityKind kind = expr->op() == Token::EQ_STRICT + ? kStrictEquality : kNonStrictEquality; + Handle<Object> nil_value = nil == kNullValue + ? isolate()->factory()->null_value() + : isolate()->factory()->undefined_value(); + if (kind == kStrictEquality) { + __ cmp(eax, nil_value); Split(equal, if_true, if_false, fall_through); } else { - Handle<Object> other_nil_value = nil == kNullValue ? 
- isolate()->factory()->undefined_value() : - isolate()->factory()->null_value(); - __ j(equal, if_true); - __ cmp(eax, other_nil_value); - __ j(equal, if_true); - __ JumpIfSmi(eax, if_false); - // It can be an undetectable object. - __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset)); - __ movzx_b(edx, FieldOperand(edx, Map::kBitFieldOffset)); - __ test(edx, Immediate(1 << Map::kIsUndetectable)); + Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(), + kNonStrictEquality, + nil); + CallIC(ic, RelocInfo::CODE_TARGET, expr->CompareOperationFeedbackId()); + __ test(eax, eax); Split(not_zero, if_true, if_false, fall_through); } context()->Plug(if_true, if_false); diff --git a/deps/v8/src/ia32/lithium-codegen-ia32.cc b/deps/v8/src/ia32/lithium-codegen-ia32.cc index 665c85e827..d93c27ad28 100644 --- a/deps/v8/src/ia32/lithium-codegen-ia32.cc +++ b/deps/v8/src/ia32/lithium-codegen-ia32.cc @@ -336,49 +336,28 @@ bool LCodeGen::GenerateBody() { !is_aborted() && current_instruction_ < instructions_->length(); current_instruction_++) { LInstruction* instr = instructions_->at(current_instruction_); + + // Don't emit code for basic blocks with a replacement. if (instr->IsLabel()) { - LLabel* label = LLabel::cast(instr); - emit_instructions = !label->HasReplacement(); + emit_instructions = !LLabel::cast(instr)->HasReplacement(); } + if (!emit_instructions) continue; - if (emit_instructions) { - if (FLAG_code_comments) { - HValue* hydrogen = instr->hydrogen_value(); - if (hydrogen != NULL) { - if (hydrogen->IsChange()) { - HValue* changed_value = HChange::cast(hydrogen)->value(); - int use_id = 0; - const char* use_mnemo = "dead"; - if (hydrogen->UseCount() >= 1) { - HValue* use_value = hydrogen->uses().value(); - use_id = use_value->id(); - use_mnemo = use_value->Mnemonic(); - } - Comment(";;; @%d: %s. <of #%d %s for #%d %s>", - current_instruction_, instr->Mnemonic(), - changed_value->id(), changed_value->Mnemonic(), - use_id, use_mnemo); - } else { - Comment(";;; @%d: %s. 
<#%d>", current_instruction_, - instr->Mnemonic(), hydrogen->id()); - } - } else { - Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic()); - } - } - - if (!CpuFeatures::IsSupported(SSE2)) { - FlushX87StackIfNecessary(instr); - } + if (FLAG_code_comments && instr->HasInterestingComment(this)) { + Comment(";;; <@%d,#%d> %s", + current_instruction_, + instr->hydrogen_value()->id(), + instr->Mnemonic()); + } - instr->CompileToNative(this); + if (!CpuFeatures::IsSupported(SSE2)) FlushX87StackIfNecessary(instr); - if (!CpuFeatures::IsSupported(SSE2)) { - ASSERT(!instr->HasDoubleRegisterResult() || x87_stack_depth_ == 1); + instr->CompileToNative(this); - if (FLAG_debug_code && FLAG_enable_slow_asserts) { - __ VerifyX87StackDepth(x87_stack_depth_); - } + if (!CpuFeatures::IsSupported(SSE2)) { + ASSERT(!instr->HasDoubleRegisterResult() || x87_stack_depth_ == 1); + if (FLAG_debug_code && FLAG_enable_slow_asserts) { + __ VerifyX87StackDepth(x87_stack_depth_); } } } @@ -390,6 +369,9 @@ bool LCodeGen::GenerateBody() { bool LCodeGen::GenerateJumpTable() { Label needs_frame_not_call; Label needs_frame_is_call; + if (jump_table_.length() > 0) { + Comment(";;; -------------------- Jump table --------------------"); + } for (int i = 0; i < jump_table_.length(); i++) { __ bind(&jump_table_[i].label); Address entry = jump_table_[i].address; @@ -465,11 +447,14 @@ bool LCodeGen::GenerateDeferredCode() { if (deferred_.length() > 0) { for (int i = 0; !is_aborted() && i < deferred_.length(); i++) { LDeferredCode* code = deferred_[i]; + Comment(";;; <@%d,#%d> " + "-------------------- Deferred %s --------------------", + code->instruction_index(), + code->instr()->hydrogen_value()->id(), + code->instr()->Mnemonic()); __ bind(code->entry()); if (NeedsDeferredFrame()) { - Comment(";;; Deferred build frame @%d: %s.", - code->instruction_index(), - code->instr()->Mnemonic()); + Comment(";;; Build frame"); ASSERT(!frame_is_built_); ASSERT(info()->IsStub()); frame_is_built_ = true; @@ -478,15 +463,11 @@ bool LCodeGen::GenerateDeferredCode() { __ push(Operand(ebp, StandardFrameConstants::kContextOffset)); __ push(Immediate(Smi::FromInt(StackFrame::STUB))); __ lea(ebp, Operand(esp, 2 * kPointerSize)); + Comment(";;; Deferred code"); } - Comment(";;; Deferred code @%d: %s.", - code->instruction_index(), - code->instr()->Mnemonic()); code->Generate(); if (NeedsDeferredFrame()) { - Comment(";;; Deferred destroy frame @%d: %s.", - code->instruction_index(), - code->instr()->Mnemonic()); + Comment(";;; Destroy frame"); ASSERT(frame_is_built_); frame_is_built_ = false; __ mov(esp, ebp); @@ -654,7 +635,7 @@ void LCodeGen::WriteTranslation(LEnvironment* environment, pushed_arguments_index, pushed_arguments_count); bool has_closure_id = !info()->closure().is_null() && - *info()->closure() != *environment->closure(); + !info()->closure().is_identical_to(environment->closure()); int closure_id = has_closure_id ? 
DefineDeoptimizationLiteral(environment->closure()) : Translation::kSelfLiteralId; @@ -1021,10 +1002,13 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) { Handle<FixedArray> literals = factory()->NewFixedArray(deoptimization_literals_.length(), TENURED); - for (int i = 0; i < deoptimization_literals_.length(); i++) { - literals->set(i, *deoptimization_literals_[i]); + { ALLOW_HANDLE_DEREF(isolate(), + "copying a ZoneList of handles into a FixedArray"); + for (int i = 0; i < deoptimization_literals_.length(); i++) { + literals->set(i, *deoptimization_literals_[i]); + } + data->SetLiteralArray(*literals); } - data->SetLiteralArray(*literals); data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt())); data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_)); @@ -1125,10 +1109,19 @@ void LCodeGen::RecordPosition(int position) { } +static const char* LabelType(LLabel* label) { + if (label->is_loop_header()) return " (loop header)"; + if (label->is_osr_entry()) return " (OSR entry)"; + return ""; +} + + void LCodeGen::DoLabel(LLabel* label) { - Comment(";;; -------------------- B%d%s --------------------", + Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------", + current_instruction_, + label->hydrogen_value()->id(), label->block_id(), - label->is_loop_header() ? " (loop header)" : ""); + LabelType(label)); __ bind(label->label()); current_block_ = label->block_id(); DoGap(label); @@ -1797,6 +1790,7 @@ void LCodeGen::DoConstantD(LConstantD* instr) { void LCodeGen::DoConstantT(LConstantT* instr) { Register reg = ToRegister(instr->result()); Handle<Object> handle = instr->value(); + ALLOW_HANDLE_DEREF(isolate(), "smi check"); if (handle->IsHeapObject()) { __ LoadHeapObject(reg, Handle<HeapObject>::cast(handle)); } else { @@ -2056,17 +2050,16 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) { } -int LCodeGen::GetNextEmittedBlock(int block) { - for (int i = block + 1; i < graph()->blocks()->length(); ++i) { - LLabel* label = chunk_->GetLabel(i); - if (!label->HasReplacement()) return i; +int LCodeGen::GetNextEmittedBlock() const { + for (int i = current_block_ + 1; i < graph()->blocks()->length(); ++i) { + if (!chunk_->GetLabel(i)->HasReplacement()) return i; } return -1; } void LCodeGen::EmitBranch(int left_block, int right_block, Condition cc) { - int next_block = GetNextEmittedBlock(current_block_); + int next_block = GetNextEmittedBlock(); right_block = chunk_->LookupDestination(right_block); left_block = chunk_->LookupDestination(left_block); @@ -2204,10 +2197,8 @@ void LCodeGen::DoBranch(LBranch* instr) { void LCodeGen::EmitGoto(int block) { - block = chunk_->LookupDestination(block); - int next_block = GetNextEmittedBlock(current_block_); - if (block != next_block) { - __ jmp(chunk_->GetAssemblyLabel(block)); + if (!IsNextEmittedBlock(block)) { + __ jmp(chunk_->GetAssemblyLabel(chunk_->LookupDestination(block))); } } @@ -2786,6 +2777,8 @@ void LCodeGen::EmitReturn(LReturn* instr, bool dynamic_frame_alignment) { __ Ret((parameter_count + extra_value_count) * kPointerSize, ecx); } else { Register reg = ToRegister(instr->parameter_count()); + // The argument count parameter is a smi + __ SmiUntag(reg); Register return_addr_reg = reg.is(ecx) ? 
ebx : ecx; if (dynamic_frame_alignment && FLAG_debug_code) { ASSERT(extra_value_count == 2); @@ -3019,6 +3012,7 @@ void LCodeGen::EmitPushTaggedOperand(LOperand* operand) { ASSERT(!operand->IsDoubleRegister()); if (operand->IsConstantOperand()) { Handle<Object> object = ToHandle(LConstantOperand::cast(operand)); + ALLOW_HANDLE_DEREF(isolate(), "smi check"); if (object->IsSmi()) { __ Push(Handle<Smi>::cast(object)); } else { @@ -3198,13 +3192,21 @@ void LCodeGen::DoLoadExternalArrayPointer( void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) { Register arguments = ToRegister(instr->arguments()); - Register length = ToRegister(instr->length()); - Operand index = ToOperand(instr->index()); Register result = ToRegister(instr->result()); - // There are two words between the frame pointer and the last argument. - // Subtracting from length accounts for one of them add one more. - __ sub(length, index); - __ mov(result, Operand(arguments, length, times_4, kPointerSize)); + if (instr->length()->IsConstantOperand() && + instr->index()->IsConstantOperand()) { + int const_index = ToInteger32(LConstantOperand::cast(instr->index())); + int const_length = ToInteger32(LConstantOperand::cast(instr->length())); + int index = (const_length - const_index) + 1; + __ mov(result, Operand(arguments, index * kPointerSize)); + } else { + Register length = ToRegister(instr->length()); + Operand index = ToOperand(instr->index()); + // There are two words between the frame pointer and the last argument. + // Subtracting from length accounts for one of them add one more. + __ sub(length, index); + __ mov(result, Operand(arguments, length, times_4, kPointerSize)); + } } @@ -3595,12 +3597,15 @@ void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) { void LCodeGen::CallKnownFunction(Handle<JSFunction> function, + int formal_parameter_count, int arity, LInstruction* instr, CallKind call_kind, EDIState edi_state) { - bool can_invoke_directly = !function->NeedsArgumentsAdaption() || - function->shared()->formal_parameter_count() == arity; + bool dont_adapt_arguments = + formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel; + bool can_invoke_directly = + dont_adapt_arguments || formal_parameter_count == arity; LPointerMap* pointers = instr->pointer_map(); RecordPosition(pointers->position()); @@ -3615,13 +3620,13 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function, // Set eax to arguments count if adaption is not needed. Assumes that eax // is available to write to at this point. - if (!function->NeedsArgumentsAdaption()) { + if (dont_adapt_arguments) { __ mov(eax, arity); } // Invoke function directly. 
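// (When the callee is the function being compiled, CallSelf below avoids
// reloading the code entry from the JSFunction.)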
__ SetCallKind(ecx, call_kind); - if (*function == *info()->closure()) { + if (function.is_identical_to(info()->closure())) { __ CallSelf(); } else { __ call(FieldOperand(edi, JSFunction::kCodeEntryOffset)); @@ -3632,14 +3637,17 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function, SafepointGenerator generator( this, pointers, Safepoint::kLazyDeopt); ParameterCount count(arity); - __ InvokeFunction(function, count, CALL_FUNCTION, generator, call_kind); + ParameterCount expected(formal_parameter_count); + __ InvokeFunction( + function, expected, count, CALL_FUNCTION, generator, call_kind); } } void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) { ASSERT(ToRegister(instr->result()).is(eax)); - CallKnownFunction(instr->function(), + CallKnownFunction(instr->hydrogen()->function(), + instr->hydrogen()->formal_parameter_count(), instr->arity(), instr, CALL_AS_METHOD, @@ -4101,7 +4109,8 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) { ASSERT(ToRegister(instr->function()).is(edi)); ASSERT(instr->HasPointerMap()); - if (instr->known_function().is_null()) { + Handle<JSFunction> known_function = instr->hydrogen()->known_function(); + if (known_function.is_null()) { LPointerMap* pointers = instr->pointer_map(); RecordPosition(pointers->position()); SafepointGenerator generator( @@ -4109,7 +4118,8 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) { ParameterCount count(instr->arity()); __ InvokeFunction(edi, count, CALL_FUNCTION, generator, CALL_AS_METHOD); } else { - CallKnownFunction(instr->known_function(), + CallKnownFunction(known_function, + instr->hydrogen()->formal_parameter_count(), instr->arity(), instr, CALL_AS_METHOD, @@ -4169,7 +4179,8 @@ void LCodeGen::DoCallGlobal(LCallGlobal* instr) { void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) { ASSERT(ToRegister(instr->result()).is(eax)); - CallKnownFunction(instr->target(), + CallKnownFunction(instr->hydrogen()->target(), + instr->hydrogen()->formal_parameter_count(), instr->arity(), instr, CALL_AS_FUNCTION, @@ -4200,11 +4211,20 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) { ASSERT(ToRegister(instr->result()).is(eax)); ASSERT(FLAG_optimize_constructed_arrays); - __ mov(ebx, instr->hydrogen()->property_cell()); - Handle<Code> array_construct_code = - isolate()->builtins()->ArrayConstructCode(); __ Set(eax, Immediate(instr->arity())); - CallCode(array_construct_code, RelocInfo::CONSTRUCT_CALL, instr); + __ mov(ebx, instr->hydrogen()->property_cell()); + Object* cell_value = instr->hydrogen()->property_cell()->value(); + ElementsKind kind = static_cast<ElementsKind>(Smi::cast(cell_value)->value()); + if (instr->arity() == 0) { + ArrayNoArgumentConstructorStub stub(kind); + CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr); + } else if (instr->arity() == 1) { + ArraySingleArgumentConstructorStub stub(kind); + CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr); + } else { + ArrayNArgumentsConstructorStub stub(kind); + CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr); + } } @@ -4222,7 +4242,6 @@ void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) { void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) { Register object = ToRegister(instr->object()); - Register value = ToRegister(instr->value()); int offset = instr->offset(); if (!instr->transition().is_null()) { @@ -4248,34 +4267,42 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) { HType type = instr->hydrogen()->value()->type(); SmiCheck 
check_needed = type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; - if (instr->is_in_object()) { - __ mov(FieldOperand(object, offset), value); - if (instr->hydrogen()->NeedsWriteBarrier()) { - Register temp = ToRegister(instr->temp()); - // Update the write barrier for the object for in-object properties. - __ RecordWriteField(object, - offset, - value, - temp, - GetSaveFPRegsMode(), - EMIT_REMEMBERED_SET, - check_needed); + + Register write_register = object; + if (!instr->is_in_object()) { + write_register = ToRegister(instr->temp()); + __ mov(write_register, + FieldOperand(object, JSObject::kPropertiesOffset)); + } + + if (instr->value()->IsConstantOperand()) { + LConstantOperand* operand_value = LConstantOperand::cast(instr->value()); + if (IsInteger32(operand_value)) { + // In lithium register preparation, we made sure that the constant integer + // operand fits into smi range. + Smi* smi_value = Smi::FromInt(ToInteger32(operand_value)); + __ mov(FieldOperand(write_register, offset), Immediate(smi_value)); + } else if (operand_value->IsRegister()) { + __ mov(FieldOperand(write_register, offset), ToRegister(operand_value)); + } else { + Handle<Object> handle_value = ToHandle(operand_value); + __ mov(FieldOperand(write_register, offset), handle_value); } } else { - Register temp = ToRegister(instr->temp()); - __ mov(temp, FieldOperand(object, JSObject::kPropertiesOffset)); - __ mov(FieldOperand(temp, offset), value); - if (instr->hydrogen()->NeedsWriteBarrier()) { - // Update the write barrier for the properties array. - // object is used as a scratch register. - __ RecordWriteField(temp, - offset, - value, - object, - GetSaveFPRegsMode(), - EMIT_REMEMBERED_SET, - check_needed); - } + __ mov(FieldOperand(write_register, offset), ToRegister(instr->value())); + } + + if (instr->hydrogen()->NeedsWriteBarrier()) { + Register value = ToRegister(instr->value()); + Register temp = instr->is_in_object() ? ToRegister(instr->temp()) : object; + // Update the write barrier for the object for in-object properties. + __ RecordWriteField(write_register, + offset, + value, + temp, + GetSaveFPRegsMode(), + EMIT_REMEMBERED_SET, + check_needed); } } @@ -4453,7 +4480,6 @@ void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) { void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) { - Register value = ToRegister(instr->value()); Register elements = ToRegister(instr->elements()); Register key = instr->key()->IsRegister() ? 
ToRegister(instr->key()) : no_reg; @@ -4464,9 +4490,22 @@ void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) { FAST_ELEMENTS, FixedArray::kHeaderSize - kHeapObjectTag, instr->additional_index()); - __ mov(operand, value); + if (instr->value()->IsRegister()) { + __ mov(operand, ToRegister(instr->value())); + } else { + LConstantOperand* operand_value = LConstantOperand::cast(instr->value()); + if (IsInteger32(operand_value)) { + Smi* smi_value = Smi::FromInt(ToInteger32(operand_value)); + __ mov(operand, Immediate(smi_value)); + } else { + Handle<Object> handle_value = ToHandle(operand_value); + __ mov(operand, handle_value); + } + } if (instr->hydrogen()->NeedsWriteBarrier()) { + ASSERT(instr->value()->IsRegister()); + Register value = ToRegister(instr->value()); ASSERT(!instr->key()->IsConstantOperand()); HType type = instr->hydrogen()->value()->type(); SmiCheck check_needed = @@ -5876,16 +5915,12 @@ void LCodeGen::DoAllocateObject(LAllocateObject* instr) { Register result = ToRegister(instr->result()); Register scratch = ToRegister(instr->temp()); Handle<JSFunction> constructor = instr->hydrogen()->constructor(); - Handle<Map> initial_map(constructor->initial_map()); + Handle<Map> initial_map = instr->hydrogen()->constructor_initial_map(); int instance_size = initial_map->instance_size(); ASSERT(initial_map->pre_allocated_property_fields() + initial_map->unused_property_fields() - initial_map->inobject_properties() == 0); - // Allocate memory for the object. The initial map might change when - // the constructor's prototype changes, but instance size and property - // counts remain unchanged (if slack tracking finished). - ASSERT(!constructor->shared()->IsInobjectSlackTrackingInProgress()); __ Allocate(instance_size, result, no_reg, scratch, deferred->entry(), TAG_OBJECT); @@ -5936,8 +5971,7 @@ void LCodeGen::DoAllocateObject(LAllocateObject* instr) { void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) { Register result = ToRegister(instr->result()); - Handle<JSFunction> constructor = instr->hydrogen()->constructor(); - Handle<Map> initial_map(constructor->initial_map()); + Handle<Map> initial_map = instr->hydrogen()->constructor_initial_map(); int instance_size = initial_map->instance_size(); // TODO(3095996): Get rid of this. For now, we need to make the @@ -6016,7 +6050,7 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) { void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) { ASSERT(ToRegister(instr->context()).is(esi)); - Handle<FixedArray> literals(instr->environment()->closure()->literals()); + Handle<FixedArray> literals = instr->hydrogen()->literals(); ElementsKind boilerplate_elements_kind = instr->hydrogen()->boilerplate_elements_kind(); AllocationSiteMode allocation_site_mode = @@ -6077,7 +6111,7 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) { void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) { ASSERT(ToRegister(instr->context()).is(esi)); - Handle<FixedArray> literals(instr->environment()->closure()->literals()); + Handle<FixedArray> literals = instr->hydrogen()->literals(); Handle<FixedArray> constant_properties = instr->hydrogen()->constant_properties(); @@ -6090,7 +6124,7 @@ void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) { // Set up the parameters to the stub/runtime call and pick the right // runtime function or stub to call. 
- int properties_count = constant_properties->length() / 2; + int properties_count = instr->hydrogen()->constant_properties_length() / 2; if (instr->hydrogen()->depth() > 1) { __ PushHeapObject(literals); __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index()))); @@ -6178,19 +6212,17 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) { ASSERT(ToRegister(instr->context()).is(esi)); // Use the fast case closure allocation code that allocates in new // space for nested functions that don't need literals cloning. - Handle<SharedFunctionInfo> shared_info = instr->shared_info(); bool pretenure = instr->hydrogen()->pretenure(); - if (!pretenure && shared_info->num_literals() == 0) { - FastNewClosureStub stub(shared_info->language_mode(), - shared_info->is_generator()); - __ push(Immediate(shared_info)); + if (!pretenure && instr->hydrogen()->has_no_literals()) { + FastNewClosureStub stub(instr->hydrogen()->language_mode(), + instr->hydrogen()->is_generator()); + __ push(Immediate(instr->hydrogen()->shared_info())); CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); } else { __ push(esi); - __ push(Immediate(shared_info)); - __ push(Immediate(pretenure - ? factory()->true_value() - : factory()->false_value())); + __ push(Immediate(instr->hydrogen()->shared_info())); + __ push(Immediate(pretenure ? factory()->true_value() + : factory()->false_value())); CallRuntime(Runtime::kNewClosure, 3, instr); } } diff --git a/deps/v8/src/ia32/lithium-codegen-ia32.h b/deps/v8/src/ia32/lithium-codegen-ia32.h index b268456b7d..5b44d87b08 100644 --- a/deps/v8/src/ia32/lithium-codegen-ia32.h +++ b/deps/v8/src/ia32/lithium-codegen-ia32.h @@ -84,10 +84,20 @@ class LCodeGen BASE_EMBEDDED { Heap* heap() const { return isolate()->heap(); } Zone* zone() const { return zone_; } + // TODO(svenpanne) Use this consistently. + int LookupDestination(int block_id) const { + return chunk()->LookupDestination(block_id); + } + + bool IsNextEmittedBlock(int block_id) const { + return LookupDestination(block_id) == GetNextEmittedBlock(); + } + bool NeedsEagerFrame() const { return GetStackSlotCount() > 0 || info()->is_non_deferred_calling() || - !info()->IsStub(); + !info()->IsStub() || + info()->requires_frame(); } bool NeedsDeferredFrame() const { return !NeedsEagerFrame() && info()->is_deferred_calling(); @@ -188,9 +198,9 @@ class LCodeGen BASE_EMBEDDED { LPlatformChunk* chunk() const { return chunk_; } Scope* scope() const { return scope_; } - HGraph* graph() const { return chunk_->graph(); } + HGraph* graph() const { return chunk()->graph(); } - int GetNextEmittedBlock(int block); + int GetNextEmittedBlock() const; void EmitClassOfTest(Label* if_true, Label* if_false, @@ -254,6 +264,7 @@ class LCodeGen BASE_EMBEDDED { // Generate a direct call to a known function. Expects the function // to be in edi. 
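// formal_parameter_count may be
// SharedFunctionInfo::kDontAdaptArgumentsSentinel, in which case argument
// adaption is skipped entirely.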
void CallKnownFunction(Handle<JSFunction> function, + int formal_parameter_count, int arity, LInstruction* instr, CallKind call_kind, diff --git a/deps/v8/src/ia32/lithium-ia32.cc b/deps/v8/src/ia32/lithium-ia32.cc index c4a8be1bcc..c023fd15b6 100644 --- a/deps/v8/src/ia32/lithium-ia32.cc +++ b/deps/v8/src/ia32/lithium-ia32.cc @@ -99,7 +99,7 @@ bool LInstruction::HasDoubleRegisterResult() { bool LInstruction::HasDoubleRegisterInput() { for (int i = 0; i < InputCount(); i++) { LOperand* op = InputAt(i); - if (op->IsDoubleRegister()) { + if (op != NULL && op->IsDoubleRegister()) { return true; } } @@ -210,6 +210,11 @@ const char* LArithmeticT::Mnemonic() const { } +bool LGoto::HasInterestingComment(LCodeGen* gen) const { + return !gen->IsNextEmittedBlock(block_id()); +} + + void LGoto::PrintDataTo(StringStream* stream) { stream->Add("B%d", block_id()); } @@ -1056,11 +1061,13 @@ LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) { LInstruction* LChunkBuilder::DoArgumentsLength(HArgumentsLength* length) { + info()->MarkAsRequiresFrame(); return DefineAsRegister(new(zone()) LArgumentsLength(Use(length->value()))); } LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) { + info()->MarkAsRequiresFrame(); return DefineAsRegister(new(zone()) LArgumentsElements); } @@ -2280,6 +2287,19 @@ LOperand* LChunkBuilder::GetStoreKeyedValueOperand(HStoreKeyed* instr) { } +// DoStoreKeyed and DoStoreNamedField have special considerations for allowing +// use of a constant instead of a register. +static bool StoreConstantValueAllowed(HValue* value) { + if (value->IsConstant()) { + HConstant* constant_value = HConstant::cast(value); + return constant_value->HasSmiValue() + || constant_value->HasDoubleValue() + || constant_value->ImmortalImmovable(); + } + return false; +} + + LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) { if (!instr->is_external()) { ASSERT(instr->elements()->representation().IsTagged()); @@ -2295,19 +2315,30 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) { val = UseX87TopOfStack(instr->value()); } LOperand* key = UseRegisterOrConstantAtStart(instr->key()); - return new(zone()) LStoreKeyed(object, key, val); } else { ASSERT(instr->value()->representation().IsTagged()); bool needs_write_barrier = instr->NeedsWriteBarrier(); LOperand* obj = UseRegister(instr->elements()); - LOperand* val = needs_write_barrier - ? UseTempRegister(instr->value()) - : UseRegisterAtStart(instr->value()); - LOperand* key = needs_write_barrier - ? UseTempRegister(instr->key()) - : UseRegisterOrConstantAtStart(instr->key()); + LOperand* val; + LOperand* key; + if (needs_write_barrier) { + val = UseTempRegister(instr->value()); + key = UseTempRegister(instr->key()); + } else { + if (StoreConstantValueAllowed(instr->value())) { + val = UseRegisterOrConstantAtStart(instr->value()); + } else { + val = UseRegisterAtStart(instr->value()); + } + + if (StoreConstantValueAllowed(instr->key())) { + key = UseRegisterOrConstantAtStart(instr->key()); + } else { + key = UseRegisterAtStart(instr->key()); + } + } return new(zone()) LStoreKeyed(obj, key, val); } } @@ -2407,9 +2438,14 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) { : UseRegisterAtStart(instr->object()); } - LOperand* val = needs_write_barrier - ? 
UseTempRegister(instr->value()) - : UseRegister(instr->value()); + LOperand* val; + if (needs_write_barrier) { + val = UseTempRegister(instr->value()); + } else if (StoreConstantValueAllowed(instr->value())) { + val = UseRegisterOrConstant(instr->value()); + } else { + val = UseRegister(instr->value()); + } // We only need a scratch register if we have a write barrier or we // have a store into the properties array (not in-object-property). @@ -2480,6 +2516,7 @@ LInstruction* LChunkBuilder::DoAllocateObject(HAllocateObject* instr) { LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) { info()->MarkAsDeferredCalling(); LOperand* context = UseAny(instr->context()); + // TODO(mvstanton): why can't size be a constant if possible? LOperand* size = UseTempRegister(instr->size()); LOperand* temp = TempRegister(); LAllocate* result = new(zone()) LAllocate(context, size, temp); @@ -2541,7 +2578,8 @@ LInstruction* LChunkBuilder::DoParameter(HParameter* instr) { ASSERT(info()->IsStub()); CodeStubInterfaceDescriptor* descriptor = info()->code_stub()->GetInterfaceDescriptor(info()->isolate()); - Register reg = descriptor->register_params_[instr->index()]; + int index = static_cast<int>(instr->index()); + Register reg = DESCRIPTOR_GET_PARAMETER_REGISTER(descriptor, index); return DefineFixed(result, reg); } } @@ -2575,9 +2613,17 @@ LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) { LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) { + info()->MarkAsRequiresFrame(); LOperand* args = UseRegister(instr->arguments()); - LOperand* length = UseTempRegister(instr->length()); - LOperand* index = Use(instr->index()); + LOperand* length; + LOperand* index; + if (instr->length()->IsConstant() && instr->index()->IsConstant()) { + length = UseRegisterOrConstant(instr->length()); + index = UseOrConstant(instr->index()); + } else { + length = UseTempRegister(instr->length()); + index = Use(instr->index()); + } return DefineAsRegister(new(zone()) LAccessArgumentsAt(args, length, index)); } diff --git a/deps/v8/src/ia32/lithium-ia32.h b/deps/v8/src/ia32/lithium-ia32.h index 10272fd425..49462cb88f 100644 --- a/deps/v8/src/ia32/lithium-ia32.h +++ b/deps/v8/src/ia32/lithium-ia32.h @@ -278,6 +278,8 @@ class LInstruction: public ZoneObject { LOperand* FirstInput() { return InputAt(0); } LOperand* Output() { return HasResult() ? 
result() : NULL; } + virtual bool HasInterestingComment(LCodeGen* gen) const { return true; } + #ifdef DEBUG void VerifyCall(); #endif @@ -378,6 +380,10 @@ class LInstructionGap: public LGap { explicit LInstructionGap(HBasicBlock* block) : LGap(block) { } virtual bool ClobbersDoubleRegisters() const { return false; } + virtual bool HasInterestingComment(LCodeGen* gen) const { + return !IsRedundant(); + } + DECLARE_CONCRETE_INSTRUCTION(InstructionGap, "gap") }; @@ -386,6 +392,7 @@ class LGoto: public LTemplateInstruction<0, 0, 0> { public: explicit LGoto(int block_id) : block_id_(block_id) { } + virtual bool HasInterestingComment(LCodeGen* gen) const; DECLARE_CONCRETE_INSTRUCTION(Goto, "goto") virtual void PrintDataTo(StringStream* stream); virtual bool IsControl() const { return true; } @@ -423,12 +430,14 @@ class LLabel: public LGap { explicit LLabel(HBasicBlock* block) : LGap(block), replacement_(NULL) { } + virtual bool HasInterestingComment(LCodeGen* gen) const { return false; } DECLARE_CONCRETE_INSTRUCTION(Label, "label") virtual void PrintDataTo(StringStream* stream); int block_id() const { return block()->block_id(); } bool is_loop_header() const { return block()->IsLoopHeader(); } + bool is_osr_entry() const { return block()->is_osr_entry(); } Label* label() { return &label_; } LLabel* replacement() const { return replacement_; } void set_replacement(LLabel* label) { replacement_ = label; } @@ -442,6 +451,7 @@ class LLabel: public LGap { class LParameter: public LTemplateInstruction<1, 0, 0> { public: + virtual bool HasInterestingComment(LCodeGen* gen) const { return false; } DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter") }; @@ -465,6 +475,7 @@ class LCallStub: public LTemplateInstruction<1, 1, 0> { class LUnknownOSRValue: public LTemplateInstruction<1, 0, 0> { public: + virtual bool HasInterestingComment(LCodeGen* gen) const { return false; } DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value") }; @@ -1475,6 +1486,7 @@ class LReturn: public LTemplateInstruction<0, 3, 0> { LOperand* parameter_count() { return inputs_[2]; } DECLARE_CONCRETE_INSTRUCTION(Return, "return") + DECLARE_HYDROGEN_ACCESSOR(Return) }; @@ -1853,7 +1865,6 @@ class LInvokeFunction: public LTemplateInstruction<1, 2, 0> { virtual void PrintDataTo(StringStream* stream); int arity() const { return hydrogen()->argument_count() - 1; } - Handle<JSFunction> known_function() { return hydrogen()->known_function(); } }; @@ -1936,7 +1947,6 @@ class LCallKnownGlobal: public LTemplateInstruction<1, 0, 0> { virtual void PrintDataTo(StringStream* stream); - Handle<JSFunction> target() const { return hydrogen()->target(); } int arity() const { return hydrogen()->argument_count() - 1; } }; @@ -2604,8 +2614,6 @@ class LFunctionLiteral: public LTemplateInstruction<1, 1, 0> { DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral, "function-literal") DECLARE_HYDROGEN_ACCESSOR(FunctionLiteral) - - Handle<SharedFunctionInfo> shared_info() { return hydrogen()->shared_info(); } }; @@ -2673,6 +2681,7 @@ class LOsrEntry: public LTemplateInstruction<0, 0, 0> { public: LOsrEntry(); + virtual bool HasInterestingComment(LCodeGen* gen) const { return false; } DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry") LOperand** SpilledRegisterArray() { return register_spills_; } diff --git a/deps/v8/src/ia32/macro-assembler-ia32.cc b/deps/v8/src/ia32/macro-assembler-ia32.cc index 3228e8370e..733dbdb033 100644 --- a/deps/v8/src/ia32/macro-assembler-ia32.cc +++ b/deps/v8/src/ia32/macro-assembler-ia32.cc @@ -1984,8 +1984,10 @@ void 
MacroAssembler::CallApiFunctionAndReturn(Address function_address, if (FLAG_log_timer_events) { FrameScope frame(this, StackFrame::MANUAL); PushSafepointRegisters(); - PrepareCallCFunction(0, eax); - CallCFunction(ExternalReference::log_enter_external_function(isolate()), 0); + PrepareCallCFunction(1, eax); + mov(Operand(esp, 0), + Immediate(ExternalReference::isolate_address(isolate()))); + CallCFunction(ExternalReference::log_enter_external_function(isolate()), 1); PopSafepointRegisters(); } @@ -1995,8 +1997,10 @@ void MacroAssembler::CallApiFunctionAndReturn(Address function_address, if (FLAG_log_timer_events) { FrameScope frame(this, StackFrame::MANUAL); PushSafepointRegisters(); - PrepareCallCFunction(0, eax); - CallCFunction(ExternalReference::log_leave_external_function(isolate()), 0); + PrepareCallCFunction(1, eax); + mov(Operand(esp, 0), + Immediate(ExternalReference::isolate_address(isolate()))); + CallCFunction(ExternalReference::log_leave_external_function(isolate()), 1); PopSafepointRegisters(); } @@ -2086,7 +2090,8 @@ void MacroAssembler::CallApiFunctionAndReturn(Address function_address, bind(&delete_allocated_handles); mov(Operand::StaticVariable(limit_address), edi); mov(edi, eax); - mov(Operand(esp, 0), Immediate(ExternalReference::isolate_address())); + mov(Operand(esp, 0), + Immediate(ExternalReference::isolate_address(isolate()))); mov(eax, Immediate(delete_extensions)); call(eax); mov(eax, edi); @@ -2278,6 +2283,7 @@ void MacroAssembler::InvokeFunction(Register fun, void MacroAssembler::InvokeFunction(Handle<JSFunction> function, + const ParameterCount& expected, const ParameterCount& actual, InvokeFlag flag, const CallWrapper& call_wrapper, @@ -2289,7 +2295,6 @@ void MacroAssembler::InvokeFunction(Handle<JSFunction> function, LoadHeapObject(edi, function); mov(esi, FieldOperand(edi, JSFunction::kContextOffset)); - ParameterCount expected(function->shared()->formal_parameter_count()); // We call indirectly through the code field in the function to // allow recompilation to take effect without changing any of the // call sites. 
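Note on the InvokeFunction change above: the expected argument count is no longer derived inside MacroAssembler::InvokeFunction from the function's SharedFunctionInfo; every caller now supplies it. A minimal sketch of the resulting call-site pattern (`masm` and `function` are placeholder names, not part of this patch):

    // ParameterCount has a Handle<JSFunction> constructor that reads the
    // formal parameter count from the function's shared info, so callers
    // build `expected` themselves instead of relying on InvokeFunction.
    ParameterCount expected(function);   // formal parameter count
    ParameterCount actual(1);            // arguments actually being passed
    masm->InvokeFunction(function, expected, actual,
                         CALL_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);

The stub-cache hunks further down show this exact pattern applied to setters, getters, and the math builtins.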
@@ -2480,6 +2485,7 @@ int MacroAssembler::SafepointRegisterStackIndex(int reg_code) { void MacroAssembler::LoadHeapObject(Register result, Handle<HeapObject> object) { + ALLOW_HANDLE_DEREF(isolate(), "embedding raw address"); if (isolate()->heap()->InNewSpace(*object)) { Handle<JSGlobalPropertyCell> cell = isolate()->factory()->NewJSGlobalPropertyCell(object); @@ -2491,6 +2497,7 @@ void MacroAssembler::LoadHeapObject(Register result, void MacroAssembler::PushHeapObject(Handle<HeapObject> object) { + ALLOW_HANDLE_DEREF(isolate(), "using raw address"); if (isolate()->heap()->InNewSpace(*object)) { Handle<JSGlobalPropertyCell> cell = isolate()->factory()->NewJSGlobalPropertyCell(object); @@ -2531,10 +2538,7 @@ void MacroAssembler::VerifyX87StackDepth(uint32_t depth) { and_(eax, kTopMask); shr(eax, 11); cmp(eax, Immediate(tos)); - Label all_ok; - j(equal, &all_ok); Check(equal, "Unexpected FPU stack depth after instruction"); - bind(&all_ok); fnclex(); pop(eax); } diff --git a/deps/v8/src/ia32/macro-assembler-ia32.h b/deps/v8/src/ia32/macro-assembler-ia32.h index 159ae6e6b6..2b7641c9cb 100644 --- a/deps/v8/src/ia32/macro-assembler-ia32.h +++ b/deps/v8/src/ia32/macro-assembler-ia32.h @@ -271,6 +271,7 @@ class MacroAssembler: public Assembler { void PushHeapObject(Handle<HeapObject> object); void LoadObject(Register result, Handle<Object> object) { + ALLOW_HANDLE_DEREF(isolate(), "heap object check"); if (object->IsHeapObject()) { LoadHeapObject(result, Handle<HeapObject>::cast(object)); } else { @@ -320,6 +321,7 @@ class MacroAssembler: public Assembler { CallKind call_kind); void InvokeFunction(Handle<JSFunction> function, + const ParameterCount& expected, const ParameterCount& actual, InvokeFlag flag, const CallWrapper& call_wrapper, diff --git a/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc b/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc index d8f2e8f0e7..d635fe1a8a 100644 --- a/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc +++ b/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc @@ -401,7 +401,7 @@ void RegExpMacroAssemblerIA32::CheckNotBackReferenceIgnoreCase( // Set isolate. __ mov(Operand(esp, 3 * kPointerSize), - Immediate(ExternalReference::isolate_address())); + Immediate(ExternalReference::isolate_address(isolate()))); // Set byte_length. __ mov(Operand(esp, 2 * kPointerSize), ebx); // Set byte_offset2. @@ -417,7 +417,7 @@ void RegExpMacroAssemblerIA32::CheckNotBackReferenceIgnoreCase( { AllowExternalCallThatCantCauseGC scope(masm_); ExternalReference compare = - ExternalReference::re_case_insensitive_compare_uc16(masm_->isolate()); + ExternalReference::re_case_insensitive_compare_uc16(isolate()); __ CallCFunction(compare, argument_count); } // Pop original values before reacting on result value. @@ -745,7 +745,7 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) { Label stack_ok; ExternalReference stack_limit = - ExternalReference::address_of_stack_limit(masm_->isolate()); + ExternalReference::address_of_stack_limit(isolate()); __ mov(ecx, esp); __ sub(ecx, Operand::StaticVariable(stack_limit)); // Handle it if the stack pointer is already below the stack limit. 
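A pattern recurring through these macro-assembler and regexp hunks: ExternalReference::isolate_address() now takes the Isolate* explicitly instead of resolving it internally (presumably via Isolate::Current()), and RegExpMacroAssemblerIA32 gains a small isolate() accessor (see the header hunk below) so call sites can drop the masm_->isolate() spelling. A condensed before/after sketch:

    // Before: the isolate was looked up implicitly inside the reference.
    //   __ mov(Operand(esp, 0),
    //          Immediate(ExternalReference::isolate_address()));
    // After: the caller threads its own isolate through explicitly.
    __ mov(Operand(esp, 0),
           Immediate(ExternalReference::isolate_address(isolate())));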
@@ -972,12 +972,12 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) { static const int num_arguments = 3; __ PrepareCallCFunction(num_arguments, ebx); __ mov(Operand(esp, 2 * kPointerSize), - Immediate(ExternalReference::isolate_address())); + Immediate(ExternalReference::isolate_address(isolate()))); __ lea(eax, Operand(ebp, kStackHighEnd)); __ mov(Operand(esp, 1 * kPointerSize), eax); __ mov(Operand(esp, 0 * kPointerSize), backtrack_stackpointer()); ExternalReference grow_stack = - ExternalReference::re_grow_stack(masm_->isolate()); + ExternalReference::re_grow_stack(isolate()); __ CallCFunction(grow_stack, num_arguments); // If return NULL, we have failed to grow the stack, and // must exit with a stack-overflow exception. @@ -1002,10 +1002,10 @@ Handle<HeapObject> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) { CodeDesc code_desc; masm_->GetCode(&code_desc); Handle<Code> code = - masm_->isolate()->factory()->NewCode(code_desc, - Code::ComputeFlags(Code::REGEXP), - masm_->CodeObject()); - PROFILE(masm_->isolate(), RegExpCodeCreateEvent(*code, *source)); + isolate()->factory()->NewCode(code_desc, + Code::ComputeFlags(Code::REGEXP), + masm_->CodeObject()); + PROFILE(isolate(), RegExpCodeCreateEvent(*code, *source)); return Handle<HeapObject>::cast(code); } @@ -1161,7 +1161,7 @@ void RegExpMacroAssemblerIA32::CallCheckStackGuardState(Register scratch) { __ lea(eax, Operand(esp, -kPointerSize)); __ mov(Operand(esp, 0 * kPointerSize), eax); ExternalReference check_stack_guard = - ExternalReference::re_check_stack_guard_state(masm_->isolate()); + ExternalReference::re_check_stack_guard_state(isolate()); __ CallCFunction(check_stack_guard, num_arguments); } @@ -1353,7 +1353,7 @@ void RegExpMacroAssemblerIA32::CheckPreemption() { // Check for preemption. Label no_preempt; ExternalReference stack_limit = - ExternalReference::address_of_stack_limit(masm_->isolate()); + ExternalReference::address_of_stack_limit(isolate()); __ cmp(esp, Operand::StaticVariable(stack_limit)); __ j(above, &no_preempt); @@ -1366,7 +1366,7 @@ void RegExpMacroAssemblerIA32::CheckPreemption() { void RegExpMacroAssemblerIA32::CheckStackLimit() { Label no_stack_overflow; ExternalReference stack_limit = - ExternalReference::address_of_regexp_stack_limit(masm_->isolate()); + ExternalReference::address_of_regexp_stack_limit(isolate()); __ cmp(backtrack_stackpointer(), Operand::StaticVariable(stack_limit)); __ j(above, &no_stack_overflow); diff --git a/deps/v8/src/ia32/regexp-macro-assembler-ia32.h b/deps/v8/src/ia32/regexp-macro-assembler-ia32.h index 7aea385863..6040d8058a 100644 --- a/deps/v8/src/ia32/regexp-macro-assembler-ia32.h +++ b/deps/v8/src/ia32/regexp-macro-assembler-ia32.h @@ -30,6 +30,7 @@ #include "ia32/assembler-ia32.h" #include "ia32/assembler-ia32-inl.h" +#include "macro-assembler.h" namespace v8 { namespace internal { @@ -196,6 +197,8 @@ class RegExpMacroAssemblerIA32: public NativeRegExpMacroAssembler { // (ecx) and increments it by a word size. inline void Pop(Register target); + Isolate* isolate() const { return masm_->isolate(); } + MacroAssembler* masm_; // Which mode to generate code for (ASCII or UC16). diff --git a/deps/v8/src/ia32/stub-cache-ia32.cc b/deps/v8/src/ia32/stub-cache-ia32.cc index cb3c68ea8e..a44beec299 100644 --- a/deps/v8/src/ia32/stub-cache-ia32.cc +++ b/deps/v8/src/ia32/stub-cache-ia32.cc @@ -635,7 +635,9 @@ class CallInterceptorCompiler BASE_EMBEDDED { CallKind call_kind = CallICBase::Contextual::decode(extra_state_) ? 
CALL_AS_FUNCTION : CALL_AS_METHOD; - __ InvokeFunction(optimization.constant_function(), arguments_, + Handle<JSFunction> function = optimization.constant_function(); + ParameterCount expected(function); + __ InvokeFunction(function, expected, arguments_, JUMP_FUNCTION, NullCallWrapper(), call_kind); } @@ -2056,8 +2058,9 @@ Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall( CallKind call_kind = CallICBase::Contextual::decode(extra_state_) ? CALL_AS_FUNCTION : CALL_AS_METHOD; - __ InvokeFunction(function, arguments(), JUMP_FUNCTION, - NullCallWrapper(), call_kind); + ParameterCount expected(function); + __ InvokeFunction(function, expected, arguments(), + JUMP_FUNCTION, NullCallWrapper(), call_kind); __ bind(&miss); // ecx: function name. @@ -2186,8 +2189,9 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall( // Tail call the full function. We do not have to patch the receiver // because the function makes no use of it. __ bind(&slow); - __ InvokeFunction(function, arguments(), JUMP_FUNCTION, - NullCallWrapper(), CALL_AS_METHOD); + ParameterCount expected(function); + __ InvokeFunction(function, expected, arguments(), + JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD); __ bind(&miss); // ecx: function name. @@ -2291,8 +2295,9 @@ Handle<Code> CallStubCompiler::CompileMathAbsCall( // Tail call the full function. We do not have to patch the receiver // because the function makes no use of it. __ bind(&slow); - __ InvokeFunction(function, arguments(), JUMP_FUNCTION, - NullCallWrapper(), CALL_AS_METHOD); + ParameterCount expected(function); + __ InvokeFunction(function, expected, arguments(), + JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD); __ bind(&miss); // ecx: function name. @@ -2474,8 +2479,9 @@ void CallStubCompiler::CompileHandlerBackend(Handle<JSFunction> function) { CallKind call_kind = CallICBase::Contextual::decode(extra_state_) ? CALL_AS_FUNCTION : CALL_AS_METHOD; - __ InvokeFunction(function, arguments(), JUMP_FUNCTION, - NullCallWrapper(), call_kind); + ParameterCount expected(function); + __ InvokeFunction(function, expected, arguments(), + JUMP_FUNCTION, NullCallWrapper(), call_kind); } @@ -2687,8 +2693,9 @@ void StoreStubCompiler::GenerateStoreViaSetter( __ push(edx); __ push(eax); ParameterCount actual(1); - __ InvokeFunction(setter, actual, CALL_FUNCTION, NullCallWrapper(), - CALL_AS_METHOD); + ParameterCount expected(setter); + __ InvokeFunction(setter, expected, actual, + CALL_FUNCTION, NullCallWrapper(), CALL_AS_METHOD); } else { // If we generate a global code snippet for deoptimization only, remember // the place to continue after deoptimization. @@ -2900,8 +2907,9 @@ void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm, // Call the JavaScript getter with the receiver on the stack. __ push(edx); ParameterCount actual(0); - __ InvokeFunction(getter, actual, CALL_FUNCTION, NullCallWrapper(), - CALL_AS_METHOD); + ParameterCount expected(getter); + __ InvokeFunction(getter, expected, actual, + CALL_FUNCTION, NullCallWrapper(), CALL_AS_METHOD); } else { // If we generate a global code snippet for deoptimization only, remember // the place to continue after deoptimization. 
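Stepping back to the Lithium-builder hunks earlier in this section: DoStoreKeyed and DoStoreNamedField now gate constant operands behind StoreConstantValueAllowed. Reproduced as a sketch; the rationale comment is editorial, not from the patch:

    // A constant may stand in for a register only if its encoding can never
    // be invalidated: Smis and doubles need no relocation, and
    // immortal-immovable heap objects are never moved by the GC.
    static bool StoreConstantValueAllowed(HValue* value) {
      if (value->IsConstant()) {
        HConstant* constant_value = HConstant::cast(value);
        return constant_value->HasSmiValue()
            || constant_value->HasDoubleValue()
            || constant_value->ImmortalImmovable();
      }
      return false;
    }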
diff --git a/deps/v8/src/ic.cc b/deps/v8/src/ic.cc index 78fb29753c..40676abc3d 100644 --- a/deps/v8/src/ic.cc +++ b/deps/v8/src/ic.cc @@ -347,6 +347,7 @@ void IC::Clear(Address address) { case Code::CALL_IC: return CallIC::Clear(address, target); case Code::KEYED_CALL_IC: return KeyedCallIC::Clear(address, target); case Code::COMPARE_IC: return CompareIC::Clear(address, target); + case Code::COMPARE_NIL_IC: return CompareNilIC::Clear(address, target); case Code::UNARY_OP_IC: case Code::BINARY_OP_IC: case Code::TO_BOOLEAN_IC: @@ -877,7 +878,7 @@ MaybeObject* LoadIC::Load(State state, if (FLAG_trace_ic) PrintF("[LoadIC : +#prototype /function]\n"); #endif } - return Accessors::FunctionGetPrototype(*object, 0); + return *Accessors::FunctionGetPrototype(object); } } @@ -887,7 +888,7 @@ MaybeObject* LoadIC::Load(State state, if (kind() == Code::KEYED_LOAD_IC && name->AsArrayIndex(&index)) { // Rewrite to the generic keyed load stub. if (FLAG_use_ic) set_target(*generic_stub()); - return Runtime::GetElementOrCharAt(isolate(), object, index); + return Runtime::GetElementOrCharAtOrFail(isolate(), object, index); } // Named lookup in the object. @@ -922,7 +923,7 @@ MaybeObject* LoadIC::Load(State state, } // Get the property. - return object->GetProperty(*object, &lookup, *name, &attr); + return Object::GetPropertyOrFail(object, object, &lookup, name, &attr); } @@ -1260,7 +1261,7 @@ static Handle<Object> TryConvertKey(Handle<Object> key, Isolate* isolate) { // non-smi keys of keyed loads/stores to a smi or a string. if (key->IsHeapNumber()) { double value = Handle<HeapNumber>::cast(key)->value(); - if (isnan(value)) { + if (std::isnan(value)) { key = isolate->factory()->nan_string(); } else { int int_value = FastD2I(value); @@ -1476,8 +1477,8 @@ MaybeObject* StoreIC::Store(State state, JSReceiver::StoreFromKeyed store_mode) { // Handle proxies. if (object->IsJSProxy()) { - return JSProxy::cast(*object)-> - SetProperty(*name, *value, NONE, strict_mode); + return JSReceiver::SetPropertyOrFail( + Handle<JSReceiver>::cast(object), name, value, NONE, strict_mode); } // If the object is undefined or null it's illegal to try to set any @@ -1509,7 +1510,8 @@ MaybeObject* StoreIC::Store(State state, // Observed objects are always modified through the runtime. if (FLAG_harmony_observation && receiver->map()->is_observed()) { - return receiver->SetProperty(*name, *value, NONE, strict_mode, store_mode); + return JSReceiver::SetPropertyOrFail( + receiver, name, value, NONE, strict_mode, store_mode); } // Use specialized code for setting the length of arrays with fast @@ -1524,7 +1526,8 @@ MaybeObject* StoreIC::Store(State state, StoreArrayLengthStub(kind(), strict_mode).GetCode(isolate()); set_target(*stub); TRACE_IC("StoreIC", name, state, *stub); - return receiver->SetProperty(*name, *value, NONE, strict_mode, store_mode); + return JSReceiver::SetPropertyOrFail( + receiver, name, value, NONE, strict_mode, store_mode); } if (receiver->IsJSGlobalProxy()) { @@ -1537,7 +1540,8 @@ MaybeObject* StoreIC::Store(State state, set_target(*stub); TRACE_IC("StoreIC", name, state, *stub); } - return receiver->SetProperty(*name, *value, NONE, strict_mode, store_mode); + return JSReceiver::SetPropertyOrFail( + receiver, name, value, NONE, strict_mode, store_mode); } LookupResult lookup(isolate()); @@ -1553,7 +1557,8 @@ MaybeObject* StoreIC::Store(State state, } // Set the property. 
- return receiver->SetProperty(*name, *value, NONE, strict_mode, store_mode); + return JSReceiver::SetPropertyOrFail( + receiver, name, value, NONE, strict_mode, store_mode); } @@ -2766,6 +2771,100 @@ RUNTIME_FUNCTION(Code*, CompareIC_Miss) { } +Code* CompareNilIC::GetRawUninitialized(EqualityKind kind, + NilValue nil) { + CompareNilICStub stub(kind, nil); + Code* code = NULL; + CHECK(stub.FindCodeInCache(&code, Isolate::Current())); + return code; +} + + +void CompareNilIC::Clear(Address address, Code* target) { + if (target->ic_state() == UNINITIALIZED) return; + Code::ExtraICState state = target->extended_extra_ic_state(); + + EqualityKind kind = + CompareNilICStub::EqualityKindFromExtraICState(state); + NilValue nil = + CompareNilICStub::NilValueFromExtraICState(state); + + SetTargetAtAddress(address, GetRawUninitialized(kind, nil)); +} + + +MaybeObject* CompareNilIC::DoCompareNilSlow(EqualityKind kind, + NilValue nil, + Handle<Object> object) { + if (kind == kStrictEquality) { + if (nil == kNullValue) { + return Smi::FromInt(object->IsNull()); + } else { + return Smi::FromInt(object->IsUndefined()); + } + } + if (object->IsNull() || object->IsUndefined()) { + return Smi::FromInt(true); + } + return Smi::FromInt(object->IsUndetectableObject()); +} + + +MaybeObject* CompareNilIC::CompareNil(Handle<Object> object) { + Code::ExtraICState extra_ic_state = target()->extended_extra_ic_state(); + + // Extract the current supported types from the patched IC and calculate what + // types must be supported as a result of the miss. + bool already_monomorphic; + CompareNilICStub::Types types = + CompareNilICStub::GetPatchedICFlags(extra_ic_state, + object, &already_monomorphic); + + EqualityKind kind = + CompareNilICStub::EqualityKindFromExtraICState(extra_ic_state); + NilValue nil = + CompareNilICStub::NilValueFromExtraICState(extra_ic_state); + + // Find or create the specialized stub to support the new set of types. + CompareNilICStub stub(kind, nil, types); + Handle<Code> code; + if ((types & CompareNilICStub::kCompareAgainstMonomorphicMap) != 0) { + Handle<Map> monomorphic_map(already_monomorphic + ? 
target()->FindFirstMap() + : HeapObject::cast(*object)->map()); + code = isolate()->stub_cache()->ComputeCompareNil(monomorphic_map, + nil, + stub.GetTypes()); + } else { + code = stub.GetCode(isolate()); + } + + patch(*code); + + return DoCompareNilSlow(kind, nil, object); +} + + +void CompareNilIC::patch(Code* code) { + set_target(code); +} + + +RUNTIME_FUNCTION(MaybeObject*, CompareNilIC_Miss) { + HandleScope scope(isolate); + Handle<Object> object = args.at<Object>(0); + CompareNilIC ic(isolate); + return ic.CompareNil(object); +} + + +RUNTIME_FUNCTION(MaybeObject*, Unreachable) { + UNREACHABLE(); + CHECK(false); + return isolate->heap()->undefined_value(); +} + + RUNTIME_FUNCTION(MaybeObject*, ToBoolean_Patch) { ASSERT(args.length() == 3); diff --git a/deps/v8/src/ic.h b/deps/v8/src/ic.h index 55b5661929..4bf259a2f4 100644 --- a/deps/v8/src/ic.h +++ b/deps/v8/src/ic.h @@ -59,6 +59,8 @@ namespace internal { ICU(UnaryOp_Patch) \ ICU(BinaryOp_Patch) \ ICU(CompareIC_Miss) \ + ICU(CompareNilIC_Miss) \ + ICU(Unreachable) \ ICU(ToBoolean_Patch) // // IC is the base class for LoadIC, StoreIC, CallIC, KeyedLoadIC, @@ -775,6 +777,26 @@ class CompareIC: public IC { }; +class CompareNilIC: public IC { + public: + explicit CompareNilIC(Isolate* isolate) : IC(EXTRA_CALL_FRAME, isolate) {} + + MUST_USE_RESULT MaybeObject* CompareNil(Handle<Object> object); + + static Handle<Code> GetUninitialized(); + + static Code* GetRawUninitialized(EqualityKind kind, NilValue nil); + + static void Clear(Address address, Code* target); + + void patch(Code* code); + + static MUST_USE_RESULT MaybeObject* DoCompareNilSlow(EqualityKind kind, + NilValue nil, + Handle<Object> object); +}; + + class ToBooleanIC: public IC { public: explicit ToBooleanIC(Isolate* isolate) : IC(NO_EXTRA_FRAME, isolate) { } @@ -789,6 +811,8 @@ void PatchInlinedSmiCode(Address address, InlinedSmiCheck check); DECLARE_RUNTIME_FUNCTION(MaybeObject*, KeyedLoadIC_MissFromStubFailure); DECLARE_RUNTIME_FUNCTION(MaybeObject*, KeyedStoreIC_MissFromStubFailure); +DECLARE_RUNTIME_FUNCTION(MaybeObject*, CompareNilIC_Miss); + } } // namespace v8::internal diff --git a/deps/v8/src/isolate.cc b/deps/v8/src/isolate.cc index ca2c0257f6..79a9020bfc 100644 --- a/deps/v8/src/isolate.cc +++ b/deps/v8/src/isolate.cc @@ -507,6 +507,29 @@ void Isolate::IterateDeferredHandles(ObjectVisitor* visitor) { } +#ifdef DEBUG +bool Isolate::IsDeferredHandle(Object** handle) { + // Each DeferredHandles instance keeps the handles to one job in the + // parallel recompilation queue, containing a list of blocks. Each block + // contains kHandleBlockSize handles except for the first block, which may + // not be fully filled. + // We iterate through all the blocks to see whether the argument handle + // belongs to one of the blocks. If so, it is deferred. + for (DeferredHandles* deferred = deferred_handles_head_; + deferred != NULL; + deferred = deferred->next_) { + List<Object**>* blocks = &deferred->blocks_; + for (int i = 0; i < blocks->length(); i++) { + Object** block_limit = (i == 0) ? deferred->first_block_limit_ + : blocks->at(i) + kHandleBlockSize; + if (blocks->at(i) <= handle && handle < block_limit) return true; + } + } + return false; +} +#endif // DEBUG + + void Isolate::RegisterTryCatchHandler(v8::TryCatch* that) { // The ARM simulator has a separate JS stack. 
We therefore register // the C++ try catch handler with the simulator and get back an @@ -907,7 +930,7 @@ void Isolate::ReportFailedAccessCheck(JSObject* receiver, v8::AccessType type) { HandleScope scope(this); Handle<JSObject> receiver_handle(receiver); Handle<Object> data(AccessCheckInfo::cast(data_obj)->data(), this); - { VMState state(this, EXTERNAL); + { VMState<EXTERNAL> state(this); thread_local_top()->failed_access_check_callback_( v8::Utils::ToLocal(receiver_handle), type, @@ -986,7 +1009,7 @@ bool Isolate::MayNamedAccess(JSObject* receiver, Object* key, bool result = false; { // Leaving JavaScript. - VMState state(this, EXTERNAL); + VMState<EXTERNAL> state(this); result = callback(v8::Utils::ToLocal(receiver_handle), v8::Utils::ToLocal(key_handle), type, @@ -1028,7 +1051,7 @@ bool Isolate::MayIndexedAccess(JSObject* receiver, bool result = false; { // Leaving JavaScript. - VMState state(this, EXTERNAL); + VMState<EXTERNAL> state(this); result = callback(v8::Utils::ToLocal(receiver_handle), index, type, @@ -1060,7 +1083,7 @@ Failure* Isolate::StackOverflow() { GetProperty(Handle<JSObject>::cast(error), "stackTraceLimit"); if (!stack_trace_limit->IsNumber()) return Failure::Exception(); double dlimit = stack_trace_limit->Number(); - int limit = isnan(dlimit) ? 0 : static_cast<int>(dlimit); + int limit = std::isnan(dlimit) ? 0 : static_cast<int>(dlimit); Handle<JSArray> stack_trace = CaptureSimpleStackTrace( exception, factory()->undefined_value(), limit); @@ -1077,6 +1100,23 @@ Failure* Isolate::TerminateExecution() { } +void Isolate::CancelTerminateExecution() { + if (try_catch_handler()) { + try_catch_handler()->has_terminated_ = false; + } + if (has_pending_exception() && + pending_exception() == heap_.termination_exception()) { + thread_local_top()->external_caught_exception_ = false; + clear_pending_exception(); + } + if (has_scheduled_exception() && + scheduled_exception() == heap_.termination_exception()) { + thread_local_top()->external_caught_exception_ = false; + clear_scheduled_exception(); + } +} + + Failure* Isolate::Throw(Object* exception, MessageLocation* location) { DoThrow(exception, location); return Failure::Exception(); @@ -1740,8 +1780,8 @@ Isolate::Isolate() memset(code_kind_statistics_, 0, sizeof(code_kind_statistics_[0]) * Code::NUMBER_OF_KINDS); - allow_compiler_thread_handle_deref_ = true; - allow_execution_thread_handle_deref_ = true; + compiler_thread_handle_deref_state_ = HandleDereferenceGuard::ALLOW; + execution_thread_handle_deref_state_ = HandleDereferenceGuard::ALLOW; #endif #ifdef ENABLE_DEBUGGER_SUPPORT @@ -1990,12 +2030,14 @@ void Isolate::PropagatePendingExceptionToExternalTryCatch() { } else if (thread_local_top_.pending_exception_ == heap()->termination_exception()) { try_catch_handler()->can_continue_ = false; + try_catch_handler()->has_terminated_ = true; try_catch_handler()->exception_ = heap()->null_value(); } else { // At this point all non-object (failure) exceptions have // been dealt with so this shouldn't fail. 
ASSERT(!pending_exception()->IsFailure()); try_catch_handler()->can_continue_ = true; + try_catch_handler()->has_terminated_ = false; try_catch_handler()->exception_ = pending_exception(); if (!thread_local_top_.pending_message_obj_->IsTheHole()) { try_catch_handler()->message_ = thread_local_top_.pending_message_obj_; @@ -2009,7 +2051,7 @@ void Isolate::InitializeLoggingAndCounters() { logger_ = new Logger(this); } if (counters_ == NULL) { - counters_ = new Counters; + counters_ = new Counters(this); } } @@ -2074,7 +2116,7 @@ bool Isolate::Init(Deserializer* des) { heap_profiler_ = new HeapProfiler(heap()); // Enable logging before setting up the heap - logger_->SetUp(); + logger_->SetUp(this); // Initialize other runtime facilities #if defined(USE_SIMULATOR) @@ -2201,6 +2243,8 @@ bool Isolate::Init(Deserializer* des) { DONT_TRACK_ALLOCATION_SITE, 0); stub.InitializeInterfaceDescriptor( this, code_stub_interface_descriptor(CodeStub::FastCloneShallowArray)); + CompareNilICStub::InitializeForIsolate(this); + ArrayConstructorStubBase::InstallDescriptors(this); } if (FLAG_parallel_recompilation) optimizing_compiler_thread_.Start(); @@ -2360,27 +2404,28 @@ void Isolate::UnlinkDeferredHandles(DeferredHandles* deferred) { #ifdef DEBUG -bool Isolate::AllowHandleDereference() { - if (allow_execution_thread_handle_deref_ && - allow_compiler_thread_handle_deref_) { +HandleDereferenceGuard::State Isolate::HandleDereferenceGuardState() { + if (execution_thread_handle_deref_state_ == HandleDereferenceGuard::ALLOW && + compiler_thread_handle_deref_state_ == HandleDereferenceGuard::ALLOW) { // Short-cut to avoid polling thread id. - return true; + return HandleDereferenceGuard::ALLOW; } if (FLAG_parallel_recompilation && optimizing_compiler_thread()->IsOptimizerThread()) { - return allow_compiler_thread_handle_deref_; + return compiler_thread_handle_deref_state_; } else { - return allow_execution_thread_handle_deref_; + return execution_thread_handle_deref_state_; } } -void Isolate::SetAllowHandleDereference(bool allow) { +void Isolate::SetHandleDereferenceGuardState( + HandleDereferenceGuard::State state) { if (FLAG_parallel_recompilation && optimizing_compiler_thread()->IsOptimizerThread()) { - allow_compiler_thread_handle_deref_ = allow; + compiler_thread_handle_deref_state_ = state; } else { - allow_execution_thread_handle_deref_ = allow; + execution_thread_handle_deref_state_ = state; } } #endif diff --git a/deps/v8/src/isolate.h b/deps/v8/src/isolate.h index 927ad0e0fb..71d86f4ac0 100644 --- a/deps/v8/src/isolate.h +++ b/deps/v8/src/isolate.h @@ -85,7 +85,7 @@ class SweeperThread; class ThreadManager; class ThreadState; class ThreadVisitor; // Defined in v8threads.h -class VMState; +template <StateTag Tag> class VMState; // 'void function pointer', used to roundtrip the // ExternalReference::ExternalReferenceRedirector since we can not include @@ -783,6 +783,7 @@ class Isolate { // Out of resource exception helpers. 
Failure* StackOverflow(); Failure* TerminateExecution(); + void CancelTerminateExecution(); // Administration void Iterate(ObjectVisitor* v); @@ -990,9 +991,9 @@ class Isolate { int* code_kind_statistics() { return code_kind_statistics_; } - bool AllowHandleDereference(); + HandleDereferenceGuard::State HandleDereferenceGuardState(); - void SetAllowHandleDereference(bool allow); + void SetHandleDereferenceGuardState(HandleDereferenceGuard::State state); #endif #if defined(V8_TARGET_ARCH_ARM) && !defined(__arm__) || \ @@ -1030,7 +1031,7 @@ class Isolate { return thread_local_top_.current_vm_state_; } - void SetCurrentVMState(StateTag state) { + void set_current_vm_state(StateTag state) { thread_local_top_.current_vm_state_ = state; } @@ -1073,6 +1074,10 @@ class Isolate { void LinkDeferredHandles(DeferredHandles* deferred_handles); void UnlinkDeferredHandles(DeferredHandles* deferred_handles); +#ifdef DEBUG + bool IsDeferredHandle(Object** location); +#endif // DEBUG + OptimizingCompilerThread* optimizing_compiler_thread() { return &optimizing_compiler_thread_; } @@ -1291,8 +1296,8 @@ class Isolate { JSObject::SpillInformation js_spill_information_; int code_kind_statistics_[Code::NUMBER_OF_KINDS]; - bool allow_compiler_thread_handle_deref_; - bool allow_execution_thread_handle_deref_; + HandleDereferenceGuard::State compiler_thread_handle_deref_state_; + HandleDereferenceGuard::State execution_thread_handle_deref_state_; #endif #ifdef ENABLE_DEBUGGER_SUPPORT diff --git a/deps/v8/src/json-stringifier.h b/deps/v8/src/json-stringifier.h index 3bab324e14..47a01297a8 100644 --- a/deps/v8/src/json-stringifier.h +++ b/deps/v8/src/json-stringifier.h @@ -522,7 +522,7 @@ BasicJsonStringifier::Result BasicJsonStringifier::SerializeSmi(Smi* object) { BasicJsonStringifier::Result BasicJsonStringifier::SerializeDouble( double number) { - if (isinf(number) || isnan(number)) { + if (std::isinf(number) || std::isnan(number)) { AppendAscii("null"); return SUCCESS; } diff --git a/deps/v8/src/list-inl.h b/deps/v8/src/list-inl.h index 408859e456..d815a7e227 100644 --- a/deps/v8/src/list-inl.h +++ b/deps/v8/src/list-inl.h @@ -216,7 +216,7 @@ void List<T, P>::Sort(int (*cmp)(const T* x, const T* y)) { template<typename T, class P> void List<T, P>::Sort() { - Sort(PointerValueCompare<T>); + ToVector().Sort(); } diff --git a/deps/v8/src/lithium.cc b/deps/v8/src/lithium.cc index b4a57f520b..10d7f71332 100644 --- a/deps/v8/src/lithium.cc +++ b/deps/v8/src/lithium.cc @@ -177,8 +177,8 @@ void LEnvironment::PrintTo(StringStream* stream) { if (deoptimization_index() != Safepoint::kNoDeoptimizationIndex) { stream->Add("deopt_id=%d|", deoptimization_index()); } - stream->Add("[parameters=%d|", parameter_count()); - stream->Add("[arguments_stack_height=%d|", arguments_stack_height()); + stream->Add("parameters=%d|", parameter_count()); + stream->Add("arguments_stack_height=%d|", arguments_stack_height()); for (int i = 0; i < values_.length(); ++i) { if (i != 0) stream->Add(";"); if (values_[i] == NULL) { @@ -329,7 +329,6 @@ void LChunk::MarkEmptyBlocks() { can_eliminate = false; } } - if (can_eliminate) { label->set_replacement(GetLabel(goto_instr->block_id())); } @@ -341,6 +340,7 @@ void LChunk::MarkEmptyBlocks() { void LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) { LInstructionGap* gap = new(graph_->zone()) LInstructionGap(block); + gap->set_hydrogen_value(instr->hydrogen_value()); int index = -1; if (instr->IsControl()) { instructions_.Add(gap, zone()); diff --git a/deps/v8/src/log-utils.cc 
b/deps/v8/src/log-utils.cc index cef7dbab24..a44dca0765 100644 --- a/deps/v8/src/log-utils.cc +++ b/deps/v8/src/log-utils.cc @@ -79,13 +79,8 @@ void Log::Initialize() { FLAG_prof_auto = false; } - bool open_log_file = FLAG_log || FLAG_log_runtime || FLAG_log_api - || FLAG_log_code || FLAG_log_gc || FLAG_log_handles || FLAG_log_suspect - || FLAG_log_regexp || FLAG_log_state_changes || FLAG_ll_prof - || FLAG_log_internal_timer_events; - // If we're logging anything, we need to open the log file. - if (open_log_file) { + if (Log::InitLogAtStart()) { if (strcmp(FLAG_logfile, "-") == 0) { OpenStdout(); } else if (strcmp(FLAG_logfile, kLogToTemporaryFile) == 0) { diff --git a/deps/v8/src/log-utils.h b/deps/v8/src/log-utils.h index d0cb828983..a1867f2582 100644 --- a/deps/v8/src/log-utils.h +++ b/deps/v8/src/log-utils.h @@ -44,6 +44,12 @@ class Log { // Disables logging, but preserves acquired resources. void stop() { is_stopped_ = true; } + static bool InitLogAtStart() { + return FLAG_log || FLAG_log_runtime || FLAG_log_api + || FLAG_log_code || FLAG_log_gc || FLAG_log_handles || FLAG_log_suspect + || FLAG_log_regexp || FLAG_ll_prof || FLAG_log_internal_timer_events; + } + // Frees all resources acquired in Initialize and Open... functions. // When a temporary file is used for the log, returns its stream descriptor, // leaving the file open. diff --git a/deps/v8/src/log.cc b/deps/v8/src/log.cc index 55f5637d55..e52d0f3969 100644 --- a/deps/v8/src/log.cc +++ b/deps/v8/src/log.cc @@ -335,15 +335,6 @@ class Logger::NameBuffer { void AppendString(String* str) { if (str == NULL) return; - if (str->HasOnlyAsciiChars()) { - int utf8_length = Min(str->length(), kUtf8BufferSize - utf8_pos_); - String::WriteToFlat(str, - reinterpret_cast<uint8_t*>(utf8_buffer_ + utf8_pos_), - 0, - utf8_length); - utf8_pos_ += utf8_length; - return; - } int uc16_length = Min(str->length(), kUtf16BufferSize); String::WriteToFlat(str, utf16_buffer, 0, uc16_length); int previous = unibrow::Utf16::kNoPreviousCharacter; @@ -658,13 +649,17 @@ void Logger::TimerEvent(StartEnd se, const char* name) { } -void Logger::EnterExternal() { - LOG(ISOLATE, TimerEvent(START, TimerEventScope::v8_external)); +void Logger::EnterExternal(Isolate* isolate) { + LOG(isolate, TimerEvent(START, TimerEventScope::v8_external)); + ASSERT(isolate->current_vm_state() == JS); + isolate->set_current_vm_state(EXTERNAL); } -void Logger::LeaveExternal() { - LOG(ISOLATE, TimerEvent(END, TimerEventScope::v8_external)); +void Logger::LeaveExternal(Isolate* isolate) { + LOG(isolate, TimerEvent(END, TimerEventScope::v8_external)); + ASSERT(isolate->current_vm_state() == EXTERNAL); + isolate->set_current_vm_state(JS); } @@ -1448,7 +1443,13 @@ void Logger::TickEvent(TickSample* sample, bool overflow) { msg.Append(','); msg.AppendAddress(sample->sp); msg.Append(",%ld", static_cast<int>(OS::Ticks() - epoch_)); - msg.AppendAddress(sample->external_callback); + if (sample->has_external_callback) { + msg.Append(",1,"); + msg.AppendAddress(sample->external_callback); + } else { + msg.Append(",0,"); + msg.AppendAddress(sample->tos); + } msg.Append(",%d", static_cast<int>(sample->state)); if (overflow) { msg.Append(",overflow"); @@ -1589,6 +1590,7 @@ void Logger::LogCodeObject(Object* object) { case Code::UNARY_OP_IC: // fall through case Code::BINARY_OP_IC: // fall through case Code::COMPARE_IC: // fall through + case Code::COMPARE_NIL_IC: // fall through case Code::TO_BOOLEAN_IC: // fall through case Code::STUB: description = @@ -1810,7 +1812,7 @@ void 
Logger::LogAccessorCallbacks() { } -bool Logger::SetUp() { +bool Logger::SetUp(Isolate* isolate) { // Tests and EnsureInitialize() can call this twice in a row. It's harmless. if (is_initialized_) return true; is_initialized_ = true; @@ -1826,23 +1828,13 @@ bool Logger::SetUp() { FLAG_prof_auto = false; } - // TODO(isolates): this assert introduces cyclic dependency (logger - // -> thread local top -> heap -> logger). - // ASSERT(VMState::is_outermost_external()); - log_->Initialize(); if (FLAG_ll_prof) LogCodeInfo(); - Isolate* isolate = Isolate::Current(); ticker_ = new Ticker(isolate, kSamplingIntervalMs); - bool start_logging = FLAG_log || FLAG_log_runtime || FLAG_log_api - || FLAG_log_code || FLAG_log_gc || FLAG_log_handles || FLAG_log_suspect - || FLAG_log_regexp || FLAG_log_state_changes || FLAG_ll_prof - || FLAG_log_internal_timer_events; - - if (start_logging) { + if (Log::InitLogAtStart()) { logging_nesting_ = 1; } diff --git a/deps/v8/src/log.h b/deps/v8/src/log.h index 26833302aa..8db13df805 100644 --- a/deps/v8/src/log.h +++ b/deps/v8/src/log.h @@ -163,7 +163,7 @@ class Logger { #undef DECLARE_ENUM // Acquires resources for logging if the right flags are set. - bool SetUp(); + bool SetUp(Isolate* isolate); // Sets the current code event handler. void SetCodeEventHandler(uint32_t options, @@ -292,8 +292,8 @@ class Logger { void TimerEvent(StartEnd se, const char* name); - static void EnterExternal(); - static void LeaveExternal(); + static void EnterExternal(Isolate* isolate); + static void LeaveExternal(Isolate* isolate); class TimerEventScope { public: @@ -466,7 +466,7 @@ class Logger { friend class LogMessageBuilder; friend class TimeLog; friend class Profiler; - friend class VMState; + template <StateTag Tag> friend class VMState; friend class LoggerTestHelper; diff --git a/deps/v8/src/mark-compact.cc b/deps/v8/src/mark-compact.cc index f49179f67e..62dee48472 100644 --- a/deps/v8/src/mark-compact.cc +++ b/deps/v8/src/mark-compact.cc @@ -1939,14 +1939,14 @@ void MarkCompactCollector::MarkImplicitRefGroups() { ImplicitRefGroup* entry = ref_groups->at(i); ASSERT(entry != NULL); - if (!IsMarked(*entry->parent_)) { + if (!IsMarked(*entry->parent)) { (*ref_groups)[last++] = entry; continue; } - Object*** children = entry->children_; + Object*** children = entry->children; // A parent object is marked, so mark all child heap objects. - for (size_t j = 0; j < entry->length_; ++j) { + for (size_t j = 0; j < entry->length; ++j) { if ((*children[j])->IsHeapObject()) { HeapObject* child = HeapObject::cast(*children[j]); MarkBit mark = Marking::MarkBitFrom(child); @@ -1956,7 +1956,7 @@ void MarkCompactCollector::MarkImplicitRefGroups() { // Once the entire group has been marked, dispose it because it's // not needed anymore. - entry->Dispose(); + delete entry; } ref_groups->Rewind(last); } @@ -3125,6 +3125,8 @@ void MarkCompactCollector::ProcessInvalidatedCode(ObjectVisitor* visitor) { void MarkCompactCollector::EvacuateNewSpaceAndCandidates() { + Heap::RelocationLock relocation_lock(heap()); + bool code_slots_filtering_required; { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE); code_slots_filtering_required = MarkInvalidatedCode(); diff --git a/deps/v8/src/math.js b/deps/v8/src/math.js index e5ab70cc1e..9ba1934b85 100644 --- a/deps/v8/src/math.js +++ b/deps/v8/src/math.js @@ -213,6 +213,13 @@ function MathTan(x) { return %_MathTan(x); } +// Non-standard extension. 
+function MathImul(x, y) { + if (!IS_NUMBER(x)) x = NonNumberToNumber(x); + if (!IS_NUMBER(y)) y = NonNumberToNumber(y); + return %NumberImul(x, y); +} + // ------------------------------------------------------------------- @@ -283,7 +290,8 @@ function SetUpMath() { "atan2", MathAtan2, "pow", MathPow, "max", MathMax, - "min", MathMin + "min", MathMin, + "imul", MathImul )); } diff --git a/deps/v8/src/messages.js b/deps/v8/src/messages.js index 67fe3ccf1f..15a39b7b85 100644 --- a/deps/v8/src/messages.js +++ b/deps/v8/src/messages.js @@ -31,6 +31,8 @@ var kMessages = { // Error cyclic_proto: ["Cyclic __proto__ value"], code_gen_from_strings: ["%0"], + generator_running: ["Generator is already running"], + generator_finished: ["Generator has already finished"], // TypeError unexpected_token: ["Unexpected token ", "%0"], unexpected_token_number: ["Unexpected number"], @@ -158,7 +160,7 @@ var kMessages = { symbol_to_string: ["Conversion from symbol to string"], invalid_module_path: ["Module does not export '", "%0", "', or export is not itself a module"], module_type_error: ["Module '", "%0", "' used improperly"], - module_export_undefined: ["Export '", "%0", "' is not defined in module"], + module_export_undefined: ["Export '", "%0", "' is not defined in module"] }; diff --git a/deps/v8/src/mips/assembler-mips.cc b/deps/v8/src/mips/assembler-mips.cc index e36b97f180..1b666ec6c0 100644 --- a/deps/v8/src/mips/assembler-mips.cc +++ b/deps/v8/src/mips/assembler-mips.cc @@ -237,10 +237,14 @@ void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) { // See assembler-mips-inl.h for inlined constructors. Operand::Operand(Handle<Object> handle) { +#ifdef DEBUG + Isolate* isolate = Isolate::Current(); +#endif + ALLOW_HANDLE_DEREF(isolate, "using and embedding raw address"); rm_ = no_reg; // Verify all Objects referred by code are NOT in new space. Object* obj = *handle; - ASSERT(!HEAP->InNewSpace(obj)); + ASSERT(!isolate->heap()->InNewSpace(obj)); if (obj->IsHeapObject()) { imm32_ = reinterpret_cast<intptr_t>(handle.location()); rmode_ = RelocInfo::EMBEDDED_OBJECT; diff --git a/deps/v8/src/mips/builtins-mips.cc b/deps/v8/src/mips/builtins-mips.cc index 1901f9c413..700bcc4a53 100644 --- a/deps/v8/src/mips/builtins-mips.cc +++ b/deps/v8/src/mips/builtins-mips.cc @@ -317,8 +317,7 @@ static void AllocateJSArray(MacroAssembler* masm, // entering the generic code. In both cases argc in a0 needs to be preserved. // Both registers are preserved by this code so no need to differentiate between // construct call and normal call. 
-static void ArrayNativeCode(MacroAssembler* masm, - Label* call_generic_code) { +void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code) { Counters* counters = masm->isolate()->counters(); Label argc_one_or_more, argc_two_or_more, not_empty_array, empty_array, has_non_smi_element, finish, cant_transition_map, not_double; @@ -546,7 +545,7 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) { } -void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) { +void Builtins::Generate_CommonArrayConstructCode(MacroAssembler* masm) { // ----------- S t a t e ------------- // -- a0 : number of arguments // -- a1 : constructor function @@ -566,48 +565,17 @@ void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) { __ GetObjectType(a3, a3, t0); __ Assert(eq, "Unexpected initial map for Array function (4)", t0, Operand(MAP_TYPE)); - - if (FLAG_optimize_constructed_arrays) { - // We should either have undefined in a2 or a valid jsglobalpropertycell - Label okay_here; - Handle<Object> undefined_sentinel( - masm->isolate()->heap()->undefined_value(), masm->isolate()); - Handle<Map> global_property_cell_map( - masm->isolate()->heap()->global_property_cell_map()); - __ Branch(&okay_here, eq, a2, Operand(undefined_sentinel)); - __ lw(a3, FieldMemOperand(a2, 0)); - __ Assert(eq, "Expected property cell in register a3", - a3, Operand(global_property_cell_map)); - __ bind(&okay_here); - } - } - - if (FLAG_optimize_constructed_arrays) { - Label not_zero_case, not_one_case; - __ Branch(¬_zero_case, ne, a0, Operand(zero_reg)); - ArrayNoArgumentConstructorStub no_argument_stub; - __ TailCallStub(&no_argument_stub); - - __ bind(¬_zero_case); - __ Branch(¬_one_case, gt, a0, Operand(1)); - ArraySingleArgumentConstructorStub single_argument_stub; - __ TailCallStub(&single_argument_stub); - - __ bind(¬_one_case); - ArrayNArgumentsConstructorStub n_argument_stub; - __ TailCallStub(&n_argument_stub); - } else { - Label generic_constructor; - // Run the native code for the Array function called as a constructor. - ArrayNativeCode(masm, &generic_constructor); - - // Jump to the generic construct code in case the specialized code cannot - // handle the construction. - __ bind(&generic_constructor); - Handle<Code> generic_construct_stub = - masm->isolate()->builtins()->JSConstructStubGeneric(); - __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET); } + Label generic_constructor; + // Run the native code for the Array function called as a constructor. + ArrayNativeCode(masm, &generic_constructor); + + // Jump to the generic construct code in case the specialized code cannot + // handle the construction. 
+ __ bind(&generic_constructor); + Handle<Code> generic_construct_stub = + masm->isolate()->builtins()->JSConstructStubGeneric(); + __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET); } diff --git a/deps/v8/src/mips/code-stubs-mips.cc b/deps/v8/src/mips/code-stubs-mips.cc index 27fd73231a..733c3694d2 100644 --- a/deps/v8/src/mips/code-stubs-mips.cc +++ b/deps/v8/src/mips/code-stubs-mips.cc @@ -97,16 +97,33 @@ void TransitionElementsKindStub::InitializeInterfaceDescriptor( } -static void InitializeArrayConstructorDescriptor(Isolate* isolate, +void CompareNilICStub::InitializeInterfaceDescriptor( + Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { + static Register registers[] = { a0 }; + descriptor->register_param_count_ = 1; + descriptor->register_params_ = registers; + descriptor->deoptimization_handler_ = + FUNCTION_ADDR(CompareNilIC_Miss); + descriptor->miss_handler_ = + ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate); +} + + +static void InitializeArrayConstructorDescriptor( + Isolate* isolate, + CodeStubInterfaceDescriptor* descriptor, + int constant_stack_parameter_count) { // register state - // a1 -- constructor function + // a0 -- number of arguments // a2 -- type info cell with elements kind - // a0 -- number of arguments to the constructor function - static Register registers[] = { a1, a2 }; - descriptor->register_param_count_ = 2; - // stack param count needs (constructor pointer, and single argument) - descriptor->stack_parameter_count_ = &a0; + static Register registers[] = { a2 }; + descriptor->register_param_count_ = 1; + if (constant_stack_parameter_count != 0) { + // stack param count needs (constructor pointer, and single argument) + descriptor->stack_parameter_count_ = &a0; + } + descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count; descriptor->register_params_ = registers; descriptor->function_mode_ = JS_FUNCTION_STUB_MODE; descriptor->deoptimization_handler_ = @@ -117,21 +134,21 @@ static void InitializeArrayConstructorDescriptor(Isolate* isolate, void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor( Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { - InitializeArrayConstructorDescriptor(isolate, descriptor); + InitializeArrayConstructorDescriptor(isolate, descriptor, 0); } void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor( Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { - InitializeArrayConstructorDescriptor(isolate, descriptor); + InitializeArrayConstructorDescriptor(isolate, descriptor, 1); } void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor( Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { - InitializeArrayConstructorDescriptor(isolate, descriptor); + InitializeArrayConstructorDescriptor(isolate, descriptor, -1); } @@ -161,6 +178,30 @@ static void EmitCheckForHeapNumber(MacroAssembler* masm, Register operand, } +void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) { + // Update the static counter each time a new code stub is generated. + Isolate* isolate = masm->isolate(); + isolate->counters()->code_stubs()->Increment(); + + CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor(isolate); + int param_count = descriptor->register_param_count_; + { + // Call the runtime system in a fresh internal frame. 
+ FrameScope scope(masm, StackFrame::INTERNAL); + ASSERT(descriptor->register_param_count_ == 0 || + a0.is(descriptor->register_params_[param_count - 1])); + // Push arguments + for (int i = 0; i < param_count; ++i) { + __ push(descriptor->register_params_[i]); + } + ExternalReference miss = descriptor->miss_handler_; + __ CallExternalReference(miss, descriptor->register_param_count_); + } + + __ Ret(); +} + + void ToNumberStub::Generate(MacroAssembler* masm) { // The ToNumber stub takes one argument in a0. Label check_heap_number, call_builtin; @@ -1561,7 +1602,7 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) { AllowExternalCallThatCantCauseGC scope(masm); __ PrepareCallCFunction(argument_count, fp_argument_count, scratch); - __ li(a0, Operand(ExternalReference::isolate_address())); + __ li(a0, Operand(ExternalReference::isolate_address(masm->isolate()))); __ CallCFunction( ExternalReference::store_buffer_overflow_function(masm->isolate()), argument_count); @@ -2487,12 +2528,17 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { __ Addu(scratch2, scratch1, Operand(0x40000000)); // If not try to return a heap number. __ Branch(&return_heap_number, lt, scratch2, Operand(zero_reg)); - // Check for minus zero. Return heap number for minus zero. + // Check for minus zero. Return heap number for minus zero if + // double results are allowed; otherwise transition. Label not_zero; __ Branch(¬_zero, ne, scratch1, Operand(zero_reg)); __ mfc1(scratch2, f11); __ And(scratch2, scratch2, HeapNumber::kSignMask); - __ Branch(&return_heap_number, ne, scratch2, Operand(zero_reg)); + __ Branch(result_type_ <= BinaryOpIC::INT32 ? &transition + : &return_heap_number, + ne, + scratch2, + Operand(zero_reg)); __ bind(¬_zero); // Tag the result and return. @@ -2505,21 +2551,18 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { __ bind(&return_heap_number); // Return a heap number, or fall through to type transition or runtime // call if we can't. - if (result_type_ >= ((op_ == Token::DIV) ? BinaryOpIC::NUMBER - : BinaryOpIC::INT32)) { - // We are using FPU registers so s0 is available. - heap_number_result = s0; - BinaryOpStub_GenerateHeapResultAllocation(masm, - heap_number_result, - heap_number_map, - scratch1, - scratch2, - &call_runtime, - mode_); - __ mov(v0, heap_number_result); - __ sdc1(f10, FieldMemOperand(v0, HeapNumber::kValueOffset)); - __ Ret(); - } + // We are using FPU registers so s0 is available. + heap_number_result = s0; + BinaryOpStub_GenerateHeapResultAllocation(masm, + heap_number_result, + heap_number_map, + scratch1, + scratch2, + &call_runtime, + mode_); + __ mov(v0, heap_number_result); + __ sdc1(f10, FieldMemOperand(v0, HeapNumber::kValueOffset)); + __ Ret(); // A DIV operation expecting an integer result falls through // to type transition. 
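The GenerateLightweightMiss body added above is the generic miss path shared by hydrogen code stubs such as CompareNilICStub: spill the register parameters named by the stub's interface descriptor, call the registered miss handler inside a fresh internal frame, and return. Condensed and annotated (comments are editorial):

    void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
      Isolate* isolate = masm->isolate();
      isolate->counters()->code_stubs()->Increment();
      CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor(isolate);
      int param_count = descriptor->register_param_count_;
      {
        FrameScope scope(masm, StackFrame::INTERNAL);  // fresh internal frame
        for (int i = 0; i < param_count; ++i) {
          __ push(descriptor->register_params_[i]);    // spill register args
        }
        // miss_handler_ was registered when the descriptor was initialized,
        // e.g. IC_Utility(IC::kCompareNilIC_Miss) for CompareNilICStub.
        __ CallExternalReference(descriptor->miss_handler_, param_count);
      }
      __ Ret();
    }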
@@ -3303,6 +3346,9 @@ void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) { StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate); StubFailureTrampolineStub::GenerateAheadOfTime(isolate); RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate); + if (FLAG_optimize_constructed_arrays) { + ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate); + } } @@ -3384,7 +3430,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm, __ AssertStackIsAligned(); - __ li(a2, Operand(ExternalReference::isolate_address())); + __ li(a2, Operand(ExternalReference::isolate_address(isolate))); // To let the GC traverse the return address of the exit frames, we need to // know where the return address is. The CEntryStub is unmovable, so @@ -4663,7 +4709,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { // Argument 9: Pass current isolate address. // CFunctionArgumentOperand handles MIPS stack argument slots. - __ li(a0, Operand(ExternalReference::isolate_address())); + __ li(a0, Operand(ExternalReference::isolate_address(isolate))); __ sw(a0, MemOperand(sp, 5 * kPointerSize)); // Argument 8: Indicate that this is a direct call from JavaScript. @@ -5057,7 +5103,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) { Handle<Object> terminal_kind_sentinel = TypeFeedbackCells::MonomorphicArraySentinel(masm->isolate(), LAST_FAST_ELEMENTS_KIND); - __ Branch(&miss, ne, a3, Operand(terminal_kind_sentinel)); + __ Branch(&miss, gt, a3, Operand(terminal_kind_sentinel)); // Make sure the function is the Array() function __ LoadArrayFunction(a3); __ Branch(&megamorphic, ne, a1, Operand(a3)); @@ -7277,7 +7323,7 @@ void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) { __ Move(address, regs_.address()); __ Move(a0, regs_.object()); __ Move(a1, address); - __ li(a2, Operand(ExternalReference::isolate_address())); + __ li(a2, Operand(ExternalReference::isolate_address(masm->isolate()))); AllowExternalCallThatCantCauseGC scope(masm); if (mode == INCREMENTAL_COMPACTION) { @@ -7523,6 +7569,189 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) { } +template<class T> +static void CreateArrayDispatch(MacroAssembler* masm) { + int last_index = GetSequenceIndexFromFastElementsKind( + TERMINAL_FAST_ELEMENTS_KIND); + for (int i = 0; i <= last_index; ++i) { + Label next; + ElementsKind kind = GetFastElementsKindFromSequenceIndex(i); + __ Branch(&next, ne, a3, Operand(kind)); + T stub(kind); + __ TailCallStub(&stub); + __ bind(&next); + } + + // If we reached this point there is a problem. + __ Abort("Unexpected ElementsKind in array constructor"); +} + + +static void CreateArrayDispatchOneArgument(MacroAssembler* masm) { + // a2 - type info cell + // a3 - kind + // a0 - number of arguments + // a1 - constructor? + // sp[0] - last argument + ASSERT(FAST_SMI_ELEMENTS == 0); + ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1); + ASSERT(FAST_ELEMENTS == 2); + ASSERT(FAST_HOLEY_ELEMENTS == 3); + ASSERT(FAST_DOUBLE_ELEMENTS == 4); + ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5); + + Handle<Object> undefined_sentinel( + masm->isolate()->heap()->undefined_value(), + masm->isolate()); + + // is the low bit set? If so, we are holey and that is good. + Label normal_sequence; + __ And(at, a3, Operand(1)); + __ Branch(&normal_sequence, ne, at, Operand(zero_reg)); + + // look at the first argument + __ lw(t1, MemOperand(sp, 0)); + __ Branch(&normal_sequence, eq, t1, Operand(zero_reg)); + + // We are going to create a holey array, but our kind is non-holey. 
+ // Fix kind and retry + __ Addu(a3, a3, Operand(1)); + __ Branch(&normal_sequence, eq, a2, Operand(undefined_sentinel)); + + // Save the resulting elements kind in type info + __ SmiTag(a3); + __ sw(a3, FieldMemOperand(a2, kPointerSize)); + __ SmiUntag(a3); + + __ bind(&normal_sequence); + int last_index = GetSequenceIndexFromFastElementsKind( + TERMINAL_FAST_ELEMENTS_KIND); + for (int i = 0; i <= last_index; ++i) { + Label next; + ElementsKind kind = GetFastElementsKindFromSequenceIndex(i); + __ Branch(&next, ne, a3, Operand(kind)); + ArraySingleArgumentConstructorStub stub(kind); + __ TailCallStub(&stub); + __ bind(&next); + } + + // If we reached this point there is a problem. + __ Abort("Unexpected ElementsKind in array constructor"); +} + + +template<class T> +static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) { + int to_index = GetSequenceIndexFromFastElementsKind( + TERMINAL_FAST_ELEMENTS_KIND); + for (int i = 0; i <= to_index; ++i) { + ElementsKind kind = GetFastElementsKindFromSequenceIndex(i); + T stub(kind); + stub.GetCode(isolate)->set_is_pregenerated(true); + } +} + + +void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) { + ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>( + isolate); + ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>( + isolate); + ArrayConstructorStubAheadOfTimeHelper<ArrayNArgumentsConstructorStub>( + isolate); +} + + +void ArrayConstructorStub::Generate(MacroAssembler* masm) { + // ----------- S t a t e ------------- + // -- a0 : argc (only if argument_count_ == ANY) + // -- a1 : constructor + // -- a2 : type info cell + // -- sp[0] : return address + // -- sp[4] : last argument + // ----------------------------------- + Handle<Object> undefined_sentinel( + masm->isolate()->heap()->undefined_value(), + masm->isolate()); + + if (FLAG_debug_code) { + // The array construct code is only set for the global and natives + // builtin Array functions which always have maps. + + // Initial map for the builtin Array function should be a map. + __ lw(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset)); + // Will both indicate a NULL and a Smi. + __ And(at, a3, Operand(kSmiTagMask)); + __ Assert(ne, "Unexpected initial map for Array function", + at, Operand(zero_reg)); + __ GetObjectType(a3, a3, t0); + __ Assert(eq, "Unexpected initial map for Array function", + t0, Operand(MAP_TYPE)); + + // We should either have undefined in ebx or a valid jsglobalpropertycell + Label okay_here; + Handle<Map> global_property_cell_map( + masm->isolate()->heap()->global_property_cell_map()); + __ Branch(&okay_here, eq, a2, Operand(undefined_sentinel)); + __ lw(a3, FieldMemOperand(a2, 0)); + __ Assert(eq, "Expected property cell in register ebx", + a3, Operand(global_property_cell_map)); + __ bind(&okay_here); + } + + if (FLAG_optimize_constructed_arrays) { + Label no_info, switch_ready; + // Get the elements kind and case on that. + __ Branch(&no_info, eq, a2, Operand(undefined_sentinel)); + __ lw(a3, FieldMemOperand(a2, kPointerSize)); + + // There is no info if the call site went megamorphic either + // TODO(mvstanton): Really? I thought if it was the array function that + // the cell wouldn't get stamped as megamorphic. 
+ __ Branch(&no_info, eq, a3,
+ Operand(TypeFeedbackCells::MegamorphicSentinel(masm->isolate())));
+ __ SmiUntag(a3);
+ __ jmp(&switch_ready);
+ __ bind(&no_info);
+ __ li(a3, Operand(GetInitialFastElementsKind()));
+ __ bind(&switch_ready);
+
+ if (argument_count_ == ANY) {
+ Label not_zero_case, not_one_case;
+ __ And(at, a0, a0);
+ __ Branch(&not_zero_case, ne, at, Operand(zero_reg));
+ CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm);
+
+ __ bind(&not_zero_case);
+ __ Branch(&not_one_case, gt, a0, Operand(1));
+ CreateArrayDispatchOneArgument(masm);
+
+ __ bind(&not_one_case);
+ CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm);
+ } else if (argument_count_ == NONE) {
+ CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm);
+ } else if (argument_count_ == ONE) {
+ CreateArrayDispatchOneArgument(masm);
+ } else if (argument_count_ == MORE_THAN_ONE) {
+ CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm);
+ } else {
+ UNREACHABLE();
+ }
+ } else {
+ Label generic_constructor;
+ // Run the native code for the Array function called as a constructor.
+ ArrayNativeCode(masm, &generic_constructor);
+
+ // Jump to the generic construct code in case the specialized code cannot
+ // handle the construction.
+ __ bind(&generic_constructor);
+ Handle<Code> generic_construct_stub =
+ masm->isolate()->builtins()->JSConstructStubGeneric();
+ __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
+ }
+}
+
+
 #undef __
 } }  // namespace v8::internal
diff --git a/deps/v8/src/mips/code-stubs-mips.h b/deps/v8/src/mips/code-stubs-mips.h index 2370d4537d..3a84644a18 100644 --- a/deps/v8/src/mips/code-stubs-mips.h +++ b/deps/v8/src/mips/code-stubs-mips.h @@ -35,6 +35,9 @@
 namespace v8 {
 namespace internal {
+void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code);
+
+
 // Compute a transcendental math function natively, or call the
 // TranscendentalCache runtime function.
 class TranscendentalCacheStub: public PlatformCodeStub {
diff --git a/deps/v8/src/mips/deoptimizer-mips.cc b/deps/v8/src/mips/deoptimizer-mips.cc index 7896f20138..ecf4087328 100644 --- a/deps/v8/src/mips/deoptimizer-mips.cc +++ b/deps/v8/src/mips/deoptimizer-mips.cc @@ -591,8 +591,6 @@ void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
 void Deoptimizer::EntryGenerator::Generate() {
 GeneratePrologue();
- Isolate* isolate = masm()->isolate();
-
 // Unlike on ARM we don't save all the registers, just the useful ones.
 // For the rest, there are gaps on the stack, so the offsets remain the same.
 const int kNumberOfRegisters = Register::kNumRegisters;
@@ -653,12 +651,12 @@ void Deoptimizer::EntryGenerator::Generate() {
 // a2: bailout id already loaded.
 // a3: code address or 0 already loaded.
 __ sw(t0, CFunctionArgumentOperand(5)); // Fp-to-sp delta.
- __ li(t1, Operand(ExternalReference::isolate_address()));
+ __ li(t1, Operand(ExternalReference::isolate_address(isolate())));
 __ sw(t1, CFunctionArgumentOperand(6)); // Isolate.
 // Call Deoptimizer::New().
{ AllowExternalCallThatCantCauseGC scope(masm()); - __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6); + __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate()), 6); } // Preserve "deoptimizer" object in register v0 and get the input @@ -725,7 +723,7 @@ void Deoptimizer::EntryGenerator::Generate() { { AllowExternalCallThatCantCauseGC scope(masm()); __ CallCFunction( - ExternalReference::compute_output_frames_function(isolate), 1); + ExternalReference::compute_output_frames_function(isolate()), 1); } __ pop(a0); // Restore deoptimizer object (class Deoptimizer). diff --git a/deps/v8/src/mips/full-codegen-mips.cc b/deps/v8/src/mips/full-codegen-mips.cc index bc0d85543b..a6fd39aa18 100644 --- a/deps/v8/src/mips/full-codegen-mips.cc +++ b/deps/v8/src/mips/full-codegen-mips.cc @@ -1926,6 +1926,156 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) { } +void FullCodeGenerator::VisitYield(Yield* expr) { + Comment cmnt(masm_, "[ Yield"); + // Evaluate yielded value first; the initial iterator definition depends on + // this. It stays on the stack while we update the iterator. + VisitForStackValue(expr->expression()); + + switch (expr->yield_kind()) { + case Yield::INITIAL: + case Yield::SUSPEND: { + VisitForStackValue(expr->generator_object()); + __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1); + __ lw(context_register(), + MemOperand(fp, StandardFrameConstants::kContextOffset)); + + Label resume; + __ LoadRoot(at, Heap::kTheHoleValueRootIndex); + __ Branch(&resume, ne, result_register(), Operand(at)); + __ pop(result_register()); + if (expr->yield_kind() == Yield::SUSPEND) { + // TODO(wingo): Box into { value: VALUE, done: false }. + } + EmitReturnSequence(); + + __ bind(&resume); + context()->Plug(result_register()); + break; + } + + case Yield::FINAL: { + VisitForAccumulatorValue(expr->generator_object()); + __ li(a1, Operand(Smi::FromInt(JSGeneratorObject::kGeneratorClosed))); + __ sw(a1, FieldMemOperand(result_register(), + JSGeneratorObject::kContinuationOffset)); + __ pop(result_register()); + // TODO(wingo): Box into { value: VALUE, done: true }. + + // Exit all nested statements. + NestedStatement* current = nesting_stack_; + int stack_depth = 0; + int context_length = 0; + while (current != NULL) { + current = current->Exit(&stack_depth, &context_length); + } + __ Drop(stack_depth); + EmitReturnSequence(); + break; + } + + case Yield::DELEGATING: + UNIMPLEMENTED(); + } +} + + +void FullCodeGenerator::EmitGeneratorResume(Expression *generator, + Expression *value, + JSGeneratorObject::ResumeMode resume_mode) { + // The value stays in a0, and is ultimately read by the resumed generator, as + // if the CallRuntime(Runtime::kSuspendJSGeneratorObject) returned it. a1 + // will hold the generator object until the activation has been resumed. + VisitForStackValue(generator); + VisitForAccumulatorValue(value); + __ pop(a1); + + // Check generator state. + Label wrong_state, done; + __ lw(a3, FieldMemOperand(a1, JSGeneratorObject::kContinuationOffset)); + STATIC_ASSERT(JSGeneratorObject::kGeneratorExecuting <= 0); + STATIC_ASSERT(JSGeneratorObject::kGeneratorClosed <= 0); + __ Branch(&wrong_state, le, a3, Operand(zero_reg)); + + // Load suspended function and context. + __ lw(cp, FieldMemOperand(a1, JSGeneratorObject::kContextOffset)); + __ lw(t0, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset)); + + // Load receiver and store as the first argument. 
+ __ lw(a2, FieldMemOperand(a1, JSGeneratorObject::kReceiverOffset)); + __ push(a2); + + // Push holes for the rest of the arguments to the generator function. + __ lw(a3, FieldMemOperand(t0, JSFunction::kSharedFunctionInfoOffset)); + __ lw(a3, + FieldMemOperand(a3, SharedFunctionInfo::kFormalParameterCountOffset)); + __ LoadRoot(a2, Heap::kTheHoleValueRootIndex); + Label push_argument_holes, push_frame; + __ bind(&push_argument_holes); + __ Subu(a3, a3, Operand(1)); + __ Branch(&push_frame, lt, a3, Operand(zero_reg)); + __ push(a2); + __ jmp(&push_argument_holes); + + // Enter a new JavaScript frame, and initialize its slots as they were when + // the generator was suspended. + Label resume_frame; + __ bind(&push_frame); + __ Call(&resume_frame); + __ jmp(&done); + __ bind(&resume_frame); + __ push(ra); // Return address. + __ push(fp); // Caller's frame pointer. + __ mov(fp, sp); + __ push(cp); // Callee's context. + __ push(t0); // Callee's JS Function. + + // Load the operand stack size. + __ lw(a3, FieldMemOperand(a1, JSGeneratorObject::kOperandStackOffset)); + __ lw(a3, FieldMemOperand(a3, FixedArray::kLengthOffset)); + __ SmiUntag(a3); + + // If we are sending a value and there is no operand stack, we can jump back + // in directly. + if (resume_mode == JSGeneratorObject::SEND) { + Label slow_resume; + __ Branch(&slow_resume, ne, a3, Operand(zero_reg)); + __ lw(a3, FieldMemOperand(t0, JSFunction::kCodeEntryOffset)); + __ lw(a2, FieldMemOperand(a1, JSGeneratorObject::kContinuationOffset)); + __ SmiUntag(a2); + __ Addu(a3, a3, Operand(a2)); + __ li(a2, Operand(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting))); + __ sw(a2, FieldMemOperand(a1, JSGeneratorObject::kContinuationOffset)); + __ Jump(a3); + __ bind(&slow_resume); + } + + // Otherwise, we push holes for the operand stack and call the runtime to fix + // up the stack and the handlers. + Label push_operand_holes, call_resume; + __ bind(&push_operand_holes); + __ Subu(a3, a3, Operand(1)); + __ Branch(&call_resume, lt, a3, Operand(zero_reg)); + __ push(a2); + __ b(&push_operand_holes); + __ bind(&call_resume); + __ push(a1); + __ push(result_register()); + __ Push(Smi::FromInt(resume_mode)); + __ CallRuntime(Runtime::kResumeJSGeneratorObject, 3); + // Not reached: the runtime call returns elsewhere. + __ stop("not-reached"); + + // Throw error if we attempt to operate on a running generator. + __ bind(&wrong_state); + __ push(a1); + __ CallRuntime(Runtime::kThrowGeneratorStateError, 1); + + __ bind(&done); + context()->Plug(result_register()); +} + + void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) { SetSourcePosition(prop->position()); Literal* key = prop->key()->AsLiteral(); @@ -4398,26 +4548,21 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr, VisitForAccumulatorValue(sub_expr); PrepareForBailoutBeforeSplit(expr, true, if_true, if_false); - Heap::RootListIndex nil_value = nil == kNullValue ? - Heap::kNullValueRootIndex : - Heap::kUndefinedValueRootIndex; + EqualityKind kind = expr->op() == Token::EQ_STRICT + ? kStrictEquality : kNonStrictEquality; __ mov(a0, result_register()); - __ LoadRoot(a1, nil_value); - if (expr->op() == Token::EQ_STRICT) { + if (kind == kStrictEquality) { + Heap::RootListIndex nil_value = nil == kNullValue ? + Heap::kNullValueRootIndex : + Heap::kUndefinedValueRootIndex; + __ LoadRoot(a1, nil_value); Split(eq, a0, Operand(a1), if_true, if_false, fall_through); } else { - Heap::RootListIndex other_nil_value = nil == kNullValue ? 
- Heap::kUndefinedValueRootIndex : - Heap::kNullValueRootIndex; - __ Branch(if_true, eq, a0, Operand(a1)); - __ LoadRoot(a1, other_nil_value); - __ Branch(if_true, eq, a0, Operand(a1)); - __ JumpIfSmi(a0, if_false); - // It can be an undetectable object. - __ lw(a1, FieldMemOperand(a0, HeapObject::kMapOffset)); - __ lbu(a1, FieldMemOperand(a1, Map::kBitFieldOffset)); - __ And(a1, a1, Operand(1 << Map::kIsUndetectable)); - Split(ne, a1, Operand(zero_reg), if_true, if_false, fall_through); + Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(), + kNonStrictEquality, + nil); + CallIC(ic, RelocInfo::CODE_TARGET, expr->CompareOperationFeedbackId()); + Split(ne, v0, Operand(zero_reg), if_true, if_false, fall_through); } context()->Plug(if_true, if_false); } diff --git a/deps/v8/src/mips/lithium-codegen-mips.cc b/deps/v8/src/mips/lithium-codegen-mips.cc index 77949313e1..0c2983f23e 100644 --- a/deps/v8/src/mips/lithium-codegen-mips.cc +++ b/deps/v8/src/mips/lithium-codegen-mips.cc @@ -257,38 +257,21 @@ bool LCodeGen::GenerateBody() { !is_aborted() && current_instruction_ < instructions_->length(); current_instruction_++) { LInstruction* instr = instructions_->at(current_instruction_); + + // Don't emit code for basic blocks with a replacement. if (instr->IsLabel()) { - LLabel* label = LLabel::cast(instr); - emit_instructions = !label->HasReplacement(); + emit_instructions = !LLabel::cast(instr)->HasReplacement(); } + if (!emit_instructions) continue; - if (emit_instructions) { - if (FLAG_code_comments) { - HValue* hydrogen = instr->hydrogen_value(); - if (hydrogen != NULL) { - if (hydrogen->IsChange()) { - HValue* changed_value = HChange::cast(hydrogen)->value(); - int use_id = 0; - const char* use_mnemo = "dead"; - if (hydrogen->UseCount() >= 1) { - HValue* use_value = hydrogen->uses().value(); - use_id = use_value->id(); - use_mnemo = use_value->Mnemonic(); - } - Comment(";;; @%d: %s. <of #%d %s for #%d %s>", - current_instruction_, instr->Mnemonic(), - changed_value->id(), changed_value->Mnemonic(), - use_id, use_mnemo); - } else { - Comment(";;; @%d: %s. 
<#%d>", current_instruction_, - instr->Mnemonic(), hydrogen->id()); - } - } else { - Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic()); - } - } - instr->CompileToNative(this); + if (FLAG_code_comments && instr->HasInterestingComment(this)) { + Comment(";;; <@%d,#%d> %s", + current_instruction_, + instr->hydrogen_value()->id(), + instr->Mnemonic()); } + + instr->CompileToNative(this); } return !is_aborted(); } @@ -299,11 +282,14 @@ bool LCodeGen::GenerateDeferredCode() { if (deferred_.length() > 0) { for (int i = 0; !is_aborted() && i < deferred_.length(); i++) { LDeferredCode* code = deferred_[i]; + Comment(";;; <@%d,#%d> " + "-------------------- Deferred %s --------------------", + code->instruction_index(), + code->instr()->hydrogen_value()->id(), + code->instr()->Mnemonic()); __ bind(code->entry()); if (NeedsDeferredFrame()) { - Comment(";;; Deferred build frame @%d: %s.", - code->instruction_index(), - code->instr()->Mnemonic()); + Comment(";;; Build frame"); ASSERT(!frame_is_built_); ASSERT(info()->IsStub()); frame_is_built_ = true; @@ -311,15 +297,11 @@ bool LCodeGen::GenerateDeferredCode() { __ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB))); __ push(scratch0()); __ Addu(fp, sp, Operand(2 * kPointerSize)); + Comment(";;; Deferred code"); } - Comment(";;; Deferred code @%d: %s.", - code->instruction_index(), - code->instr()->Mnemonic()); code->Generate(); if (NeedsDeferredFrame()) { - Comment(";;; Deferred destroy frame @%d: %s.", - code->instruction_index(), - code->instr()->Mnemonic()); + Comment(";;; Destroy frame"); ASSERT(frame_is_built_); __ pop(at); __ MultiPop(cp.bit() | fp.bit() | ra.bit()); @@ -346,8 +328,10 @@ bool LCodeGen::GenerateDeoptJumpTable() { Abort("Generated code is too large"); } + if (deopt_jump_table_.length() > 0) { + Comment(";;; -------------------- Jump table --------------------"); + } Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_); - __ RecordComment("[ Deoptimization jump table"); Label table_start; __ bind(&table_start); Label needs_frame_not_call; @@ -592,7 +576,7 @@ void LCodeGen::WriteTranslation(LEnvironment* environment, pushed_arguments_index, pushed_arguments_count); bool has_closure_id = !info()->closure().is_null() && - *info()->closure() != *environment->closure(); + !info()->closure().is_identical_to(environment->closure()); int closure_id = has_closure_id ? 
DefineDeoptimizationLiteral(environment->closure()) : Translation::kSelfLiteralId; @@ -904,10 +888,13 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) { Handle<FixedArray> literals = factory()->NewFixedArray(deoptimization_literals_.length(), TENURED); - for (int i = 0; i < deoptimization_literals_.length(); i++) { - literals->set(i, *deoptimization_literals_[i]); + { ALLOW_HANDLE_DEREF(isolate(), + "copying a ZoneList of handles into a FixedArray"); + for (int i = 0; i < deoptimization_literals_.length(); i++) { + literals->set(i, *deoptimization_literals_[i]); + } + data->SetLiteralArray(*literals); } - data->SetLiteralArray(*literals); data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt())); data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_)); @@ -1023,10 +1010,19 @@ void LCodeGen::RecordPosition(int position) { } +static const char* LabelType(LLabel* label) { + if (label->is_loop_header()) return " (loop header)"; + if (label->is_osr_entry()) return " (OSR entry)"; + return ""; +} + + void LCodeGen::DoLabel(LLabel* label) { - Comment(";;; -------------------- B%d%s --------------------", + Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------", + current_instruction_, + label->hydrogen_value()->id(), label->block_id(), - label->is_loop_header() ? " (loop header)" : ""); + LabelType(label)); __ bind(label->label()); current_block_ = label->block_id(); DoGap(label); @@ -1480,6 +1476,7 @@ void LCodeGen::DoConstantD(LConstantD* instr) { void LCodeGen::DoConstantT(LConstantT* instr) { Handle<Object> value = instr->value(); + ALLOW_HANDLE_DEREF(isolate(), "smi check"); if (value->IsSmi()) { __ li(ToRegister(instr->result()), Operand(value)); } else { @@ -1756,10 +1753,9 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) { } -int LCodeGen::GetNextEmittedBlock(int block) { - for (int i = block + 1; i < graph()->blocks()->length(); ++i) { - LLabel* label = chunk_->GetLabel(i); - if (!label->HasReplacement()) return i; +int LCodeGen::GetNextEmittedBlock() const { + for (int i = current_block_ + 1; i < graph()->blocks()->length(); ++i) { + if (!chunk_->GetLabel(i)->HasReplacement()) return i; } return -1; } @@ -1767,7 +1763,7 @@ int LCodeGen::GetNextEmittedBlock(int block) { void LCodeGen::EmitBranch(int left_block, int right_block, Condition cc, Register src1, const Operand& src2) { - int next_block = GetNextEmittedBlock(current_block_); + int next_block = GetNextEmittedBlock(); right_block = chunk_->LookupDestination(right_block); left_block = chunk_->LookupDestination(left_block); if (right_block == left_block) { @@ -1786,7 +1782,7 @@ void LCodeGen::EmitBranch(int left_block, int right_block, void LCodeGen::EmitBranchF(int left_block, int right_block, Condition cc, FPURegister src1, FPURegister src2) { - int next_block = GetNextEmittedBlock(current_block_); + int next_block = GetNextEmittedBlock(); right_block = chunk_->LookupDestination(right_block); left_block = chunk_->LookupDestination(left_block); if (right_block == left_block) { @@ -1916,10 +1912,8 @@ void LCodeGen::DoBranch(LBranch* instr) { void LCodeGen::EmitGoto(int block) { - block = chunk_->LookupDestination(block); - int next_block = GetNextEmittedBlock(current_block_); - if (block != next_block) { - __ jmp(chunk_->GetAssemblyLabel(block)); + if (!IsNextEmittedBlock(block)) { + __ jmp(chunk_->GetAssemblyLabel(chunk_->LookupDestination(block))); } } @@ -2552,20 +2546,21 @@ void LCodeGen::DoReturn(LReturn* instr) { if (NeedsEagerFrame()) { __ mov(sp, fp); __ Pop(ra, fp); - - if 
(instr->has_constant_parameter_count()) { - int parameter_count = ToInteger32(instr->constant_parameter_count()); - int32_t sp_delta = (parameter_count + 1) * kPointerSize; - if (sp_delta != 0) { - __ Addu(sp, sp, Operand(sp_delta)); - } - } else { - Register reg = ToRegister(instr->parameter_count()); - __ Addu(reg, reg, Operand(1)); - __ sll(at, reg, kPointerSizeLog2); - __ Addu(sp, sp, at); + } + if (instr->has_constant_parameter_count()) { + int parameter_count = ToInteger32(instr->constant_parameter_count()); + int32_t sp_delta = (parameter_count + 1) * kPointerSize; + if (sp_delta != 0) { + __ Addu(sp, sp, Operand(sp_delta)); } + } else { + Register reg = ToRegister(instr->parameter_count()); + // The argument count parameter is a smi + __ SmiUntag(reg); + __ sll(at, reg, kPointerSizeLog2); + __ Addu(sp, sp, at); } + __ Jump(ra); } @@ -2883,16 +2878,24 @@ void LCodeGen::DoLoadExternalArrayPointer( void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) { Register arguments = ToRegister(instr->arguments()); - Register length = ToRegister(instr->length()); - Register index = ToRegister(instr->index()); Register result = ToRegister(instr->result()); - // There are two words between the frame pointer and the last argument. - // Subtracting from length accounts for one of them, add one more. - __ subu(length, length, index); - __ Addu(length, length, Operand(1)); - __ sll(length, length, kPointerSizeLog2); - __ Addu(at, arguments, Operand(length)); - __ lw(result, MemOperand(at, 0)); + if (instr->length()->IsConstantOperand() && + instr->index()->IsConstantOperand()) { + int const_index = ToInteger32(LConstantOperand::cast(instr->index())); + int const_length = ToInteger32(LConstantOperand::cast(instr->length())); + int index = (const_length - const_index) + 1; + __ lw(result, MemOperand(arguments, index * kPointerSize)); + } else { + Register length = ToRegister(instr->length()); + Register index = ToRegister(instr->index()); + // There are two words between the frame pointer and the last argument. + // Subtracting from length accounts for one of them, add one more. + __ subu(length, length, index); + __ Addu(length, length, Operand(1)); + __ sll(length, length, kPointerSizeLog2); + __ Addu(at, arguments, Operand(length)); + __ lw(result, MemOperand(at, 0)); + } } @@ -3323,12 +3326,15 @@ void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) { void LCodeGen::CallKnownFunction(Handle<JSFunction> function, + int formal_parameter_count, int arity, LInstruction* instr, CallKind call_kind, A1State a1_state) { - bool can_invoke_directly = !function->NeedsArgumentsAdaption() || - function->shared()->formal_parameter_count() == arity; + bool dont_adapt_arguments = + formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel; + bool can_invoke_directly = + dont_adapt_arguments || formal_parameter_count == arity; LPointerMap* pointers = instr->pointer_map(); RecordPosition(pointers->position()); @@ -3343,7 +3349,7 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function, // Set r0 to arguments count if adaption is not needed. Assumes that r0 // is available to write to at this point. 
- if (!function->NeedsArgumentsAdaption()) { + if (dont_adapt_arguments) { __ li(a0, Operand(arity)); } @@ -3357,7 +3363,9 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function, } else { SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt); ParameterCount count(arity); - __ InvokeFunction(function, count, CALL_FUNCTION, generator, call_kind); + ParameterCount expected(formal_parameter_count); + __ InvokeFunction( + function, expected, count, CALL_FUNCTION, generator, call_kind); } // Restore context. @@ -3368,7 +3376,8 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function, void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) { ASSERT(ToRegister(instr->result()).is(v0)); __ mov(a0, v0); - CallKnownFunction(instr->function(), + CallKnownFunction(instr->hydrogen()->function(), + instr->hydrogen()->formal_parameter_count(), instr->arity(), instr, CALL_AS_METHOD, @@ -3780,7 +3789,8 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) { ASSERT(ToRegister(instr->function()).is(a1)); ASSERT(instr->HasPointerMap()); - if (instr->known_function().is_null()) { + Handle<JSFunction> known_function = instr->hydrogen()->known_function(); + if (known_function.is_null()) { LPointerMap* pointers = instr->pointer_map(); RecordPosition(pointers->position()); SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt); @@ -3788,7 +3798,8 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) { __ InvokeFunction(a1, count, CALL_FUNCTION, generator, CALL_AS_METHOD); __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); } else { - CallKnownFunction(instr->known_function(), + CallKnownFunction(known_function, + instr->hydrogen()->formal_parameter_count(), instr->arity(), instr, CALL_AS_METHOD, @@ -3848,7 +3859,8 @@ void LCodeGen::DoCallGlobal(LCallGlobal* instr) { void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) { ASSERT(ToRegister(instr->result()).is(v0)); - CallKnownFunction(instr->target(), + CallKnownFunction(instr->hydrogen()->target(), + instr->hydrogen()->formal_parameter_count(), instr->arity(), instr, CALL_AS_FUNCTION, @@ -3879,10 +3891,18 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) { __ li(a0, Operand(instr->arity())); __ li(a2, Operand(instr->hydrogen()->property_cell())); - Handle<Code> array_construct_code = - isolate()->builtins()->ArrayConstructCode(); - - CallCode(array_construct_code, RelocInfo::CONSTRUCT_CALL, instr); + Object* cell_value = instr->hydrogen()->property_cell()->value(); + ElementsKind kind = static_cast<ElementsKind>(Smi::cast(cell_value)->value()); + if (instr->arity() == 0) { + ArrayNoArgumentConstructorStub stub(kind); + CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr); + } else if (instr->arity() == 1) { + ArraySingleArgumentConstructorStub stub(kind); + CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr); + } else { + ArrayNArgumentsConstructorStub stub(kind); + CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr); + } } @@ -4890,6 +4910,7 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) { void LCodeGen::DoCheckFunction(LCheckFunction* instr) { Register reg = ToRegister(instr->value()); Handle<JSFunction> target = instr->hydrogen()->target(); + ALLOW_HANDLE_DEREF(isolate(), "smi check"); if (isolate()->heap()->InNewSpace(*target)) { Register reg = ToRegister(instr->value()); Handle<JSGlobalPropertyCell> cell = @@ -5029,16 +5050,12 @@ void LCodeGen::DoAllocateObject(LAllocateObject* instr) { Register scratch = 
ToRegister(instr->temp()); Register scratch2 = ToRegister(instr->temp2()); Handle<JSFunction> constructor = instr->hydrogen()->constructor(); - Handle<Map> initial_map(constructor->initial_map()); + Handle<Map> initial_map = instr->hydrogen()->constructor_initial_map(); int instance_size = initial_map->instance_size(); ASSERT(initial_map->pre_allocated_property_fields() + initial_map->unused_property_fields() - initial_map->inobject_properties() == 0); - // Allocate memory for the object. The initial map might change when - // the constructor's prototype changes, but instance size and property - // counts remain unchanged (if slack tracking finished). - ASSERT(!constructor->shared()->IsInobjectSlackTrackingInProgress()); __ Allocate(instance_size, result, scratch, scratch2, deferred->entry(), TAG_OBJECT); @@ -5073,8 +5090,7 @@ void LCodeGen::DoAllocateObject(LAllocateObject* instr) { void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) { Register result = ToRegister(instr->result()); - Handle<JSFunction> constructor = instr->hydrogen()->constructor(); - Handle<Map> initial_map(constructor->initial_map()); + Handle<Map> initial_map = instr->hydrogen()->constructor_initial_map(); int instance_size = initial_map->instance_size(); // TODO(3095996): Get rid of this. For now, we need to make the @@ -5157,7 +5173,7 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) { void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) { - Handle<FixedArray> literals(instr->environment()->closure()->literals()); + Handle<FixedArray> literals = instr->hydrogen()->literals(); ElementsKind boilerplate_elements_kind = instr->hydrogen()->boilerplate_elements_kind(); AllocationSiteMode allocation_site_mode = @@ -5215,7 +5231,7 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) { void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) { ASSERT(ToRegister(instr->result()).is(v0)); - Handle<FixedArray> literals(instr->environment()->closure()->literals()); + Handle<FixedArray> literals = instr->hydrogen()->literals(); Handle<FixedArray> constant_properties = instr->hydrogen()->constant_properties(); @@ -5229,7 +5245,7 @@ void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) { __ li(a0, Operand(Smi::FromInt(flags))); // Pick the right runtime function or stub to call. - int properties_count = constant_properties->length() / 2; + int properties_count = instr->hydrogen()->constant_properties_length() / 2; if (instr->hydrogen()->depth() > 1) { __ Push(a3, a2, a1, a0); CallRuntime(Runtime::kCreateObjectLiteral, 4, instr); @@ -5307,19 +5323,17 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) { void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) { // Use the fast case closure allocation code that allocates in new // space for nested functions that don't need literals cloning. - Handle<SharedFunctionInfo> shared_info = instr->shared_info(); bool pretenure = instr->hydrogen()->pretenure(); - if (!pretenure && shared_info->num_literals() == 0) { - FastNewClosureStub stub(shared_info->language_mode(), - shared_info->is_generator()); - __ li(a1, Operand(shared_info)); + if (!pretenure && instr->hydrogen()->has_no_literals()) { + FastNewClosureStub stub(instr->hydrogen()->language_mode(), + instr->hydrogen()->is_generator()); + __ li(a1, Operand(instr->hydrogen()->shared_info())); __ push(a1); CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); } else { - __ li(a2, Operand(shared_info)); - __ li(a1, Operand(pretenure - ? 
factory()->true_value() - : factory()->false_value())); + __ li(a2, Operand(instr->hydrogen()->shared_info())); + __ li(a1, Operand(pretenure ? factory()->true_value() + : factory()->false_value())); __ Push(cp, a2, a1); CallRuntime(Runtime::kNewClosure, 3, instr); } diff --git a/deps/v8/src/mips/lithium-codegen-mips.h b/deps/v8/src/mips/lithium-codegen-mips.h index 01d0ffcbee..f082c01dda 100644 --- a/deps/v8/src/mips/lithium-codegen-mips.h +++ b/deps/v8/src/mips/lithium-codegen-mips.h @@ -79,10 +79,20 @@ class LCodeGen BASE_EMBEDDED { Heap* heap() const { return isolate()->heap(); } Zone* zone() const { return zone_; } + // TODO(svenpanne) Use this consistently. + int LookupDestination(int block_id) const { + return chunk()->LookupDestination(block_id); + } + + bool IsNextEmittedBlock(int block_id) const { + return LookupDestination(block_id) == GetNextEmittedBlock(); + } + bool NeedsEagerFrame() const { return GetStackSlotCount() > 0 || info()->is_non_deferred_calling() || - !info()->IsStub(); + !info()->IsStub() || + info()->requires_frame(); } bool NeedsDeferredFrame() const { return !NeedsEagerFrame() && info()->is_deferred_calling(); @@ -189,13 +199,13 @@ class LCodeGen BASE_EMBEDDED { LPlatformChunk* chunk() const { return chunk_; } Scope* scope() const { return scope_; } - HGraph* graph() const { return chunk_->graph(); } + HGraph* graph() const { return chunk()->graph(); } Register scratch0() { return kLithiumScratchReg; } Register scratch1() { return kLithiumScratchReg2; } DoubleRegister double_scratch0() { return kLithiumScratchDouble; } - int GetNextEmittedBlock(int block); + int GetNextEmittedBlock() const; LInstruction* GetNextInstruction(); void EmitClassOfTest(Label* if_true, @@ -257,6 +267,7 @@ class LCodeGen BASE_EMBEDDED { // Generate a direct call to a known function. Expects the function // to be in a1. void CallKnownFunction(Handle<JSFunction> function, + int formal_parameter_count, int arity, LInstruction* instr, CallKind call_kind, diff --git a/deps/v8/src/mips/lithium-mips.cc b/deps/v8/src/mips/lithium-mips.cc index afa806c4ec..c2f89867d7 100644 --- a/deps/v8/src/mips/lithium-mips.cc +++ b/deps/v8/src/mips/lithium-mips.cc @@ -192,6 +192,11 @@ const char* LArithmeticT::Mnemonic() const { } +bool LGoto::HasInterestingComment(LCodeGen* gen) const { + return !gen->IsNextEmittedBlock(block_id()); +} + + void LGoto::PrintDataTo(StringStream* stream) { stream->Add("B%d", block_id()); } @@ -823,11 +828,15 @@ void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) { HEnvironment* last_environment = pred->last_environment(); for (int i = 0; i < block->phis()->length(); ++i) { HPhi* phi = block->phis()->at(i); - last_environment->SetValueAt(phi->merged_index(), phi); + if (phi->merged_index() < last_environment->length()) { + last_environment->SetValueAt(phi->merged_index(), phi); + } } for (int i = 0; i < block->deleted_phis()->length(); ++i) { - last_environment->SetValueAt(block->deleted_phis()->at(i), - graph_->GetConstantUndefined()); + if (block->deleted_phis()->at(i) < last_environment->length()) { + last_environment->SetValueAt(block->deleted_phis()->at(i), + graph_->GetConstantUndefined()); + } } block->UpdateEnvironment(last_environment); // Pick up the outgoing argument count of one of the predecessors. 
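The IsNextEmittedBlock/EmitGoto rework above centralizes a standard linear-order codegen trick: an unconditional goto is dropped entirely when its destination is the next block that will actually be emitted, taking replaced labels into account. A toy sketch of the idea in plain C++ (Block and NextEmittedBlock are hypothetical stand-ins, not V8's LLabel/LChunk types):

#include <cstdio>
#include <vector>

struct Block {
  int id;
  bool replaced;  // mirrors LLabel::HasReplacement(): block emits no code
};

// Find the id of the next block after 'current' that will really be emitted.
static int NextEmittedBlock(const std::vector<Block>& blocks, int current) {
  for (int i = current + 1; i < static_cast<int>(blocks.size()); ++i) {
    if (!blocks[i].replaced) return blocks[i].id;
  }
  return -1;  // nothing follows
}

static void EmitGoto(const std::vector<Block>& blocks, int current, int target) {
  if (target == NextEmittedBlock(blocks, current)) return;  // fall through
  std::printf("  jmp B%d\n", target);
}

int main() {
  std::vector<Block> blocks = {{0, false}, {1, true}, {2, false}};
  EmitGoto(blocks, 0, 2);  // B1 is replaced, so B2 is the fall-through: no jump
  EmitGoto(blocks, 0, 0);  // a backward edge still needs "jmp B0"
  return 0;
}

The same predicate feeds LGoto::HasInterestingComment, so --code-comments output no longer prints a comment for gotos that compile to nothing.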
@@ -984,12 +993,14 @@ LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) { LInstruction* LChunkBuilder::DoArgumentsLength(HArgumentsLength* length) { + info()->MarkAsRequiresFrame(); return DefineAsRegister( new(zone()) LArgumentsLength(UseRegister(length->value()))); } LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) { + info()->MarkAsRequiresFrame(); return DefineAsRegister(new(zone()) LArgumentsElements); } @@ -2295,7 +2306,8 @@ LInstruction* LChunkBuilder::DoParameter(HParameter* instr) { ASSERT(info()->IsStub()); CodeStubInterfaceDescriptor* descriptor = info()->code_stub()->GetInterfaceDescriptor(info()->isolate()); - Register reg = descriptor->register_params_[instr->index()]; + int index = static_cast<int>(instr->index()); + Register reg = DESCRIPTOR_GET_PARAMETER_REGISTER(descriptor, index); return DefineFixed(result, reg); } } @@ -2327,9 +2339,17 @@ LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) { LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) { + info()->MarkAsRequiresFrame(); LOperand* args = UseRegister(instr->arguments()); - LOperand* length = UseTempRegister(instr->length()); - LOperand* index = UseRegister(instr->index()); + LOperand* length; + LOperand* index; + if (instr->length()->IsConstant() && instr->index()->IsConstant()) { + length = UseRegisterOrConstant(instr->length()); + index = UseOrConstant(instr->index()); + } else { + length = UseTempRegister(instr->length()); + index = Use(instr->index()); + } return DefineAsRegister(new(zone()) LAccessArgumentsAt(args, length, index)); } diff --git a/deps/v8/src/mips/lithium-mips.h b/deps/v8/src/mips/lithium-mips.h index b0fc59a3b2..cfca64452b 100644 --- a/deps/v8/src/mips/lithium-mips.h +++ b/deps/v8/src/mips/lithium-mips.h @@ -279,6 +279,8 @@ class LInstruction: public ZoneObject { LOperand* FirstInput() { return InputAt(0); } LOperand* Output() { return HasResult() ? 
result() : NULL; } + virtual bool HasInterestingComment(LCodeGen* gen) const { return true; } + #ifdef DEBUG void VerifyCall(); #endif @@ -378,6 +380,10 @@ class LInstructionGap: public LGap { public: explicit LInstructionGap(HBasicBlock* block) : LGap(block) { } + virtual bool HasInterestingComment(LCodeGen* gen) const { + return !IsRedundant(); + } + DECLARE_CONCRETE_INSTRUCTION(InstructionGap, "gap") }; @@ -386,6 +392,7 @@ class LGoto: public LTemplateInstruction<0, 0, 0> { public: explicit LGoto(int block_id) : block_id_(block_id) { } + virtual bool HasInterestingComment(LCodeGen* gen) const; DECLARE_CONCRETE_INSTRUCTION(Goto, "goto") virtual void PrintDataTo(StringStream* stream); virtual bool IsControl() const { return true; } @@ -433,12 +440,14 @@ class LLabel: public LGap { explicit LLabel(HBasicBlock* block) : LGap(block), replacement_(NULL) { } + virtual bool HasInterestingComment(LCodeGen* gen) const { return false; } DECLARE_CONCRETE_INSTRUCTION(Label, "label") virtual void PrintDataTo(StringStream* stream); int block_id() const { return block()->block_id(); } bool is_loop_header() const { return block()->IsLoopHeader(); } + bool is_osr_entry() const { return block()->is_osr_entry(); } Label* label() { return &label_; } LLabel* replacement() const { return replacement_; } void set_replacement(LLabel* label) { replacement_ = label; } @@ -452,6 +461,7 @@ class LLabel: public LGap { class LParameter: public LTemplateInstruction<1, 0, 0> { public: + virtual bool HasInterestingComment(LCodeGen* gen) const { return false; } DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter") }; @@ -469,6 +479,7 @@ class LCallStub: public LTemplateInstruction<1, 0, 0> { class LUnknownOSRValue: public LTemplateInstruction<1, 0, 0> { public: + virtual bool HasInterestingComment(LCodeGen* gen) const { return false; } DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value") }; @@ -1790,7 +1801,6 @@ class LInvokeFunction: public LTemplateInstruction<1, 1, 0> { virtual void PrintDataTo(StringStream* stream); int arity() const { return hydrogen()->argument_count() - 1; } - Handle<JSFunction> known_function() { return hydrogen()->known_function(); } }; @@ -1858,7 +1868,6 @@ class LCallKnownGlobal: public LTemplateInstruction<1, 0, 0> { virtual void PrintDataTo(StringStream* stream); - Handle<JSFunction> target() const { return hydrogen()->target(); } int arity() const { return hydrogen()->argument_count() - 1; } }; @@ -2429,8 +2438,6 @@ class LFunctionLiteral: public LTemplateInstruction<1, 0, 0> { public: DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral, "function-literal") DECLARE_HYDROGEN_ACCESSOR(FunctionLiteral) - - Handle<SharedFunctionInfo> shared_info() { return hydrogen()->shared_info(); } }; @@ -2507,6 +2514,7 @@ class LOsrEntry: public LTemplateInstruction<0, 0, 0> { public: LOsrEntry(); + virtual bool HasInterestingComment(LCodeGen* gen) const { return false; } DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry") LOperand** SpilledRegisterArray() { return register_spills_; } diff --git a/deps/v8/src/mips/macro-assembler-mips.cc b/deps/v8/src/mips/macro-assembler-mips.cc index 6f9891469e..220d9fe0c7 100644 --- a/deps/v8/src/mips/macro-assembler-mips.cc +++ b/deps/v8/src/mips/macro-assembler-mips.cc @@ -83,6 +83,7 @@ void MacroAssembler::StoreRoot(Register source, void MacroAssembler::LoadHeapObject(Register result, Handle<HeapObject> object) { + ALLOW_HANDLE_DEREF(isolate(), "using raw address"); if (isolate()->heap()->InNewSpace(*object)) { Handle<JSGlobalPropertyCell> cell = 
isolate()->factory()->NewJSGlobalPropertyCell(object); @@ -2457,6 +2458,7 @@ void MacroAssembler::Jump(Handle<Code> code, const Operand& rt, BranchDelaySlot bd) { ASSERT(RelocInfo::IsCodeTarget(rmode)); + ALLOW_HANDLE_DEREF(isolate(), "embedding raw address"); Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond, rs, rt, bd); } @@ -2544,6 +2546,7 @@ int MacroAssembler::CallSize(Handle<Code> code, Register rs, const Operand& rt, BranchDelaySlot bd) { + ALLOW_HANDLE_DEREF(isolate(), "using raw address"); return CallSize(reinterpret_cast<Address>(code.location()), rmode, cond, rs, rt, bd); } @@ -2564,6 +2567,7 @@ void MacroAssembler::Call(Handle<Code> code, SetRecordedAstId(ast_id); rmode = RelocInfo::CODE_TARGET_WITH_ID; } + ALLOW_HANDLE_DEREF(isolate(), "embedding raw address"); Call(reinterpret_cast<Address>(code.location()), rmode, cond, rs, rt, bd); ASSERT_EQ(CallSize(code, rmode, ast_id, cond, rs, rt, bd), SizeOfCodeGeneratedSince(&start)); @@ -3743,6 +3747,7 @@ void MacroAssembler::InvokeFunction(Register function, void MacroAssembler::InvokeFunction(Handle<JSFunction> function, + const ParameterCount& expected, const ParameterCount& actual, InvokeFlag flag, const CallWrapper& call_wrapper, @@ -3754,7 +3759,6 @@ void MacroAssembler::InvokeFunction(Handle<JSFunction> function, LoadHeapObject(a1, function); lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset)); - ParameterCount expected(function->shared()->formal_parameter_count()); // We call indirectly through the code field in the function to // allow recompilation to take effect without changing any of the // call sites. @@ -3921,8 +3925,9 @@ void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function, if (FLAG_log_timer_events) { FrameScope frame(this, StackFrame::MANUAL); PushSafepointRegisters(); - PrepareCallCFunction(0, a0); - CallCFunction(ExternalReference::log_enter_external_function(isolate()), 0); + PrepareCallCFunction(1, a0); + li(a0, Operand(ExternalReference::isolate_address(isolate()))); + CallCFunction(ExternalReference::log_enter_external_function(isolate()), 1); PopSafepointRegisters(); } @@ -3941,8 +3946,9 @@ void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function, if (FLAG_log_timer_events) { FrameScope frame(this, StackFrame::MANUAL); PushSafepointRegisters(); - PrepareCallCFunction(0, a0); - CallCFunction(ExternalReference::log_leave_external_function(isolate()), 0); + PrepareCallCFunction(1, a0); + li(a0, Operand(ExternalReference::isolate_address(isolate()))); + CallCFunction(ExternalReference::log_leave_external_function(isolate()), 1); PopSafepointRegisters(); } @@ -3996,7 +4002,7 @@ void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function, mov(s0, v0); mov(a0, v0); PrepareCallCFunction(1, s1); - li(a0, Operand(ExternalReference::isolate_address())); + li(a0, Operand(ExternalReference::isolate_address(isolate()))); CallCFunction(ExternalReference::delete_handle_scope_extensions(isolate()), 1); mov(v0, s0); diff --git a/deps/v8/src/mips/macro-assembler-mips.h b/deps/v8/src/mips/macro-assembler-mips.h index 125cc8aaf3..e914f24025 100644 --- a/deps/v8/src/mips/macro-assembler-mips.h +++ b/deps/v8/src/mips/macro-assembler-mips.h @@ -188,10 +188,10 @@ class MacroAssembler: public Assembler { void Call(Register target, COND_ARGS); static int CallSize(Address target, RelocInfo::Mode rmode, COND_ARGS); void Call(Address target, RelocInfo::Mode rmode, COND_ARGS); - static int CallSize(Handle<Code> code, - RelocInfo::Mode rmode = RelocInfo::CODE_TARGET, - 
TypeFeedbackId ast_id = TypeFeedbackId::None(), - COND_ARGS); + int CallSize(Handle<Code> code, + RelocInfo::Mode rmode = RelocInfo::CODE_TARGET, + TypeFeedbackId ast_id = TypeFeedbackId::None(), + COND_ARGS); void Call(Handle<Code> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET, TypeFeedbackId ast_id = TypeFeedbackId::None(), @@ -289,6 +289,7 @@ class MacroAssembler: public Assembler { void LoadHeapObject(Register dst, Handle<HeapObject> object); void LoadObject(Register result, Handle<Object> object) { + ALLOW_HANDLE_DEREF(isolate(), "heap object check"); if (object->IsHeapObject()) { LoadHeapObject(result, Handle<HeapObject>::cast(object)); } else { @@ -882,6 +883,7 @@ class MacroAssembler: public Assembler { CallKind call_kind); void InvokeFunction(Handle<JSFunction> function, + const ParameterCount& expected, const ParameterCount& actual, InvokeFlag flag, const CallWrapper& call_wrapper, diff --git a/deps/v8/src/mips/regexp-macro-assembler-mips.cc b/deps/v8/src/mips/regexp-macro-assembler-mips.cc index 2fbc0eaa56..7289296d56 100644 --- a/deps/v8/src/mips/regexp-macro-assembler-mips.cc +++ b/deps/v8/src/mips/regexp-macro-assembler-mips.cc @@ -388,7 +388,7 @@ void RegExpMacroAssemblerMIPS::CheckNotBackReferenceIgnoreCase( // Address of current input position. __ Addu(a1, current_input_offset(), Operand(end_of_input_address())); // Isolate. - __ li(a3, Operand(ExternalReference::isolate_address())); + __ li(a3, Operand(ExternalReference::isolate_address(masm_->isolate()))); { AllowExternalCallThatCantCauseGC scope(masm_); @@ -901,7 +901,7 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) { __ PrepareCallCFunction(num_arguments, a0); __ mov(a0, backtrack_stackpointer()); __ Addu(a1, frame_pointer(), Operand(kStackHighEnd)); - __ li(a2, Operand(ExternalReference::isolate_address())); + __ li(a2, Operand(ExternalReference::isolate_address(masm_->isolate()))); ExternalReference grow_stack = ExternalReference::re_grow_stack(masm_->isolate()); __ CallCFunction(grow_stack, num_arguments); diff --git a/deps/v8/src/mips/regexp-macro-assembler-mips.h b/deps/v8/src/mips/regexp-macro-assembler-mips.h index 8dd52a4847..3ad64f9aeb 100644 --- a/deps/v8/src/mips/regexp-macro-assembler-mips.h +++ b/deps/v8/src/mips/regexp-macro-assembler-mips.h @@ -229,6 +229,7 @@ class RegExpMacroAssemblerMIPS: public NativeRegExpMacroAssembler { inline void CallCFunctionUsingStub(ExternalReference function, int num_arguments); + Isolate* isolate() const { return masm_->isolate(); } MacroAssembler* masm_; diff --git a/deps/v8/src/mips/simulator-mips.cc b/deps/v8/src/mips/simulator-mips.cc index bc384357c2..467345807a 100644 --- a/deps/v8/src/mips/simulator-mips.cc +++ b/deps/v8/src/mips/simulator-mips.cc @@ -26,8 +26,8 @@ // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#include <stdlib.h> -#include <math.h> #include <limits.h> +#include <cmath> #include <cstdarg> #include "v8.h" @@ -1155,7 +1155,7 @@ bool Simulator::test_fcsr_bit(uint32_t cc) { bool Simulator::set_fcsr_round_error(double original, double rounded) { bool ret = false; - if (!isfinite(original) || !isfinite(rounded)) { + if (!std::isfinite(original) || !std::isfinite(rounded)) { set_fcsr_bit(kFCSRInvalidOpFlagBit, true); ret = true; } @@ -2067,25 +2067,28 @@ void Simulator::DecodeTypeRegister(Instruction* instr) { set_fpu_register_double(fd_reg, sqrt(fs)); break; case C_UN_D: - set_fcsr_bit(fcsr_cc, isnan(fs) || isnan(ft)); + set_fcsr_bit(fcsr_cc, std::isnan(fs) || std::isnan(ft)); break; case C_EQ_D: set_fcsr_bit(fcsr_cc, (fs == ft)); break; case C_UEQ_D: - set_fcsr_bit(fcsr_cc, (fs == ft) || (isnan(fs) || isnan(ft))); + set_fcsr_bit(fcsr_cc, + (fs == ft) || (std::isnan(fs) || std::isnan(ft))); break; case C_OLT_D: set_fcsr_bit(fcsr_cc, (fs < ft)); break; case C_ULT_D: - set_fcsr_bit(fcsr_cc, (fs < ft) || (isnan(fs) || isnan(ft))); + set_fcsr_bit(fcsr_cc, + (fs < ft) || (std::isnan(fs) || std::isnan(ft))); break; case C_OLE_D: set_fcsr_bit(fcsr_cc, (fs <= ft)); break; case C_ULE_D: - set_fcsr_bit(fcsr_cc, (fs <= ft) || (isnan(fs) || isnan(ft))); + set_fcsr_bit(fcsr_cc, + (fs <= ft) || (std::isnan(fs) || std::isnan(ft))); break; case CVT_W_D: // Convert double to word. // Rounding modes are not yet supported. diff --git a/deps/v8/src/mips/stub-cache-mips.cc b/deps/v8/src/mips/stub-cache-mips.cc index b9757fa138..e110c47c6f 100644 --- a/deps/v8/src/mips/stub-cache-mips.cc +++ b/deps/v8/src/mips/stub-cache-mips.cc @@ -715,7 +715,7 @@ static void PushInterceptorArguments(MacroAssembler* masm, __ Push(scratch, receiver, holder); __ lw(scratch, FieldMemOperand(scratch, InterceptorInfo::kDataOffset)); __ push(scratch); - __ li(scratch, Operand(ExternalReference::isolate_address())); + __ li(scratch, Operand(ExternalReference::isolate_address(masm->isolate()))); __ push(scratch); } @@ -789,7 +789,7 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm, __ li(t2, call_data); } - __ li(t3, Operand(ExternalReference::isolate_address())); + __ li(t3, Operand(ExternalReference::isolate_address(masm->isolate()))); // Store JS function, call data and isolate. __ sw(t1, MemOperand(sp, 1 * kPointerSize)); __ sw(t2, MemOperand(sp, 2 * kPointerSize)); @@ -951,7 +951,9 @@ class CallInterceptorCompiler BASE_EMBEDDED { CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_) ? CALL_AS_FUNCTION : CALL_AS_METHOD; - __ InvokeFunction(optimization.constant_function(), arguments_, + Handle<JSFunction> function = optimization.constant_function(); + ParameterCount expected(function); + __ InvokeFunction(function, expected, arguments_, JUMP_FUNCTION, NullCallWrapper(), call_kind); } @@ -1165,7 +1167,7 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object, } // Log the check depth. - LOG(masm()->isolate(), IntEvent("check-maps-depth", depth + 1)); + LOG(isolate(), IntEvent("check-maps-depth", depth + 1)); if (!holder.is_identical_to(first) || check == CHECK_ALL_MAPS) { // Check the holder map. 
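The simulator-mips.cc hunks above also document the FPU compare semantics they emulate: the ordered predicates (C_OLT_D, C_OLE_D) are false whenever an operand is NaN, while the unordered ones (C_UN_D, C_UEQ_D, C_ULT_D, C_ULE_D) are true in that case, which is exactly why each unordered branch ORs in the std::isnan checks. A self-contained C++ sketch of one ordered/unordered pair (illustrative names, not simulator code):

#include <cmath>
#include <cstdio>
#include <limits>

// Ordered less-than: NaN in either operand makes the result false.
static bool c_olt_d(double fs, double ft) { return fs < ft; }

// Unordered less-than: true for less-than OR for any NaN operand.
static bool c_ult_d(double fs, double ft) {
  return (fs < ft) || std::isnan(fs) || std::isnan(ft);
}

int main() {
  const double nan = std::numeric_limits<double>::quiet_NaN();
  std::printf("%d %d\n", c_olt_d(nan, 1.0), c_ult_d(nan, 1.0));  // 0 1
  std::printf("%d %d\n", c_olt_d(0.5, 1.0), c_ult_d(0.5, 1.0));  // 1 1
  return 0;
}

The switch from <math.h> to <cmath> matters here because isnan is a macro in C99's math.h, whereas C++11's cmath provides std::isnan as a proper overloaded function.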
@@ -1292,13 +1294,13 @@ void BaseLoadStubCompiler::GenerateLoadCallback( __ lw(scratch3(), FieldMemOperand(scratch3(), ExecutableAccessorInfo::kDataOffset)); } else { - __ li(scratch3(), Handle<Object>(callback->data(), - callback->GetIsolate())); + __ li(scratch3(), Handle<Object>(callback->data(), isolate())); } __ Subu(sp, sp, 4 * kPointerSize); __ sw(reg, MemOperand(sp, 3 * kPointerSize)); __ sw(scratch3(), MemOperand(sp, 2 * kPointerSize)); - __ li(scratch3(), Operand(ExternalReference::isolate_address())); + __ li(scratch3(), + Operand(ExternalReference::isolate_address(isolate()))); __ sw(scratch3(), MemOperand(sp, 1 * kPointerSize)); __ sw(name(), MemOperand(sp, 0 * kPointerSize)); @@ -1323,10 +1325,8 @@ void BaseLoadStubCompiler::GenerateLoadCallback( const int kStackUnwindSpace = 5; Address getter_address = v8::ToCData<Address>(callback->getter()); ApiFunction fun(getter_address); - ExternalReference ref = - ExternalReference(&fun, - ExternalReference::DIRECT_GETTER_CALL, - masm()->isolate()); + ExternalReference ref = ExternalReference( + &fun, ExternalReference::DIRECT_GETTER_CALL, isolate()); __ CallApiFunctionAndReturn(ref, kStackUnwindSpace); } @@ -1411,7 +1411,7 @@ void BaseLoadStubCompiler::GenerateLoadInterceptor( this->name(), interceptor_holder); ExternalReference ref = ExternalReference( - IC_Utility(IC::kLoadPropertyWithInterceptorForLoad), masm()->isolate()); + IC_Utility(IC::kLoadPropertyWithInterceptorForLoad), isolate()); __ TailCallExternalReference(ref, 6, 1); } } @@ -1721,11 +1721,9 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall( __ bind(&no_fast_elements_check); ExternalReference new_space_allocation_top = - ExternalReference::new_space_allocation_top_address( - masm()->isolate()); + ExternalReference::new_space_allocation_top_address(isolate()); ExternalReference new_space_allocation_limit = - ExternalReference::new_space_allocation_limit_address( - masm()->isolate()); + ExternalReference::new_space_allocation_limit_address(isolate()); const int kAllocationDelta = 4; // Load top and check if it is the end of elements. @@ -1762,10 +1760,8 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall( __ Ret(); } __ bind(&call_builtin); - __ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPush, - masm()->isolate()), - argc + 1, - 1); + __ TailCallExternalReference( + ExternalReference(Builtins::c_ArrayPush, isolate()), argc + 1, 1); } // Handle call cache miss. @@ -1849,10 +1845,8 @@ Handle<Code> CallStubCompiler::CompileArrayPopCall( __ Ret(); __ bind(&call_builtin); - __ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPop, - masm()->isolate()), - argc + 1, - 1); + __ TailCallExternalReference( + ExternalReference(Builtins::c_ArrayPop, isolate()), argc + 1, 1); // Handle call cache miss. __ bind(&miss); @@ -2091,8 +2085,9 @@ Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall( // Tail call the full function. We do not have to patch the receiver // because the function makes no use of it. __ bind(&slow); - __ InvokeFunction( - function, arguments(), JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD); + ParameterCount expected(function); + __ InvokeFunction(function, expected, arguments(), + JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD); __ bind(&miss); // a2: function name. @@ -2221,8 +2216,9 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall( __ bind(&slow); // Tail call the full function. We do not have to patch the receiver // because the function makes no use of it. 
- __ InvokeFunction( - function, arguments(), JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD); + ParameterCount expected(function); + __ InvokeFunction(function, expected, arguments(), + JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD); __ bind(&miss); // a2: function name. @@ -2322,8 +2318,9 @@ Handle<Code> CallStubCompiler::CompileMathAbsCall( // Tail call the full function. We do not have to patch the receiver // because the function makes no use of it. __ bind(&slow); - __ InvokeFunction( - function, arguments(), JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD); + ParameterCount expected(function); + __ InvokeFunction(function, expected, arguments(), + JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD); __ bind(&miss); // a2: function name. @@ -2413,8 +2410,7 @@ void CallStubCompiler::CompileHandlerFrontend(Handle<Object> object, ASSERT(!object->IsGlobalObject() || check == RECEIVER_MAP_CHECK); switch (check) { case RECEIVER_MAP_CHECK: - __ IncrementCounter(masm()->isolate()->counters()->call_const(), - 1, a0, a3); + __ IncrementCounter(isolate()->counters()->call_const(), 1, a0, a3); // Check that the maps haven't changed. CheckPrototypes(Handle<JSObject>::cast(object), a1, holder, a0, a3, t0, @@ -2498,8 +2494,9 @@ void CallStubCompiler::CompileHandlerBackend(Handle<JSFunction> function) { CallKind call_kind = CallICBase::Contextual::decode(extra_state_) ? CALL_AS_FUNCTION : CALL_AS_METHOD; - __ InvokeFunction( - function, arguments(), JUMP_FUNCTION, NullCallWrapper(), call_kind); + ParameterCount expected(function); + __ InvokeFunction(function, expected, arguments(), + JUMP_FUNCTION, NullCallWrapper(), call_kind); } @@ -2605,7 +2602,7 @@ Handle<Code> CallStubCompiler::CompileCallGlobal( __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset)); // Jump to the cached code (tail call). - Counters* counters = masm()->isolate()->counters(); + Counters* counters = isolate()->counters(); __ IncrementCounter(counters->call_global_inline(), 1, a3, t0); ParameterCount expected(function->shared()->formal_parameter_count()); CallKind call_kind = CallICBase::Contextual::decode(extra_state_) @@ -2649,8 +2646,7 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback( // Do tail-call to the runtime system. ExternalReference store_callback_property = - ExternalReference(IC_Utility(IC::kStoreCallbackProperty), - masm()->isolate()); + ExternalReference(IC_Utility(IC::kStoreCallbackProperty), isolate()); __ TailCallExternalReference(store_callback_property, 4, 1); // Handle store cache miss. @@ -2686,8 +2682,9 @@ void StoreStubCompiler::GenerateStoreViaSetter( __ push(a1); __ push(a0); ParameterCount actual(1); - __ InvokeFunction(setter, actual, CALL_FUNCTION, NullCallWrapper(), - CALL_AS_METHOD); + ParameterCount expected(setter); + __ InvokeFunction(setter, expected, actual, + CALL_FUNCTION, NullCallWrapper(), CALL_AS_METHOD); } else { // If we generate a global code snippet for deoptimization only, remember // the place to continue after deoptimization. @@ -2733,8 +2730,7 @@ Handle<Code> StoreStubCompiler::CompileStoreInterceptor( // Do tail-call to the runtime system. ExternalReference store_ic_property = - ExternalReference(IC_Utility(IC::kStoreInterceptorProperty), - masm()->isolate()); + ExternalReference(IC_Utility(IC::kStoreInterceptorProperty), isolate()); __ TailCallExternalReference(store_ic_property, 4, 1); // Handle store cache miss. @@ -2772,7 +2768,7 @@ Handle<Code> StoreStubCompiler::CompileStoreGlobal( __ mov(v0, a0); // Stored value must be returned in v0. 
// Cells are always rescanned, so no write barrier here. - Counters* counters = masm()->isolate()->counters(); + Counters* counters = isolate()->counters(); __ IncrementCounter( counters->named_store_global_inline(), 1, scratch1(), scratch2()); __ Ret(); @@ -2867,8 +2863,9 @@ void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm, // Call the JavaScript getter with the receiver on the stack. __ push(a0); ParameterCount actual(0); - __ InvokeFunction(getter, actual, CALL_FUNCTION, NullCallWrapper(), - CALL_AS_METHOD); + ParameterCount expected(getter); + __ InvokeFunction(getter, expected, actual, + CALL_FUNCTION, NullCallWrapper(), CALL_AS_METHOD); } else { // If we generate a global code snippet for deoptimization only, remember // the place to continue after deoptimization. @@ -2912,7 +2909,7 @@ Handle<Code> LoadStubCompiler::CompileLoadGlobal( HandlerFrontendFooter(&success, &miss); __ bind(&success); - Counters* counters = masm()->isolate()->counters(); + Counters* counters = isolate()->counters(); __ IncrementCounter(counters->named_load_global_stub(), 1, a1, a3); __ mov(v0, t0); __ Ret(); @@ -3090,8 +3087,8 @@ Handle<Code> ConstructStubCompiler::CompileConstructStub( __ bind(&next); } else { // Set the property to the constant value. - Handle<Object> constant(shared->GetThisPropertyAssignmentConstant(i), - masm()->isolate()); + Handle<Object> constant( + shared->GetThisPropertyAssignmentConstant(i), isolate()); __ li(a2, Operand(constant)); __ sw(a2, MemOperand(t5)); __ Addu(t5, t5, kPointerSize); @@ -3119,7 +3116,7 @@ Handle<Code> ConstructStubCompiler::CompileConstructStub( __ sll(t0, a1, kPointerSizeLog2); __ Addu(sp, sp, t0); __ Addu(sp, sp, Operand(kPointerSize)); - Counters* counters = masm()->isolate()->counters(); + Counters* counters = isolate()->counters(); __ IncrementCounter(counters->constructed_objects(), 1, a1, a2); __ IncrementCounter(counters->constructed_objects_stub(), 1, a1, a2); __ Ret(); @@ -3128,7 +3125,7 @@ Handle<Code> ConstructStubCompiler::CompileConstructStub( // construction. __ bind(&generic_stub_call); Handle<Code> generic_construct_stub = - masm()->isolate()->builtins()->JSConstructStubGeneric(); + isolate()->builtins()->JSConstructStubGeneric(); __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET); // Return the generated code. diff --git a/deps/v8/src/objects-debug.cc b/deps/v8/src/objects-debug.cc index 24730a0412..ee6df1d2ab 100644 --- a/deps/v8/src/objects-debug.cc +++ b/deps/v8/src/objects-debug.cc @@ -401,7 +401,7 @@ void FixedDoubleArray::FixedDoubleArrayVerify() { for (int i = 0; i < length(); i++) { if (!is_the_hole(i)) { double value = get_scalar(i); - CHECK(!isnan(value) || + CHECK(!std::isnan(value) || (BitCast<uint64_t>(value) == BitCast<uint64_t>(canonical_not_the_hole_nan_as_double())) || ((BitCast<uint64_t>(value) & Double::kSignMask) != 0)); @@ -416,6 +416,7 @@ void JSGeneratorObject::JSGeneratorObjectVerify() { // initialized by the generator. Hence these weak checks. 
VerifyObjectField(kFunctionOffset); VerifyObjectField(kContextOffset); + VerifyObjectField(kReceiverOffset); VerifyObjectField(kOperandStackOffset); VerifyObjectField(kContinuationOffset); } diff --git a/deps/v8/src/objects-inl.h b/deps/v8/src/objects-inl.h index f3a029ee34..08378f1955 100644 --- a/deps/v8/src/objects-inl.h +++ b/deps/v8/src/objects-inl.h @@ -355,14 +355,14 @@ bool String::IsTwoByteRepresentationUnderneath() { } -bool String::HasOnlyAsciiChars() { +bool String::HasOnlyOneByteChars() { uint32_t type = map()->instance_type(); - return (type & kAsciiDataHintMask) == kAsciiDataHintTag; + return (type & kOneByteDataHintMask) == kOneByteDataHintTag; } bool String::IsOneByteConvertible() { - return HasOnlyAsciiChars() || IsOneByteRepresentation(); + return HasOnlyOneByteChars() || IsOneByteRepresentation(); } @@ -860,7 +860,7 @@ double Object::Number() { bool Object::IsNaN() { - return this->IsHeapNumber() && isnan(HeapNumber::cast(this)->value()); + return this->IsHeapNumber() && std::isnan(HeapNumber::cast(this)->value()); } @@ -1299,6 +1299,44 @@ void JSObject::ValidateElements() { } +bool JSObject::ShouldTrackAllocationInfo() { + if (map()->CanTrackAllocationSite()) { + if (!IsJSArray()) { + return true; + } + + return AllocationSiteInfo::GetMode(GetElementsKind()) == + TRACK_ALLOCATION_SITE; + } + return false; +} + + +// Heuristic: We only need to create allocation site info if the boilerplate +// elements kind is the initial elements kind. +AllocationSiteMode AllocationSiteInfo::GetMode( + ElementsKind boilerplate_elements_kind) { + if (FLAG_track_allocation_sites && + IsFastSmiElementsKind(boilerplate_elements_kind)) { + return TRACK_ALLOCATION_SITE; + } + + return DONT_TRACK_ALLOCATION_SITE; +} + + +AllocationSiteMode AllocationSiteInfo::GetMode(ElementsKind from, + ElementsKind to) { + if (FLAG_track_allocation_sites && + IsFastSmiElementsKind(from) && + (IsFastObjectElementsKind(to) || IsFastDoubleElementsKind(to))) { + return TRACK_ALLOCATION_SITE; + } + + return DONT_TRACK_ALLOCATION_SITE; +} + + MaybeObject* JSObject::EnsureCanContainHeapObjectElements() { ValidateElements(); ElementsKind elements_kind = map()->elements_kind(); @@ -1921,7 +1959,7 @@ void FixedDoubleArray::set(int index, double value) { ASSERT(map() != HEAP->fixed_cow_array_map() && map() != HEAP->fixed_array_map()); int offset = kHeaderSize + index * kDoubleSize; - if (isnan(value)) value = canonical_not_the_hole_nan_as_double(); + if (std::isnan(value)) value = canonical_not_the_hole_nan_as_double(); WRITE_DOUBLE_FIELD(this, offset, value); } @@ -3634,6 +3672,12 @@ Code::ExtraICState Code::extra_ic_state() { } +Code::ExtraICState Code::extended_extra_ic_state() { + ASSERT(is_inline_cache_stub() || ic_state() == DEBUG_STUB); + return ExtractExtendedExtraICStateFromFlags(flags()); +} + + Code::StubType Code::type() { return ExtractTypeFromFlags(flags()); } @@ -3663,6 +3707,7 @@ int Code::major_key() { kind() == UNARY_OP_IC || kind() == BINARY_OP_IC || kind() == COMPARE_IC || + kind() == COMPARE_NIL_IC || kind() == LOAD_IC || kind() == KEYED_LOAD_IC || kind() == TO_BOOLEAN_IC); @@ -3676,6 +3721,7 @@ void Code::set_major_key(int major) { kind() == UNARY_OP_IC || kind() == BINARY_OP_IC || kind() == COMPARE_IC || + kind() == COMPARE_NIL_IC || kind() == LOAD_IC || kind() == KEYED_LOAD_IC || kind() == STORE_IC || @@ -3689,7 +3735,7 @@ void Code::set_major_key(int major) { bool Code::is_pregenerated() { - return kind() == STUB && IsPregeneratedField::decode(flags()); + return (kind() == STUB && 
IsPregeneratedField::decode(flags())); } @@ -3940,13 +3986,23 @@ Code::Flags Code::ComputeFlags(Kind kind, int argc, InlineCacheHolderFlag holder) { ASSERT(argc <= Code::kMaxArguments); + // Since the extended extra ic state overlaps with the argument count + // for CALL_ICs, do so checks to make sure that they don't interfere. + ASSERT((kind != Code::CALL_IC && + kind != Code::KEYED_CALL_IC) || + (ExtraICStateField::encode(extra_ic_state) | true)); // Compute the bit mask. unsigned int bits = KindField::encode(kind) | ICStateField::encode(ic_state) | TypeField::encode(type) - | ExtraICStateField::encode(extra_ic_state) - | (argc << kArgumentsCountShift) + | ExtendedExtraICStateField::encode(extra_ic_state) | CacheHolderField::encode(holder); + // TODO(danno): This is a bit of a hack right now since there are still + // clients of this API that pass "extra" values in for argc. These clients + // should be retrofitted to used ExtendedExtraICState. + if (kind != Code::COMPARE_NIL_IC) { + bits |= (argc << kArgumentsCountShift); + } return static_cast<Flags>(bits); } @@ -3975,6 +4031,12 @@ Code::ExtraICState Code::ExtractExtraICStateFromFlags(Flags flags) { } +Code::ExtraICState Code::ExtractExtendedExtraICStateFromFlags( + Flags flags) { + return ExtendedExtraICStateField::decode(flags); +} + + Code::StubType Code::ExtractTypeFromFlags(Flags flags) { return TypeField::decode(flags); } @@ -5029,7 +5091,8 @@ void Foreign::set_foreign_address(Address value) { ACCESSORS(JSGeneratorObject, function, JSFunction, kFunctionOffset) -ACCESSORS(JSGeneratorObject, context, Object, kContextOffset) +ACCESSORS(JSGeneratorObject, context, Context, kContextOffset) +ACCESSORS(JSGeneratorObject, receiver, Object, kReceiverOffset) SMI_ACCESSORS(JSGeneratorObject, continuation, kContinuationOffset) ACCESSORS(JSGeneratorObject, operand_stack, FixedArray, kOperandStackOffset) @@ -5124,7 +5187,8 @@ void Code::set_type_feedback_info(Object* value, WriteBarrierMode mode) { int Code::stub_info() { - ASSERT(kind() == COMPARE_IC || kind() == BINARY_OP_IC || kind() == LOAD_IC); + ASSERT(kind() == COMPARE_IC || kind() == COMPARE_NIL_IC || + kind() == BINARY_OP_IC || kind() == LOAD_IC); Object* value = READ_FIELD(this, kTypeFeedbackInfoOffset); return Smi::cast(value)->value(); } @@ -5132,6 +5196,7 @@ int Code::stub_info() { void Code::set_stub_info(int value) { ASSERT(kind() == COMPARE_IC || + kind() == COMPARE_NIL_IC || kind() == BINARY_OP_IC || kind() == STUB || kind() == LOAD_IC || diff --git a/deps/v8/src/objects-printer.cc b/deps/v8/src/objects-printer.cc index f21481ab0b..5aeeec6567 100644 --- a/deps/v8/src/objects-printer.cc +++ b/deps/v8/src/objects-printer.cc @@ -495,11 +495,11 @@ static const char* TypeToString(InstanceType type) { return "CONS_STRING"; case EXTERNAL_STRING_TYPE: case EXTERNAL_ASCII_STRING_TYPE: - case EXTERNAL_STRING_WITH_ASCII_DATA_TYPE: + case EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE: return "EXTERNAL_STRING"; case SHORT_EXTERNAL_STRING_TYPE: case SHORT_EXTERNAL_ASCII_STRING_TYPE: - case SHORT_EXTERNAL_STRING_WITH_ASCII_DATA_TYPE: + case SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE: return "SHORT_EXTERNAL_STRING"; case INTERNALIZED_STRING_TYPE: return "INTERNALIZED_STRING"; case ASCII_INTERNALIZED_STRING_TYPE: return "ASCII_INTERNALIZED_STRING"; @@ -508,11 +508,11 @@ static const char* TypeToString(InstanceType type) { return "CONS_ASCII_INTERNALIZED_STRING"; case EXTERNAL_INTERNALIZED_STRING_TYPE: case EXTERNAL_ASCII_INTERNALIZED_STRING_TYPE: - case 
EXTERNAL_INTERNALIZED_STRING_WITH_ASCII_DATA_TYPE: + case EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE: return "EXTERNAL_INTERNALIZED_STRING"; case SHORT_EXTERNAL_INTERNALIZED_STRING_TYPE: case SHORT_EXTERNAL_ASCII_INTERNALIZED_STRING_TYPE: - case SHORT_EXTERNAL_INTERNALIZED_STRING_WITH_ASCII_DATA_TYPE: + case SHORT_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE: return "SHORT_EXTERNAL_INTERNALIZED_STRING"; case FIXED_ARRAY_TYPE: return "FIXED_ARRAY"; case BYTE_ARRAY_TYPE: return "BYTE_ARRAY"; diff --git a/deps/v8/src/objects.cc b/deps/v8/src/objects.cc index 2092859cd5..128c04da44 100644 --- a/deps/v8/src/objects.cc +++ b/deps/v8/src/objects.cc @@ -344,7 +344,7 @@ MaybeObject* JSObject::GetPropertyWithCallback(Object* receiver, v8::Handle<v8::Value> result; { // Leaving JavaScript. - VMState state(isolate, EXTERNAL); + VMState<EXTERNAL> state(isolate); result = call_fun(v8::Utils::ToLocal(key), info); } RETURN_IF_SCHEDULED_EXCEPTION(isolate); @@ -745,6 +745,20 @@ Handle<Object> Object::GetProperty(Handle<Object> object, } +MaybeObject* Object::GetPropertyOrFail(Handle<Object> object, + Handle<Object> receiver, + LookupResult* result, + Handle<Name> key, + PropertyAttributes* attributes) { + Isolate* isolate = object->IsHeapObject() + ? Handle<HeapObject>::cast(object)->GetIsolate() + : Isolate::Current(); + CALL_HEAP_FUNCTION_PASS_EXCEPTION( + isolate, + object->GetProperty(*receiver, result, *key, attributes)); +} + + MaybeObject* Object::GetProperty(Object* receiver, LookupResult* result, Name* name, @@ -951,7 +965,7 @@ bool Object::SameValue(Object* other) { double this_value = Number(); double other_value = other->Number(); return (this_value == other_value) || - (isnan(this_value) && isnan(other_value)); + (std::isnan(this_value) && std::isnan(other_value)); } if (IsString() && other->IsString()) { return String::cast(this)->Equals(String::cast(other)); @@ -1116,21 +1130,21 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) { this->set_map_no_write_barrier( is_internalized ? (is_ascii - ? heap->external_internalized_string_with_ascii_data_map() + ? heap->external_internalized_string_with_one_byte_data_map() : heap->external_internalized_string_map()) : (is_ascii - ? heap->external_string_with_ascii_data_map() + ? heap->external_string_with_one_byte_data_map() : heap->external_string_map())); } else { this->set_map_no_write_barrier( is_internalized - ? (is_ascii - ? heap-> - short_external_internalized_string_with_ascii_data_map() - : heap->short_external_internalized_string_map()) - : (is_ascii - ? heap->short_external_string_with_ascii_data_map() - : heap->short_external_string_map())); + ? (is_ascii + ? heap-> + short_external_internalized_string_with_one_byte_data_map() + : heap->short_external_internalized_string_map()) + : (is_ascii + ? heap->short_external_string_with_one_byte_data_map() + : heap->short_external_string_map())); } ExternalTwoByteString* self = ExternalTwoByteString::cast(this); self->set_resource(resource); @@ -2105,7 +2119,7 @@ MaybeObject* JSObject::SetPropertyWithInterceptor( v8::Handle<v8::Value> result; { // Leaving JavaScript. - VMState state(isolate, EXTERNAL); + VMState<EXTERNAL> state(isolate); Handle<Object> value_unhole(value->IsTheHole() ? 
isolate->heap()->undefined_value() : value, @@ -2139,6 +2153,19 @@ Handle<Object> JSReceiver::SetProperty(Handle<JSReceiver> object, } +MaybeObject* JSReceiver::SetPropertyOrFail( + Handle<JSReceiver> object, + Handle<Name> key, + Handle<Object> value, + PropertyAttributes attributes, + StrictModeFlag strict_mode, + JSReceiver::StoreFromKeyed store_mode) { + CALL_HEAP_FUNCTION_PASS_EXCEPTION( + object->GetIsolate(), + object->SetProperty(*key, *value, attributes, strict_mode, store_mode)); +} + + MaybeObject* JSReceiver::SetProperty(Name* name, Object* value, PropertyAttributes attributes, @@ -2203,7 +2230,7 @@ MaybeObject* JSObject::SetPropertyWithCallback(Object* structure, v8::AccessorInfo info(args.end()); { // Leaving JavaScript. - VMState state(isolate, EXTERNAL); + VMState<EXTERNAL> state(isolate); call_fun(v8::Utils::ToLocal(key), v8::Utils::ToLocal(value_handle), info); @@ -3251,9 +3278,11 @@ MaybeObject* JSObject::SetPropertyForResult(LookupResult* lookup, } else { LookupResult new_lookup(isolate); self->LocalLookup(*name, &new_lookup, true); - if (new_lookup.IsDataProperty() && - !Object::GetProperty(self, name)->SameValue(*old_value)) { - EnqueueChangeRecord(self, "updated", name, old_value); + if (new_lookup.IsDataProperty()) { + Handle<Object> new_value = Object::GetProperty(self, name); + if (!new_value->SameValue(*old_value)) { + EnqueueChangeRecord(self, "updated", name, old_value); + } } } } @@ -3403,8 +3432,11 @@ MaybeObject* JSObject::SetLocalPropertyIgnoreAttributes( } else { LookupResult new_lookup(isolate); self->LocalLookup(*name, &new_lookup, true); - bool value_changed = new_lookup.IsDataProperty() && - !old_value->SameValue(*Object::GetProperty(self, name)); + bool value_changed = false; + if (new_lookup.IsDataProperty()) { + Handle<Object> new_value = Object::GetProperty(self, name); + value_changed = !old_value->SameValue(*new_value); + } if (new_lookup.GetAttributes() != old_attributes) { if (!value_changed) old_value = isolate->factory()->the_hole_value(); EnqueueChangeRecord(self, "reconfigured", name, old_value); @@ -3467,7 +3499,7 @@ PropertyAttributes JSObject::GetPropertyAttributeWithInterceptor( v8::Handle<v8::Integer> result; { // Leaving JavaScript. - VMState state(isolate, EXTERNAL); + VMState<EXTERNAL> state(isolate); result = query(v8::Utils::ToLocal(name_handle), info); } if (!result.IsEmpty()) { @@ -3482,7 +3514,7 @@ PropertyAttributes JSObject::GetPropertyAttributeWithInterceptor( v8::Handle<v8::Value> result; { // Leaving JavaScript. - VMState state(isolate, EXTERNAL); + VMState<EXTERNAL> state(isolate); result = getter(v8::Utils::ToLocal(name_handle), info); } if (!result.IsEmpty()) return DONT_ENUM; @@ -3608,7 +3640,7 @@ PropertyAttributes JSObject::GetElementAttributeWithInterceptor( v8::Handle<v8::Integer> result; { // Leaving JavaScript. - VMState state(isolate, EXTERNAL); + VMState<EXTERNAL> state(isolate); result = query(index, info); } if (!result.IsEmpty()) @@ -3621,7 +3653,7 @@ PropertyAttributes JSObject::GetElementAttributeWithInterceptor( v8::Handle<v8::Value> result; { // Leaving JavaScript. 
- VMState state(isolate, EXTERNAL); + VMState<EXTERNAL> state(isolate); result = getter(index, info); } if (!result.IsEmpty()) return NONE; @@ -4000,10 +4032,10 @@ MaybeObject* JSObject::SetIdentityHash(Smi* hash, CreationFlag flag) { int JSObject::GetIdentityHash(Handle<JSObject> obj) { - CALL_AND_RETRY(obj->GetIsolate(), - obj->GetIdentityHash(ALLOW_CREATION), - return Smi::cast(__object__)->value(), - return 0); + CALL_AND_RETRY_OR_DIE(obj->GetIsolate(), + obj->GetIdentityHash(ALLOW_CREATION), + return Smi::cast(__object__)->value(), + return 0); } @@ -4291,7 +4323,7 @@ MaybeObject* JSObject::DeletePropertyWithInterceptor(Name* name) { v8::Handle<v8::Boolean> result; { // Leaving JavaScript. - VMState state(isolate, EXTERNAL); + VMState<EXTERNAL> state(isolate); result = deleter(v8::Utils::ToLocal(name_handle), info); } RETURN_IF_SCHEDULED_EXCEPTION(isolate); @@ -4328,7 +4360,7 @@ MaybeObject* JSObject::DeleteElementWithInterceptor(uint32_t index) { v8::Handle<v8::Boolean> result; { // Leaving JavaScript. - VMState state(isolate, EXTERNAL); + VMState<EXTERNAL> state(isolate); result = deleter(index, info); } RETURN_IF_SCHEDULED_EXCEPTION(isolate); @@ -5363,9 +5395,9 @@ MaybeObject* JSObject::DefineFastAccessor(Name* name, LookupResult result(GetIsolate()); LocalLookup(name, &result); - if (result.IsFound() - && !result.IsPropertyCallbacks() - && !result.IsTransition()) return GetHeap()->null_value(); + if (result.IsFound() && !result.IsPropertyCallbacks()) { + return GetHeap()->null_value(); + } // Return success if the same accessor with the same attributes already exist. AccessorPair* source_accessors = NULL; @@ -7893,31 +7925,6 @@ bool AllocationSiteInfo::GetElementsKindPayload(ElementsKind* kind) { } -// Heuristic: We only need to create allocation site info if the boilerplate -// elements kind is the initial elements kind. -AllocationSiteMode AllocationSiteInfo::GetMode( - ElementsKind boilerplate_elements_kind) { - if (FLAG_track_allocation_sites && - IsFastSmiElementsKind(boilerplate_elements_kind)) { - return TRACK_ALLOCATION_SITE; - } - - return DONT_TRACK_ALLOCATION_SITE; -} - - -AllocationSiteMode AllocationSiteInfo::GetMode(ElementsKind from, - ElementsKind to) { - if (FLAG_track_allocation_sites && - IsFastSmiElementsKind(from) && - (IsFastObjectElementsKind(to) || IsFastDoubleElementsKind(to))) { - return TRACK_ALLOCATION_SITE; - } - - return DONT_TRACK_ALLOCATION_SITE; -} - - uint32_t StringHasher::MakeArrayIndexHash(uint32_t value, int length) { // For array indexes mix the length into the hash as an array index could // be zero. @@ -8362,13 +8369,13 @@ MaybeObject* JSObject::OptimizeAsPrototype() { } -MUST_USE_RESULT static MaybeObject* CacheInitialJSArrayMaps( +static MUST_USE_RESULT MaybeObject* CacheInitialJSArrayMaps( Context* native_context, Map* initial_map) { // Replace all of the cached initial array maps in the native context with // the appropriate transitioned elements kind maps. 
Heap* heap = native_context->GetHeap(); MaybeObject* maybe_maps = - heap->AllocateFixedArrayWithHoles(kElementsKindCount); + heap->AllocateFixedArrayWithHoles(kElementsKindCount, TENURED); FixedArray* maps; if (!maybe_maps->To(&maps)) return maybe_maps; @@ -8391,6 +8398,14 @@ MUST_USE_RESULT static MaybeObject* CacheInitialJSArrayMaps( } +Handle<Object> CacheInitialJSArrayMaps(Handle<Context> native_context, + Handle<Map> initial_map) { + CALL_HEAP_FUNCTION(native_context->GetIsolate(), + CacheInitialJSArrayMaps(*native_context, *initial_map), + Object); +} + + MaybeObject* JSFunction::SetInstancePrototype(Object* value) { ASSERT(value->IsJSReceiver()); Heap* heap = GetHeap(); @@ -8994,6 +9009,12 @@ void ObjectVisitor::VisitExternalReference(RelocInfo* rinfo) { VisitExternalReferences(p, p + 1); } +byte Code::compare_nil_state() { + ASSERT(is_compare_nil_ic_stub()); + return CompareNilICStub::TypesFromExtraICState(extended_extra_ic_state()); +} + + void Code::InvalidateRelocation() { set_relocation_info(GetHeap()->empty_byte_array()); } @@ -9028,6 +9049,7 @@ void Code::CopyFrom(const CodeDesc& desc) { RelocInfo::kApplyMask; // Needed to find target_object and runtime_entry on X64 Assembler* origin = desc.origin; + ALLOW_HANDLE_DEREF(GetIsolate(), "embedding raw addresses into code"); for (RelocIterator it(this, mode_mask); !it.done(); it.next()) { RelocInfo::Mode mode = it.rinfo()->rmode(); if (mode == RelocInfo::EMBEDDED_OBJECT) { @@ -9129,6 +9151,22 @@ Map* Code::FindFirstMap() { } +void Code::ReplaceFirstMap(Map* replace_with) { + ASSERT(is_inline_cache_stub()); + AssertNoAllocation no_allocation; + int mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT); + for (RelocIterator it(this, mask); !it.done(); it.next()) { + RelocInfo* info = it.rinfo(); + Object* object = info->target_object(); + if (object->IsMap()) { + info->set_target_object(replace_with); + return; + } + } + UNREACHABLE(); +} + + void Code::FindAllMaps(MapHandleList* maps) { ASSERT(is_inline_cache_stub()); AssertNoAllocation no_allocation; @@ -9324,6 +9362,7 @@ const char* Code::Kind2String(Kind kind) { case UNARY_OP_IC: return "UNARY_OP_IC"; case BINARY_OP_IC: return "BINARY_OP_IC"; case COMPARE_IC: return "COMPARE_IC"; + case COMPARE_NIL_IC: return "COMPARE_NIL_IC"; case TO_BOOLEAN_IC: return "TO_BOOLEAN_IC"; } UNREACHABLE(); @@ -10238,7 +10277,7 @@ MaybeObject* JSObject::SetElementWithInterceptor(uint32_t index, v8::Handle<v8::Value> result; { // Leaving JavaScript. - VMState state(isolate, EXTERNAL); + VMState<EXTERNAL> state(isolate); result = setter(index, v8::Utils::ToLocal(value_handle), info); } RETURN_IF_SCHEDULED_EXCEPTION(isolate); @@ -10281,7 +10320,7 @@ MaybeObject* JSObject::GetElementWithCallback(Object* receiver, v8::Handle<v8::Value> result; { // Leaving JavaScript. - VMState state(isolate, EXTERNAL); + VMState<EXTERNAL> state(isolate); result = call_fun(v8::Utils::ToLocal(key), info); } RETURN_IF_SCHEDULED_EXCEPTION(isolate); @@ -10347,7 +10386,7 @@ MaybeObject* JSObject::SetElementWithCallback(Object* structure, v8::AccessorInfo info(args.end()); { // Leaving JavaScript. 
- VMState state(isolate, EXTERNAL); + VMState<EXTERNAL> state(isolate); call_fun(v8::Utils::ToLocal(key), v8::Utils::ToLocal(value_handle), info); @@ -10923,8 +10962,8 @@ MaybeObject* JSObject::SetElement(uint32_t index, } else if (old_value->IsTheHole()) { EnqueueChangeRecord(self, "reconfigured", name, old_value); } else { - bool value_changed = - !old_value->SameValue(*Object::GetElement(self, index)); + Handle<Object> new_value = Object::GetElement(self, index); + bool value_changed = !old_value->SameValue(*new_value); if (old_attributes != new_attributes) { if (!value_changed) old_value = isolate->factory()->the_hole_value(); EnqueueChangeRecord(self, "reconfigured", name, old_value); @@ -11227,7 +11266,7 @@ MaybeObject* JSObject::GetElementWithInterceptor(Object* receiver, v8::Handle<v8::Value> result; { // Leaving JavaScript. - VMState state(isolate, EXTERNAL); + VMState<EXTERNAL> state(isolate); result = getter(index, info); } RETURN_IF_SCHEDULED_EXCEPTION(isolate); @@ -11537,7 +11576,7 @@ MaybeObject* JSObject::GetPropertyWithInterceptor( v8::Handle<v8::Value> result; { // Leaving JavaScript. - VMState state(isolate, EXTERNAL); + VMState<EXTERNAL> state(isolate); result = getter(v8::Utils::ToLocal(name_handle), info); } RETURN_IF_SCHEDULED_EXCEPTION(isolate); @@ -14391,7 +14430,7 @@ Object* JSDate::DoGetField(FieldIndex index) { } double time = value()->Number(); - if (isnan(time)) return GetIsolate()->heap()->nan_value(); + if (std::isnan(time)) return GetIsolate()->heap()->nan_value(); int64_t local_time_ms = date_cache->ToLocal(static_cast<int64_t>(time)); int days = DateCache::DaysFromTime(local_time_ms); @@ -14410,7 +14449,7 @@ Object* JSDate::GetUTCField(FieldIndex index, DateCache* date_cache) { ASSERT(index >= kFirstUTCField); - if (isnan(value)) return GetIsolate()->heap()->nan_value(); + if (std::isnan(value)) return GetIsolate()->heap()->nan_value(); int64_t time_ms = static_cast<int64_t>(value); diff --git a/deps/v8/src/objects.h b/deps/v8/src/objects.h index 3ca89f08ea..e32c41bb13 100644 --- a/deps/v8/src/objects.h +++ b/deps/v8/src/objects.h @@ -330,10 +330,10 @@ const int kStubMinorKeyBits = kBitsPerInt - kSmiTagSize - kStubMajorKeyBits; V(SLICED_STRING_TYPE) \ V(EXTERNAL_STRING_TYPE) \ V(EXTERNAL_ASCII_STRING_TYPE) \ - V(EXTERNAL_STRING_WITH_ASCII_DATA_TYPE) \ + V(EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE) \ V(SHORT_EXTERNAL_STRING_TYPE) \ V(SHORT_EXTERNAL_ASCII_STRING_TYPE) \ - V(SHORT_EXTERNAL_STRING_WITH_ASCII_DATA_TYPE) \ + V(SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE) \ \ V(INTERNALIZED_STRING_TYPE) \ V(ASCII_INTERNALIZED_STRING_TYPE) \ @@ -341,10 +341,10 @@ const int kStubMinorKeyBits = kBitsPerInt - kSmiTagSize - kStubMajorKeyBits; V(CONS_ASCII_INTERNALIZED_STRING_TYPE) \ V(EXTERNAL_INTERNALIZED_STRING_TYPE) \ V(EXTERNAL_ASCII_INTERNALIZED_STRING_TYPE) \ - V(EXTERNAL_INTERNALIZED_STRING_WITH_ASCII_DATA_TYPE) \ + V(EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE) \ V(SHORT_EXTERNAL_INTERNALIZED_STRING_TYPE) \ V(SHORT_EXTERNAL_ASCII_INTERNALIZED_STRING_TYPE) \ - V(SHORT_EXTERNAL_INTERNALIZED_STRING_WITH_ASCII_DATA_TYPE) \ + V(SHORT_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE) \ \ V(SYMBOL_TYPE) \ V(MAP_TYPE) \ @@ -366,6 +366,7 @@ const int kStubMinorKeyBits = kBitsPerInt - kSmiTagSize - kStubMajorKeyBits; V(EXTERNAL_INT_ARRAY_TYPE) \ V(EXTERNAL_UNSIGNED_INT_ARRAY_TYPE) \ V(EXTERNAL_FLOAT_ARRAY_TYPE) \ + V(EXTERNAL_DOUBLE_ARRAY_TYPE) \ V(EXTERNAL_PIXEL_ARRAY_TYPE) \ V(FILLER_TYPE) \ \ @@ -460,10 +461,10 @@ const int kStubMinorKeyBits = 
kBitsPerInt - kSmiTagSize - kStubMajorKeyBits; ExternalAsciiString::kSize, \ external_ascii_string, \ ExternalAsciiString) \ - V(EXTERNAL_STRING_WITH_ASCII_DATA_TYPE, \ + V(EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE, \ ExternalTwoByteString::kSize, \ - external_string_with_ascii_data, \ - ExternalStringWithAsciiData) \ + external_string_with_one_byte_data, \ + ExternalStringWithOneByteData) \ V(SHORT_EXTERNAL_STRING_TYPE, \ ExternalTwoByteString::kShortSize, \ short_external_string, \ @@ -472,10 +473,10 @@ const int kStubMinorKeyBits = kBitsPerInt - kSmiTagSize - kStubMajorKeyBits; ExternalAsciiString::kShortSize, \ short_external_ascii_string, \ ShortExternalAsciiString) \ - V(SHORT_EXTERNAL_STRING_WITH_ASCII_DATA_TYPE, \ + V(SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE, \ ExternalTwoByteString::kShortSize, \ - short_external_string_with_ascii_data, \ - ShortExternalStringWithAsciiData) \ + short_external_string_with_one_byte_data, \ + ShortExternalStringWithOneByteData) \ \ V(INTERNALIZED_STRING_TYPE, \ kVariableSizeSentinel, \ @@ -501,10 +502,10 @@ const int kStubMinorKeyBits = kBitsPerInt - kSmiTagSize - kStubMajorKeyBits; ExternalAsciiString::kSize, \ external_ascii_internalized_string, \ ExternalAsciiInternalizedString) \ - V(EXTERNAL_INTERNALIZED_STRING_WITH_ASCII_DATA_TYPE, \ + V(EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE, \ ExternalTwoByteString::kSize, \ - external_internalized_string_with_ascii_data, \ - ExternalInternalizedStringWithAsciiData) \ + external_internalized_string_with_one_byte_data, \ + ExternalInternalizedStringWithOneByteData) \ V(SHORT_EXTERNAL_INTERNALIZED_STRING_TYPE, \ ExternalTwoByteString::kShortSize, \ short_external_internalized_string, \ @@ -513,10 +514,10 @@ const int kStubMinorKeyBits = kBitsPerInt - kSmiTagSize - kStubMajorKeyBits; ExternalAsciiString::kShortSize, \ short_external_ascii_internalized_string, \ ShortExternalAsciiInternalizedString) \ - V(SHORT_EXTERNAL_INTERNALIZED_STRING_WITH_ASCII_DATA_TYPE, \ + V(SHORT_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE, \ ExternalTwoByteString::kShortSize, \ - short_external_internalized_string_with_ascii_data, \ - ShortExternalInternalizedStringWithAsciiData) \ + short_external_internalized_string_with_one_byte_data, \ + ShortExternalInternalizedStringWithOneByteData) \ // A struct is a simple object with a set of object-valued fields. Including an // object type in this causes the compiler to generate most of the boilerplate @@ -604,9 +605,9 @@ const uint32_t kSlicedNotConsMask = kSlicedStringTag & ~kConsStringTag; STATIC_ASSERT(IS_POWER_OF_TWO(kSlicedNotConsMask) && kSlicedNotConsMask != 0); // If bit 7 is clear, then bit 3 indicates whether this two-byte -// string actually contains ASCII data. -const uint32_t kAsciiDataHintMask = 0x08; -const uint32_t kAsciiDataHintTag = 0x08; +// string actually contains one-byte data. +const uint32_t kOneByteDataHintMask = 0x08; +const uint32_t kOneByteDataHintTag = 0x08; // If bit 7 is clear and string representation indicates an external string, // then bit 4 indicates whether the data pointer is cached.
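The hint constants above support a cheap instance-type test. The following self-contained sketch mirrors the String::HasOnlyOneByteChars check from the objects-inl.h hunk earlier in this diff, with the relevant constants inlined; it is illustrative, not the real class method:

    #include <stdint.h>

    static const uint32_t kOneByteDataHintMask = 0x08;
    static const uint32_t kOneByteDataHintTag = 0x08;

    // True when the instance type carries the one-byte data hint. As the
    // comment in objects.h notes, this is only a hint: false negatives are
    // possible for two-byte strings that happen to hold one-byte data.
    bool HasOneByteDataHint(uint32_t instance_type) {
      return (instance_type & kOneByteDataHintMask) == kOneByteDataHintTag;
    }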
@@ -636,13 +637,13 @@ enum InstanceType { SLICED_ASCII_STRING_TYPE = kOneByteStringTag | kSlicedStringTag, EXTERNAL_STRING_TYPE = kTwoByteStringTag | kExternalStringTag, EXTERNAL_ASCII_STRING_TYPE = kOneByteStringTag | kExternalStringTag, - EXTERNAL_STRING_WITH_ASCII_DATA_TYPE = - EXTERNAL_STRING_TYPE | kAsciiDataHintTag, + EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE = + EXTERNAL_STRING_TYPE | kOneByteDataHintTag, SHORT_EXTERNAL_STRING_TYPE = EXTERNAL_STRING_TYPE | kShortExternalStringTag, SHORT_EXTERNAL_ASCII_STRING_TYPE = EXTERNAL_ASCII_STRING_TYPE | kShortExternalStringTag, - SHORT_EXTERNAL_STRING_WITH_ASCII_DATA_TYPE = - EXTERNAL_STRING_WITH_ASCII_DATA_TYPE | kShortExternalStringTag, + SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE = + EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE | kShortExternalStringTag, INTERNALIZED_STRING_TYPE = STRING_TYPE | kInternalizedTag, ASCII_INTERNALIZED_STRING_TYPE = ASCII_STRING_TYPE | kInternalizedTag, @@ -652,14 +653,14 @@ enum InstanceType { EXTERNAL_INTERNALIZED_STRING_TYPE = EXTERNAL_STRING_TYPE | kInternalizedTag, EXTERNAL_ASCII_INTERNALIZED_STRING_TYPE = EXTERNAL_ASCII_STRING_TYPE | kInternalizedTag, - EXTERNAL_INTERNALIZED_STRING_WITH_ASCII_DATA_TYPE = - EXTERNAL_STRING_WITH_ASCII_DATA_TYPE | kInternalizedTag, + EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE = + EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE | kInternalizedTag, SHORT_EXTERNAL_INTERNALIZED_STRING_TYPE = SHORT_EXTERNAL_STRING_TYPE | kInternalizedTag, SHORT_EXTERNAL_ASCII_INTERNALIZED_STRING_TYPE = SHORT_EXTERNAL_ASCII_STRING_TYPE | kInternalizedTag, - SHORT_EXTERNAL_INTERNALIZED_STRING_WITH_ASCII_DATA_TYPE = - SHORT_EXTERNAL_STRING_WITH_ASCII_DATA_TYPE | kInternalizedTag, + SHORT_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE = + SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE | kInternalizedTag, // Non-string names SYMBOL_TYPE = kNotStringTag, // LAST_NAME_TYPE, FIRST_NONSTRING_TYPE @@ -1096,6 +1097,13 @@ class Object : public MaybeObject { Handle<Name> key, PropertyAttributes* attributes); + MUST_USE_RESULT static MaybeObject* GetPropertyOrFail( + Handle<Object> object, + Handle<Object> receiver, + LookupResult* result, + Handle<Name> key, + PropertyAttributes* attributes); + MUST_USE_RESULT MaybeObject* GetProperty(Object* receiver, LookupResult* result, Name* key, @@ -1569,6 +1577,15 @@ class JSReceiver: public HeapObject { Handle<Object> value, PropertyAttributes attributes, StrictModeFlag strict_mode); + + MUST_USE_RESULT static MaybeObject* SetPropertyOrFail( + Handle<JSReceiver> object, + Handle<Name> key, + Handle<Object> value, + PropertyAttributes attributes, + StrictModeFlag strict_mode, + StoreFromKeyed store_from_keyed = MAY_BE_STORE_FROM_KEYED); + // Can cause GC. MUST_USE_RESULT MaybeObject* SetProperty( Name* key, @@ -1721,6 +1738,8 @@ class JSObject: public JSReceiver { bool HasDictionaryArgumentsElements(); inline SeededNumberDictionary* element_dictionary(); // Gets slow elements. + inline bool ShouldTrackAllocationInfo(); + inline void set_map_and_elements( Map* map, FixedArrayBase* value, @@ -4337,6 +4356,7 @@ class Code: public HeapObject { V(UNARY_OP_IC) \ V(BINARY_OP_IC) \ V(COMPARE_IC) \ + V(COMPARE_NIL_IC) \ V(TO_BOOLEAN_IC) enum Kind { @@ -4448,6 +4468,8 @@ class Code: public HeapObject { inline Kind kind(); inline InlineCacheState ic_state(); // Only valid for IC stubs. inline ExtraICState extra_ic_state(); // Only valid for IC stubs. + inline ExtraICState extended_extra_ic_state(); // Only valid for + // non-call IC stubs. 
inline StubType type(); // Only valid for monomorphic IC stubs. inline int arguments_count(); // Only valid for call IC stubs. @@ -4463,6 +4485,7 @@ class Code: public HeapObject { inline bool is_unary_op_stub() { return kind() == UNARY_OP_IC; } inline bool is_binary_op_stub() { return kind() == BINARY_OP_IC; } inline bool is_compare_ic_stub() { return kind() == COMPARE_IC; } + inline bool is_compare_nil_ic_stub() { return kind() == COMPARE_NIL_IC; } inline bool is_to_boolean_ic_stub() { return kind() == TO_BOOLEAN_IC; } // [major_key]: For kind STUB or BINARY_OP_IC, the major key. @@ -4541,6 +4564,9 @@ class Code: public HeapObject { inline byte to_boolean_state(); inline void set_to_boolean_state(byte value); + // [compare_nil]: For kind COMPARE_NIL_IC tells what state the stub is in. + byte compare_nil_state(); + // [has_function_cache]: For kind STUB tells whether there is a function // cache is passed to the stub. inline bool has_function_cache(); @@ -4560,6 +4586,7 @@ class Code: public HeapObject { // Find the first map in an IC stub. Map* FindFirstMap(); void FindAllMaps(MapHandleList* maps); + void ReplaceFirstMap(Map* replace); // Find the first code in an IC stub. Code* FindFirstCode(); @@ -4612,6 +4639,7 @@ class Code: public HeapObject { static inline Kind ExtractKindFromFlags(Flags flags); static inline InlineCacheHolderFlag ExtractCacheHolderFromFlags(Flags flags); static inline ExtraICState ExtractExtraICStateFromFlags(Flags flags); + static inline ExtraICState ExtractExtendedExtraICStateFromFlags(Flags flags); static inline int ExtractArgumentsCountFromFlags(Flags flags); static inline Flags RemoveTypeFromFlags(Flags flags); @@ -4751,8 +4779,11 @@ class Code: public HeapObject { class TypeField: public BitField<StubType, 3, 3> {}; class CacheHolderField: public BitField<InlineCacheHolderFlag, 6, 1> {}; class KindField: public BitField<Kind, 7, 4> {}; - class ExtraICStateField: public BitField<ExtraICState, 11, 5> {}; - class IsPregeneratedField: public BitField<bool, 16, 1> {}; + class IsPregeneratedField: public BitField<bool, 11, 1> {}; + class ExtraICStateField: public BitField<ExtraICState, 12, 5> {}; + class ExtendedExtraICStateField: public BitField<ExtraICState, 12, + PlatformSmiTagging::kSmiValueSize - 12 + 1> {}; // NOLINT + STATIC_ASSERT(ExtraICStateField::kShift == ExtendedExtraICStateField::kShift); // KindSpecificFlags1 layout (STUB and OPTIMIZED_FUNCTION) static const int kStackSlotsFirstBit = 0; @@ -4825,6 +4856,13 @@ class Code: public HeapObject { PlatformSmiTagging::kSmiValueSize - Code::kArgumentsCountShift + 1; static const int kMaxArguments = (1 << kArgumentsBits) - 1; + // ICs can use either argument count or ExtendedExtraIC, since their storage + // overlaps. + STATIC_ASSERT(ExtraICStateField::kShift + + ExtraICStateField::kSize + kArgumentsBits == + ExtendedExtraICStateField::kShift + + ExtendedExtraICStateField::kSize); + // This constant should be encodable in an ARM instruction. static const int kFlagsNotUsedInLookup = TypeField::kMask | CacheHolderField::kMask; @@ -5641,7 +5679,8 @@ class Script: public Struct { V(Math, pow, MathPow) \ V(Math, random, MathRandom) \ V(Math, max, MathMax) \ - V(Math, min, MathMin) + V(Math, min, MathMin) \ + V(Math, imul, MathImul) enum BuiltinFunctionId { @@ -6281,10 +6320,17 @@ class JSGeneratorObject: public JSObject { // [function]: The function corresponding to this generator object. DECL_ACCESSORS(function, JSFunction) - // [context]: The context of the suspended computation, or undefined. 
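The Code::Flags changes shown above repack several bit fields, with ExtraICStateField and ExtendedExtraICStateField deliberately sharing shift 12 so that non-call ICs (such as the new COMPARE_NIL_IC) can spill extra state into the bits otherwise used for the argument count. A simplified model of the BitField template those declarations rely on (the real one lives in v8's utils.h; the field positions below are only the ones visible in the hunk):

    #include <stdint.h>

    // Simplified model of v8's BitField<T, shift, size>: encode() shifts a
    // value into its field, decode() masks it back out of the packed word.
    template <class T, int shift, int size>
    struct BitField {
      static const uint32_t kMask = ((1u << size) - 1u) << shift;
      static uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << shift;
      }
      static T decode(uint32_t flags) {
        return static_cast<T>((flags & kMask) >> shift);
      }
    };

    // Illustrative instantiations matching the layout in the hunk; two fields
    // may overlap on purpose, in which case a stub must use one view or the
    // other, never both, which is what the STATIC_ASSERTs police.
    typedef BitField<bool, 11, 1> IsPregeneratedFieldModel;
    typedef BitField<int, 12, 5> ExtraICStateFieldModel;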
- DECL_ACCESSORS(context, Object) + // [context]: The context of the suspended computation. + DECL_ACCESSORS(context, Context) + + // [receiver]: The receiver of the suspended computation. + DECL_ACCESSORS(receiver, Object) // [continuation]: Offset into code of continuation. + // + // A positive offset indicates a suspended generator. The special + // kGeneratorExecuting and kGeneratorClosed values indicate that a generator + // cannot be resumed. inline int continuation(); inline void set_continuation(int continuation); @@ -6298,13 +6344,21 @@ class JSGeneratorObject: public JSObject { DECLARE_PRINTER(JSGeneratorObject) DECLARE_VERIFIER(JSGeneratorObject) + // Magic sentinel values for the continuation. + static const int kGeneratorExecuting = -1; + static const int kGeneratorClosed = 0; + // Layout description. static const int kFunctionOffset = JSObject::kHeaderSize; static const int kContextOffset = kFunctionOffset + kPointerSize; - static const int kContinuationOffset = kContextOffset + kPointerSize; + static const int kReceiverOffset = kContextOffset + kPointerSize; + static const int kContinuationOffset = kReceiverOffset + kPointerSize; static const int kOperandStackOffset = kContinuationOffset + kPointerSize; static const int kSize = kOperandStackOffset + kPointerSize; + // Resume mode, for use by runtime functions. + enum ResumeMode { SEND, THROW }; + private: DISALLOW_IMPLICIT_CONSTRUCTORS(JSGeneratorObject); }; @@ -7245,9 +7299,9 @@ class AllocationSiteInfo: public Struct { // Returns NULL if no AllocationSiteInfo is available for object. static AllocationSiteInfo* FindForJSObject(JSObject* object); - - static AllocationSiteMode GetMode(ElementsKind boilerplate_elements_kind); - static AllocationSiteMode GetMode(ElementsKind from, ElementsKind to); + static inline AllocationSiteMode GetMode( + ElementsKind boilerplate_elements_kind); + static inline AllocationSiteMode GetMode(ElementsKind from, ElementsKind to); static const int kPayloadOffset = HeapObject::kHeaderSize; static const int kSize = kPayloadOffset + kPointerSize; @@ -7582,7 +7636,7 @@ class String: public Name { // NOTE: this should be considered only a hint. False negatives are // possible. - inline bool HasOnlyAsciiChars(); + inline bool HasOnlyOneByteChars(); inline bool IsOneByteConvertible(); @@ -8554,7 +8608,7 @@ class JSWeakMap: public JSObject { class JSArrayBuffer: public JSObject { public: - // [backing_store]: backing memory for thsi array + // [backing_store]: backing memory for this array DECL_ACCESSORS(backing_store, void) // [byte_length]: length in bytes @@ -8699,6 +8753,10 @@ class JSArray: public JSObject { }; +Handle<Object> CacheInitialJSArrayMaps(Handle<Context> native_context, + Handle<Map> initial_map); + + // JSRegExpResult is just a JSArray with a specific initial map. // This initial map adds in-object properties for "index" and "input" // properties, as assigned by RegExp.prototype.exec, which allows diff --git a/deps/v8/src/optimizing-compiler-thread.cc b/deps/v8/src/optimizing-compiler-thread.cc index b982b94198..1e2e0a85df 100644 --- a/deps/v8/src/optimizing-compiler-thread.cc +++ b/deps/v8/src/optimizing-compiler-thread.cc @@ -88,7 +88,9 @@ void OptimizingCompilerThread::CompileNext() { // The function may have already been optimized by OSR. Simply continue. // Mark it for installing before queuing so that we can be sure of the write // order: marking first and (after being queued) installing code second. 
- optimizing_compiler->info()->closure()->MarkForInstallingRecompiledCode(); + { Heap::RelocationLock relocation_lock(isolate_->heap()); + optimizing_compiler->info()->closure()->MarkForInstallingRecompiledCode(); + } output_queue_.Enqueue(optimizing_compiler); } diff --git a/deps/v8/src/parser.cc b/deps/v8/src/parser.cc index b63911bc97..267b8722c5 100644 --- a/deps/v8/src/parser.cc +++ b/deps/v8/src/parser.cc @@ -2511,16 +2511,24 @@ Statement* Parser::ParseReturnStatement(bool* ok) { Token::Value tok = peek(); Statement* result; + Expression* return_value; if (scanner().HasAnyLineTerminatorBeforeNext() || tok == Token::SEMICOLON || tok == Token::RBRACE || tok == Token::EOS) { - ExpectSemicolon(CHECK_OK); - result = factory()->NewReturnStatement(GetLiteralUndefined()); + return_value = GetLiteralUndefined(); } else { - Expression* expr = ParseExpression(true, CHECK_OK); - ExpectSemicolon(CHECK_OK); - result = factory()->NewReturnStatement(expr); + return_value = ParseExpression(true, CHECK_OK); + } + ExpectSemicolon(CHECK_OK); + if (is_generator()) { + Expression* generator = factory()->NewVariableProxy( + current_function_state_->generator_object_variable()); + Expression* yield = factory()->NewYield( + generator, return_value, Yield::FINAL, RelocInfo::kNoPosition); + result = factory()->NewExpressionStatement(yield); + } else { + result = factory()->NewReturnStatement(return_value); } // An ECMAScript program is considered syntactically incorrect if it @@ -2563,7 +2571,7 @@ Statement* Parser::ParseWithStatement(ZoneStringList* labels, bool* ok) { stmt = ParseStatement(labels, CHECK_OK); with_scope->set_end_position(scanner().location().end_pos); } - return factory()->NewWithStatement(expr, stmt); + return factory()->NewWithStatement(with_scope, expr, stmt); } @@ -3100,12 +3108,12 @@ Expression* Parser::ParseYieldExpression(bool* ok) { // 'yield' '*'? AssignmentExpression int position = scanner().peek_location().beg_pos; Expect(Token::YIELD, CHECK_OK); - bool is_yield_star = Check(Token::MUL); + Yield::Kind kind = + Check(Token::MUL) ? 
Yield::DELEGATING : Yield::SUSPEND; Expression* generator_object = factory()->NewVariableProxy( current_function_state_->generator_object_variable()); Expression* expression = ParseAssignmentExpression(false, CHECK_OK); - return factory()->NewYield(generator_object, expression, is_yield_star, - position); + return factory()->NewYield(generator_object, expression, kind, position); } @@ -4591,12 +4599,22 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> function_name, VariableProxy* get_proxy = factory()->NewVariableProxy( current_function_state_->generator_object_variable()); Yield* yield = factory()->NewYield( - get_proxy, assignment, false, RelocInfo::kNoPosition); + get_proxy, assignment, Yield::INITIAL, RelocInfo::kNoPosition); body->Add(factory()->NewExpressionStatement(yield), zone()); } ParseSourceElements(body, Token::RBRACE, false, false, CHECK_OK); + if (is_generator) { + VariableProxy* get_proxy = factory()->NewVariableProxy( + current_function_state_->generator_object_variable()); + Expression *undefined = factory()->NewLiteral( + isolate()->factory()->undefined_value()); + Yield* yield = factory()->NewYield( + get_proxy, undefined, Yield::FINAL, RelocInfo::kNoPosition); + body->Add(factory()->NewExpressionStatement(yield), zone()); + } + materialized_literal_count = function_state.materialized_literal_count(); expected_property_count = function_state.expected_property_count(); handler_count = function_state.handler_count(); diff --git a/deps/v8/src/platform-cygwin.cc b/deps/v8/src/platform-cygwin.cc index 6804af8b99..35427d4d19 100644 --- a/deps/v8/src/platform-cygwin.cc +++ b/deps/v8/src/platform-cygwin.cc @@ -86,7 +86,7 @@ void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) { } const char* OS::LocalTimezone(double time) { - if (isnan(time)) return ""; + if (std::isnan(time)) return ""; time_t tv = static_cast<time_t>(floor(time/msPerSecond)); struct tm* t = localtime(&tv); if (NULL == t) return ""; diff --git a/deps/v8/src/platform-freebsd.cc b/deps/v8/src/platform-freebsd.cc index eadcf55f28..4305ccb288 100644 --- a/deps/v8/src/platform-freebsd.cc +++ b/deps/v8/src/platform-freebsd.cc @@ -103,7 +103,7 @@ int OS::ActivationFrameAlignment() { const char* OS::LocalTimezone(double time) { - if (isnan(time)) return ""; + if (std::isnan(time)) return ""; time_t tv = static_cast<time_t>(floor(time/msPerSecond)); struct tm* t = localtime(&tv); if (NULL == t) return ""; diff --git a/deps/v8/src/platform-linux.cc b/deps/v8/src/platform-linux.cc index a6402d73dd..a4d03b0aca 100644 --- a/deps/v8/src/platform-linux.cc +++ b/deps/v8/src/platform-linux.cc @@ -322,7 +322,7 @@ void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) { const char* OS::LocalTimezone(double time) { - if (isnan(time)) return ""; + if (std::isnan(time)) return ""; time_t tv = static_cast<time_t>(floor(time/msPerSecond)); struct tm* t = localtime(&tv); if (NULL == t) return ""; diff --git a/deps/v8/src/platform-macos.cc b/deps/v8/src/platform-macos.cc index 9bb4dbdd34..eea1726405 100644 --- a/deps/v8/src/platform-macos.cc +++ b/deps/v8/src/platform-macos.cc @@ -302,7 +302,7 @@ void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) { const char* OS::LocalTimezone(double time) { - if (isnan(time)) return ""; + if (std::isnan(time)) return ""; time_t tv = static_cast<time_t>(floor(time/msPerSecond)); struct tm* t = localtime(&tv); if (NULL == t) return ""; diff --git a/deps/v8/src/platform-openbsd.cc b/deps/v8/src/platform-openbsd.cc index e1aff17377..380c15f21a 100644 
--- a/deps/v8/src/platform-openbsd.cc +++ b/deps/v8/src/platform-openbsd.cc @@ -125,7 +125,7 @@ void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) { const char* OS::LocalTimezone(double time) { - if (isnan(time)) return ""; + if (std::isnan(time)) return ""; time_t tv = static_cast<time_t>(floor(time/msPerSecond)); struct tm* t = localtime(&tv); if (NULL == t) return ""; diff --git a/deps/v8/src/platform-posix.cc b/deps/v8/src/platform-posix.cc index 2cf898dd60..f76ec44332 100644 --- a/deps/v8/src/platform-posix.cc +++ b/deps/v8/src/platform-posix.cc @@ -115,26 +115,11 @@ void* OS::GetRandomMmapAddr() { raw_addr &= V8_UINT64_C(0x3ffffffff000); #else uint32_t raw_addr = V8::RandomPrivate(isolate); - - raw_addr &= 0x3ffff000; - -# ifdef __sun - // For our Solaris/illumos mmap hint, we pick a random address in the bottom - // half of the top half of the address space (that is, the third quarter). - // Because we do not MAP_FIXED, this will be treated only as a hint -- the - // system will not fail to mmap() because something else happens to already - // be mapped at our random address. We deliberately set the hint high enough - // to get well above the system's break (that is, the heap); Solaris and - // illumos will try the hint and if that fails allocate as if there were - // no hint at all. The high hint prevents the break from getting hemmed in - // at low values, ceding half of the address space to the system heap. - raw_addr += 0x80000000; -# else // The range 0x20000000 - 0x60000000 is relatively unpopulated across a // variety of ASLR modes (PAE kernel, NX compat mode, etc) and on macos // 10.6 and 10.7. + raw_addr &= 0x3ffff000; raw_addr += 0x20000000; -# endif #endif return reinterpret_cast<void*>(raw_addr); } @@ -219,7 +204,7 @@ int64_t OS::Ticks() { double OS::DaylightSavingsOffset(double time) { - if (isnan(time)) return nan_value(); + if (std::isnan(time)) return nan_value(); time_t tv = static_cast<time_t>(floor(time/msPerSecond)); struct tm* t = localtime(&tv); if (NULL == t) return nan_value(); @@ -349,6 +334,7 @@ OS::MemMoveFunction CreateMemMoveFunction(); // Copy memory area. No restrictions. void OS::MemMove(void* dest, const void* src, size_t size) { + if (size == 0) return; // Note: here we rely on dependent reads being ordered. This is true // on all architectures we currently support. (*memmove_function)(dest, src, size); diff --git a/deps/v8/src/platform-solaris.cc b/deps/v8/src/platform-solaris.cc index aeacab9d54..5fb28c84a6 100644 --- a/deps/v8/src/platform-solaris.cc +++ b/deps/v8/src/platform-solaris.cc @@ -62,6 +62,7 @@ // SunOS 5.10 Generic_141445-09) which make it difficult or impossible to // access signbit() despite the availability of other C99 math functions. 
#ifndef signbit +namespace std { // Test sign - usually defined in math.h int signbit(double x) { // We need to take care of the special case of both positive and negative @@ -74,6 +75,7 @@ int signbit(double x) { return x < 0; } } +} // namespace std #endif // signbit namespace v8 { @@ -116,7 +118,7 @@ void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) { const char* OS::LocalTimezone(double time) { - if (isnan(time)) return ""; + if (std::isnan(time)) return ""; time_t tv = static_cast<time_t>(floor(time/msPerSecond)); struct tm* t = localtime(&tv); if (NULL == t) return ""; diff --git a/deps/v8/src/platform-win32.cc b/deps/v8/src/platform-win32.cc index 272678fe64..6795844760 100644 --- a/deps/v8/src/platform-win32.cc +++ b/deps/v8/src/platform-win32.cc @@ -160,6 +160,7 @@ OS::MemMoveFunction CreateMemMoveFunction(); // Copy memory area to disjoint memory area. void OS::MemMove(void* dest, const void* src, size_t size) { + if (size == 0) return; // Note: here we rely on dependent reads being ordered. This is true // on all architectures we currently support. (*memmove_function)(dest, src, size); @@ -188,8 +189,8 @@ double modulo(double x, double y) { // Workaround MS fmod bugs. ECMA-262 says: // dividend is finite and divisor is an infinity => result equals dividend // dividend is a zero and divisor is nonzero finite => result equals dividend - if (!(isfinite(x) && (!isfinite(y) && !isnan(y))) && - !(x == 0 && (y != 0 && isfinite(y)))) { + if (!(std::isfinite(x) && (!std::isfinite(y) && !std::isnan(y))) && + !(x == 0 && (y != 0 && std::isfinite(y)))) { x = fmod(x, y); } return x; diff --git a/deps/v8/src/platform.h b/deps/v8/src/platform.h index ab75d74f44..86706fe3c3 100644 --- a/deps/v8/src/platform.h +++ b/deps/v8/src/platform.h @@ -46,7 +46,9 @@ #ifdef __sun # ifndef signbit +namespace std { int signbit(double x); +} # endif #endif diff --git a/deps/v8/src/preparser.cc b/deps/v8/src/preparser.cc index 6e8800e019..3bf88cad35 100644 --- a/deps/v8/src/preparser.cc +++ b/deps/v8/src/preparser.cc @@ -25,7 +25,7 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -#include <math.h> +#include <cmath> #include "../include/v8stdint.h" @@ -42,14 +42,18 @@ #include "unicode.h" #include "utils.h" -namespace v8 { - #ifdef _MSC_VER +namespace std { + // Usually defined in math.h, but not in MSVC. // Abstracted to work int isfinite(double value); + +} // namespace std #endif +namespace v8 { + namespace preparser { PreParser::PreParseResult PreParser::PreParseLazyFunction( @@ -1712,7 +1716,7 @@ int DuplicateFinder::AddNumber(i::Vector<const char> key, int value) { double double_value = StringToDouble(unicode_constants_, key, flags, 0.0); int length; const char* string; - if (!isfinite(double_value)) { + if (!std::isfinite(double_value)) { string = "Infinity"; length = 8; // strlen("Infinity"); } else { diff --git a/deps/v8/src/profile-generator-inl.h b/deps/v8/src/profile-generator-inl.h index 4e6302c37e..64dabf62b7 100644 --- a/deps/v8/src/profile-generator-inl.h +++ b/deps/v8/src/profile-generator-inl.h @@ -85,7 +85,6 @@ CodeEntry* ProfileGenerator::EntryForVMState(StateTag tag) { return gc_entry_; case JS: case COMPILER: - case PARALLEL_COMPILER: // DOM events handlers are reported as OTHER / EXTERNAL entries. // To avoid confusing people, let's put all these entries into // one bucket. 
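A theme running through the platform and preparser hunks above (and through objects.cc earlier in this diff) is the move from <math.h> to <cmath> with std-qualified calls. Qualifying the calls lets platforms with incomplete C99 support, like the Solaris signbit and MSVC isfinite cases patched above, declare the missing functions inside namespace std themselves. An illustrative use of the same convention (not code from the patch):

    #include <cmath>

    // Classify a double using only std-qualified <cmath> functions, the
    // convention this patch adopts so platform shims can fill in namespace std.
    const char* ClassifyDouble(double value) {
      if (std::isnan(value)) return "nan";
      if (!std::isfinite(value)) return "infinity";
      return std::signbit(value) ? "negative" : "positive or zero";
    }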
diff --git a/deps/v8/src/profile-generator.cc b/deps/v8/src/profile-generator.cc index eacabeff4f..b1b163b50e 100644 --- a/deps/v8/src/profile-generator.cc +++ b/deps/v8/src/profile-generator.cc @@ -260,12 +260,13 @@ double ProfileNode::GetTotalMillis() const { void ProfileNode::Print(int indent) { - OS::Print("%5u %5u %*c %s%s [%d]", + OS::Print("%5u %5u %*c %s%s [%d] #%d", total_ticks_, self_ticks_, indent, ' ', entry_->name_prefix(), entry_->name(), - entry_->security_token_id()); + entry_->security_token_id(), + id()); if (entry_->resource_name()[0] != '\0') OS::Print(" %s:%d", entry_->resource_name(), entry_->line_number()); OS::Print("\n"); @@ -570,7 +571,12 @@ void CodeMap::MoveCode(Address from, Address to) { void CodeMap::CodeTreePrinter::Call( const Address& key, const CodeMap::CodeEntryInfo& value) { - OS::Print("%p %5d %s\n", key, value.size, value.entry->name()); + // For shared function entries, 'size' field is used to store their IDs. + if (value.entry == kSharedFunctionCodeEntry) { + OS::Print("%p SharedFunctionInfo %d\n", key, value.size); + } else { + OS::Print("%p %5d %s\n", key, value.size, value.entry->name()); + } } @@ -894,7 +900,7 @@ void ProfileGenerator::RecordTickSample(const TickSample& sample) { if (sample.pc != NULL) { *entry++ = code_map_.FindEntry(sample.pc); - if (sample.external_callback) { + if (sample.has_external_callback) { // Don't use PC when in external callback code, as it can point // inside callback's code, and we will erroneously report // that a callback calls itself. diff --git a/deps/v8/src/profile-generator.h b/deps/v8/src/profile-generator.h index 4ddb75337e..761291e121 100644 --- a/deps/v8/src/profile-generator.h +++ b/deps/v8/src/profile-generator.h @@ -107,7 +107,6 @@ class CodeEntry { INLINE(const char* name() const) { return name_; } INLINE(const char* resource_name() const) { return resource_name_; } INLINE(int line_number() const) { return line_number_; } - INLINE(int shared_id() const) { return shared_id_; } INLINE(void set_shared_id(int shared_id)) { shared_id_ = shared_id; } INLINE(int security_token_id() const) { return security_token_id_; } @@ -401,33 +400,6 @@ class ProfileGenerator { public: explicit ProfileGenerator(CpuProfilesCollection* profiles); - INLINE(CodeEntry* NewCodeEntry(Logger::LogEventsAndTags tag, - Name* name, - String* resource_name, - int line_number)) { - return profiles_->NewCodeEntry(tag, name, resource_name, line_number); - } - - INLINE(CodeEntry* NewCodeEntry(Logger::LogEventsAndTags tag, - const char* name)) { - return profiles_->NewCodeEntry(tag, name); - } - - INLINE(CodeEntry* NewCodeEntry(Logger::LogEventsAndTags tag, - const char* name_prefix, - Name* name)) { - return profiles_->NewCodeEntry(tag, name_prefix, name); - } - - INLINE(CodeEntry* NewCodeEntry(Logger::LogEventsAndTags tag, - int args_count)) { - return profiles_->NewCodeEntry(tag, args_count); - } - - INLINE(CodeEntry* NewCodeEntry(int security_token_id)) { - return profiles_->NewCodeEntry(security_token_id); - } - void RecordTickSample(const TickSample& sample); INLINE(CodeMap* code_map()) { return &code_map_; } diff --git a/deps/v8/src/runtime.cc b/deps/v8/src/runtime.cc index c0213d2ea4..ebe88fe9b7 100644 --- a/deps/v8/src/runtime.cc +++ b/deps/v8/src/runtime.cc @@ -65,6 +65,12 @@ #include "v8threads.h" #include "vm-state-inl.h" +#ifndef _STLP_VENDOR_CSTD +// STLPort doesn't import fpclassify and isless into the std namespace. 
+using std::fpclassify; +using std::isless; +#endif + namespace v8 { namespace internal { @@ -656,6 +662,47 @@ static void ArrayBufferWeakCallback(v8::Isolate* external_isolate, } +bool Runtime::SetupArrayBuffer(Isolate* isolate, + Handle<JSArrayBuffer> array_buffer, + void* data, + size_t allocated_length) { + array_buffer->set_backing_store(data); + + Handle<Object> byte_length = + isolate->factory()->NewNumber(static_cast<double>(allocated_length)); + CHECK(byte_length->IsSmi() || byte_length->IsHeapNumber()); + array_buffer->set_byte_length(*byte_length); + return true; +} + + +bool Runtime::SetupArrayBufferAllocatingData( + Isolate* isolate, + Handle<JSArrayBuffer> array_buffer, + size_t allocated_length) { + void* data; + if (allocated_length != 0) { + data = malloc(allocated_length); + if (data == NULL) return false; + memset(data, 0, allocated_length); + } else { + data = NULL; + } + + if (!SetupArrayBuffer(isolate, array_buffer, data, allocated_length)) + return false; + + v8::Isolate* external_isolate = reinterpret_cast<v8::Isolate*>(isolate); + v8::Persistent<v8::Value> weak_handle = v8::Persistent<v8::Value>::New( + external_isolate, v8::Utils::ToLocal(Handle<Object>::cast(array_buffer))); + weak_handle.MakeWeak(external_isolate, data, ArrayBufferWeakCallback); + weak_handle.MarkIndependent(external_isolate); + isolate->heap()->AdjustAmountOfExternalAllocatedMemory(allocated_length); + + return true; +} + + RUNTIME_FUNCTION(MaybeObject*, Runtime_ArrayBufferInitialize) { HandleScope scope(isolate); ASSERT(args.length() == 2); @@ -679,38 +726,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ArrayBufferInitialize) { allocated_length = static_cast<size_t>(value); } - void* data; - if (allocated_length != 0) { - data = malloc(allocated_length); - - if (data == NULL) { + if (!Runtime::SetupArrayBufferAllocatingData(isolate, + holder, allocated_length)) { return isolate->Throw(*isolate->factory()-> NewRangeError("invalid_array_buffer_length", HandleVector<Object>(NULL, 0))); - } - - memset(data, 0, allocated_length); - } else { - data = NULL; } - holder->set_backing_store(data); - - Object* byte_length; - { - MaybeObject* maybe_byte_length = - isolate->heap()->NumberFromDouble( - static_cast<double>(allocated_length)); - if (!maybe_byte_length->ToObject(&byte_length)) return maybe_byte_length; - } - CHECK(byte_length->IsSmi() || byte_length->IsHeapNumber()); - holder->set_byte_length(byte_length); - - v8::Isolate* external_isolate = reinterpret_cast<v8::Isolate*>(isolate); - v8::Persistent<v8::Value> weak_handle = v8::Persistent<v8::Value>::New( - external_isolate, v8::Utils::ToLocal(Handle<Object>::cast(holder))); - weak_handle.MakeWeak(external_isolate, data, ArrayBufferWeakCallback); - weak_handle.MarkIndependent(external_isolate); - isolate->heap()->AdjustAmountOfExternalAllocatedMemory(allocated_length); return *holder; } @@ -2391,6 +2412,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetExpectedNumberOfProperties) { RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateJSGeneratorObject) { NoHandleAllocation ha(isolate); ASSERT(args.length() == 0); + JavaScriptFrameIterator it(isolate); JavaScriptFrame* frame = it.frame(); JSFunction* function = JSFunction::cast(frame->function()); @@ -2405,7 +2427,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateJSGeneratorObject) { if (!maybe_generator->To(&generator)) return maybe_generator; } generator->set_function(function); - generator->set_context(isolate->heap()->undefined_value()); + generator->set_context(Context::cast(frame->context())); + 
generator->set_receiver(frame->receiver()); generator->set_continuation(0); generator->set_operand_stack(isolate->heap()->empty_fixed_array()); @@ -2413,6 +2436,122 @@ } +RUNTIME_FUNCTION(MaybeObject*, Runtime_SuspendJSGeneratorObject) { + HandleScope scope(isolate); + ASSERT(args.length() == 1); + CONVERT_ARG_HANDLE_CHECKED(JSGeneratorObject, generator_object, 0); + + JavaScriptFrameIterator stack_iterator(isolate); + JavaScriptFrame *frame = stack_iterator.frame(); + Handle<JSFunction> function(JSFunction::cast(frame->function())); + RUNTIME_ASSERT(function->shared()->is_generator()); + + intptr_t offset = frame->pc() - function->code()->instruction_start(); + ASSERT(*function == generator_object->function()); + ASSERT(offset > 0 && Smi::IsValid(offset)); + generator_object->set_continuation(static_cast<int>(offset)); + + // Generator functions force context allocation for locals, so Local0 points + // to the bottom of the operand stack. Assume the stack grows down. + // + // TODO(wingo): Move these magical calculations to frames.h when the + // generators implementation has stabilized. + intptr_t stack_size_in_bytes = + (frame->fp() + JavaScriptFrameConstants::kLocal0Offset) - + (frame->sp() - kPointerSize); + ASSERT(IsAddressAligned(frame->fp(), kPointerSize)); + ASSERT(IsAligned(stack_size_in_bytes, kPointerSize)); + ASSERT(stack_size_in_bytes >= 0); + ASSERT(Smi::IsValid(stack_size_in_bytes)); + intptr_t stack_size = stack_size_in_bytes >> kPointerSizeLog2; + + // We expect there to be at least two values on the stack: the return value of + // the yield expression, and the argument to this runtime call. Neither of + // those should be saved. + ASSERT(stack_size >= 2); + stack_size -= 2; + + if (stack_size == 0) { + ASSERT_EQ(generator_object->operand_stack(), + isolate->heap()->empty_fixed_array()); + // If there are no operands on the stack, there shouldn't be a handler + // active either. + ASSERT(!frame->HasHandler()); + } else { + // TODO(wingo): Save the operand stack and/or the stack handlers. + UNIMPLEMENTED(); + } + + // It's possible for the context to be other than the initial context even if + // there is no stack handler active. For example, this is the case in the + // body of a "with" statement. Therefore we always save the context. + generator_object->set_context(Context::cast(frame->context())); + + // The return value is the hole for a suspend return, and anything else for a + // resume return. + return isolate->heap()->the_hole_value(); +} + + +// Note that this function is the slow path for resuming generators. It is only +// called if the suspended activation had operands on the stack, stack handlers +// needing rewinding, or if the resume should throw an exception. The fast path +// is handled directly in FullCodeGenerator::EmitGeneratorResume(), which is +// inlined into GeneratorNext, GeneratorSend, and GeneratorThrow. +// EmitGeneratorResume is called in any case, as it needs to reconstruct +// the stack frame and make space for arguments and operands.
+RUNTIME_FUNCTION(MaybeObject*, Runtime_ResumeJSGeneratorObject) { + HandleScope scope(isolate); + ASSERT(args.length() == 3); + CONVERT_ARG_HANDLE_CHECKED(JSGeneratorObject, generator_object, 0); + CONVERT_ARG_HANDLE_CHECKED(Object, value, 1); + CONVERT_SMI_ARG_CHECKED(resume_mode_int, 2); + JavaScriptFrameIterator stack_iterator(isolate); + JavaScriptFrame *frame = stack_iterator.frame(); + + ASSERT_EQ(frame->function(), generator_object->function()); + + STATIC_ASSERT(JSGeneratorObject::kGeneratorExecuting <= 0); + STATIC_ASSERT(JSGeneratorObject::kGeneratorClosed <= 0); + + Address pc = generator_object->function()->code()->instruction_start(); + int offset = generator_object->continuation(); + ASSERT(offset > 0); + frame->set_pc(pc + offset); + generator_object->set_continuation(JSGeneratorObject::kGeneratorExecuting); + + if (generator_object->operand_stack()->length() != 0) { + // TODO(wingo): Copy operand stack. Rewind handlers. + UNIMPLEMENTED(); + } + + JSGeneratorObject::ResumeMode resume_mode = + static_cast<JSGeneratorObject::ResumeMode>(resume_mode_int); + switch (resume_mode) { + case JSGeneratorObject::SEND: + return *value; + case JSGeneratorObject::THROW: + return isolate->Throw(*value); + } + + UNREACHABLE(); + return isolate->ThrowIllegalOperation(); +} + + +RUNTIME_FUNCTION(MaybeObject*, Runtime_ThrowGeneratorStateError) { + HandleScope scope(isolate); + ASSERT(args.length() == 1); + CONVERT_ARG_HANDLE_CHECKED(JSGeneratorObject, generator, 0); + int continuation = generator->continuation(); + const char *message = continuation == JSGeneratorObject::kGeneratorClosed ? + "generator_finished" : "generator_running"; + Vector< Handle<Object> > argv = HandleVector<Object>(NULL, 0); + Handle<Object> error = isolate->factory()->NewError(message, argv); + return isolate->Throw(*error); +} + + MUST_USE_RESULT static MaybeObject* CharFromCode(Isolate* isolate, Object* char_code) { if (char_code->IsNumber()) { @@ -3963,10 +4102,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToRadixString) { // Slow case. 
CONVERT_DOUBLE_ARG_CHECKED(value, 0); - if (isnan(value)) { + if (std::isnan(value)) { return *isolate->factory()->nan_string(); } - if (isinf(value)) { + if (std::isinf(value)) { if (value < 0) { return *isolate->factory()->minus_infinity_string(); } @@ -4041,6 +4180,14 @@ static Handle<Object> GetCharAt(Handle<String> string, uint32_t index) { } +MaybeObject* Runtime::GetElementOrCharAtOrFail(Isolate* isolate, + Handle<Object> object, + uint32_t index) { + CALL_HEAP_FUNCTION_PASS_EXCEPTION(isolate, + GetElementOrCharAt(isolate, object, index)); +} + + MaybeObject* Runtime::GetElementOrCharAt(Isolate* isolate, Handle<Object> object, uint32_t index) { @@ -6142,6 +6289,16 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberMod) { } +RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberImul) { + NoHandleAllocation ha(isolate); + ASSERT(args.length() == 2); + + CONVERT_NUMBER_CHECKED(int32_t, x, Int32, args[0]); + CONVERT_NUMBER_CHECKED(int32_t, y, Int32, args[1]); + return isolate->heap()->NumberFromInt32(x * y); +} + + RUNTIME_FUNCTION(MaybeObject*, Runtime_StringAdd) { NoHandleAllocation ha(isolate); ASSERT(args.length() == 2); @@ -6604,8 +6761,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberEquals) { CONVERT_DOUBLE_ARG_CHECKED(x, 0); CONVERT_DOUBLE_ARG_CHECKED(y, 1); - if (isnan(x)) return Smi::FromInt(NOT_EQUAL); - if (isnan(y)) return Smi::FromInt(NOT_EQUAL); + if (std::isnan(x)) return Smi::FromInt(NOT_EQUAL); + if (std::isnan(y)) return Smi::FromInt(NOT_EQUAL); if (x == y) return Smi::FromInt(EQUAL); Object* result; if ((fpclassify(x) == FP_ZERO) && (fpclassify(y) == FP_ZERO)) { @@ -6641,7 +6798,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberCompare) { CONVERT_DOUBLE_ARG_CHECKED(x, 0); CONVERT_DOUBLE_ARG_CHECKED(y, 1); - if (isnan(x) || isnan(y)) return args[2]; + if (std::isnan(x) || std::isnan(y)) return args[2]; if (x == y) return Smi::FromInt(EQUAL); if (isless(x, y)) return Smi::FromInt(LESS); return Smi::FromInt(GREATER); @@ -6864,7 +7021,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_atan2) { CONVERT_DOUBLE_ARG_CHECKED(x, 0); CONVERT_DOUBLE_ARG_CHECKED(y, 1); double result; - if (isinf(x) && isinf(y)) { + if (std::isinf(x) && std::isinf(y)) { // Make sure that the result in case of two infinite arguments // is a multiple of Pi / 4. The sign of the result is determined // by the first argument (x) and the sign of the second argument @@ -6947,7 +7104,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_pow) { CONVERT_DOUBLE_ARG_CHECKED(y, 1); double result = power_helper(x, y); - if (isnan(result)) return isolate->heap()->nan_value(); + if (std::isnan(result)) return isolate->heap()->nan_value(); return isolate->heap()->AllocateHeapNumber(result); } @@ -6964,7 +7121,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_pow_cfunction) { return Smi::FromInt(1); } else { double result = power_double_double(x, y); - if (isnan(result)) return isolate->heap()->nan_value(); + if (std::isnan(result)) return isolate->heap()->nan_value(); return isolate->heap()->AllocateHeapNumber(result); } } @@ -7066,7 +7223,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DateSetValue) { Object* value = NULL; bool is_value_nan = false; - if (isnan(time)) { + if (std::isnan(time)) { value = isolate->heap()->nan_value(); is_value_nan = true; } else if (!is_utc && @@ -8824,7 +8981,7 @@ bool CodeGenerationFromStringsAllowed(Isolate* isolate, return false; } else { // Callback set. Let it decide if code generation is allowed. 
- VMState state(isolate, EXTERNAL); + VMState<EXTERNAL> state(isolate); return callback(v8::Utils::ToLocal(context)); } } diff --git a/deps/v8/src/runtime.h b/deps/v8/src/runtime.h index 14133511ea..2a102e12f8 100644 --- a/deps/v8/src/runtime.h +++ b/deps/v8/src/runtime.h @@ -145,6 +145,7 @@ namespace internal { F(NumberMod, 2, 1) \ F(NumberUnaryMinus, 1, 1) \ F(NumberAlloc, 0, 1) \ + F(NumberImul, 2, 1) \ \ F(StringAdd, 2, 1) \ F(StringBuilderConcat, 3, 1) \ @@ -298,6 +299,9 @@ namespace internal { \ /* Harmony generators */ \ F(CreateJSGeneratorObject, 0, 1) \ + F(SuspendJSGeneratorObject, 1, 1) \ + F(ResumeJSGeneratorObject, 3, 1) \ + F(ThrowGeneratorStateError, 1, 1) \ \ /* Harmony modules */ \ F(IsJSModule, 1, 1) \ @@ -559,7 +563,9 @@ namespace internal { F(IsRegExpEquivalent, 2, 1) \ F(HasCachedArrayIndex, 1, 1) \ F(GetCachedArrayIndex, 1, 1) \ - F(FastAsciiArrayJoin, 2, 1) + F(FastAsciiArrayJoin, 2, 1) \ + F(GeneratorSend, 2, 1) \ + F(GeneratorThrow, 2, 1) // ---------------------------------------------------------------------------- @@ -694,6 +700,11 @@ class Runtime : public AllStatic { Handle<Object> object, uint32_t index); + MUST_USE_RESULT static MaybeObject* GetElementOrCharAtOrFail( + Isolate* isolate, + Handle<Object> object, + uint32_t index); + MUST_USE_RESULT static MaybeObject* SetObjectProperty( Isolate* isolate, Handle<Object> object, @@ -738,6 +749,16 @@ class Runtime : public AllStatic { Handle<Object> object, Handle<Object> key); + static bool SetupArrayBuffer(Isolate* isolate, + Handle<JSArrayBuffer> array_buffer, + void* data, + size_t allocated_length); + + static bool SetupArrayBufferAllocatingData( + Isolate* isolate, + Handle<JSArrayBuffer> array_buffer, + size_t allocated_length); + // Helper functions used by stubs. static void PerformGC(Object* result); diff --git a/deps/v8/src/sampler.cc b/deps/v8/src/sampler.cc index 948b054073..e271470bd2 100644 --- a/deps/v8/src/sampler.cc +++ b/deps/v8/src/sampler.cc @@ -636,7 +636,16 @@ DISABLE_ASAN void TickSample::Trace(Isolate* isolate) { return; } - external_callback = isolate->external_callback(); + const Address callback = isolate->external_callback(); + if (callback != NULL) { + external_callback = callback; + has_external_callback = true; + } else { + // Sample potential return address value for frameless invocation of + // stubs (we'll figure out later whether this value makes sense). + tos = Memory::Address_at(sp); + has_external_callback = false; + } SafeStackTraceFrameIterator it(isolate, fp, sp, sp, js_entry_sp); int i = 0; diff --git a/deps/v8/src/sampler.h b/deps/v8/src/sampler.h index a76d8b9a57..1d9ac8723b 100644 --- a/deps/v8/src/sampler.h +++ b/deps/v8/src/sampler.h @@ -51,16 +51,21 @@ struct TickSample { sp(NULL), fp(NULL), external_callback(NULL), - frames_count(0) {} + frames_count(0), + has_external_callback(false) {} void Trace(Isolate* isolate); StateTag state; // The state of the VM. Address pc; // Instruction pointer. Address sp; // Stack pointer. Address fp; // Frame pointer. - Address external_callback; + union { + Address tos; // Top stack value (*sp). + Address external_callback; + }; static const int kMaxFramesCount = 64; Address stack[kMaxFramesCount]; // Call stack. int frames_count : 8; // Number of captured frames.
+ bool has_external_callback : 1; }; class Sampler { diff --git a/deps/v8/src/scanner.cc b/deps/v8/src/scanner.cc index ef2dc2c647..ef2dc2c647 100755..100644 --- a/deps/v8/src/scanner.cc +++ b/deps/v8/src/scanner.cc diff --git a/deps/v8/src/scopes.cc b/deps/v8/src/scopes.cc index 10548f9938..5ad970ad81 100644 --- a/deps/v8/src/scopes.cc +++ b/deps/v8/src/scopes.cc @@ -726,7 +726,9 @@ int Scope::ContextChainLength(Scope* scope) { int n = 0; for (Scope* s = this; s != scope; s = s->outer_scope_) { ASSERT(s != NULL); // scope must be in the scope chain - if (s->num_heap_slots() > 0) n++; + if (s->is_with_scope() || s->num_heap_slots() > 0) n++; + // Catch scopes always have heap slots. + ASSERT(!s->is_catch_scope() || s->num_heap_slots() > 0); } return n; } diff --git a/deps/v8/src/serialize.cc b/deps/v8/src/serialize.cc index 0ba730fd72..d4f31c1e1a 100644 --- a/deps/v8/src/serialize.cc +++ b/deps/v8/src/serialize.cc @@ -472,7 +472,7 @@ void ExternalReferenceTable::PopulateTable(Isolate* isolate) { UNCLASSIFIED, 37, "LDoubleConstant::one_half"); - Add(ExternalReference::isolate_address().address(), + Add(ExternalReference::isolate_address(isolate).address(), UNCLASSIFIED, 38, "isolate"); diff --git a/deps/v8/src/spaces.cc b/deps/v8/src/spaces.cc index 7202e1bbc0..df1c3ef126 100644 --- a/deps/v8/src/spaces.cc +++ b/deps/v8/src/spaces.cc @@ -1817,6 +1817,7 @@ static void ReportCodeKindStatistics() { CASE(UNARY_OP_IC); CASE(BINARY_OP_IC); CASE(COMPARE_IC); + CASE(COMPARE_NIL_IC); CASE(TO_BOOLEAN_IC); } } diff --git a/deps/v8/src/store-buffer.cc b/deps/v8/src/store-buffer.cc index 8a69164039..7d73dd5ed1 100644 --- a/deps/v8/src/store-buffer.cc +++ b/deps/v8/src/store-buffer.cc @@ -25,9 +25,11 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -#include "v8.h" - #include "store-buffer.h" + +#include <algorithm> + +#include "v8.h" #include "store-buffer-inl.h" #include "v8-counters.h" @@ -122,33 +124,6 @@ void StoreBuffer::StoreBufferOverflow(Isolate* isolate) { } -#if V8_TARGET_ARCH_X64 -static int CompareAddresses(const void* void_a, const void* void_b) { - intptr_t a = - reinterpret_cast<intptr_t>(*reinterpret_cast<const Address*>(void_a)); - intptr_t b = - reinterpret_cast<intptr_t>(*reinterpret_cast<const Address*>(void_b)); - // Unfortunately if int is smaller than intptr_t there is no branch-free - // way to return a number with the same sign as the difference between the - // pointers. - if (a == b) return 0; - if (a < b) return -1; - ASSERT(a > b); - return 1; -} -#else -static int CompareAddresses(const void* void_a, const void* void_b) { - intptr_t a = - reinterpret_cast<intptr_t>(*reinterpret_cast<const Address*>(void_a)); - intptr_t b = - reinterpret_cast<intptr_t>(*reinterpret_cast<const Address*>(void_b)); - ASSERT(sizeof(1) == sizeof(a)); - // Shift down to avoid wraparound. - return (a >> kPointerSizeLog2) - (b >> kPointerSizeLog2); -} -#endif - - void StoreBuffer::Uniq() { // Remove adjacent duplicates and cells that do not point at new space. 
Address previous = NULL; @@ -283,10 +258,7 @@ void StoreBuffer::Filter(int flag) { void StoreBuffer::SortUniq() { Compact(); if (old_buffer_is_sorted_) return; - qsort(reinterpret_cast<void*>(old_start_), - old_top_ - old_start_, - sizeof(*old_top_), - &CompareAddresses); + std::sort(old_start_, old_top_); Uniq(); old_buffer_is_sorted_ = true; diff --git a/deps/v8/src/store-buffer.h b/deps/v8/src/store-buffer.h index 79046d1540..514534a1ed 100644 --- a/deps/v8/src/store-buffer.h +++ b/deps/v8/src/store-buffer.h @@ -37,6 +37,8 @@ namespace v8 { namespace internal { +class Page; +class PagedSpace; class StoreBuffer; typedef void (*ObjectSlotCallback)(HeapObject** from, HeapObject* to); diff --git a/deps/v8/src/strtod.cc b/deps/v8/src/strtod.cc index 0dc618a399..dfe2fb7359 100644 --- a/deps/v8/src/strtod.cc +++ b/deps/v8/src/strtod.cc @@ -26,7 +26,7 @@ // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include <stdarg.h> -#include <math.h> +#include <cmath> #include "globals.h" #include "utils.h" diff --git a/deps/v8/src/stub-cache.cc b/deps/v8/src/stub-cache.cc index 396e92ce39..f928cf6163 100644 --- a/deps/v8/src/stub-cache.cc +++ b/deps/v8/src/stub-cache.cc @@ -110,18 +110,28 @@ Handle<JSObject> StubCache::StubHolder(Handle<JSObject> receiver, Handle<Code> StubCache::FindIC(Handle<Name> name, - Handle<JSObject> stub_holder, + Handle<Map> stub_holder_map, Code::Kind kind, Code::StubType type, - Code::ExtraICState extra_ic_state) { - Code::Flags flags = Code::ComputeMonomorphicFlags(kind, extra_ic_state, type); - Handle<Object> probe(stub_holder->map()->FindInCodeCache(*name, flags), + Code::ExtraICState extra_state) { + Code::Flags flags = Code::ComputeMonomorphicFlags(kind, extra_state, type); + Handle<Object> probe(stub_holder_map->FindInCodeCache(*name, flags), isolate_); if (probe->IsCode()) return Handle<Code>::cast(probe); return Handle<Code>::null(); } +Handle<Code> StubCache::FindIC(Handle<Name> name, + Handle<JSObject> stub_holder, + Code::Kind kind, + Code::StubType type, + Code::ExtraICState extra_ic_state) { + return FindIC(name, Handle<Map>(stub_holder->map()), kind, + type, extra_ic_state); +} + + Handle<Code> StubCache::FindHandler(Handle<Name> name, Handle<JSObject> receiver, Handle<JSObject> stub_holder, @@ -487,7 +497,8 @@ Handle<Code> StubCache::ComputeStoreGlobal(Handle<Name> name, Handle<JSGlobalPropertyCell> cell, StrictModeFlag strict_mode) { Handle<Code> stub = FindIC( - name, receiver, Code::STORE_IC, Code::NORMAL, strict_mode); + name, Handle<JSObject>::cast(receiver), + Code::STORE_IC, Code::NORMAL, strict_mode); if (!stub.is_null()) return stub; StoreStubCompiler compiler(isolate_, strict_mode); @@ -893,6 +904,32 @@ Handle<Code> StubCache::ComputeCallMiss(int argc, } +Handle<Code> StubCache::ComputeCompareNil(Handle<Map> receiver_map, + NilValue nil, + CompareNilICStub::Types types) { + CompareNilICStub stub(kNonStrictEquality, nil, types); + + Handle<String> name(isolate_->heap()->empty_string()); + if (!receiver_map->is_shared()) { + Handle<Code> cached_ic = FindIC(name, receiver_map, Code::COMPARE_NIL_IC, + Code::NORMAL, stub.GetExtraICState()); + if (!cached_ic.is_null()) return cached_ic; + } + + Handle<Code> ic = stub.GetCode(isolate_); + // For monomorphic maps, use the code as a template, copying and replacing + // the monomorphic map that checks the object's type. 
+ ic = isolate_->factory()->CopyCode(ic); + ic->ReplaceFirstMap(*receiver_map); + + if (!receiver_map->is_shared()) { + Map::UpdateCodeCache(receiver_map, name, ic); + } + + return ic; +} + + Handle<Code> StubCache::ComputeLoadElementPolymorphic( MapHandleList* receiver_maps) { Code::Flags flags = Code::ComputeFlags(Code::KEYED_LOAD_IC, POLYMORPHIC); @@ -1074,7 +1111,7 @@ RUNTIME_FUNCTION(MaybeObject*, StoreCallbackProperty) { v8::AccessorInfo info(custom_args.end()); { // Leaving JavaScript. - VMState state(isolate, EXTERNAL); + VMState<EXTERNAL> state(isolate); ExternalCallbackScope call_scope(isolate, setter_address); fun(v8::Utils::ToLocal(str), v8::Utils::ToLocal(value), info); } @@ -1120,7 +1157,7 @@ RUNTIME_FUNCTION(MaybeObject*, LoadPropertyWithInterceptorOnly) { v8::Handle<v8::Value> r; { // Leaving JavaScript. - VMState state(isolate, EXTERNAL); + VMState<EXTERNAL> state(isolate); r = getter(v8::Utils::ToLocal(name), info); } RETURN_IF_SCHEDULED_EXCEPTION(isolate); @@ -1183,7 +1220,7 @@ static MaybeObject* LoadWithInterceptor(Arguments* args, v8::Handle<v8::Value> r; { // Leaving JavaScript. - VMState state(isolate, EXTERNAL); + VMState<EXTERNAL> state(isolate); r = getter(v8::Utils::ToLocal(name), info); } RETURN_IF_SCHEDULED_EXCEPTION(isolate); diff --git a/deps/v8/src/stub-cache.h b/deps/v8/src/stub-cache.h index 02bb541bd9..dbb5e90f23 100644 --- a/deps/v8/src/stub-cache.h +++ b/deps/v8/src/stub-cache.h @@ -30,6 +30,7 @@ #include "allocation.h" #include "arguments.h" +#include "code-stubs.h" #include "ic-inl.h" #include "macro-assembler.h" #include "objects.h" @@ -78,6 +79,12 @@ class StubCache { Handle<JSObject> holder); Handle<Code> FindIC(Handle<Name> name, + Handle<Map> stub_holder_map, + Code::Kind kind, + Code::StubType type, + Code::ExtraICState extra_state = Code::kNoExtraICState); + + Handle<Code> FindIC(Handle<Name> name, Handle<JSObject> stub_holder, Code::Kind kind, Code::StubType type, @@ -271,6 +278,12 @@ class StubCache { // --- + Handle<Code> ComputeCompareNil(Handle<Map> receiver_map, + NilValue nil, + CompareNilICStub::Types types); + + // --- + Handle<Code> ComputeLoadElementPolymorphic(MapHandleList* receiver_maps); Handle<Code> ComputeStoreElementPolymorphic(MapHandleList* receiver_maps, KeyedAccessStoreMode store_mode, diff --git a/deps/v8/src/type-info.cc b/deps/v8/src/type-info.cc index 39a01f59e8..3bc509a618 100644 --- a/deps/v8/src/type-info.cc +++ b/deps/v8/src/type-info.cc @@ -218,6 +218,17 @@ Handle<Map> TypeFeedbackOracle::StoreMonomorphicReceiverType( } +Handle<Map> TypeFeedbackOracle::CompareNilMonomorphicReceiverType( + TypeFeedbackId id) { + Handle<Object> maybe_code = GetInfo(id); + if (maybe_code->IsCode()) { + Map* first_map = Handle<Code>::cast(maybe_code)->FindFirstMap(); + if (first_map != NULL) return Handle<Map>(first_map); + } + return Handle<Map>(); +} + + KeyedAccessStoreMode TypeFeedbackOracle::GetStoreMode( TypeFeedbackId ast_id) { Handle<Object> map_or_code = GetInfo(ast_id); @@ -625,12 +636,23 @@ void TypeFeedbackOracle::CollectKeyedReceiverTypes(TypeFeedbackId ast_id, } -byte TypeFeedbackOracle::ToBooleanTypes(TypeFeedbackId ast_id) { - Handle<Object> object = GetInfo(ast_id); +byte TypeFeedbackOracle::ToBooleanTypes(TypeFeedbackId id) { + Handle<Object> object = GetInfo(id); return object->IsCode() ? 
Handle<Code>::cast(object)->to_boolean_state() : 0; } +byte TypeFeedbackOracle::CompareNilTypes(TypeFeedbackId id) { + Handle<Object> object = GetInfo(id); + if (object->IsCode() && + Handle<Code>::cast(object)->is_compare_nil_ic_stub()) { + return Handle<Code>::cast(object)->compare_nil_state(); + } else { + return CompareNilICStub::kFullCompare; + } +} + + // Things are a bit tricky here: The iterator for the RelocInfos and the infos // themselves are not GC-safe, so we first get all infos, then we create the // dictionary (possibly triggering GC), and finally we relocate the collected @@ -724,6 +746,7 @@ void TypeFeedbackOracle::ProcessRelocInfos(ZoneList<RelocInfo>* infos) { case Code::BINARY_OP_IC: case Code::COMPARE_IC: case Code::TO_BOOLEAN_IC: + case Code::COMPARE_NIL_IC: SetInfo(ast_id, target); break; diff --git a/deps/v8/src/type-info.h b/deps/v8/src/type-info.h index 583c3fc520..d6d958d56d 100644 --- a/deps/v8/src/type-info.h +++ b/deps/v8/src/type-info.h @@ -253,7 +253,8 @@ class TypeFeedbackOracle: public ZoneObject { bool IsForInFastCase(ForInStatement* expr); Handle<Map> LoadMonomorphicReceiverType(Property* expr); - Handle<Map> StoreMonomorphicReceiverType(TypeFeedbackId ast_id); + Handle<Map> StoreMonomorphicReceiverType(TypeFeedbackId id); + Handle<Map> CompareNilMonomorphicReceiverType(TypeFeedbackId id); KeyedAccessStoreMode GetStoreMode(TypeFeedbackId ast_id); @@ -293,6 +294,11 @@ class TypeFeedbackOracle: public ZoneObject { // headers!! :-P byte ToBooleanTypes(TypeFeedbackId ast_id); + // TODO(1571) We can't use CompareNilICStub::Types as the return value because + // of various cycles in our headers. Death to tons of implementations in + // headers!! :-P + byte CompareNilTypes(TypeFeedbackId ast_id); + // Get type information for arithmetic operations and compares.
TypeInfo UnaryType(UnaryOperation* expr); void BinaryType(BinaryOperation* expr, diff --git a/deps/v8/src/utils.h b/deps/v8/src/utils.h index b84d592386..b2c2ff1098 100644 --- a/deps/v8/src/utils.h +++ b/deps/v8/src/utils.h @@ -30,6 +30,7 @@ #include <stdlib.h> #include <string.h> +#include <algorithm> #include <climits> #include "allocation.h" @@ -410,15 +411,11 @@ class Vector { } void Sort(int (*cmp)(const T*, const T*)) { - typedef int (*RawComparer)(const void*, const void*); - qsort(start(), - length(), - sizeof(T), - reinterpret_cast<RawComparer>(cmp)); + std::sort(start(), start() + length(), RawComparer(cmp)); } void Sort() { - Sort(PointerValueCompare<T>); + std::sort(start(), start() + length()); } void Truncate(int length) { @@ -454,6 +451,17 @@ class Vector { private: T* start_; int length_; + + class RawComparer { + public: + explicit RawComparer(int (*cmp)(const T*, const T*)) : cmp_(cmp) {} + bool operator()(const T& a, const T& b) { + return cmp_(&a, &b) < 0; + } + + private: + int (*cmp_)(const T*, const T*); + }; }; diff --git a/deps/v8/src/v8-counters.cc b/deps/v8/src/v8-counters.cc index 4107dd3e48..ca83e38f86 100644 --- a/deps/v8/src/v8-counters.cc +++ b/deps/v8/src/v8-counters.cc @@ -32,58 +32,48 @@ namespace v8 { namespace internal { -Counters::Counters() { +Counters::Counters(Isolate* isolate) { #define HT(name, caption) \ - HistogramTimer name = { {#caption, 0, 10000, 50, NULL, false}, 0, 0 }; \ - name##_ = name; + name##_ = HistogramTimer(#caption, 0, 10000, 50, isolate); HISTOGRAM_TIMER_LIST(HT) #undef HT #define HP(name, caption) \ - Histogram name = { #caption, 0, 101, 100, NULL, false }; \ - name##_ = name; + name##_ = Histogram(#caption, 0, 101, 100, isolate); HISTOGRAM_PERCENTAGE_LIST(HP) #undef HP #define HM(name, caption) \ - Histogram name = { #caption, 1000, 500000, 50, NULL, false }; \ - name##_ = name; + name##_ = Histogram(#caption, 1000, 500000, 50, isolate); HISTOGRAM_MEMORY_LIST(HM) #undef HM #define SC(name, caption) \ - StatsCounter name = { "c:" #caption, NULL, false };\ - name##_ = name; + name##_ = StatsCounter("c:" #caption); STATS_COUNTER_LIST_1(SC) STATS_COUNTER_LIST_2(SC) #undef SC #define SC(name) \ - StatsCounter count_of_##name = { "c:" "V8.CountOf_" #name, NULL, false };\ - count_of_##name##_ = count_of_##name; \ - StatsCounter size_of_##name = { "c:" "V8.SizeOf_" #name, NULL, false };\ - size_of_##name##_ = size_of_##name; + count_of_##name##_ = StatsCounter("c:" "V8.CountOf_" #name); \ + size_of_##name##_ = StatsCounter("c:" "V8.SizeOf_" #name); INSTANCE_TYPE_LIST(SC) #undef SC #define SC(name) \ - StatsCounter count_of_CODE_TYPE_##name = { \ - "c:" "V8.CountOf_CODE_TYPE-" #name, NULL, false }; \ - count_of_CODE_TYPE_##name##_ = count_of_CODE_TYPE_##name; \ - StatsCounter size_of_CODE_TYPE_##name = { \ - "c:" "V8.SizeOf_CODE_TYPE-" #name, NULL, false }; \ - size_of_CODE_TYPE_##name##_ = size_of_CODE_TYPE_##name; + count_of_CODE_TYPE_##name##_ = \ + StatsCounter("c:" "V8.CountOf_CODE_TYPE-" #name); \ + size_of_CODE_TYPE_##name##_ = \ + StatsCounter("c:" "V8.SizeOf_CODE_TYPE-" #name); CODE_KIND_LIST(SC) #undef SC #define SC(name) \ - StatsCounter count_of_FIXED_ARRAY_##name = { \ - "c:" "V8.CountOf_FIXED_ARRAY-" #name, NULL, false }; \ - count_of_FIXED_ARRAY_##name##_ = count_of_FIXED_ARRAY_##name; \ - StatsCounter size_of_FIXED_ARRAY_##name = { \ - "c:" "V8.SizeOf_FIXED_ARRAY-" #name, NULL, false }; \ - size_of_FIXED_ARRAY_##name##_ = size_of_FIXED_ARRAY_##name; + count_of_FIXED_ARRAY_##name##_ = \ + StatsCounter("c:" 
"V8.CountOf_FIXED_ARRAY-" #name); \ + size_of_FIXED_ARRAY_##name##_ = \ + StatsCounter("c:" "V8.SizeOf_FIXED_ARRAY-" #name); \ FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(SC) #undef SC } diff --git a/deps/v8/src/v8-counters.h b/deps/v8/src/v8-counters.h index 374ebbcd89..c810cbac79 100644 --- a/deps/v8/src/v8-counters.h +++ b/deps/v8/src/v8-counters.h @@ -420,6 +420,8 @@ class Counters { friend class Isolate; + explicit Counters(Isolate* isolate); + DISALLOW_IMPLICIT_CONSTRUCTORS(Counters); }; diff --git a/deps/v8/src/v8globals.h b/deps/v8/src/v8globals.h index 82e30f5048..17068937eb 100644 --- a/deps/v8/src/v8globals.h +++ b/deps/v8/src/v8globals.h @@ -162,7 +162,6 @@ class Variable; class RelocInfo; class Deserializer; class MessageLocation; -class ObjectGroup; class VirtualMemory; class Mutex; @@ -363,7 +362,6 @@ enum StateTag { JS, GC, COMPILER, - PARALLEL_COMPILER, OTHER, EXTERNAL }; diff --git a/deps/v8/src/version.cc b/deps/v8/src/version.cc index 8a1b8b7517..dac2bf01b8 100644 --- a/deps/v8/src/version.cc +++ b/deps/v8/src/version.cc @@ -30,19 +30,19 @@ #include "version.h" // These macros define the version number for the current version. -// NOTE these macros are used by the SCons build script so their names -// cannot be changed without changing the SCons build script. +// NOTE these macros are used by some of the tool scripts and the build +// system so their names cannot be changed without changing the scripts. #define MAJOR_VERSION 3 #define MINOR_VERSION 18 -#define BUILD_NUMBER 1 +#define BUILD_NUMBER 4 #define PATCH_LEVEL 0 // Use 1 for candidates and 0 otherwise. // (Boolean macro values are not supported by all preprocessors.) #define IS_CANDIDATE_VERSION 0 -// Define SONAME to have the SCons build the put a specific SONAME into the +// Define SONAME to have the build system put a specific SONAME into the // shared library instead the generic SONAME generated from the V8 version -// number. This define is mainly used by the SCons build script. +// number. This define is mainly used by the build system script. 
#define SONAME "" #if IS_CANDIDATE_VERSION diff --git a/deps/v8/src/vm-state-inl.h b/deps/v8/src/vm-state-inl.h index fae68ebeea..862c17e16e 100644 --- a/deps/v8/src/vm-state-inl.h +++ b/deps/v8/src/vm-state-inl.h @@ -47,8 +47,6 @@ inline const char* StateToString(StateTag state) { return "GC"; case COMPILER: return "COMPILER"; - case PARALLEL_COMPILER: - return "PARALLEL_COMPILER"; case OTHER: return "OTHER"; case EXTERNAL: @@ -60,36 +58,24 @@ inline const char* StateToString(StateTag state) { } -VMState::VMState(Isolate* isolate, StateTag tag) +template <StateTag Tag> +VMState<Tag>::VMState(Isolate* isolate) : isolate_(isolate), previous_tag_(isolate->current_vm_state()) { - if (FLAG_log_state_changes) { - LOG(isolate, UncheckedStringEvent("Entering", StateToString(tag))); - LOG(isolate, UncheckedStringEvent("From", StateToString(previous_tag_))); - } - - if (FLAG_log_timer_events && previous_tag_ != EXTERNAL && tag == EXTERNAL) { - LOG(isolate_, EnterExternal()); + if (FLAG_log_timer_events && previous_tag_ != EXTERNAL && Tag == EXTERNAL) { + LOG(isolate_, + TimerEvent(Logger::START, Logger::TimerEventScope::v8_external)); } - - isolate_->SetCurrentVMState(tag); + isolate_->set_current_vm_state(Tag); } -VMState::~VMState() { - if (FLAG_log_state_changes) { +template <StateTag Tag> +VMState<Tag>::~VMState() { + if (FLAG_log_timer_events && previous_tag_ != EXTERNAL && Tag == EXTERNAL) { LOG(isolate_, - UncheckedStringEvent("Leaving", - StateToString(isolate_->current_vm_state()))); - LOG(isolate_, - UncheckedStringEvent("To", StateToString(previous_tag_))); - } - - if (FLAG_log_timer_events && - previous_tag_ != EXTERNAL && isolate_->current_vm_state() == EXTERNAL) { - LOG(isolate_, LeaveExternal()); + TimerEvent(Logger::END, Logger::TimerEventScope::v8_external)); } - - isolate_->SetCurrentVMState(previous_tag_); + isolate_->set_current_vm_state(previous_tag_); } diff --git a/deps/v8/src/vm-state.h b/deps/v8/src/vm-state.h index 831e2d396b..765b570159 100644 --- a/deps/v8/src/vm-state.h +++ b/deps/v8/src/vm-state.h @@ -34,9 +34,10 @@ namespace v8 { namespace internal { +template <StateTag Tag> class VMState BASE_EMBEDDED { public: - inline VMState(Isolate* isolate, StateTag tag); + explicit inline VMState(Isolate* isolate); inline ~VMState(); private: diff --git a/deps/v8/src/win32-math.cc b/deps/v8/src/win32-math.cc index 3410872bb5..9ffc4ea73b 100644 --- a/deps/v8/src/win32-math.cc +++ b/deps/v8/src/win32-math.cc @@ -35,21 +35,14 @@ #define V8_WIN32_HEADERS_FULL #include "win32-headers.h" #include <limits.h> // Required for INT_MAX etc. -#include <math.h> #include <float.h> // Required for DBL_MAX and on Win32 for finite() +#include <cmath> #include "win32-math.h" #include "checks.h" -namespace v8 { - -// Test for finite value - usually defined in math.h -int isfinite(double x) { - return _finite(x); -} - -} // namespace v8 +namespace std { // Test for a NaN (not a number) value - usually defined in math.h int isnan(double x) { @@ -63,6 +56,12 @@ int isinf(double x) { } +// Test for finite value - usually defined in math.h +int isfinite(double x) { + return _finite(x); +} + + // Test if x is less than y and both nominal - usually defined in math.h int isless(double x, double y) { return isnan(x) || isnan(y) ? 
0 : x < y; @@ -103,4 +102,6 @@ int signbit(double x) { return x < 0; } +} // namespace std + #endif // _MSC_VER diff --git a/deps/v8/src/win32-math.h b/deps/v8/src/win32-math.h index 68759990b4..0397c7e14e 100644 --- a/deps/v8/src/win32-math.h +++ b/deps/v8/src/win32-math.h @@ -45,17 +45,17 @@ enum { FP_NORMAL }; -namespace v8 { -int isfinite(double x); - -} // namespace v8 +namespace std { -int isnan(double x); +int isfinite(double x); int isinf(double x); +int isnan(double x); int isless(double x, double y); int isgreater(double x, double y); int fpclassify(double x); int signbit(double x); +} // namespace std + #endif // V8_WIN32_MATH_H_ diff --git a/deps/v8/src/x64/assembler-x64.cc b/deps/v8/src/x64/assembler-x64.cc index 25979f9365..802696b54f 100644 --- a/deps/v8/src/x64/assembler-x64.cc +++ b/deps/v8/src/x64/assembler-x64.cc @@ -1590,6 +1590,7 @@ void Assembler::movl(const Operand& dst, Label* src) { void Assembler::movq(Register dst, Handle<Object> value, RelocInfo::Mode mode) { + ALLOW_HANDLE_DEREF(isolate(), "using and embedding raw address"); // If there is no relocation info, emit the value of the handle efficiently // (possibly using less that 8 bytes for the value). if (RelocInfo::IsNone(mode)) { diff --git a/deps/v8/src/x64/assembler-x64.h b/deps/v8/src/x64/assembler-x64.h index 2445e2335c..5bb1292f2f 100644 --- a/deps/v8/src/x64/assembler-x64.h +++ b/deps/v8/src/x64/assembler-x64.h @@ -200,6 +200,19 @@ const Register r14 = { kRegister_r14_Code }; const Register r15 = { kRegister_r15_Code }; const Register no_reg = { kRegister_no_reg_Code }; +#ifdef _WIN64 + // Windows calling convention + const Register arg_reg_1 = rcx; + const Register arg_reg_2 = rdx; + const Register arg_reg_3 = r8; + const Register arg_reg_4 = r9; +#else + // AMD64 calling convention + const Register arg_reg_1 = rdi; + const Register arg_reg_2 = rsi; + const Register arg_reg_3 = rdx; + const Register arg_reg_4 = rcx; +#endif // _WIN64 struct XMMRegister { static const int kMaxNumRegisters = 16; diff --git a/deps/v8/src/x64/builtins-x64.cc b/deps/v8/src/x64/builtins-x64.cc index ba7647bf30..e7daa7f9d6 100644 --- a/deps/v8/src/x64/builtins-x64.cc +++ b/deps/v8/src/x64/builtins-x64.cc @@ -648,11 +648,7 @@ static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) { // the stub returns. __ subq(Operand(rsp, 0), Immediate(5)); __ Pushad(); -#ifdef _WIN64 - __ movq(rcx, Operand(rsp, kNumSafepointRegisters * kPointerSize)); -#else - __ movq(rdi, Operand(rsp, kNumSafepointRegisters * kPointerSize)); -#endif + __ movq(arg_reg_1, Operand(rsp, kNumSafepointRegisters * kPointerSize)); { // NOLINT FrameScope scope(masm, StackFrame::MANUAL); __ PrepareCallCFunction(1); @@ -1287,8 +1283,7 @@ static void AllocateJSArray(MacroAssembler* masm, // entering the generic code. In both cases argc in rax needs to be preserved. // Both registers are preserved by this code so no need to differentiate between // a construct call and a normal call. 
-static void ArrayNativeCode(MacroAssembler* masm, - Label* call_generic_code) { +void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code) { Label argc_one_or_more, argc_two_or_more, empty_array, not_empty_array, has_non_smi_element, finish, cant_transition_map, not_double; @@ -1522,7 +1517,7 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) { } -void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) { +void Builtins::Generate_CommonArrayConstructCode(MacroAssembler* masm) { // ----------- S t a t e ------------- // -- rax : argc // -- rdi : constructor @@ -1541,53 +1536,21 @@ void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) { __ Check(not_smi, "Unexpected initial map for Array function"); __ CmpObjectType(rcx, MAP_TYPE, rcx); __ Check(equal, "Unexpected initial map for Array function"); - - if (FLAG_optimize_constructed_arrays) { - // We should either have undefined in ebx or a valid jsglobalpropertycell - Label okay_here; - Handle<Object> undefined_sentinel( - masm->isolate()->factory()->undefined_value()); - Handle<Map> global_property_cell_map( - masm->isolate()->heap()->global_property_cell_map()); - __ Cmp(rbx, undefined_sentinel); - __ j(equal, &okay_here); - __ Cmp(FieldOperand(rbx, 0), global_property_cell_map); - __ Assert(equal, "Expected property cell in register rbx"); - __ bind(&okay_here); - } } - if (FLAG_optimize_constructed_arrays) { - Label not_zero_case, not_one_case; - __ testq(rax, rax); - __ j(not_zero, ¬_zero_case); - ArrayNoArgumentConstructorStub no_argument_stub; - __ TailCallStub(&no_argument_stub); - - __ bind(¬_zero_case); - __ cmpq(rax, Immediate(1)); - __ j(greater, ¬_one_case); - ArraySingleArgumentConstructorStub single_argument_stub; - __ TailCallStub(&single_argument_stub); - - __ bind(¬_one_case); - ArrayNArgumentsConstructorStub n_argument_stub; - __ TailCallStub(&n_argument_stub); - } else { - Label generic_constructor; - // Run the native code for the Array function called as constructor. - ArrayNativeCode(masm, &generic_constructor); - - // Jump to the generic construct code in case the specialized code cannot - // handle the construction. - __ bind(&generic_constructor); - Handle<Code> generic_construct_stub = - masm->isolate()->builtins()->JSConstructStubGeneric(); - __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET); - } + Label generic_constructor; + // Run the native code for the Array function called as constructor. + ArrayNativeCode(masm, &generic_constructor); + // Jump to the generic construct code in case the specialized code cannot + // handle the construction. 
+ __ bind(&generic_constructor); + Handle<Code> generic_construct_stub = + masm->isolate()->builtins()->JSConstructStubGeneric(); + __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET); } + void Builtins::Generate_StringConstructCode(MacroAssembler* masm) { // ----------- S t a t e ------------- // -- rax : number of arguments diff --git a/deps/v8/src/x64/code-stubs-x64.cc b/deps/v8/src/x64/code-stubs-x64.cc index 3958cf0324..3a9a0234e2 100644 --- a/deps/v8/src/x64/code-stubs-x64.cc +++ b/deps/v8/src/x64/code-stubs-x64.cc @@ -96,16 +96,20 @@ void TransitionElementsKindStub::InitializeInterfaceDescriptor( } -static void InitializeArrayConstructorDescriptor(Isolate* isolate, - CodeStubInterfaceDescriptor* descriptor) { +static void InitializeArrayConstructorDescriptor( + Isolate* isolate, + CodeStubInterfaceDescriptor* descriptor, + int constant_stack_parameter_count) { // register state - // rdi -- constructor function + // rax -- number of arguments // rbx -- type info cell with elements kind - // rax -- number of arguments to the constructor function - static Register registers[] = { rdi, rbx }; - descriptor->register_param_count_ = 2; - // stack param count needs (constructor pointer, and single argument) - descriptor->stack_parameter_count_ = &rax; + static Register registers[] = { rbx }; + descriptor->register_param_count_ = 1; + if (constant_stack_parameter_count != 0) { + // The stack parameter count (in rax) covers the constructor pointer and + // the single argument. + descriptor->stack_parameter_count_ = &rax; + } + descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count; descriptor->register_params_ = registers; descriptor->function_mode_ = JS_FUNCTION_STUB_MODE; descriptor->deoptimization_handler_ = @@ -116,26 +120,64 @@ static void InitializeArrayConstructorDescriptor(Isolate* isolate, void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor( Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { - InitializeArrayConstructorDescriptor(isolate, descriptor); + InitializeArrayConstructorDescriptor(isolate, descriptor, 0); } void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor( Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { - InitializeArrayConstructorDescriptor(isolate, descriptor); + InitializeArrayConstructorDescriptor(isolate, descriptor, 1); } void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor( Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) { - InitializeArrayConstructorDescriptor(isolate, descriptor); + InitializeArrayConstructorDescriptor(isolate, descriptor, -1); +} + + +void CompareNilICStub::InitializeInterfaceDescriptor( + Isolate* isolate, + CodeStubInterfaceDescriptor* descriptor) { + static Register registers[] = { rax }; + descriptor->register_param_count_ = 1; + descriptor->register_params_ = registers; + descriptor->deoptimization_handler_ = + FUNCTION_ADDR(CompareNilIC_Miss); + descriptor->miss_handler_ = + ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate); } #define __ ACCESS_MASM(masm) + +void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) { + // Update the static counter each time a new code stub is generated. + Isolate* isolate = masm->isolate(); + isolate->counters()->code_stubs()->Increment(); + + CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor(isolate); + int param_count = descriptor->register_param_count_; + { + // Call the runtime system in a fresh internal frame.
+ FrameScope scope(masm, StackFrame::INTERNAL); + ASSERT(descriptor->register_param_count_ == 0 || + rax.is(descriptor->register_params_[param_count - 1])); + // Push arguments + for (int i = 0; i < param_count; ++i) { + __ push(descriptor->register_params_[i]); + } + ExternalReference miss = descriptor->miss_handler_; + __ CallExternalReference(miss, descriptor->register_param_count_); + } + + __ Ret(); +} + + void ToNumberStub::Generate(MacroAssembler* masm) { // The ToNumber stub takes one argument in rax. Label check_heap_number, call_builtin; @@ -500,11 +542,8 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) { __ PushCallerSaved(save_doubles_); const int argument_count = 1; __ PrepareCallCFunction(argument_count); -#ifdef _WIN64 - __ LoadAddress(rcx, ExternalReference::isolate_address()); -#else - __ LoadAddress(rdi, ExternalReference::isolate_address()); -#endif + __ LoadAddress(arg_reg_1, + ExternalReference::isolate_address(masm->isolate())); AllowExternalCallThatCantCauseGC scope(masm); __ CallCFunction( @@ -1974,12 +2013,7 @@ void FloatingPointHelper::NumbersToSmis(MacroAssembler* masm, void MathPowStub::Generate(MacroAssembler* masm) { - // Choose register conforming to calling convention (when bailing out). -#ifdef _WIN64 const Register exponent = rdx; -#else - const Register exponent = rdi; -#endif const Register base = rax; const Register scratch = rcx; const XMMRegister double_result = xmm3; @@ -2954,9 +2988,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { __ EnterApiExitFrame(argument_slots_on_stack); // Argument 9: Pass current isolate address. - // __ movq(Operand(rsp, (argument_slots_on_stack - 1) * kPointerSize), - // Immediate(ExternalReference::isolate_address())); - __ LoadAddress(kScratchRegister, ExternalReference::isolate_address()); + __ LoadAddress(kScratchRegister, + ExternalReference::isolate_address(masm->isolate())); __ movq(Operand(rsp, (argument_slots_on_stack - 1) * kPointerSize), kScratchRegister); @@ -2989,20 +3022,6 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { __ movq(Operand(rsp, (argument_slots_on_stack - 5) * kPointerSize), r8); #endif - // First four arguments are passed in registers on both Linux and Windows. -#ifdef _WIN64 - Register arg4 = r9; - Register arg3 = r8; - Register arg2 = rdx; - Register arg1 = rcx; -#else - Register arg4 = rcx; - Register arg3 = rdx; - Register arg2 = rsi; - Register arg1 = rdi; -#endif - - // Keep track on aliasing between argX defined above and the registers used. // rdi: subject string // rbx: previous index // rcx: encoding of subject string (1 if ASCII, 0 if two_byte); // r14: slice offset; // r15: original subject string // Argument 2: Previous index. - __ movq(arg2, rbx); + __ movq(arg_reg_2, rbx); // Argument 4: End of string data // Argument 3: Start of string data // Prepare start and end index of the input. // Load the length from the original sliced string if that is the case. __ addq(rbx, r14); - __ SmiToInteger32(arg3, FieldOperand(r15, String::kLengthOffset)); - __ addq(r14, arg3); // Using arg3 as scratch. + __ SmiToInteger32(arg_reg_3, FieldOperand(r15, String::kLengthOffset)); + __ addq(r14, arg_reg_3); // Using arg3 as scratch. // rbx: start index of the input // r14: end index of the input // r15: original subject string __ testb(rcx, rcx); // Last use of rcx as encoding of subject string.
__ j(zero, &setup_two_byte, Label::kNear); - __ lea(arg4, FieldOperand(rdi, r14, times_1, SeqOneByteString::kHeaderSize)); - __ lea(arg3, FieldOperand(rdi, rbx, times_1, SeqOneByteString::kHeaderSize)); + __ lea(arg_reg_4, + FieldOperand(rdi, r14, times_1, SeqOneByteString::kHeaderSize)); + __ lea(arg_reg_3, + FieldOperand(rdi, rbx, times_1, SeqOneByteString::kHeaderSize)); __ jmp(&setup_rest, Label::kNear); __ bind(&setup_two_byte); - __ lea(arg4, FieldOperand(rdi, r14, times_2, SeqTwoByteString::kHeaderSize)); - __ lea(arg3, FieldOperand(rdi, rbx, times_2, SeqTwoByteString::kHeaderSize)); + __ lea(arg_reg_4, + FieldOperand(rdi, r14, times_2, SeqTwoByteString::kHeaderSize)); + __ lea(arg_reg_3, + FieldOperand(rdi, rbx, times_2, SeqTwoByteString::kHeaderSize)); __ bind(&setup_rest); // Argument 1: Original subject string. @@ -3040,7 +3063,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { // use rbp, which points exactly to one pointer size below the previous rsp. // (Because creating a new stack frame pushes the previous rbp onto the stack // and thereby moves up rsp by one kPointerSize.) - __ movq(arg1, r15); + __ movq(arg_reg_1, r15); // Locate the code entry and call it. __ addq(r11, Immediate(Code::kHeaderSize - kHeapObjectTag)); @@ -3796,7 +3819,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) { TypeFeedbackCells::MonomorphicArraySentinel(isolate, LAST_FAST_ELEMENTS_KIND); __ Cmp(rcx, terminal_kind_sentinel); - __ j(not_equal, &miss); + __ j(above, &miss); // Make sure the function is the Array() function __ LoadArrayFunction(rcx); __ cmpq(rdi, rcx); @@ -4008,6 +4031,9 @@ void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) { StubFailureTrampolineStub::GenerateAheadOfTime(isolate); // It is important that the store buffer overflow stubs are generated first. RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate); + if (FLAG_optimize_constructed_arrays) { + ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate); + } } @@ -4065,11 +4091,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm, // PerformGC. No need to use PrepareCallCFunction/CallCFunction here as the // stack is known to be aligned. This function takes one argument which is // passed in register. -#ifdef _WIN64 - __ movq(rcx, rax); -#else // _WIN64 - __ movq(rdi, rax); -#endif + __ movq(arg_reg_1, rax); __ movq(kScratchRegister, ExternalReference::perform_gc_function(masm->isolate())); __ call(kScratchRegister); @@ -4092,21 +4114,21 @@ void CEntryStub::GenerateCore(MacroAssembler* masm, // Pass a pointer to the Arguments object as the first argument. // Return result in single register (rax). __ lea(rcx, StackSpaceOperand(0)); - __ LoadAddress(rdx, ExternalReference::isolate_address()); + __ LoadAddress(rdx, ExternalReference::isolate_address(masm->isolate())); } else { ASSERT_EQ(2, result_size_); // Pass a pointer to the result location as the first argument. __ lea(rcx, StackSpaceOperand(2)); // Pass a pointer to the Arguments object as the second argument. __ lea(rdx, StackSpaceOperand(0)); - __ LoadAddress(r8, ExternalReference::isolate_address()); + __ LoadAddress(r8, ExternalReference::isolate_address(masm->isolate())); } #else // _WIN64 // GCC passes arguments in rdi, rsi, rdx, rcx, r8, r9. __ movq(rdi, r14); // argc. __ movq(rsi, r15); // argv. - __ movq(rdx, ExternalReference::isolate_address()); + __ movq(rdx, ExternalReference::isolate_address(masm->isolate())); #endif __ call(rbx); // Result is in rax - do not destroy this register! 
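The hunks above and below keep replacing per-platform #ifdef _WIN64 register picks with the shared arg_reg_1..arg_reg_4 names introduced in assembler-x64.h, so each call site names the C-argument slot rather than a concrete register. A minimal standalone sketch of that idea follows; the Register struct and the kCArgRegN names here are illustrative stand-ins, not V8's real definitions:

#include <cstdio>

// Hypothetical stand-in for an assembler register descriptor.
struct Register { const char* name; };

#ifdef _WIN64
// Windows x64 passes the first four integer arguments in rcx, rdx, r8, r9.
static const Register kCArgReg1 = { "rcx" };
static const Register kCArgReg2 = { "rdx" };
static const Register kCArgReg3 = { "r8" };
static const Register kCArgReg4 = { "r9" };
#else
// The System V AMD64 ABI uses rdi, rsi, rdx, rcx for the first four.
static const Register kCArgReg1 = { "rdi" };
static const Register kCArgReg2 = { "rsi" };
static const Register kCArgReg3 = { "rdx" };
static const Register kCArgReg4 = { "rcx" };
#endif

int main() {
  // A call site asks for "the first C argument" without any #ifdef;
  // the platform choice was made once, at the definitions above.
  std::printf("arg 1 -> %s, arg 3 -> %s\n", kCArgReg1.name, kCArgReg3.name);
  return 0;
}

Call sites such as __ movq(arg_reg_1, rax) then assemble correctly under both calling conventions, which is exactly what the CEntryStub, RecordWriteStub, and deoptimizer hunks rely on.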
@@ -4891,16 +4913,16 @@ void StringAddStub::Generate(MacroAssembler* masm) { __ ret(2 * kPointerSize); __ bind(&non_ascii); // At least one of the strings is two-byte. Check whether it happens - // to contain only ASCII characters. + // to contain only one-byte characters. // rcx: first instance type AND second instance type. // r8: first instance type. // r9: second instance type. - __ testb(rcx, Immediate(kAsciiDataHintMask)); + __ testb(rcx, Immediate(kOneByteDataHintMask)); __ j(not_zero, &ascii_data); __ xor_(r8, r9); - STATIC_ASSERT(kOneByteStringTag != 0 && kAsciiDataHintTag != 0); - __ andb(r8, Immediate(kOneByteStringTag | kAsciiDataHintTag)); - __ cmpb(r8, Immediate(kOneByteStringTag | kAsciiDataHintTag)); + STATIC_ASSERT(kOneByteStringTag != 0 && kOneByteDataHintTag != 0); + __ andb(r8, Immediate(kOneByteStringTag | kOneByteDataHintTag)); + __ cmpb(r8, Immediate(kOneByteStringTag | kOneByteDataHintTag)); __ j(equal, &ascii_data); // Allocate a two byte cons string. __ AllocateTwoByteConsString(rcx, rdi, no_reg, &call_runtime); @@ -6457,24 +6479,16 @@ void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) { void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) { regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_); -#ifdef _WIN64 - Register arg3 = r8; - Register arg2 = rdx; - Register arg1 = rcx; -#else - Register arg3 = rdx; - Register arg2 = rsi; - Register arg1 = rdi; -#endif Register address = - arg1.is(regs_.address()) ? kScratchRegister : regs_.address(); + arg_reg_1.is(regs_.address()) ? kScratchRegister : regs_.address(); ASSERT(!address.is(regs_.object())); - ASSERT(!address.is(arg1)); + ASSERT(!address.is(arg_reg_1)); __ Move(address, regs_.address()); - __ Move(arg1, regs_.object()); + __ Move(arg_reg_1, regs_.object()); // TODO(gc) Can we just set address arg2 in the beginning? - __ Move(arg2, address); - __ LoadAddress(arg3, ExternalReference::isolate_address()); + __ Move(arg_reg_2, address); + __ LoadAddress(arg_reg_3, + ExternalReference::isolate_address(masm->isolate())); int argument_count = 3; AllowExternalCallThatCantCauseGC scope(masm); @@ -6745,6 +6759,198 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) { __ Ret(); } + +template<class T> +static void CreateArrayDispatch(MacroAssembler* masm) { + int last_index = GetSequenceIndexFromFastElementsKind( + TERMINAL_FAST_ELEMENTS_KIND); + for (int i = 0; i <= last_index; ++i) { + Label next; + ElementsKind kind = GetFastElementsKindFromSequenceIndex(i); + __ cmpl(rdx, Immediate(kind)); + __ j(not_equal, &next); + T stub(kind); + __ TailCallStub(&stub); + __ bind(&next); + } + + // If we reached this point there is a problem. + __ Abort("Unexpected ElementsKind in array constructor"); +} + + +static void CreateArrayDispatchOneArgument(MacroAssembler* masm) { + // rbx - type info cell + // rdx - kind + // rax - number of arguments + // rdi - constructor? + // rsp[0] - return address + // rsp[8] - last argument + ASSERT(FAST_SMI_ELEMENTS == 0); + ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1); + ASSERT(FAST_ELEMENTS == 2); + ASSERT(FAST_HOLEY_ELEMENTS == 3); + ASSERT(FAST_DOUBLE_ELEMENTS == 4); + ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5); + + Handle<Object> undefined_sentinel( + masm->isolate()->heap()->undefined_value(), + masm->isolate()); + + // Is the low bit set? If so, we are holey and that is good.
+ __ testb(rdx, Immediate(1)); + Label normal_sequence; + __ j(not_zero, &normal_sequence); + + // Look at the first argument + __ movq(rcx, Operand(rsp, kPointerSize)); + __ testq(rcx, rcx); + __ j(zero, &normal_sequence); + + // We are going to create a holey array, but our kind is non-holey. + // Fix kind and retry + __ incl(rdx); + __ Cmp(rbx, undefined_sentinel); + __ j(equal, &normal_sequence); + + // Save the resulting elements kind in type info + __ Integer32ToSmi(rdx, rdx); + __ movq(FieldOperand(rbx, kPointerSize), rdx); + __ SmiToInteger32(rdx, rdx); + + __ bind(&normal_sequence); + int last_index = GetSequenceIndexFromFastElementsKind( + TERMINAL_FAST_ELEMENTS_KIND); + for (int i = 0; i <= last_index; ++i) { + Label next; + ElementsKind kind = GetFastElementsKindFromSequenceIndex(i); + __ cmpl(rdx, Immediate(kind)); + __ j(not_equal, &next); + ArraySingleArgumentConstructorStub stub(kind); + __ TailCallStub(&stub); + __ bind(&next); + } + + // If we reached this point there is a problem. + __ Abort("Unexpected ElementsKind in array constructor"); +} + + +template<class T> +static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) { + int to_index = GetSequenceIndexFromFastElementsKind( + TERMINAL_FAST_ELEMENTS_KIND); + for (int i = 0; i <= to_index; ++i) { + ElementsKind kind = GetFastElementsKindFromSequenceIndex(i); + T stub(kind); + stub.GetCode(isolate)->set_is_pregenerated(true); + } +} + + +void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) { + ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>( + isolate); + ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>( + isolate); + ArrayConstructorStubAheadOfTimeHelper<ArrayNArgumentsConstructorStub>( + isolate); +} + + + +void ArrayConstructorStub::Generate(MacroAssembler* masm) { + // ----------- S t a t e ------------- + // -- rax : argc + // -- rbx : type info cell + // -- rdi : constructor + // -- rsp[0] : return address + // -- rsp[8] : last argument + // ----------------------------------- + Handle<Object> undefined_sentinel( + masm->isolate()->heap()->undefined_value(), + masm->isolate()); + + if (FLAG_debug_code) { + // The array construct code is only set for the global and natives + // builtin Array functions which always have maps. + + // Initial map for the builtin Array function should be a map. + __ movq(rcx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset)); + // Will both indicate a NULL and a Smi. + STATIC_ASSERT(kSmiTag == 0); + Condition not_smi = NegateCondition(masm->CheckSmi(rcx)); + __ Check(not_smi, "Unexpected initial map for Array function"); + __ CmpObjectType(rcx, MAP_TYPE, rcx); + __ Check(equal, "Unexpected initial map for Array function"); + + // We should either have undefined in rbx or a valid jsglobalpropertycell + Label okay_here; + Handle<Map> global_property_cell_map( + masm->isolate()->heap()->global_property_cell_map()); + __ Cmp(rbx, undefined_sentinel); + __ j(equal, &okay_here); + __ Cmp(FieldOperand(rbx, 0), global_property_cell_map); + __ Assert(equal, "Expected property cell in register rbx"); + __ bind(&okay_here); + } + + if (FLAG_optimize_constructed_arrays) { + Label no_info, switch_ready; + // Get the elements kind and case on that. + __ Cmp(rbx, undefined_sentinel); + __ j(equal, &no_info); + __ movq(rdx, FieldOperand(rbx, kPointerSize)); + + // There is no info if the call site went megamorphic either + + // TODO(mvstanton): Really?
I thought if it was the array function that + // the cell wouldn't get stamped as megamorphic. + __ Cmp(rdx, TypeFeedbackCells::MegamorphicSentinel(masm->isolate())); + __ j(equal, &no_info); + __ SmiToInteger32(rdx, rdx); + __ jmp(&switch_ready); + __ bind(&no_info); + __ movq(rdx, Immediate(GetInitialFastElementsKind())); + __ bind(&switch_ready); + + if (argument_count_ == ANY) { + Label not_zero_case, not_one_case; + __ testq(rax, rax); + __ j(not_zero, ¬_zero_case); + CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm); + + __ bind(¬_zero_case); + __ cmpl(rax, Immediate(1)); + __ j(greater, ¬_one_case); + CreateArrayDispatchOneArgument(masm); + + __ bind(¬_one_case); + CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm); + } else if (argument_count_ == NONE) { + CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm); + } else if (argument_count_ == ONE) { + CreateArrayDispatchOneArgument(masm); + } else if (argument_count_ == MORE_THAN_ONE) { + CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm); + } else { + UNREACHABLE(); + } + } else { + Label generic_constructor; + // Run the native code for the Array function called as constructor. + ArrayNativeCode(masm, &generic_constructor); + + // Jump to the generic construct code in case the specialized code cannot + // handle the construction. + __ bind(&generic_constructor); + Handle<Code> generic_construct_stub = + masm->isolate()->builtins()->JSConstructStubGeneric(); + __ jmp(generic_construct_stub, RelocInfo::CODE_TARGET); + } +} + + #undef __ } } // namespace v8::internal diff --git a/deps/v8/src/x64/code-stubs-x64.h b/deps/v8/src/x64/code-stubs-x64.h index 72a3a95d1b..eafb960255 100644 --- a/deps/v8/src/x64/code-stubs-x64.h +++ b/deps/v8/src/x64/code-stubs-x64.h @@ -35,6 +35,8 @@ namespace v8 { namespace internal { +void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code); + // Compute a transcendental math function natively, or call the // TranscendentalCache runtime function. class TranscendentalCacheStub: public PlatformCodeStub { diff --git a/deps/v8/src/x64/deoptimizer-x64.cc b/deps/v8/src/x64/deoptimizer-x64.cc index bae97cd815..bec158b5b2 100644 --- a/deps/v8/src/x64/deoptimizer-x64.cc +++ b/deps/v8/src/x64/deoptimizer-x64.cc @@ -609,37 +609,22 @@ void Deoptimizer::EntryGenerator::Generate() { const int kSavedRegistersAreaSize = kNumberOfRegisters * kPointerSize + kDoubleRegsSize; - // When calling new_deoptimizer_function we need to pass the last argument - // on the stack on windows and in r8 on linux. The remaining arguments are - // all passed in registers (different ones on linux and windows though). - -#ifdef _WIN64 - Register arg4 = r9; - Register arg3 = r8; - Register arg2 = rdx; - Register arg1 = rcx; -#else - Register arg4 = rcx; - Register arg3 = rdx; - Register arg2 = rsi; - Register arg1 = rdi; -#endif - // We use this to keep the value of the fifth argument temporarily. // Unfortunately we can't store it directly in r8 (used for passing // this on linux), since it is another parameter passing register on windows. Register arg5 = r11; // Get the bailout id from the stack. - __ movq(arg3, Operand(rsp, kSavedRegistersAreaSize)); + __ movq(arg_reg_3, Operand(rsp, kSavedRegistersAreaSize)); // Get the address of the location in the code object if possible // and compute the fp-to-sp delta in register arg5. 
if (type() == EAGER) { - __ Set(arg4, 0); + __ Set(arg_reg_4, 0); __ lea(arg5, Operand(rsp, kSavedRegistersAreaSize + 1 * kPointerSize)); } else { - __ movq(arg4, Operand(rsp, kSavedRegistersAreaSize + 1 * kPointerSize)); + __ movq(arg_reg_4, + Operand(rsp, kSavedRegistersAreaSize + 1 * kPointerSize)); __ lea(arg5, Operand(rsp, kSavedRegistersAreaSize + 2 * kPointerSize)); } @@ -649,26 +634,23 @@ void Deoptimizer::EntryGenerator::Generate() { // Allocate a new deoptimizer object. __ PrepareCallCFunction(6); __ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset)); - __ movq(arg1, rax); - __ Set(arg2, type()); + __ movq(arg_reg_1, rax); + __ Set(arg_reg_2, type()); // Args 3 and 4 are already in the right registers. // On windows put the arguments on the stack (PrepareCallCFunction // has created space for this). On linux pass the arguments in r8 and r9. #ifdef _WIN64 __ movq(Operand(rsp, 4 * kPointerSize), arg5); - __ LoadAddress(arg5, ExternalReference::isolate_address()); + __ LoadAddress(arg5, ExternalReference::isolate_address(isolate())); __ movq(Operand(rsp, 5 * kPointerSize), arg5); #else __ movq(r8, arg5); - __ LoadAddress(r9, ExternalReference::isolate_address()); + __ LoadAddress(r9, ExternalReference::isolate_address(isolate())); #endif - Isolate* isolate = masm()->isolate(); - - { - AllowExternalCallThatCantCauseGC scope(masm()); - __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6); + { AllowExternalCallThatCantCauseGC scope(masm()); + __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate()), 6); } // Preserve deoptimizer object in register rax and get the input // frame descriptor pointer. @@ -716,12 +698,12 @@ void Deoptimizer::EntryGenerator::Generate() { // Compute the output frame in the deoptimizer. __ push(rax); __ PrepareCallCFunction(2); - __ movq(arg1, rax); - __ LoadAddress(arg2, ExternalReference::isolate_address()); + __ movq(arg_reg_1, rax); + __ LoadAddress(arg_reg_2, ExternalReference::isolate_address(isolate())); { AllowExternalCallThatCantCauseGC scope(masm()); __ CallCFunction( - ExternalReference::compute_output_frames_function(isolate), 2); + ExternalReference::compute_output_frames_function(isolate()), 2); } __ pop(rax); diff --git a/deps/v8/src/x64/full-codegen-x64.cc b/deps/v8/src/x64/full-codegen-x64.cc index 8ca173b30f..a20d468bae 100644 --- a/deps/v8/src/x64/full-codegen-x64.cc +++ b/deps/v8/src/x64/full-codegen-x64.cc @@ -1907,6 +1907,157 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) { } +void FullCodeGenerator::VisitYield(Yield* expr) { + Comment cmnt(masm_, "[ Yield"); + // Evaluate yielded value first; the initial iterator definition depends on + // this. It stays on the stack while we update the iterator. + VisitForStackValue(expr->expression()); + + switch (expr->yield_kind()) { + case Yield::INITIAL: + case Yield::SUSPEND: { + VisitForStackValue(expr->generator_object()); + __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1); + __ movq(context_register(), + Operand(rbp, StandardFrameConstants::kContextOffset)); + + Label resume; + __ CompareRoot(result_register(), Heap::kTheHoleValueRootIndex); + __ j(not_equal, &resume); + __ pop(result_register()); + if (expr->yield_kind() == Yield::SUSPEND) { + // TODO(wingo): Box into { value: VALUE, done: false }. 
+ } + EmitReturnSequence(); + + __ bind(&resume); + context()->Plug(result_register()); + break; + } + + case Yield::FINAL: { + VisitForAccumulatorValue(expr->generator_object()); + __ Move(FieldOperand(result_register(), + JSGeneratorObject::kContinuationOffset), + Smi::FromInt(JSGeneratorObject::kGeneratorClosed)); + __ pop(result_register()); + // TODO(wingo): Box into { value: VALUE, done: true }. + + // Exit all nested statements. + NestedStatement* current = nesting_stack_; + int stack_depth = 0; + int context_length = 0; + while (current != NULL) { + current = current->Exit(&stack_depth, &context_length); + } + __ Drop(stack_depth); + EmitReturnSequence(); + break; + } + + case Yield::DELEGATING: + UNIMPLEMENTED(); + } +} + + +void FullCodeGenerator::EmitGeneratorResume(Expression *generator, + Expression *value, + JSGeneratorObject::ResumeMode resume_mode) { + // The value stays in rax, and is ultimately read by the resumed generator, as + // if the CallRuntime(Runtime::kSuspendJSGeneratorObject) returned it. rbx + // will hold the generator object until the activation has been resumed. + VisitForStackValue(generator); + VisitForAccumulatorValue(value); + __ pop(rbx); + + // Check generator state. + Label wrong_state, done; + STATIC_ASSERT(JSGeneratorObject::kGeneratorExecuting <= 0); + STATIC_ASSERT(JSGeneratorObject::kGeneratorClosed <= 0); + __ SmiCompare(FieldOperand(rbx, JSGeneratorObject::kContinuationOffset), + Smi::FromInt(0)); + __ j(less_equal, &wrong_state); + + // Load suspended function and context. + __ movq(rsi, FieldOperand(rbx, JSGeneratorObject::kContextOffset)); + __ movq(rdi, FieldOperand(rbx, JSGeneratorObject::kFunctionOffset)); + + // Push receiver. + __ push(FieldOperand(rbx, JSGeneratorObject::kReceiverOffset)); + + // Push holes for arguments to generator function. + __ movq(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset)); + __ movsxlq(rdx, + FieldOperand(rdx, + SharedFunctionInfo::kFormalParameterCountOffset)); + __ LoadRoot(rcx, Heap::kTheHoleValueRootIndex); + Label push_argument_holes, push_frame; + __ bind(&push_argument_holes); + __ subq(rdx, Immediate(1)); + __ j(carry, &push_frame); + __ push(rcx); + __ jmp(&push_argument_holes); + + // Enter a new JavaScript frame, and initialize its slots as they were when + // the generator was suspended. + Label resume_frame; + __ bind(&push_frame); + __ call(&resume_frame); + __ jmp(&done); + __ bind(&resume_frame); + __ push(rbp); // Caller's frame pointer. + __ movq(rbp, rsp); + __ push(rsi); // Callee's context. + __ push(rdi); // Callee's JS Function. + + // Load the operand stack size. + __ movq(rdx, FieldOperand(rbx, JSGeneratorObject::kOperandStackOffset)); + __ movq(rdx, FieldOperand(rdx, FixedArray::kLengthOffset)); + __ SmiToInteger32(rdx, rdx); + + // If we are sending a value and there is no operand stack, we can jump back + // in directly. + if (resume_mode == JSGeneratorObject::SEND) { + Label slow_resume; + __ cmpq(rdx, Immediate(0)); + __ j(not_zero, &slow_resume); + __ movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset)); + __ SmiToInteger64(rcx, + FieldOperand(rbx, JSGeneratorObject::kContinuationOffset)); + __ addq(rdx, rcx); + __ Move(FieldOperand(rbx, JSGeneratorObject::kContinuationOffset), + Smi::FromInt(JSGeneratorObject::kGeneratorExecuting)); + __ jmp(rdx); + __ bind(&slow_resume); + } + + // Otherwise, we push holes for the operand stack and call the runtime to fix + // up the stack and the handlers. 
+ Label push_operand_holes, call_resume; + __ bind(&push_operand_holes); + __ subq(rdx, Immediate(1)); + __ j(carry, &call_resume); + __ push(rcx); + __ jmp(&push_operand_holes); + __ bind(&call_resume); + __ push(rbx); + __ push(result_register()); + __ Push(Smi::FromInt(resume_mode)); + __ CallRuntime(Runtime::kResumeJSGeneratorObject, 3); + // Not reached: the runtime call returns elsewhere. + __ Abort("Generator failed to resume."); + + // Throw error if we attempt to operate on a running generator. + __ bind(&wrong_state); + __ push(rbx); + __ CallRuntime(Runtime::kThrowGeneratorStateError, 1); + + __ bind(&done); + context()->Plug(result_register()); +} + + void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) { SetSourcePosition(prop->position()); Literal* key = prop->key()->AsLiteral(); @@ -2947,16 +3098,10 @@ void FullCodeGenerator::EmitRandomHeapNumber(CallRuntime* expr) { // Return a random uint32 number in rax. // The fresh HeapNumber is in rbx, which is callee-save on both x64 ABIs. __ PrepareCallCFunction(1); -#ifdef _WIN64 - __ movq(rcx, - ContextOperand(context_register(), Context::GLOBAL_OBJECT_INDEX)); - __ movq(rcx, FieldOperand(rcx, GlobalObject::kNativeContextOffset)); - -#else - __ movq(rdi, + __ movq(arg_reg_1, ContextOperand(context_register(), Context::GLOBAL_OBJECT_INDEX)); - __ movq(rdi, FieldOperand(rdi, GlobalObject::kNativeContextOffset)); -#endif + __ movq(arg_reg_1, + FieldOperand(arg_reg_1, GlobalObject::kNativeContextOffset)); __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1); // Convert 32 random bits in rax to 0.(32 random bits) in a double @@ -3054,13 +3199,8 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) { } __ bind(&runtime); __ PrepareCallCFunction(2); -#ifdef _WIN64 - __ movq(rcx, object); - __ movq(rdx, index, RelocInfo::NONE64); -#else - __ movq(rdi, object); - __ movq(rsi, index, RelocInfo::NONE64); -#endif + __ movq(arg_reg_1, object); + __ movq(arg_reg_2, index, RelocInfo::NONE64); __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2); __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset)); __ jmp(&done); @@ -4381,24 +4521,20 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr, VisitForAccumulatorValue(sub_expr); PrepareForBailoutBeforeSplit(expr, true, if_true, if_false); - Heap::RootListIndex nil_value = nil == kNullValue ? - Heap::kNullValueRootIndex : - Heap::kUndefinedValueRootIndex; - __ CompareRoot(rax, nil_value); - if (expr->op() == Token::EQ_STRICT) { + EqualityKind kind = expr->op() == Token::EQ_STRICT + ? kStrictEquality : kNonStrictEquality; + if (kind == kStrictEquality) { + Heap::RootListIndex nil_value = nil == kNullValue ? + Heap::kNullValueRootIndex : + Heap::kUndefinedValueRootIndex; + __ CompareRoot(rax, nil_value); Split(equal, if_true, if_false, fall_through); } else { - Heap::RootListIndex other_nil_value = nil == kNullValue ? - Heap::kUndefinedValueRootIndex : - Heap::kNullValueRootIndex; - __ j(equal, if_true); - __ CompareRoot(rax, other_nil_value); - __ j(equal, if_true); - __ JumpIfSmi(rax, if_false); - // It can be an undetectable object. 
- __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset)); - __ testb(FieldOperand(rdx, Map::kBitFieldOffset), - Immediate(1 << Map::kIsUndetectable)); + Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(), + kNonStrictEquality, + nil); + CallIC(ic, RelocInfo::CODE_TARGET, expr->CompareOperationFeedbackId()); + __ testq(rax, rax); Split(not_zero, if_true, if_false, fall_through); } context()->Plug(if_true, if_false); diff --git a/deps/v8/src/x64/lithium-codegen-x64.cc b/deps/v8/src/x64/lithium-codegen-x64.cc index 3e8fd5c72f..fbb7c28979 100644 --- a/deps/v8/src/x64/lithium-codegen-x64.cc +++ b/deps/v8/src/x64/lithium-codegen-x64.cc @@ -261,38 +261,21 @@ bool LCodeGen::GenerateBody() { !is_aborted() && current_instruction_ < instructions_->length(); current_instruction_++) { LInstruction* instr = instructions_->at(current_instruction_); + + // Don't emit code for basic blocks with a replacement. if (instr->IsLabel()) { - LLabel* label = LLabel::cast(instr); - emit_instructions = !label->HasReplacement(); + emit_instructions = !LLabel::cast(instr)->HasReplacement(); } + if (!emit_instructions) continue; - if (emit_instructions) { - if (FLAG_code_comments) { - HValue* hydrogen = instr->hydrogen_value(); - if (hydrogen != NULL) { - if (hydrogen->IsChange()) { - HValue* changed_value = HChange::cast(hydrogen)->value(); - int use_id = 0; - const char* use_mnemo = "dead"; - if (hydrogen->UseCount() >= 1) { - HValue* use_value = hydrogen->uses().value(); - use_id = use_value->id(); - use_mnemo = use_value->Mnemonic(); - } - Comment(";;; @%d: %s. <of #%d %s for #%d %s>", - current_instruction_, instr->Mnemonic(), - changed_value->id(), changed_value->Mnemonic(), - use_id, use_mnemo); - } else { - Comment(";;; @%d: %s. <#%d>", current_instruction_, - instr->Mnemonic(), hydrogen->id()); - } - } else { - Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic()); - } - } - instr->CompileToNative(this); + if (FLAG_code_comments && instr->HasInterestingComment(this)) { + Comment(";;; <@%d,#%d> %s", + current_instruction_, + instr->hydrogen_value()->id(), + instr->Mnemonic()); } + + instr->CompileToNative(this); } EnsureSpaceForLazyDeopt(Deoptimizer::patch_size()); return !is_aborted(); @@ -302,6 +285,9 @@ bool LCodeGen::GenerateBody() { bool LCodeGen::GenerateJumpTable() { Label needs_frame_not_call; Label needs_frame_is_call; + if (jump_table_.length() > 0) { + Comment(";;; -------------------- Jump table --------------------"); + } for (int i = 0; i < jump_table_.length(); i++) { __ bind(&jump_table_[i].label); Address entry = jump_table_[i].address; @@ -368,11 +354,14 @@ bool LCodeGen::GenerateDeferredCode() { if (deferred_.length() > 0) { for (int i = 0; !is_aborted() && i < deferred_.length(); i++) { LDeferredCode* code = deferred_[i]; + Comment(";;; <@%d,#%d> " + "-------------------- Deferred %s --------------------", + code->instruction_index(), + code->instr()->hydrogen_value()->id(), + code->instr()->Mnemonic()); __ bind(code->entry()); if (NeedsDeferredFrame()) { - Comment(";;; Deferred build frame @%d: %s.", - code->instruction_index(), - code->instr()->Mnemonic()); + Comment(";;; Build frame"); ASSERT(!frame_is_built_); ASSERT(info()->IsStub()); frame_is_built_ = true; @@ -381,15 +370,11 @@ bool LCodeGen::GenerateDeferredCode() { __ push(Operand(rbp, StandardFrameConstants::kContextOffset)); __ Push(Smi::FromInt(StackFrame::STUB)); __ lea(rbp, Operand(rsp, 2 * kPointerSize)); + Comment(";;; Deferred code"); } - Comment(";;; Deferred code @%d: %s.", - 
code->instruction_index(), - code->instr()->Mnemonic()); code->Generate(); if (NeedsDeferredFrame()) { - Comment(";;; Deferred destroy frame @%d: %s.", - code->instruction_index(), - code->instr()->Mnemonic()); + Comment(";;; Destroy frame"); ASSERT(frame_is_built_); frame_is_built_ = false; __ movq(rsp, rbp); @@ -498,7 +483,7 @@ void LCodeGen::WriteTranslation(LEnvironment* environment, pushed_arguments_index, pushed_arguments_count); bool has_closure_id = !info()->closure().is_null() && - *info()->closure() != *environment->closure(); + !info()->closure().is_identical_to(environment->closure()); int closure_id = has_closure_id ? DefineDeoptimizationLiteral(environment->closure()) : Translation::kSelfLiteralId; @@ -815,10 +800,13 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) { Handle<FixedArray> literals = factory()->NewFixedArray(deoptimization_literals_.length(), TENURED); - for (int i = 0; i < deoptimization_literals_.length(); i++) { - literals->set(i, *deoptimization_literals_[i]); + { ALLOW_HANDLE_DEREF(isolate(), + "copying a ZoneList of handles into a FixedArray"); + for (int i = 0; i < deoptimization_literals_.length(); i++) { + literals->set(i, *deoptimization_literals_[i]); + } + data->SetLiteralArray(*literals); } - data->SetLiteralArray(*literals); data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt())); data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_)); @@ -925,10 +913,19 @@ void LCodeGen::RecordPosition(int position) { } +static const char* LabelType(LLabel* label) { + if (label->is_loop_header()) return " (loop header)"; + if (label->is_osr_entry()) return " (OSR entry)"; + return ""; +} + + void LCodeGen::DoLabel(LLabel* label) { - Comment(";;; -------------------- B%d%s --------------------", + Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------", + current_instruction_, + label->hydrogen_value()->id(), label->block_id(), - label->is_loop_header() ? 
" (loop header)" : ""); + LabelType(label)); __ bind(label->label()); current_block_ = label->block_id(); DoGap(label); @@ -1563,6 +1560,7 @@ void LCodeGen::DoConstantD(LConstantD* instr) { void LCodeGen::DoConstantT(LConstantT* instr) { Handle<Object> value = instr->value(); + ALLOW_HANDLE_DEREF(isolate(), "smi check"); if (value->IsSmi()) { __ Move(ToRegister(instr->result()), value); } else { @@ -1646,13 +1644,8 @@ void LCodeGen::DoDateField(LDateField* instr) { } __ bind(&runtime); __ PrepareCallCFunction(2); -#ifdef _WIN64 - __ movq(rcx, object); - __ movq(rdx, index, RelocInfo::NONE64); -#else - __ movq(rdi, object); - __ movq(rsi, index, RelocInfo::NONE64); -#endif + __ movq(arg_reg_1, object); + __ movq(arg_reg_2, index, RelocInfo::NONE64); __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2); __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset)); __ bind(&done); @@ -1822,17 +1815,16 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) { } -int LCodeGen::GetNextEmittedBlock(int block) { - for (int i = block + 1; i < graph()->blocks()->length(); ++i) { - LLabel* label = chunk_->GetLabel(i); - if (!label->HasReplacement()) return i; +int LCodeGen::GetNextEmittedBlock() const { + for (int i = current_block_ + 1; i < graph()->blocks()->length(); ++i) { + if (!chunk_->GetLabel(i)->HasReplacement()) return i; } return -1; } void LCodeGen::EmitBranch(int left_block, int right_block, Condition cc) { - int next_block = GetNextEmittedBlock(current_block_); + int next_block = GetNextEmittedBlock(); right_block = chunk_->LookupDestination(right_block); left_block = chunk_->LookupDestination(left_block); @@ -1962,10 +1954,8 @@ void LCodeGen::DoBranch(LBranch* instr) { void LCodeGen::EmitGoto(int block) { - block = chunk_->LookupDestination(block); - int next_block = GetNextEmittedBlock(current_block_); - if (block != next_block) { - __ jmp(chunk_->GetAssemblyLabel(block)); + if (!IsNextEmittedBlock(block)) { + __ jmp(chunk_->GetAssemblyLabel(chunk_->LookupDestination(block))); } } @@ -2561,6 +2551,8 @@ void LCodeGen::DoReturn(LReturn* instr) { rcx); } else { Register reg = ToRegister(instr->parameter_count()); + // The argument count parameter is a smi + __ SmiToInteger32(reg, reg); Register return_addr_reg = reg.is(rcx) ? rbx : rcx; __ pop(return_addr_reg); __ shl(reg, Immediate(kPointerSizeLog2)); @@ -2901,16 +2893,26 @@ void LCodeGen::DoLoadExternalArrayPointer( void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) { Register arguments = ToRegister(instr->arguments()); - Register length = ToRegister(instr->length()); Register result = ToRegister(instr->result()); - // There are two words between the frame pointer and the last argument. - // Subtracting from length accounts for one of them add one more. - if (instr->index()->IsRegister()) { - __ subl(length, ToRegister(instr->index())); + + if (instr->length()->IsConstantOperand() && + instr->index()->IsConstantOperand()) { + int const_index = ToInteger32(LConstantOperand::cast(instr->index())); + int const_length = ToInteger32(LConstantOperand::cast(instr->length())); + int index = (const_length - const_index) + 1; + __ movq(result, Operand(arguments, index * kPointerSize)); } else { - __ subl(length, ToOperand(instr->index())); + Register length = ToRegister(instr->length()); + // There are two words between the frame pointer and the last argument. + // Subtracting from length accounts for one of them add one more. 
+ if (instr->index()->IsRegister()) { + __ subl(length, ToRegister(instr->index())); + } else { + __ subl(length, ToOperand(instr->index())); + } + __ movq(result, + Operand(arguments, length, times_pointer_size, kPointerSize)); } - __ movq(result, Operand(arguments, length, times_pointer_size, kPointerSize)); } @@ -3312,12 +3314,15 @@ void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) { void LCodeGen::CallKnownFunction(Handle<JSFunction> function, + int formal_parameter_count, int arity, LInstruction* instr, CallKind call_kind, RDIState rdi_state) { - bool can_invoke_directly = !function->NeedsArgumentsAdaption() || - function->shared()->formal_parameter_count() == arity; + bool dont_adapt_arguments = + formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel; + bool can_invoke_directly = + dont_adapt_arguments || formal_parameter_count == arity; LPointerMap* pointers = instr->pointer_map(); RecordPosition(pointers->position()); @@ -3332,13 +3337,13 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function, // Set rax to arguments count if adaption is not needed. Assumes that rax // is available to write to at this point. - if (!function->NeedsArgumentsAdaption()) { + if (dont_adapt_arguments) { __ Set(rax, arity); } // Invoke function. __ SetCallKind(rcx, call_kind); - if (*function == *info()->closure()) { + if (function.is_identical_to(info()->closure())) { __ CallSelf(); } else { __ call(FieldOperand(rdi, JSFunction::kCodeEntryOffset)); @@ -3351,7 +3356,9 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function, SafepointGenerator generator( this, pointers, Safepoint::kLazyDeopt); ParameterCount count(arity); - __ InvokeFunction(function, count, CALL_FUNCTION, generator, call_kind); + ParameterCount expected(formal_parameter_count); + __ InvokeFunction( + function, expected, count, CALL_FUNCTION, generator, call_kind); } // Restore context. @@ -3361,7 +3368,8 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function, void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) { ASSERT(ToRegister(instr->result()).is(rax)); - CallKnownFunction(instr->function(), + CallKnownFunction(instr->hydrogen()->function(), + instr->hydrogen()->formal_parameter_count(), instr->arity(), instr, CALL_AS_METHOD, @@ -3635,12 +3643,7 @@ void LCodeGen::DoPower(LPower* instr) { // Having marked this as a call, we can use any registers. // Just make sure that the input/output registers are the expected ones. - // Choose register conforming to calling convention (when bailing out). 
-#ifdef _WIN64 Register exponent = rdx; -#else - Register exponent = rdi; -#endif ASSERT(!instr->right()->IsRegister() || ToRegister(instr->right()).is(exponent)); ASSERT(!instr->right()->IsDoubleRegister() || @@ -3801,7 +3804,8 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) { ASSERT(ToRegister(instr->function()).is(rdi)); ASSERT(instr->HasPointerMap()); - if (instr->known_function().is_null()) { + Handle<JSFunction> known_function = instr->hydrogen()->known_function(); + if (known_function.is_null()) { LPointerMap* pointers = instr->pointer_map(); RecordPosition(pointers->position()); SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt); @@ -3809,7 +3813,8 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) { __ InvokeFunction(rdi, count, CALL_FUNCTION, generator, CALL_AS_METHOD); __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset)); } else { - CallKnownFunction(instr->known_function(), + CallKnownFunction(known_function, + instr->hydrogen()->formal_parameter_count(), instr->arity(), instr, CALL_AS_METHOD, @@ -3868,7 +3873,8 @@ void LCodeGen::DoCallGlobal(LCallGlobal* instr) { void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) { ASSERT(ToRegister(instr->result()).is(rax)); - CallKnownFunction(instr->target(), + CallKnownFunction(instr->hydrogen()->target(), + instr->hydrogen()->formal_parameter_count(), instr->arity(), instr, CALL_AS_FUNCTION, @@ -3898,9 +3904,18 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) { __ Set(rax, instr->arity()); __ Move(rbx, instr->hydrogen()->property_cell()); - Handle<Code> array_construct_code = - isolate()->builtins()->ArrayConstructCode(); - CallCode(array_construct_code, RelocInfo::CONSTRUCT_CALL, instr); + Object* cell_value = instr->hydrogen()->property_cell()->value(); + ElementsKind kind = static_cast<ElementsKind>(Smi::cast(cell_value)->value()); + if (instr->arity() == 0) { + ArrayNoArgumentConstructorStub stub(kind); + CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr); + } else if (instr->arity() == 1) { + ArraySingleArgumentConstructorStub stub(kind); + CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr); + } else { + ArrayNArgumentsConstructorStub stub(kind); + CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr); + } } @@ -3918,7 +3933,6 @@ void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) { void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) { Register object = ToRegister(instr->object()); - Register value = ToRegister(instr->value()); int offset = instr->offset(); if (!instr->transition().is_null()) { @@ -3944,34 +3958,42 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) { HType type = instr->hydrogen()->value()->type(); SmiCheck check_needed = type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; - if (instr->is_in_object()) { - __ movq(FieldOperand(object, offset), value); - if (instr->hydrogen()->NeedsWriteBarrier()) { - Register temp = ToRegister(instr->temp()); - // Update the write barrier for the object for in-object properties. 
- __ RecordWriteField(object, - offset, - value, - temp, - kSaveFPRegs, - EMIT_REMEMBERED_SET, - check_needed); + + Register write_register = object; + if (!instr->is_in_object()) { + write_register = ToRegister(instr->temp()); + __ movq(write_register, FieldOperand(object, JSObject::kPropertiesOffset)); + } + + if (instr->value()->IsConstantOperand()) { + LConstantOperand* operand_value = LConstantOperand::cast(instr->value()); + if (IsInteger32Constant(operand_value)) { + // In lithium register preparation, we made sure that the constant integer + // operand fits into smi range. + Smi* smi_value = Smi::FromInt(ToInteger32(operand_value)); + __ Move(FieldOperand(write_register, offset), smi_value); + } else if (operand_value->IsRegister()) { + __ movq(FieldOperand(write_register, offset), + ToRegister(operand_value)); + } else { + Handle<Object> handle_value = ToHandle(operand_value); + __ Move(FieldOperand(write_register, offset), handle_value); } } else { - Register temp = ToRegister(instr->temp()); - __ movq(temp, FieldOperand(object, JSObject::kPropertiesOffset)); - __ movq(FieldOperand(temp, offset), value); - if (instr->hydrogen()->NeedsWriteBarrier()) { - // Update the write barrier for the properties array. - // object is used as a scratch register. - __ RecordWriteField(temp, - offset, - value, - object, - kSaveFPRegs, - EMIT_REMEMBERED_SET, - check_needed); - } + __ movq(FieldOperand(write_register, offset), ToRegister(instr->value())); + } + + if (instr->hydrogen()->NeedsWriteBarrier()) { + Register value = ToRegister(instr->value()); + Register temp = instr->is_in_object() ? ToRegister(instr->temp()) : object; + // Update the write barrier for the object for in-object properties. + __ RecordWriteField(write_register, + offset, + value, + temp, + kSaveFPRegs, + EMIT_REMEMBERED_SET, + check_needed); } } @@ -4137,7 +4159,6 @@ void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) { void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) { - Register value = ToRegister(instr->value()); Register elements = ToRegister(instr->elements()); LOperand* key = instr->key(); if (!key->IsConstantOperand()) { @@ -4162,8 +4183,22 @@ void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) { FAST_ELEMENTS, FixedArray::kHeaderSize - kHeapObjectTag, instr->additional_index()); + if (instr->value()->IsRegister()) { + __ movq(operand, ToRegister(instr->value())); + } else { + LConstantOperand* operand_value = LConstantOperand::cast(instr->value()); + if (IsInteger32Constant(operand_value)) { + Smi* smi_value = Smi::FromInt(ToInteger32(operand_value)); + __ Move(operand, smi_value); + } else { + Handle<Object> handle_value = ToHandle(operand_value); + __ Move(operand, handle_value); + } + } if (instr->hydrogen()->NeedsWriteBarrier()) { + ASSERT(instr->value()->IsRegister()); + Register value = ToRegister(instr->value()); ASSERT(!instr->key()->IsConstantOperand()); HType type = instr->hydrogen()->value()->type(); SmiCheck check_needed = @@ -4171,15 +4206,12 @@ void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) { // Compute address of modified element and store it into key register. 
Register key_reg(ToRegister(key)); __ lea(key_reg, operand); - __ movq(Operand(key_reg, 0), value); __ RecordWrite(elements, key_reg, value, kSaveFPRegs, EMIT_REMEMBERED_SET, check_needed); - } else { - __ movq(operand, value); } } @@ -4852,6 +4884,7 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) { void LCodeGen::DoCheckFunction(LCheckFunction* instr) { Register reg = ToRegister(instr->value()); Handle<JSFunction> target = instr->hydrogen()->target(); + ALLOW_HANDLE_DEREF(isolate(), "using raw address"); if (isolate()->heap()->InNewSpace(*target)) { Handle<JSGlobalPropertyCell> cell = isolate()->factory()->NewJSGlobalPropertyCell(target); @@ -4980,16 +5013,12 @@ void LCodeGen::DoAllocateObject(LAllocateObject* instr) { Register result = ToRegister(instr->result()); Register scratch = ToRegister(instr->temp()); Handle<JSFunction> constructor = instr->hydrogen()->constructor(); - Handle<Map> initial_map(constructor->initial_map()); + Handle<Map> initial_map = instr->hydrogen()->constructor_initial_map(); int instance_size = initial_map->instance_size(); ASSERT(initial_map->pre_allocated_property_fields() + initial_map->unused_property_fields() - initial_map->inobject_properties() == 0); - // Allocate memory for the object. The initial map might change when - // the constructor's prototype changes, but instance size and property - // counts remain unchanged (if slack tracking finished). - ASSERT(!constructor->shared()->IsInobjectSlackTrackingInProgress()); __ Allocate(instance_size, result, no_reg, scratch, deferred->entry(), TAG_OBJECT); @@ -5040,8 +5069,7 @@ void LCodeGen::DoAllocateObject(LAllocateObject* instr) { void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) { Register result = ToRegister(instr->result()); - Handle<JSFunction> constructor = instr->hydrogen()->constructor(); - Handle<Map> initial_map(constructor->initial_map()); + Handle<Map> initial_map = instr->hydrogen()->constructor_initial_map(); int instance_size = initial_map->instance_size(); // TODO(3095996): Get rid of this. For now, we need to make the @@ -5117,7 +5145,7 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) { void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) { - Handle<FixedArray> literals(instr->environment()->closure()->literals()); + Handle<FixedArray> literals = instr->hydrogen()->literals(); ElementsKind boilerplate_elements_kind = instr->hydrogen()->boilerplate_elements_kind(); AllocationSiteMode allocation_site_mode = @@ -5177,7 +5205,7 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) { void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) { - Handle<FixedArray> literals(instr->environment()->closure()->literals()); + Handle<FixedArray> literals = instr->hydrogen()->literals(); Handle<FixedArray> constant_properties = instr->hydrogen()->constant_properties(); @@ -5190,7 +5218,7 @@ void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) { // Set up the parameters to the stub/runtime call and pick the right // runtime function or stub to call. 
- int properties_count = constant_properties->length() / 2; + int properties_count = instr->hydrogen()->constant_properties_length() / 2; if (instr->hydrogen()->depth() > 1) { __ PushHeapObject(literals); __ Push(Smi::FromInt(instr->hydrogen()->literal_index())); @@ -5275,19 +5303,17 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) { void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) { // Use the fast case closure allocation code that allocates in new // space for nested functions that don't need literals cloning. - Handle<SharedFunctionInfo> shared_info = instr->shared_info(); bool pretenure = instr->hydrogen()->pretenure(); - if (!pretenure && shared_info->num_literals() == 0) { - FastNewClosureStub stub(shared_info->language_mode(), - shared_info->is_generator()); - __ Push(shared_info); + if (!pretenure && instr->hydrogen()->has_no_literals()) { + FastNewClosureStub stub(instr->hydrogen()->language_mode(), + instr->hydrogen()->is_generator()); + __ Push(instr->hydrogen()->shared_info()); CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); } else { __ push(rsi); - __ Push(shared_info); - __ PushRoot(pretenure ? - Heap::kTrueValueRootIndex : - Heap::kFalseValueRootIndex); + __ Push(instr->hydrogen()->shared_info()); + __ PushRoot(pretenure ? Heap::kTrueValueRootIndex : + Heap::kFalseValueRootIndex); CallRuntime(Runtime::kNewClosure, 3, instr); } } @@ -5304,6 +5330,7 @@ void LCodeGen::EmitPushTaggedOperand(LOperand* operand) { ASSERT(!operand->IsDoubleRegister()); if (operand->IsConstantOperand()) { Handle<Object> object = ToHandle(LConstantOperand::cast(operand)); + ALLOW_HANDLE_DEREF(isolate(), "smi check"); if (object->IsSmi()) { __ Push(Handle<Smi>::cast(object)); } else { diff --git a/deps/v8/src/x64/lithium-codegen-x64.h b/deps/v8/src/x64/lithium-codegen-x64.h index adcc3e5b26..887c788954 100644 --- a/deps/v8/src/x64/lithium-codegen-x64.h +++ b/deps/v8/src/x64/lithium-codegen-x64.h @@ -80,10 +80,20 @@ class LCodeGen BASE_EMBEDDED { Heap* heap() const { return isolate()->heap(); } Zone* zone() const { return zone_; } + // TODO(svenpanne) Use this consistently. + int LookupDestination(int block_id) const { + return chunk()->LookupDestination(block_id); + } + + bool IsNextEmittedBlock(int block_id) const { + return LookupDestination(block_id) == GetNextEmittedBlock(); + } + bool NeedsEagerFrame() const { return GetStackSlotCount() > 0 || info()->is_non_deferred_calling() || - !info()->IsStub(); + !info()->IsStub() || + info()->requires_frame(); } bool NeedsDeferredFrame() const { return !NeedsEagerFrame() && info()->is_deferred_calling(); @@ -159,9 +169,9 @@ class LCodeGen BASE_EMBEDDED { LPlatformChunk* chunk() const { return chunk_; } Scope* scope() const { return scope_; } - HGraph* graph() const { return chunk_->graph(); } + HGraph* graph() const { return chunk()->graph(); } - int GetNextEmittedBlock(int block); + int GetNextEmittedBlock() const; void EmitClassOfTest(Label* if_true, Label* if_false, @@ -224,6 +234,7 @@ class LCodeGen BASE_EMBEDDED { // Generate a direct call to a known function. Expects the function // to be in rdi. 
void CallKnownFunction(Handle<JSFunction> function, + int formal_parameter_count, int arity, LInstruction* instr, CallKind call_kind, diff --git a/deps/v8/src/x64/lithium-x64.cc b/deps/v8/src/x64/lithium-x64.cc index 1d42455ce3..6707455efb 100644 --- a/deps/v8/src/x64/lithium-x64.cc +++ b/deps/v8/src/x64/lithium-x64.cc @@ -194,6 +194,11 @@ const char* LArithmeticT::Mnemonic() const { } +bool LGoto::HasInterestingComment(LCodeGen* gen) const { + return !gen->IsNextEmittedBlock(block_id()); +} + + void LGoto::PrintDataTo(StringStream* stream) { stream->Add("B%d", block_id()); } @@ -997,11 +1002,13 @@ LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) { LInstruction* LChunkBuilder::DoArgumentsLength(HArgumentsLength* length) { + info()->MarkAsRequiresFrame(); return DefineAsRegister(new(zone()) LArgumentsLength(Use(length->value()))); } LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) { + info()->MarkAsRequiresFrame(); return DefineAsRegister(new(zone()) LArgumentsElements); } @@ -1549,12 +1556,7 @@ LInstruction* LChunkBuilder::DoPower(HPower* instr) { ASSERT(instr->left()->representation().IsDouble()); LOperand* left = UseFixedDouble(instr->left(), xmm2); LOperand* right = exponent_type.IsDouble() ? - UseFixedDouble(instr->right(), xmm1) : -#ifdef _WIN64 - UseFixed(instr->right(), rdx); -#else - UseFixed(instr->right(), rdi); -#endif + UseFixedDouble(instr->right(), xmm1) : UseFixed(instr->right(), rdx); LPower* result = new(zone()) LPower(left, right); return MarkAsCall(DefineFixedDouble(result, xmm3), instr, CAN_DEOPTIMIZE_EAGERLY); @@ -1564,11 +1566,7 @@ LInstruction* LChunkBuilder::DoPower(HPower* instr) { LInstruction* LChunkBuilder::DoRandom(HRandom* instr) { ASSERT(instr->representation().IsDouble()); ASSERT(instr->global_object()->representation().IsTagged()); -#ifdef _WIN64 - LOperand* global_object = UseFixed(instr->global_object(), rcx); -#else - LOperand* global_object = UseFixed(instr->global_object(), rdi); -#endif + LOperand* global_object = UseFixed(instr->global_object(), arg_reg_1); LRandom* result = new(zone()) LRandom(global_object); return MarkAsCall(DefineFixedDouble(result, xmm1), instr); } @@ -2117,6 +2115,19 @@ LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) { } +// DoStoreKeyed and DoStoreNamedField have special considerations for allowing +// use of a constant instead of a register. +static bool StoreConstantValueAllowed(HValue* value) { + if (value->IsConstant()) { + HConstant* constant_value = HConstant::cast(value); + return constant_value->HasSmiValue() + || constant_value->HasDoubleValue() + || constant_value->ImmortalImmovable(); + } + return false; +} + + LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) { ElementsKind elements_kind = instr->elements_kind(); bool clobbers_key = instr->key()->representation().IsTagged(); @@ -2136,11 +2147,24 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) { } else { ASSERT(instr->value()->representation().IsTagged()); object = UseTempRegister(instr->elements()); - val = needs_write_barrier ? UseTempRegister(instr->value()) - : UseRegisterAtStart(instr->value()); - key = (clobbers_key || needs_write_barrier) - ? 
UseTempRegister(instr->key()) - : UseRegisterOrConstantAtStart(instr->key()); + if (needs_write_barrier) { + val = UseTempRegister(instr->value()); + key = UseTempRegister(instr->key()); + } else { + if (StoreConstantValueAllowed(instr->value())) { + val = UseRegisterOrConstantAtStart(instr->value()); + } else { + val = UseRegisterAtStart(instr->value()); + } + + if (clobbers_key) { + key = UseTempRegister(instr->key()); + } else if (StoreConstantValueAllowed(instr->key())) { + key = UseRegisterOrConstantAtStart(instr->key()); + } else { + key = UseRegisterAtStart(instr->key()); + } + } } return new(zone()) LStoreKeyed(object, key, val); @@ -2234,9 +2258,14 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) { : UseRegisterAtStart(instr->object()); } - LOperand* val = needs_write_barrier - ? UseTempRegister(instr->value()) - : UseRegister(instr->value()); + LOperand* val; + if (needs_write_barrier) { + val = UseTempRegister(instr->value()); + } else if (StoreConstantValueAllowed(instr->value())) { + val = UseRegisterOrConstant(instr->value()); + } else { + val = UseRegister(instr->value()); + } // We only need a scratch register if we have a write barrier or we // have a store into the properties array (not in-object-property). @@ -2346,7 +2375,8 @@ LInstruction* LChunkBuilder::DoParameter(HParameter* instr) { ASSERT(info()->IsStub()); CodeStubInterfaceDescriptor* descriptor = info()->code_stub()->GetInterfaceDescriptor(info()->isolate()); - Register reg = descriptor->register_params_[instr->index()]; + int index = static_cast<int>(instr->index()); + Register reg = DESCRIPTOR_GET_PARAMETER_REGISTER(descriptor, index); return DefineFixed(result, reg); } } @@ -2378,9 +2408,17 @@ LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) { LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) { + info()->MarkAsRequiresFrame(); LOperand* args = UseRegister(instr->arguments()); - LOperand* length = UseTempRegister(instr->length()); - LOperand* index = Use(instr->index()); + LOperand* length; + LOperand* index; + if (instr->length()->IsConstant() && instr->index()->IsConstant()) { + length = UseRegisterOrConstant(instr->length()); + index = UseOrConstant(instr->index()); + } else { + length = UseTempRegister(instr->length()); + index = Use(instr->index()); + } return DefineAsRegister(new(zone()) LAccessArgumentsAt(args, length, index)); } diff --git a/deps/v8/src/x64/lithium-x64.h b/deps/v8/src/x64/lithium-x64.h index 54f117c0d9..9154b04cf5 100644 --- a/deps/v8/src/x64/lithium-x64.h +++ b/deps/v8/src/x64/lithium-x64.h @@ -280,6 +280,8 @@ class LInstruction: public ZoneObject { LOperand* FirstInput() { return InputAt(0); } LOperand* Output() { return HasResult() ? 
result() : NULL; } + virtual bool HasInterestingComment(LCodeGen* gen) const { return true; } + #ifdef DEBUG void VerifyCall(); #endif @@ -381,6 +383,10 @@ class LInstructionGap: public LGap { public: explicit LInstructionGap(HBasicBlock* block) : LGap(block) { } + virtual bool HasInterestingComment(LCodeGen* gen) const { + return !IsRedundant(); + } + DECLARE_CONCRETE_INSTRUCTION(InstructionGap, "gap") }; @@ -389,6 +395,7 @@ class LGoto: public LTemplateInstruction<0, 0, 0> { public: explicit LGoto(int block_id) : block_id_(block_id) { } + virtual bool HasInterestingComment(LCodeGen* gen) const; DECLARE_CONCRETE_INSTRUCTION(Goto, "goto") virtual void PrintDataTo(StringStream* stream); virtual bool IsControl() const { return true; } @@ -436,12 +443,14 @@ class LLabel: public LGap { explicit LLabel(HBasicBlock* block) : LGap(block), replacement_(NULL) { } + virtual bool HasInterestingComment(LCodeGen* gen) const { return false; } DECLARE_CONCRETE_INSTRUCTION(Label, "label") virtual void PrintDataTo(StringStream* stream); int block_id() const { return block()->block_id(); } bool is_loop_header() const { return block()->IsLoopHeader(); } + bool is_osr_entry() const { return block()->is_osr_entry(); } Label* label() { return &label_; } LLabel* replacement() const { return replacement_; } void set_replacement(LLabel* label) { replacement_ = label; } @@ -455,6 +464,7 @@ class LLabel: public LGap { class LParameter: public LTemplateInstruction<1, 0, 0> { public: + virtual bool HasInterestingComment(LCodeGen* gen) const { return false; } DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter") }; @@ -472,6 +482,7 @@ class LCallStub: public LTemplateInstruction<1, 0, 0> { class LUnknownOSRValue: public LTemplateInstruction<1, 0, 0> { public: + virtual bool HasInterestingComment(LCodeGen* gen) const { return false; } DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value") }; @@ -1444,6 +1455,7 @@ class LReturn: public LTemplateInstruction<0, 2, 0> { LOperand* parameter_count() { return inputs_[1]; } DECLARE_CONCRETE_INSTRUCTION(Return, "return") + DECLARE_HYDROGEN_ACCESSOR(Return) }; @@ -1774,7 +1786,6 @@ class LInvokeFunction: public LTemplateInstruction<1, 1, 0> { virtual void PrintDataTo(StringStream* stream); int arity() const { return hydrogen()->argument_count() - 1; } - Handle<JSFunction> known_function() { return hydrogen()->known_function(); } }; @@ -1840,7 +1851,6 @@ class LCallKnownGlobal: public LTemplateInstruction<1, 0, 0> { virtual void PrintDataTo(StringStream* stream); - Handle<JSFunction> target() const { return hydrogen()->target(); } int arity() const { return hydrogen()->argument_count() - 1; } }; @@ -2392,8 +2402,6 @@ class LFunctionLiteral: public LTemplateInstruction<1, 0, 0> { public: DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral, "function-literal") DECLARE_HYDROGEN_ACCESSOR(FunctionLiteral) - - Handle<SharedFunctionInfo> shared_info() { return hydrogen()->shared_info(); } }; @@ -2471,6 +2479,7 @@ class LOsrEntry: public LTemplateInstruction<0, 0, 0> { public: LOsrEntry(); + virtual bool HasInterestingComment(LCodeGen* gen) const { return false; } DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry") LOperand** SpilledRegisterArray() { return register_spills_; } diff --git a/deps/v8/src/x64/macro-assembler-x64.cc b/deps/v8/src/x64/macro-assembler-x64.cc index 58659241a6..76491a331c 100644 --- a/deps/v8/src/x64/macro-assembler-x64.cc +++ b/deps/v8/src/x64/macro-assembler-x64.cc @@ -725,8 +725,9 @@ void MacroAssembler::CallApiFunctionAndReturn(Address function_address, if 
(FLAG_log_timer_events) { FrameScope frame(this, StackFrame::MANUAL); PushSafepointRegisters(); - PrepareCallCFunction(0); - CallCFunction(ExternalReference::log_enter_external_function(isolate()), 0); + PrepareCallCFunction(1); + LoadAddress(arg_reg_1, ExternalReference::isolate_address(isolate())); + CallCFunction(ExternalReference::log_enter_external_function(isolate()), 1); PopSafepointRegisters(); } @@ -738,8 +739,9 @@ void MacroAssembler::CallApiFunctionAndReturn(Address function_address, if (FLAG_log_timer_events) { FrameScope frame(this, StackFrame::MANUAL); PushSafepointRegisters(); - PrepareCallCFunction(0); - CallCFunction(ExternalReference::log_leave_external_function(isolate()), 0); + PrepareCallCFunction(1); + LoadAddress(arg_reg_1, ExternalReference::isolate_address(isolate())); + CallCFunction(ExternalReference::log_leave_external_function(isolate()), 1); PopSafepointRegisters(); } @@ -817,11 +819,7 @@ void MacroAssembler::CallApiFunctionAndReturn(Address function_address, bind(&delete_allocated_handles); movq(Operand(base_reg, kLimitOffset), prev_limit_reg); movq(prev_limit_reg, rax); -#ifdef _WIN64 - LoadAddress(rcx, ExternalReference::isolate_address()); -#else - LoadAddress(rdi, ExternalReference::isolate_address()); -#endif + LoadAddress(arg_reg_1, ExternalReference::isolate_address(isolate())); LoadAddress(rax, ExternalReference::delete_handle_scope_extensions(isolate())); call(rax); @@ -2286,7 +2284,7 @@ void MacroAssembler::Move(Register dst, Register src) { void MacroAssembler::Move(Register dst, Handle<Object> source) { - ASSERT(!source->IsFailure()); + ALLOW_HANDLE_DEREF(isolate(), "smi check"); if (source->IsSmi()) { Move(dst, Smi::cast(*source)); } else { @@ -2296,7 +2294,7 @@ void MacroAssembler::Move(Register dst, Handle<Object> source) { void MacroAssembler::Move(const Operand& dst, Handle<Object> source) { - ASSERT(!source->IsFailure()); + ALLOW_HANDLE_DEREF(isolate(), "smi check"); if (source->IsSmi()) { Move(dst, Smi::cast(*source)); } else { @@ -2307,6 +2305,7 @@ void MacroAssembler::Move(const Operand& dst, Handle<Object> source) { void MacroAssembler::Cmp(Register dst, Handle<Object> source) { + ALLOW_HANDLE_DEREF(isolate(), "smi check"); if (source->IsSmi()) { Cmp(dst, Smi::cast(*source)); } else { @@ -2317,6 +2316,7 @@ void MacroAssembler::Cmp(Register dst, Handle<Object> source) { void MacroAssembler::Cmp(const Operand& dst, Handle<Object> source) { + ALLOW_HANDLE_DEREF(isolate(), "smi check"); if (source->IsSmi()) { Cmp(dst, Smi::cast(*source)); } else { @@ -2328,6 +2328,7 @@ void MacroAssembler::Cmp(const Operand& dst, Handle<Object> source) { void MacroAssembler::Push(Handle<Object> source) { + ALLOW_HANDLE_DEREF(isolate(), "smi check"); if (source->IsSmi()) { Push(Smi::cast(*source)); } else { @@ -2340,6 +2341,7 @@ void MacroAssembler::Push(Handle<Object> source) { void MacroAssembler::LoadHeapObject(Register result, Handle<HeapObject> object) { + ALLOW_HANDLE_DEREF(isolate(), "using raw address"); if (isolate()->heap()->InNewSpace(*object)) { Handle<JSGlobalPropertyCell> cell = isolate()->factory()->NewJSGlobalPropertyCell(object); @@ -2352,6 +2354,7 @@ void MacroAssembler::LoadHeapObject(Register result, void MacroAssembler::PushHeapObject(Handle<HeapObject> object) { + ALLOW_HANDLE_DEREF(isolate(), "using raw address"); if (isolate()->heap()->InNewSpace(*object)) { Handle<JSGlobalPropertyCell> cell = isolate()->factory()->NewJSGlobalPropertyCell(object); @@ -2367,6 +2370,7 @@ void MacroAssembler::PushHeapObject(Handle<HeapObject> object) { 
void MacroAssembler::LoadGlobalCell(Register dst, Handle<JSGlobalPropertyCell> cell) { if (dst.is(rax)) { + ALLOW_HANDLE_DEREF(isolate(), "embedding raw address"); load_rax(cell.location(), RelocInfo::GLOBAL_PROPERTY_CELL); } else { movq(dst, cell, RelocInfo::GLOBAL_PROPERTY_CELL); @@ -3280,6 +3284,7 @@ void MacroAssembler::InvokeFunction(Register function, void MacroAssembler::InvokeFunction(Handle<JSFunction> function, + const ParameterCount& expected, const ParameterCount& actual, InvokeFlag flag, const CallWrapper& call_wrapper, @@ -3295,7 +3300,6 @@ void MacroAssembler::InvokeFunction(Handle<JSFunction> function, // allow recompilation to take effect without changing any of the // call sites. movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset)); - ParameterCount expected(function->shared()->formal_parameter_count()); InvokeCode(rdx, expected, actual, flag, call_wrapper, call_kind); } diff --git a/deps/v8/src/x64/macro-assembler-x64.h b/deps/v8/src/x64/macro-assembler-x64.h index 3381a5b7b6..f640beb3ee 100644 --- a/deps/v8/src/x64/macro-assembler-x64.h +++ b/deps/v8/src/x64/macro-assembler-x64.h @@ -349,6 +349,7 @@ class MacroAssembler: public Assembler { CallKind call_kind); void InvokeFunction(Handle<JSFunction> function, + const ParameterCount& expected, const ParameterCount& actual, InvokeFlag flag, const CallWrapper& call_wrapper, @@ -786,6 +787,7 @@ class MacroAssembler: public Assembler { void PushHeapObject(Handle<HeapObject> object); void LoadObject(Register result, Handle<Object> object) { + ALLOW_HANDLE_DEREF(isolate(), "heap object check"); if (object->IsHeapObject()) { LoadHeapObject(result, Handle<HeapObject>::cast(object)); } else { diff --git a/deps/v8/src/x64/regexp-macro-assembler-x64.cc b/deps/v8/src/x64/regexp-macro-assembler-x64.cc index 914241ecdc..012dcc8b62 100644 --- a/deps/v8/src/x64/regexp-macro-assembler-x64.cc +++ b/deps/v8/src/x64/regexp-macro-assembler-x64.cc @@ -437,7 +437,7 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase( // Set byte_length. __ movq(r8, rbx); // Isolate. - __ LoadAddress(r9, ExternalReference::isolate_address()); + __ LoadAddress(r9, ExternalReference::isolate_address(isolate())); #else // AMD64 calling convention // Compute byte_offset2 (current position = rsi+rdi). __ lea(rax, Operand(rsi, rdi, times_1, 0)); @@ -448,14 +448,14 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase( // Set byte_length. __ movq(rdx, rbx); // Isolate. - __ LoadAddress(rcx, ExternalReference::isolate_address()); + __ LoadAddress(rcx, ExternalReference::isolate_address(isolate())); #endif { // NOLINT: Can't find a way to open this scope without confusing the // linter. AllowExternalCallThatCantCauseGC scope(&masm_); ExternalReference compare = - ExternalReference::re_case_insensitive_compare_uc16(masm_.isolate()); + ExternalReference::re_case_insensitive_compare_uc16(isolate()); __ CallCFunction(compare, num_arguments); } @@ -810,7 +810,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) { Label stack_ok; ExternalReference stack_limit = - ExternalReference::address_of_stack_limit(masm_.isolate()); + ExternalReference::address_of_stack_limit(isolate()); __ movq(rcx, rsp); __ movq(kScratchRegister, stack_limit); __ subq(rcx, Operand(kScratchRegister, 0)); @@ -1055,15 +1055,15 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) { // Microsoft passes parameters in rcx, rdx, r8. // First argument, backtrack stackpointer, is already in rcx. 
__ lea(rdx, Operand(rbp, kStackHighEnd)); // Second argument - __ LoadAddress(r8, ExternalReference::isolate_address()); + __ LoadAddress(r8, ExternalReference::isolate_address(isolate())); #else // AMD64 ABI passes parameters in rdi, rsi, rdx. __ movq(rdi, backtrack_stackpointer()); // First argument. __ lea(rsi, Operand(rbp, kStackHighEnd)); // Second argument. - __ LoadAddress(rdx, ExternalReference::isolate_address()); + __ LoadAddress(rdx, ExternalReference::isolate_address(isolate())); #endif ExternalReference grow_stack = - ExternalReference::re_grow_stack(masm_.isolate()); + ExternalReference::re_grow_stack(isolate()); __ CallCFunction(grow_stack, num_arguments); // If return NULL, we have failed to grow the stack, and // must exit with a stack-overflow exception. @@ -1266,7 +1266,7 @@ void RegExpMacroAssemblerX64::CallCheckStackGuardState() { __ lea(rdi, Operand(rsp, -kPointerSize)); #endif ExternalReference stack_check = - ExternalReference::re_check_stack_guard_state(masm_.isolate()); + ExternalReference::re_check_stack_guard_state(isolate()); __ CallCFunction(stack_check, num_arguments); } @@ -1485,7 +1485,7 @@ void RegExpMacroAssemblerX64::CheckPreemption() { // Check for preemption. Label no_preempt; ExternalReference stack_limit = - ExternalReference::address_of_stack_limit(masm_.isolate()); + ExternalReference::address_of_stack_limit(isolate()); __ load_rax(stack_limit); __ cmpq(rsp, rax); __ j(above, &no_preempt); @@ -1499,7 +1499,7 @@ void RegExpMacroAssemblerX64::CheckPreemption() { void RegExpMacroAssemblerX64::CheckStackLimit() { Label no_stack_overflow; ExternalReference stack_limit = - ExternalReference::address_of_regexp_stack_limit(masm_.isolate()); + ExternalReference::address_of_regexp_stack_limit(isolate()); __ load_rax(stack_limit); __ cmpq(backtrack_stackpointer(), rax); __ j(above, &no_stack_overflow); diff --git a/deps/v8/src/x64/regexp-macro-assembler-x64.h b/deps/v8/src/x64/regexp-macro-assembler-x64.h index a082cf2df3..296c866019 100644 --- a/deps/v8/src/x64/regexp-macro-assembler-x64.h +++ b/deps/v8/src/x64/regexp-macro-assembler-x64.h @@ -272,6 +272,8 @@ class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler { // Increments the stack pointer (rcx) by a word size. inline void Drop(); + Isolate* isolate() const { return masm_.isolate(); } + MacroAssembler masm_; MacroAssembler::NoRootArrayScope no_root_array_scope_; diff --git a/deps/v8/src/x64/stub-cache-x64.cc b/deps/v8/src/x64/stub-cache-x64.cc index 41e5b88264..54d2a113a7 100644 --- a/deps/v8/src/x64/stub-cache-x64.cc +++ b/deps/v8/src/x64/stub-cache-x64.cc @@ -372,7 +372,7 @@ static void PushInterceptorArguments(MacroAssembler* masm, __ push(receiver); __ push(holder); __ push(FieldOperand(kScratchRegister, InterceptorInfo::kDataOffset)); - __ PushAddress(ExternalReference::isolate_address()); + __ PushAddress(ExternalReference::isolate_address(masm->isolate())); } @@ -468,7 +468,8 @@ static void GenerateFastApiCall(MacroAssembler* masm, } else { __ Move(Operand(rsp, 3 * kPointerSize), call_data); } - __ movq(kScratchRegister, ExternalReference::isolate_address()); + __ movq(kScratchRegister, + ExternalReference::isolate_address(masm->isolate())); __ movq(Operand(rsp, 4 * kPointerSize), kScratchRegister); // Prepare arguments. @@ -621,7 +622,9 @@ class CallInterceptorCompiler BASE_EMBEDDED { CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_) ? 
CALL_AS_FUNCTION : CALL_AS_METHOD; - __ InvokeFunction(optimization.constant_function(), arguments_, + Handle<JSFunction> fun = optimization.constant_function(); + ParameterCount expected(fun); + __ InvokeFunction(fun, expected, arguments_, JUMP_FUNCTION, NullCallWrapper(), call_kind); } @@ -1177,7 +1180,7 @@ void BaseLoadStubCompiler::GenerateLoadCallback( } else { __ Push(Handle<Object>(callback->data(), isolate())); } - __ PushAddress(ExternalReference::isolate_address()); // isolate + __ PushAddress(ExternalReference::isolate_address(isolate())); // isolate __ push(name()); // name // Save a pointer to where we pushed the arguments pointer. This will be // passed as the const ExecutableAccessorInfo& to the C++ callback. @@ -1991,8 +1994,9 @@ Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall( CallKind call_kind = CallICBase::Contextual::decode(extra_state_) ? CALL_AS_FUNCTION : CALL_AS_METHOD; - __ InvokeFunction(function, arguments(), JUMP_FUNCTION, - NullCallWrapper(), call_kind); + ParameterCount expected(function); + __ InvokeFunction(function, expected, arguments(), + JUMP_FUNCTION, NullCallWrapper(), call_kind); __ bind(&miss); // rcx: function name. @@ -2106,8 +2110,9 @@ Handle<Code> CallStubCompiler::CompileMathAbsCall( CallKind call_kind = CallICBase::Contextual::decode(extra_state_) ? CALL_AS_FUNCTION : CALL_AS_METHOD; - __ InvokeFunction(function, arguments(), JUMP_FUNCTION, - NullCallWrapper(), call_kind); + ParameterCount expected(function); + __ InvokeFunction(function, expected, arguments(), + JUMP_FUNCTION, NullCallWrapper(), call_kind); __ bind(&miss); // rcx: function name. @@ -2290,8 +2295,9 @@ void CallStubCompiler::CompileHandlerBackend(Handle<JSFunction> function) { CallKind call_kind = CallICBase::Contextual::decode(extra_state_) ? CALL_AS_FUNCTION : CALL_AS_METHOD; - __ InvokeFunction(function, arguments(), JUMP_FUNCTION, - NullCallWrapper(), call_kind); + ParameterCount expected(function); + __ InvokeFunction(function, expected, arguments(), + JUMP_FUNCTION, NullCallWrapper(), call_kind); } @@ -2503,8 +2509,9 @@ void StoreStubCompiler::GenerateStoreViaSetter( __ push(rdx); __ push(rax); ParameterCount actual(1); - __ InvokeFunction(setter, actual, CALL_FUNCTION, NullCallWrapper(), - CALL_AS_METHOD); + ParameterCount expected(setter); + __ InvokeFunction(setter, expected, actual, + CALL_FUNCTION, NullCallWrapper(), CALL_AS_METHOD); } else { // If we generate a global code snippet for deoptimization only, remember // the place to continue after deoptimization. @@ -2723,8 +2730,9 @@ void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm, // Call the JavaScript getter with the receiver on the stack. __ push(rax); ParameterCount actual(0); - __ InvokeFunction(getter, actual, CALL_FUNCTION, NullCallWrapper(), - CALL_AS_METHOD); + ParameterCount expected(getter); + __ InvokeFunction(getter, expected, actual, + CALL_FUNCTION, NullCallWrapper(), CALL_AS_METHOD); } else { // If we generate a global code snippet for deoptimization only, remember // the place to continue after deoptimization.
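
A recurring change in this patch replaces per-call-site #ifdef _WIN64 register choices with the arg_reg_* aliases; the removed deoptimizer lines above show the underlying mapping. A minimal standalone C++ sketch of that idiom (illustrative only: the Register struct and main() here are invented for the example, and V8's actual aliases live in its x64 assembler sources):

#include <cstdio>

// Model the two x64 C calling conventions and name the C-call argument
// registers once, the way the patch's arg_reg_* aliases do.
struct Register { const char* name; };

#ifdef _WIN64
// Win64 passes the first four integer/pointer arguments in rcx, rdx, r8, r9.
constexpr Register arg_reg_1{"rcx"}, arg_reg_2{"rdx"},
                   arg_reg_3{"r8"},  arg_reg_4{"r9"};
#else
// The System V AMD64 ABI (Linux, Mac) uses rdi, rsi, rdx, rcx, then r8, r9.
constexpr Register arg_reg_1{"rdi"}, arg_reg_2{"rsi"},
                   arg_reg_3{"rdx"}, arg_reg_4{"rcx"};
#endif

int main() {
  // A call site is now written once for both ABIs; e.g. the deoptimizer's
  // bailout id always travels in arg_reg_3, whichever register that is here.
  std::printf("bailout id register: %s\n", arg_reg_3.name);
  return 0;
}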